import time
from typing import List
import tushare as ts
import pandas as pd
from pandas import DataFrame, Series
import os
import csv
import random
from datetime import datetime, timedelta
from utils.config_reader import ConfigReader
from utils.log import logger
from utils.common import get_file_list
config_reader = ConfigReader('cfg/api.ini')
token = config_reader.read_config('tushare', 'token')
pro = ts.pro_api(token)
def get_symbols_tushare():
df_list = []
t_d = datetime.today()
print(t_d)
exchanges = ['DCE', 'SHFE', 'CZCE', 'CFFEX', 'INE']
fields = 'ts_code,symbol,exchange,name,fut_code,multiplier,trade_unit,per_unit,quote_unit,list_date,delist_date'
for e in exchanges:
df = pro.fut_basic(exchange=e, fut_type='1', fields=fields)
df['delist_date'] = pd.to_datetime(df['delist_date'])
# df = df.loc[df['delist_date'] > t_d]
df_list.append(df)
combined_df = pd.concat(df_list)
return combined_df
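def _example_active_contracts() -> DataFrame:
    # Hedged usage sketch (not part of the original script): fetch the combined
    # contract table and keep only contracts whose delist date lies in the
    # future, mirroring the filter commented out inside get_symbols_tushare().
    contracts = get_symbols_tushare()
    return contracts.loc[contracts['delist_date'] > datetime.today()]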
def get_dominant_name_tushare(df: DataFrame, path, start_date: str, end_date: str):
name_list = []
# https://tushare.pro/document/2?doc_id=134
e_d = {'CZCE': 'ZCE', 'SHFE': 'SHF', 'DCE': 'DCE', 'CFFEX': 'CFX', 'INE': 'INE'}
df['exchange'] = df['exchange'].map(e_d)
for e in df['exchange'].drop_duplicates():
os.makedirs(os.path.join(path, e), exist_ok=True)
temp_df = df.loc[df.exchange == e]
for f in temp_df['fut_code'].drop_duplicates():
dominant_symbol = f'{f}.{e}'
name = pro.fut_mapping(ts_code=dominant_symbol, start_date=start_date, end_date=end_date)
name = name.sort_values(by='trade_date', ascending=True)
name = name.set_index(name['trade_date'])
name.drop(columns=['trade_date'], inplace=True)
name['mapping_ts_code'] = tushare_domin_adj(name['mapping_ts_code'])
name_list.append(name)
names = pd.concat(name_list)
return names
# -*- coding: utf-8 -*-
"""
13 July 2020
Author: <NAME>
Dataset version update 03
Adding newly released datasets.
"""
import pandas as pd
# Adding the new datasets released in June 2020
df = pd.read_csv(r"filepath\Aggregate-API.csv", sep = ";")
df1 = pd.read_csv(r"filepath\API_Melaka_2019_cleaned.csv")
df2 = pd.read_csv(r"filepath\API_NS_2019_cleaned.csv")
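# Hedged, illustrative step (not in the original update script): the separately
# cleaned state-level frames could be stacked for joint analysis, assuming they
# were cleaned to a common column layout.
df_states = pd.concat([df1, df2], ignore_index=True)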
#
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from datetime import time
from os.path import abspath, dirname, join
from unittest import TestCase
import typing
import re
import functools
import itertools
import pathlib
from collections import abc
import pytest
import numpy as np
import pandas as pd
import pandas.testing as tm
from pandas import Timedelta, read_csv
from parameterized import parameterized
import pytz
from pytz import UTC
from toolz import concat
from exchange_calendars import get_calendar
from exchange_calendars.calendar_utils import (
ExchangeCalendarDispatcher,
_default_calendar_aliases,
_default_calendar_factories,
)
from exchange_calendars.errors import (
CalendarNameCollision,
InvalidCalendarName,
NoSessionsError,
)
from exchange_calendars.exchange_calendar import ExchangeCalendar, days_at_time
from .test_utils import T
class FakeCalendar(ExchangeCalendar):
name = "DMY"
tz = "Asia/Ulaanbaatar"
open_times = ((None, time(11, 13)),)
close_times = ((None, time(11, 49)),)
class CalendarRegistrationTestCase(TestCase):
def setup_method(self, method):
self.dummy_cal_type = FakeCalendar
self.dispatcher = ExchangeCalendarDispatcher({}, {}, {})
def teardown_method(self, method):
self.dispatcher.clear_calendars()
def test_register_calendar(self):
# Build a fake calendar
dummy_cal = self.dummy_cal_type()
# Try to register and retrieve the calendar
self.dispatcher.register_calendar("DMY", dummy_cal)
retr_cal = self.dispatcher.get_calendar("DMY")
self.assertEqual(dummy_cal, retr_cal)
# Try to register again, expecting a name collision
with self.assertRaises(CalendarNameCollision):
self.dispatcher.register_calendar("DMY", dummy_cal)
# Deregister the calendar and ensure that it is removed
self.dispatcher.deregister_calendar("DMY")
with self.assertRaises(InvalidCalendarName):
self.dispatcher.get_calendar("DMY")
def test_register_calendar_type(self):
self.dispatcher.register_calendar_type("DMY", self.dummy_cal_type)
retr_cal = self.dispatcher.get_calendar("DMY")
self.assertEqual(self.dummy_cal_type, type(retr_cal))
def test_both_places_are_checked(self):
dummy_cal = self.dummy_cal_type()
# if instance is registered, can't register type with same name
self.dispatcher.register_calendar("DMY", dummy_cal)
with self.assertRaises(CalendarNameCollision):
self.dispatcher.register_calendar_type("DMY", type(dummy_cal))
self.dispatcher.deregister_calendar("DMY")
# if type is registered, can't register instance with same name
self.dispatcher.register_calendar_type("DMY", type(dummy_cal))
with self.assertRaises(CalendarNameCollision):
self.dispatcher.register_calendar("DMY", dummy_cal)
def test_force_registration(self):
self.dispatcher.register_calendar("DMY", self.dummy_cal_type())
first_dummy = self.dispatcher.get_calendar("DMY")
# force-register a new instance
self.dispatcher.register_calendar("DMY", self.dummy_cal_type(), force=True)
second_dummy = self.dispatcher.get_calendar("DMY")
self.assertNotEqual(first_dummy, second_dummy)
class DefaultsTestCase(TestCase):
def test_default_calendars(self):
dispatcher = ExchangeCalendarDispatcher(
calendars={},
calendar_factories=_default_calendar_factories,
aliases=_default_calendar_aliases,
)
# These are ordered aliases first, so that we can deregister the
# canonical factories when we're done with them, and we'll be done with
# them after they've been used by all aliases and by canonical name.
for name in concat([_default_calendar_aliases, _default_calendar_factories]):
self.assertIsNotNone(
dispatcher.get_calendar(name), "get_calendar(%r) returned None" % name
)
dispatcher.deregister_calendar(name)
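def _example_days_at_time():
    # Hedged, illustrative helper (not part of the original suite): shows the
    # call pattern the parameterized cases below verify. days_at_time localizes
    # a wall-clock time on each session date to the exchange timezone, applies
    # an optional whole-day offset, and returns UTC timestamps.
    tz = pytz.timezone("America/New_York")
    days = pd.DatetimeIndex([pd.Timestamp("2016-07-19", tz=tz)])
    result = days_at_time(days, time(9, 31), tz, 0)[0]
    expected = pd.Timestamp("2016-07-19 9:31", tz=tz).tz_convert(UTC)
    return result == expected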
class DaysAtTimeTestCase(TestCase):
@parameterized.expand(
[
# NYSE standard day
(
"2016-07-19",
0,
time(9, 31),
pytz.timezone("America/New_York"),
"2016-07-19 9:31",
),
# CME standard day
(
"2016-07-19",
-1,
time(17, 1),
pytz.timezone("America/Chicago"),
"2016-07-18 17:01",
),
# CME day after DST start
(
"2004-04-05",
-1,
time(17, 1),
pytz.timezone("America/Chicago"),
"2004-04-04 17:01",
),
# ICE day after DST start
(
"1990-04-02",
-1,
time(19, 1),
pytz.timezone("America/Chicago"),
"1990-04-01 19:01",
),
]
)
def test_days_at_time(self, day, day_offset, time_offset, tz, expected):
days = pd.DatetimeIndex([pd.Timestamp(day, tz=tz)])
result = days_at_time(days, time_offset, tz, day_offset)[0]
expected = pd.Timestamp(expected, tz=tz).tz_convert(UTC)
self.assertEqual(result, expected)
class ExchangeCalendarTestBase(object):
# Override in subclasses.
answer_key_filename = None
calendar_class = None
# Affects test_start_bound. Should be set to earliest date for which
# calendar can be instantiated, or None if no start bound.
START_BOUND: pd.Timestamp | None = None
# Affects test_end_bound. Should be set to latest date for which
# calendar can be instantiated, or None if no end bound.
END_BOUND: pd.Timestamp | None = None
# Affects tests that care about the empty periods between sessions. Should
# be set to False for 24/7 calendars.
GAPS_BETWEEN_SESSIONS = True
# Affects tests that care about early closes. Should be set to False for
# calendars that don't have any early closes.
HAVE_EARLY_CLOSES = True
# Affects tests that care about late opens. Since most do not, defaulting
# to False.
HAVE_LATE_OPENS = False
# Affects test_for_breaks. True if one or more calendar sessions has a
# break.
HAVE_BREAKS = False
# Affects test_session_has_break.
SESSION_WITH_BREAK = None # None if no session has a break
SESSION_WITHOUT_BREAK = T("2011-06-15") # None if all sessions have breaks
# Affects test_sanity_check_session_lengths. Should be set to the largest
# number of hours that ever appear in a single session.
MAX_SESSION_HOURS = 0
# Affects test_minute_index_to_session_labels.
# Change these if the start/end dates of your test suite don't contain the
# defaults.
MINUTE_INDEX_TO_SESSION_LABELS_START = pd.Timestamp("2011-01-04", tz=UTC)
MINUTE_INDEX_TO_SESSION_LABELS_END = pd.Timestamp("2011-04-04", tz=UTC)
# Affects tests around daylight savings. If possible, should contain two
# dates that are not both in the same daylight savings regime.
DAYLIGHT_SAVINGS_DATES = ["2004-04-05", "2004-11-01"]
# Affects test_start_end. Change these if your calendar start/end
# dates between 2010-01-03 and 2010-01-10 don't match the defaults.
TEST_START_END_FIRST = pd.Timestamp("2010-01-03", tz=UTC)
TEST_START_END_LAST = pd.Timestamp("2010-01-10", tz=UTC)
TEST_START_END_EXPECTED_FIRST = pd.Timestamp("2010-01-04", tz=UTC)
TEST_START_END_EXPECTED_LAST = pd.Timestamp("2010-01-08", tz=UTC)
@staticmethod
def load_answer_key(filename):
"""
Load a CSV from tests/resources/{filename}.csv
"""
fullpath = join(
dirname(abspath(__file__)),
"./resources",
filename + ".csv",
)
return read_csv(
fullpath,
index_col=0,
# NOTE: Merely passing parse_dates=True doesn't cause pandas to set
# the dtype correctly, and passing all reasonable inputs to the
# dtype kwarg causes read_csv to barf.
parse_dates=[0, 1, 2],
date_parser=lambda x: pd.Timestamp(x, tz=UTC),
)
@classmethod
def setup_class(cls):
cls.answers = cls.load_answer_key(cls.answer_key_filename)
cls.start_date = cls.answers.index[0]
cls.end_date = cls.answers.index[-1]
cls.calendar = cls.calendar_class(cls.start_date, cls.end_date)
cls.one_minute = pd.Timedelta(1, "T")
cls.one_hour = pd.Timedelta(1, "H")
cls.one_day = pd.Timedelta(1, "D")
cls.today = pd.Timestamp.now(tz="UTC").floor("D")
@classmethod
def teardown_class(cls):
cls.calendar = None
cls.answers = None
def test_bound_start(self):
if self.START_BOUND is not None:
cal = self.calendar_class(self.START_BOUND, self.today)
self.assertIsInstance(cal, ExchangeCalendar)
start = self.START_BOUND - pd.DateOffset(days=1)
with pytest.raises(ValueError, match=re.escape(f"{start}")):
self.calendar_class(start, self.today)
else:
# verify no bound imposed
cal = self.calendar_class(pd.Timestamp("1902-01-01", tz="UTC"), self.today)
self.assertIsInstance(cal, ExchangeCalendar)
def test_bound_end(self):
if self.END_BOUND is not None:
cal = self.calendar_class(self.today, self.END_BOUND)
self.assertIsInstance(cal, ExchangeCalendar)
end = self.END_BOUND + pd.DateOffset(days=1)
with pytest.raises(ValueError, match=re.escape(f"{end}")):
self.calendar_class(self.today, end)
else:
# verify no bound imposed
cal = self.calendar_class(self.today, pd.Timestamp("2050-01-01", tz="UTC"))
self.assertIsInstance(cal, ExchangeCalendar)
def test_sanity_check_session_lengths(self):
# make sure that no session is longer than self.MAX_SESSION_HOURS hours
for session in self.calendar.all_sessions:
o, c = self.calendar.open_and_close_for_session(session)
delta = c - o
self.assertLessEqual(delta.seconds / 3600, self.MAX_SESSION_HOURS)
def test_calculated_against_csv(self):
tm.assert_index_equal(self.calendar.schedule.index, self.answers.index)
def test_adhoc_holidays_specification(self):
"""adhoc holidays should be tz-naive (#33, #39)."""
dti = pd.DatetimeIndex(self.calendar.adhoc_holidays)
assert dti.tz is None
def test_is_open_on_minute(self):
one_minute = pd.Timedelta(minutes=1)
m = self.calendar.is_open_on_minute
for market_minute in self.answers.market_open[1:]:
market_minute_utc = market_minute
# The exchange should be classified as open on its first minute
self.assertTrue(m(market_minute_utc, _parse=False))
if self.GAPS_BETWEEN_SESSIONS:
# Decrement minute by one, to minute where the market was not
# open
pre_market = market_minute_utc - one_minute
self.assertFalse(m(pre_market, _parse=False))
for market_minute in self.answers.market_close[:-1]:
close_minute_utc = market_minute
# should be open on its last minute
self.assertTrue(m(close_minute_utc, _parse=False))
if self.GAPS_BETWEEN_SESSIONS:
# increment minute by one minute, should be closed
post_market = close_minute_utc + one_minute
self.assertFalse(m(post_market, _parse=False))
def _verify_minute(
self,
calendar,
minute,
next_open_answer,
prev_open_answer,
next_close_answer,
prev_close_answer,
):
next_open = calendar.next_open(minute, _parse=False)
self.assertEqual(next_open, next_open_answer)
prev_open = self.calendar.previous_open(minute, _parse=False)
self.assertEqual(prev_open, prev_open_answer)
next_close = self.calendar.next_close(minute, _parse=False)
self.assertEqual(next_close, next_close_answer)
prev_close = self.calendar.previous_close(minute, _parse=False)
self.assertEqual(prev_close, prev_close_answer)
def test_next_prev_open_close(self):
# for each session, check:
# - the minute before the open (if gaps exist between sessions)
# - the first minute of the session
# - the second minute of the session
# - the minute before the close
# - the last minute of the session
# - the first minute after the close (if gaps exist between sessions)
opens = self.answers.market_open.iloc[1:-2]
closes = self.answers.market_close.iloc[1:-2]
previous_opens = self.answers.market_open.iloc[:-1]
previous_closes = self.answers.market_close.iloc[:-1]
next_opens = self.answers.market_open.iloc[2:]
next_closes = self.answers.market_close.iloc[2:]
for (
open_minute,
close_minute,
previous_open,
previous_close,
next_open,
next_close,
) in zip(
opens, closes, previous_opens, previous_closes, next_opens, next_closes
):
minute_before_open = open_minute - self.one_minute
# minute before open
if self.GAPS_BETWEEN_SESSIONS:
self._verify_minute(
self.calendar,
minute_before_open,
open_minute,
previous_open,
close_minute,
previous_close,
)
# open minute
self._verify_minute(
self.calendar,
open_minute,
next_open,
previous_open,
close_minute,
previous_close,
)
# second minute of session
self._verify_minute(
self.calendar,
open_minute + self.one_minute,
next_open,
open_minute,
close_minute,
previous_close,
)
# minute before the close
self._verify_minute(
self.calendar,
close_minute - self.one_minute,
next_open,
open_minute,
close_minute,
previous_close,
)
# the close
self._verify_minute(
self.calendar,
close_minute,
next_open,
open_minute,
next_close,
previous_close,
)
# minute after the close
if self.GAPS_BETWEEN_SESSIONS:
self._verify_minute(
self.calendar,
close_minute + self.one_minute,
next_open,
open_minute,
next_close,
close_minute,
)
def test_next_prev_minute(self):
all_minutes = self.calendar.all_minutes
# test 20,000 minutes because it takes too long to do the rest.
for idx, minute in enumerate(all_minutes[1:20000]):
self.assertEqual(
all_minutes[idx + 2], self.calendar.next_minute(minute, _parse=False)
)
self.assertEqual(
all_minutes[idx], self.calendar.previous_minute(minute, _parse=False)
)
# test a couple of non-market minutes
if self.GAPS_BETWEEN_SESSIONS:
for open_minute in self.answers.market_open[1:]:
hour_before_open = open_minute - self.one_hour
self.assertEqual(
open_minute,
self.calendar.next_minute(hour_before_open, _parse=False),
)
for close_minute in self.answers.market_close[1:]:
hour_after_close = close_minute + self.one_hour
self.assertEqual(
close_minute,
self.calendar.previous_minute(hour_after_close, _parse=False),
)
def test_date_to_session_label(self):
m = self.calendar.date_to_session_label
sessions = self.answers.index[:30] # first 30 sessions
# test for error if request session prior to first calendar session.
date = self.answers.index[0] - self.one_day
error_msg = (
"Cannot get a session label prior to the first calendar"
f" session ('{self.answers.index[0]}'). Consider passing"
" `direction` as 'next'."
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
m(date, "previous", _parse=False)
# direction as "previous"
dates = pd.date_range(sessions[0], sessions[-1], freq="D")
last_session = None
for date in dates:
session_label = m(date, "previous", _parse=False)
if date in sessions:
assert session_label == date
last_session = session_label
else:
assert session_label == last_session
# direction as "next"
last_session = None
for date in dates.sort_values(ascending=False):
session_label = m(date, "next", _parse=False)
if date in sessions:
assert session_label == date
last_session = session_label
else:
assert session_label == last_session
# test for error if request session after last calendar session.
date = self.answers.index[-1] + self.one_day
error_msg = (
"Cannot get a session label later than the last calendar"
f" session ('{self.answers.index[-1]}'). Consider passing"
" `direction` as 'previous'."
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
m(date, "next", _parse=False)
if self.GAPS_BETWEEN_SESSIONS:
not_sessions = dates[~dates.isin(sessions)][:5]
for not_session in not_sessions:
error_msg = (
f"`date` '{not_session}' does not represent a session. Consider"
" passing a `direction`."
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
m(not_session, "none", _parse=False)
# test default behaviour
with pytest.raises(ValueError, match=re.escape(error_msg)):
m(not_session, _parse=False)
# non-valid direction (can only be thrown if gaps between sessions)
error_msg = (
"'not a direction' is not a valid `direction`. Valid `direction`"
' values are "next", "previous" and "none".'
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
m(not_session, "not a direction", _parse=False)
def test_minute_to_session_label(self):
m = self.calendar.minute_to_session_label
# minute is prior to first session's open
minute_before_first_open = self.answers.iloc[0].market_open - self.one_minute
session_label = self.answers.index[0]
minutes_that_resolve_to_this_session = [
m(minute_before_first_open, _parse=False),
m(minute_before_first_open, direction="next", _parse=False),
]
unique_session_labels = set(minutes_that_resolve_to_this_session)
self.assertTrue(len(unique_session_labels) == 1)
self.assertIn(session_label, unique_session_labels)
with self.assertRaises(ValueError):
m(minute_before_first_open, direction="previous", _parse=False)
with self.assertRaises(ValueError):
m(minute_before_first_open, direction="none", _parse=False)
# minute is between first session's open and last session's close
for idx, (session_label, open_minute, close_minute, _, _) in enumerate(
self.answers.iloc[1:-2].itertuples(name=None)
):
hour_into_session = open_minute + self.one_hour
minute_before_session = open_minute - self.one_minute
minute_after_session = close_minute + self.one_minute
next_session_label = self.answers.index[idx + 2]
previous_session_label = self.answers.index[idx]
# verify that minutes inside a session resolve correctly
minutes_that_resolve_to_this_session = [
m(open_minute, _parse=False),
m(open_minute, direction="next", _parse=False),
m(open_minute, direction="previous", _parse=False),
m(open_minute, direction="none", _parse=False),
m(hour_into_session, _parse=False),
m(hour_into_session, direction="next", _parse=False),
m(hour_into_session, direction="previous", _parse=False),
m(hour_into_session, direction="none", _parse=False),
m(close_minute),
m(close_minute, direction="next", _parse=False),
m(close_minute, direction="previous", _parse=False),
m(close_minute, direction="none", _parse=False),
session_label,
]
if self.GAPS_BETWEEN_SESSIONS:
minutes_that_resolve_to_this_session.append(
m(minute_before_session, _parse=False)
)
minutes_that_resolve_to_this_session.append(
m(minute_before_session, direction="next", _parse=False)
)
minutes_that_resolve_to_this_session.append(
m(minute_after_session, direction="previous", _parse=False)
)
self.assertTrue(
all(
x == minutes_that_resolve_to_this_session[0]
for x in minutes_that_resolve_to_this_session
)
)
minutes_that_resolve_to_next_session = [
m(minute_after_session, _parse=False),
m(minute_after_session, direction="next", _parse=False),
next_session_label,
]
self.assertTrue(
all(
x == minutes_that_resolve_to_next_session[0]
for x in minutes_that_resolve_to_next_session
)
)
self.assertEqual(
m(minute_before_session, direction="previous", _parse=False),
previous_session_label,
)
if self.GAPS_BETWEEN_SESSIONS:
# Make sure we use the cache correctly
minutes_that_resolve_to_different_sessions = [
m(minute_after_session, direction="next", _parse=False),
m(minute_after_session, direction="previous", _parse=False),
m(minute_after_session, direction="next", _parse=False),
]
self.assertEqual(
minutes_that_resolve_to_different_sessions,
[next_session_label, session_label, next_session_label],
)
# make sure that exceptions are raised at the right time
with self.assertRaises(ValueError):
m(open_minute, "asdf", _parse=False)
if self.GAPS_BETWEEN_SESSIONS:
with self.assertRaises(ValueError):
m(minute_before_session, direction="none", _parse=False)
# minute is later than last session's close
minute_after_last_close = self.answers.iloc[-1].market_close + self.one_minute
session_label = self.answers.index[-1]
minute_that_resolves_to_session_label = m(
minute_after_last_close, direction="previous", _parse=False
)
self.assertEqual(session_label, minute_that_resolves_to_session_label)
with self.assertRaises(ValueError):
m(minute_after_last_close, _parse=False)
with self.assertRaises(ValueError):
m(minute_after_last_close, direction="next", _parse=False)
with self.assertRaises(ValueError):
m(minute_after_last_close, direction="none", _parse=False)
@parameterized.expand(
[
(1, 0),
(2, 0),
(2, 1),
]
)
def test_minute_index_to_session_labels(self, interval, offset):
minutes = self.calendar.minutes_for_sessions_in_range(
self.MINUTE_INDEX_TO_SESSION_LABELS_START,
self.MINUTE_INDEX_TO_SESSION_LABELS_END,
)
minutes = minutes[range(offset, len(minutes), interval)]
np.testing.assert_array_equal(
pd.DatetimeIndex(minutes.map(self.calendar.minute_to_session_label)),
self.calendar.minute_index_to_session_labels(minutes),
)
def test_next_prev_session(self):
session_labels = self.answers.index[1:-2]
max_idx = len(session_labels) - 1
# the very first session
first_session_label = self.answers.index[0]
with self.assertRaises(ValueError):
self.calendar.previous_session_label(first_session_label, _parse=False)
# all the sessions in the middle
for idx, session_label in enumerate(session_labels):
if idx < max_idx:
self.assertEqual(
self.calendar.next_session_label(session_label, _parse=False),
session_labels[idx + 1],
)
if idx > 0:
self.assertEqual(
self.calendar.previous_session_label(session_label, _parse=False),
session_labels[idx - 1],
)
# the very last session
last_session_label = self.answers.index[-1]
with self.assertRaises(ValueError):
self.calendar.next_session_label(last_session_label, _parse=False)
@staticmethod
def _find_full_session(calendar):
for session_label in calendar.schedule.index:
if session_label not in calendar.early_closes:
return session_label
return None
def test_minutes_for_period(self):
# full session
# find a session that isn't an early close. start from the first
# session, should be quick.
full_session_label = self._find_full_session(self.calendar)
if full_session_label is None:
raise ValueError("Cannot find a full session to test!")
minutes = self.calendar.minutes_for_session(full_session_label)
_open, _close = self.calendar.open_and_close_for_session(full_session_label)
_break_start, _break_end = self.calendar.break_start_and_end_for_session(
full_session_label
)
if not pd.isnull(_break_start):
constructed_minutes = np.concatenate(
[
pd.date_range(start=_open, end=_break_start, freq="min"),
pd.date_range(start=_break_end, end=_close, freq="min"),
]
)
else:
constructed_minutes = pd.date_range(start=_open, end=_close, freq="min")
np.testing.assert_array_equal(
minutes,
constructed_minutes,
)
# early close period
if self.HAVE_EARLY_CLOSES:
early_close_session_label = self.calendar.early_closes[0]
minutes_for_early_close = self.calendar.minutes_for_session(
early_close_session_label
)
_open, _close = self.calendar.open_and_close_for_session(
early_close_session_label
)
np.testing.assert_array_equal(
minutes_for_early_close,
pd.date_range(start=_open, end=_close, freq="min"),
)
# late open period
if self.HAVE_LATE_OPENS:
late_open_session_label = self.calendar.late_opens[0]
minutes_for_late_open = self.calendar.minutes_for_session(
late_open_session_label
)
_open, _close = self.calendar.open_and_close_for_session(
late_open_session_label
)
np.testing.assert_array_equal(
minutes_for_late_open,
pd.date_range(start=_open, end=_close, freq="min"),
)
def test_sessions_in_range(self):
# pick two sessions
session_count = len(self.calendar.schedule.index)
first_idx = session_count // 3
second_idx = 2 * first_idx
first_session_label = self.calendar.schedule.index[first_idx]
second_session_label = self.calendar.schedule.index[second_idx]
answer_key = self.calendar.schedule.index[first_idx : second_idx + 1]
rtrn = self.calendar.sessions_in_range(
first_session_label, second_session_label, _parse=False
)
np.testing.assert_array_equal(answer_key, rtrn)
def get_session_block(self):
"""
Get an "interesting" range of three sessions in a row. By default this
tries to find and return a (full session, early close session, full
session) block.
"""
if not self.HAVE_EARLY_CLOSES:
# If we don't have any early closes, just return a "random" chunk
# of three sessions.
return self.calendar.all_sessions[10:13]
shortened_session = self.calendar.early_closes[0]
shortened_session_idx = self.calendar.schedule.index.get_loc(shortened_session)
session_before = self.calendar.schedule.index[shortened_session_idx - 1]
session_after = self.calendar.schedule.index[shortened_session_idx + 1]
return [session_before, shortened_session, session_after]
def test_minutes_in_range(self):
sessions = self.get_session_block()
first_open, first_close = self.calendar.open_and_close_for_session(sessions[0])
minute_before_first_open = first_open - self.one_minute
middle_open, middle_close = self.calendar.open_and_close_for_session(
sessions[1]
)
last_open, last_close = self.calendar.open_and_close_for_session(sessions[-1])
minute_after_last_close = last_close + self.one_minute
# get all the minutes between first_open and last_close
minutes1 = self.calendar.minutes_in_range(first_open, last_close, _parse=False)
minutes2 = self.calendar.minutes_in_range(
minute_before_first_open, minute_after_last_close, _parse=False
)
if self.GAPS_BETWEEN_SESSIONS:
np.testing.assert_array_equal(minutes1, minutes2)
else:
# if no gaps, then minutes2 should have 2 extra minutes
np.testing.assert_array_equal(minutes1, minutes2[1:-1])
# manually construct the minutes
(
first_break_start,
first_break_end,
) = self.calendar.break_start_and_end_for_session(sessions[0])
(
middle_break_start,
middle_break_end,
) = self.calendar.break_start_and_end_for_session(sessions[1])
(
last_break_start,
last_break_end,
) = self.calendar.break_start_and_end_for_session(sessions[-1])
intervals = [
(first_open, first_break_start, first_break_end, first_close),
(middle_open, middle_break_start, middle_break_end, middle_close),
(last_open, last_break_start, last_break_end, last_close),
]
all_minutes = []
for _open, _break_start, _break_end, _close in intervals:
if pd.isnull(_break_start):
all_minutes.append(
pd.date_range(start=_open, end=_close, freq="min"),
)
else:
all_minutes.append(
pd.date_range(start=_open, end=_break_start, freq="min"),
)
all_minutes.append(
pd.date_range(start=_break_end, end=_close, freq="min"),
)
all_minutes = np.concatenate(all_minutes)
np.testing.assert_array_equal(all_minutes, minutes1)
def test_minutes_for_sessions_in_range(self):
sessions = self.get_session_block()
minutes = self.calendar.minutes_for_sessions_in_range(sessions[0], sessions[-1])
# do it manually
session0_minutes = self.calendar.minutes_for_session(sessions[0])
session1_minutes = self.calendar.minutes_for_session(sessions[1])
session2_minutes = self.calendar.minutes_for_session(sessions[2])
concatenated_minutes = np.concatenate(
[session0_minutes.values, session1_minutes.values, session2_minutes.values]
)
np.testing.assert_array_equal(concatenated_minutes, minutes.values)
def test_sessions_window(self):
sessions = self.get_session_block()
np.testing.assert_array_equal(
self.calendar.sessions_window(sessions[0], len(sessions) - 1, _parse=False),
self.calendar.sessions_in_range(sessions[0], sessions[-1], _parse=False),
)
np.testing.assert_array_equal(
self.calendar.sessions_window(
sessions[-1], -1 * (len(sessions) - 1), _parse=False
),
self.calendar.sessions_in_range(sessions[0], sessions[-1], _parse=False),
)
def test_session_distance(self):
sessions = self.get_session_block()
forward_distance = self.calendar.session_distance(
sessions[0],
sessions[-1],
_parse=False,
)
self.assertEqual(forward_distance, len(sessions))
backward_distance = self.calendar.session_distance(
sessions[-1],
sessions[0],
_parse=False,
)
self.assertEqual(backward_distance, -len(sessions))
one_day_distance = self.calendar.session_distance(
sessions[0],
sessions[0],
_parse=False,
)
self.assertEqual(one_day_distance, 1)
def test_open_and_close_for_session(self):
for session_label, open_answer, close_answer, _, _ in self.answers.itertuples(
name=None
):
found_open, found_close = self.calendar.open_and_close_for_session(
session_label, _parse=False
)
# Test that the methods for just session open and close produce the
# same values as the method for getting both.
alt_open = self.calendar.session_open(session_label, _parse=False)
self.assertEqual(alt_open, found_open)
alt_close = self.calendar.session_close(session_label, _parse=False)
self.assertEqual(alt_close, found_close)
self.assertEqual(open_answer, found_open)
self.assertEqual(close_answer, found_close)
def test_session_opens_in_range(self):
found_opens = self.calendar.session_opens_in_range(
self.answers.index[0],
self.answers.index[-1],
_parse=False,
)
found_opens.index.freq = None
tm.assert_series_equal(found_opens, self.answers["market_open"])
def test_session_closes_in_range(self):
found_closes = self.calendar.session_closes_in_range(
self.answers.index[0],
self.answers.index[-1],
_parse=False,
)
found_closes.index.freq = None
tm.assert_series_equal(found_closes, self.answers["market_close"])
def test_daylight_savings(self):
# 2004 daylight savings switches:
# Sunday 2004-04-04 and Sunday 2004-10-31
# make sure there's no weirdness around calculating the next day's
# session's open time.
m = dict(self.calendar.open_times)
m[pd.Timestamp.min] = m.pop(None)
open_times = pd.Series(m)
for date in self.DAYLIGHT_SAVINGS_DATES:
next_day = pd.Timestamp(date, tz=UTC)
open_date = next_day + Timedelta(days=self.calendar.open_offset)
the_open = self.calendar.schedule.loc[next_day].market_open
localized_open = the_open.tz_localize(UTC).tz_convert(self.calendar.tz)
self.assertEqual(
(open_date.year, open_date.month, open_date.day),
(localized_open.year, localized_open.month, localized_open.day),
)
open_ix = open_times.index.searchsorted(pd.Timestamp(date), side="right")
if open_ix == len(open_times):
open_ix -= 1
self.assertEqual(open_times.iloc[open_ix].hour, localized_open.hour)
self.assertEqual(open_times.iloc[open_ix].minute, localized_open.minute)
def test_start_end(self):
"""
Check ExchangeCalendar with defined start/end dates.
"""
calendar = self.calendar_class(
start=self.TEST_START_END_FIRST,
end=self.TEST_START_END_LAST,
)
self.assertEqual(
calendar.first_trading_session,
self.TEST_START_END_EXPECTED_FIRST,
)
self.assertEqual(
calendar.last_trading_session,
self.TEST_START_END_EXPECTED_LAST,
)
def test_has_breaks(self):
has_breaks = self.calendar.has_breaks()
self.assertEqual(has_breaks, self.HAVE_BREAKS)
def test_session_has_break(self):
if self.SESSION_WITHOUT_BREAK is not None:
self.assertFalse(
self.calendar.session_has_break(self.SESSION_WITHOUT_BREAK)
)
if self.SESSION_WITH_BREAK is not None:
self.assertTrue(self.calendar.session_has_break(self.SESSION_WITH_BREAK))
# TODO remove this class when all calendars migrated. No longer required as
# `minute_index_to_session_labels` comprehensively tested under new suite.
class OpenDetectionTestCase(TestCase):
# This is an extra set of unit tests that were added during a rewrite of
# `minute_index_to_session_labels` to ensure that the existing
# calendar-generic test suite correctly covered edge cases around
# non-market minutes.
def test_detect_non_market_minutes(self):
cal = get_calendar("NYSE")
# NOTE: This test is here instead of being on the base class for all
# calendars because some of our calendars are 24/7, which means there
# aren't any non-market minutes to find.
day0 = cal.minutes_for_sessions_in_range(
pd.Timestamp("2013-07-03", tz=UTC),
pd.Timestamp("2013-07-03", tz=UTC),
)
for minute in day0:
self.assertTrue(cal.is_open_on_minute(minute))
day1 = cal.minutes_for_sessions_in_range(
pd.Timestamp("2013-07-05", tz=UTC),
pd.Timestamp("2013-07-05", tz=UTC),
)
for minute in day1:
self.assertTrue(cal.is_open_on_minute(minute))
def NYSE_timestamp(s):
return pd.Timestamp(s, tz="America/New_York").tz_convert(UTC)
non_market = [
# After close.
NYSE_timestamp("2013-07-03 16:01"),
# Holiday.
NYSE_timestamp("2013-07-04 10:00"),
# Before open.
NYSE_timestamp("2013-07-05 9:29"),
]
for minute in non_market:
self.assertFalse(cal.is_open_on_minute(minute), minute)
input_ = pd.to_datetime(
np.hstack([day0.values, minute.asm8, day1.values]),
utc=True,
)
with self.assertRaises(ValueError) as e:
cal.minute_index_to_session_labels(input_)
exc_str = str(e.exception)
self.assertIn("First Bad Minute: {}".format(minute), exc_str)
# TODO remove this class when all calendars migrated. No longer required as
# this case is handled by new test base internally.
class NoDSTExchangeCalendarTestBase(ExchangeCalendarTestBase):
def test_daylight_savings(self):
"""
Several countries in Africa / Asia do not observe DST
so we need to skip over this test for those markets
"""
pass
def get_csv(name: str) -> pd.DataFrame:
"""Get csv file as DataFrame for given calendar `name`."""
filename = name.replace("/", "-").lower() + ".csv"
path = pathlib.Path(__file__).parent.joinpath("resources", filename)
df = pd.read_csv(
path,
index_col=0,
parse_dates=[0, 1, 2, 3, 4],
infer_datetime_format=True,
)
df.index = df.index.tz_localize("UTC")
for col in df:
df[col] = df[col].dt.tz_localize("UTC")
return df
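def _example_answers_frame() -> pd.DataFrame:
    # Hedged usage sketch (assumes tests/resources contains an answers csv for
    # the named calendar, e.g. "xnys.csv"): load the per-calendar answers frame
    # (UTC-localized index plus market_open/market_close/break columns) that
    # the Answers helper below wraps.
    return get_csv("XNYS")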
class Answers:
"""Inputs and expected output for testing a given calendar and side.
Inputs and expected outputs are provided by public instance methods and
properties. These either read directly from the corresponding .csv file
or are evaluated from the .csv file contents. NB Properties / methods
MUST NOT make evaluations by way of repeating the code of the
ExchangeCalendar method they are intended to test!
Parameters
----------
calendar_name
Canonical name of calendar for which answer info is required. For
example, 'XNYS'.
side {'both', 'left', 'right', 'neither'}
Side of sessions to treat as trading minutes.
"""
ONE_MIN = pd.Timedelta(1, "T")
TWO_MIN = pd.Timedelta(2, "T")
ONE_DAY = pd.Timedelta(1, "D")
LEFT_SIDES = ["left", "both"]
RIGHT_SIDES = ["right", "both"]
def __init__(
self,
calendar_name: str,
side: str,
):
self._name = calendar_name.upper()
self._side = side
# --- Exposed constructor arguments ---
@property
def name(self) -> str:
"""Name of corresponding calendar."""
return self._name
@property
def side(self) -> str:
"""Side of calendar for which answers valid."""
return self._side
# --- Properties read (indirectly) from csv file ---
@functools.lru_cache(maxsize=4)
def _answers(self) -> pd.DataFrame:
return get_csv(self.name)
@property
def answers(self) -> pd.DataFrame:
"""Answers as correspoding csv."""
return self._answers()
@property
def sessions(self) -> pd.DatetimeIndex:
"""Session labels."""
return self.answers.index
@property
def opens(self) -> pd.Series:
"""Market open time for each session."""
return self.answers.market_open
@property
def closes(self) -> pd.Series:
"""Market close time for each session."""
return self.answers.market_close
@property
def break_starts(self) -> pd.Series:
"""Break start time for each session."""
return self.answers.break_start
@property
def break_ends(self) -> pd.Series:
"""Break end time for each session."""
return self.answers.break_end
# --- get and helper methods ---
def get_next_session(self, session: pd.Timestamp) -> pd.Timestamp:
"""Get session that immediately follows `session`."""
assert (
session != self.last_session
), "Cannot get session later than last answers' session."
idx = self.sessions.get_loc(session) + 1
return self.sessions[idx]
def session_has_break(self, session: pd.Timestamp) -> bool:
"""Query if `session` has a break."""
return session in self.sessions_with_break
@staticmethod
def get_sessions_sample(sessions: pd.DatetimeIndex):
"""Return sample of given `sessions`.
Sample includes:
All sessions within first two years of `sessions`.
All sessions within last two years of `sessions`.
All sessions falling:
within first 3 days of any month.
from 28th of any month.
from 14th through 16th of any month.
"""
if sessions.empty:
return sessions
mask = (
(sessions < sessions[0] + pd.DateOffset(years=2))
| (sessions > sessions[-1] - pd.DateOffset(years=2))
| (sessions.day <= 3)
| (sessions.day >= 28)
| (14 <= sessions.day) & (sessions.day <= 16)
)
return sessions[mask]
def get_sessions_minutes(
self, start: pd.Timestamp, end: pd.Timestamp | int = 1
) -> pd.DatetimeIndex:
"""Get trading minutes for 1 or more consecutive sessions.
Parameters
----------
start
Session from which to get trading minutes.
end
Session through which to get trading minutes. Can be passed as:
pd.Timestamp: return will include trading minutes for `end`
session.
int: where int represents the number of consecutive sessions,
inclusive of `start`, for which trading minutes are required.
Default is 1, such that by default trading minutes are returned
for only the `start` session.
"""
idx = self.sessions.get_loc(start)
stop = idx + end if isinstance(end, int) else self.sessions.get_loc(end) + 1
indexer = slice(idx, stop)
dtis = []
for first, last, last_am, first_pm in zip(
self.first_minutes[indexer],
self.last_minutes[indexer],
self.last_am_minutes[indexer],
self.first_pm_minutes[indexer],
):
if pd.isna(last_am):
dtis.append(pd.date_range(first, last, freq="T"))
else:
dtis.append(pd.date_range(first, last_am, freq="T"))
dtis.append(pd.date_range(first_pm, last, freq="T"))
return dtis[0].union_many(dtis[1:])
# --- Evaluated general calendar properties ---
@functools.lru_cache(maxsize=4)
def _has_a_session_with_break(self) -> bool:
return self.break_starts.notna().any()
@property
def has_a_session_with_break(self) -> bool:
"""Does any session of answers have a break."""
return self._has_a_session_with_break()
@property
def has_a_session_without_break(self) -> bool:
"""Does any session of answers not have a break."""
return self.break_starts.isna().any()
# --- Evaluated properties for first and last sessions ---
@property
def first_session(self) -> pd.Timestamp:
"""First session covered by answers."""
return self.sessions[0]
@property
def last_session(self) -> pd.Timestamp:
"""Last session covered by answers."""
return self.sessions[-1]
@property
def sessions_range(self) -> tuple[pd.Timestamp, pd.Timestamp]:
"""First and last sessions covered by answers."""
return self.first_session, self.last_session
@property
def first_session_open(self) -> pd.Timestamp:
"""Open time of first session covered by answers."""
return self.opens[0]
@property
def last_session_close(self) -> pd.Timestamp:
"""Close time of last session covered by answers."""
return self.closes[-1]
@property
def first_trading_minute(self) -> pd.Timestamp:
open_ = self.first_session_open
return open_ if self.side in self.LEFT_SIDES else open_ + self.ONE_MIN
@property
def last_trading_minute(self) -> pd.Timestamp:
close = self.last_session_close
return close if self.side in self.RIGHT_SIDES else close - self.ONE_MIN
@property
def trading_minutes_range(self) -> tuple[pd.Timestamp, pd.Timestamp]:
"""First and last trading minutes covered by answers."""
return self.first_trading_minute, self.last_trading_minute
# --- out-of-bounds properties ---
@property
def minute_too_early(self) -> pd.Timestamp:
"""Minute earlier than first trading minute."""
return self.first_trading_minute - self.ONE_MIN
@property
def minute_too_late(self) -> pd.Timestamp:
"""Minute later than last trading minute."""
return self.last_trading_minute + self.ONE_MIN
@property
def session_too_early(self) -> pd.Timestamp:
"""Date earlier than first session."""
return self.first_session - self.ONE_DAY
@property
def session_too_late(self) -> pd.Timestamp:
"""Date later than last session."""
return self.last_session + self.ONE_DAY
# --- Evaluated properties covering every session. ---
@functools.lru_cache(maxsize=4)
def _first_minutes(self) -> pd.Series:
if self.side in self.LEFT_SIDES:
minutes = self.opens.copy()
else:
minutes = self.opens + self.ONE_MIN
minutes.name = "first_minutes"
return minutes
@property
def first_minutes(self) -> pd.Series:
"""First trading minute of each session (UTC)."""
return self._first_minutes()
@property
def first_minutes_plus_one(self) -> pd.Series:
"""First trading minute of each session plus one minute."""
return self.first_minutes + self.ONE_MIN
@property
def first_minutes_less_one(self) -> pd.Series:
"""First trading minute of each session less one minute."""
return self.first_minutes - self.ONE_MIN
@functools.lru_cache(maxsize=4)
def _last_minutes(self) -> pd.Series:
if self.side in self.RIGHT_SIDES:
minutes = self.closes.copy()
else:
minutes = self.closes - self.ONE_MIN
minutes.name = "last_minutes"
return minutes
@property
def last_minutes(self) -> pd.Series:
"""Last trading minute of each session."""
return self._last_minutes()
@property
def last_minutes_plus_one(self) -> pd.Series:
"""Last trading minute of each session plus one minute."""
return self.last_minutes + self.ONE_MIN
@property
def last_minutes_less_one(self) -> pd.Series:
"""Last trading minute of each session less one minute."""
return self.last_minutes - self.ONE_MIN
@functools.lru_cache(maxsize=4)
def _last_am_minutes(self) -> pd.Series:
if self.side in self.RIGHT_SIDES:
minutes = self.break_starts.copy()
else:
minutes = self.break_starts - self.ONE_MIN
minutes.name = "last_am_minutes"
return minutes
@property
def last_am_minutes(self) -> pd.Series:
"""Last pre-break trading minute of each session.
NaT if session does not have a break.
"""
return self._last_am_minutes()
@property
def last_am_minutes_plus_one(self) -> pd.Series:
"""Last pre-break trading minute of each session plus one minute."""
return self.last_am_minutes + self.ONE_MIN
@property
def last_am_minutes_less_one(self) -> pd.Series:
"""Last pre-break trading minute of each session less one minute."""
return self.last_am_minutes - self.ONE_MIN
@functools.lru_cache(maxsize=4)
def _first_pm_minutes(self) -> pd.Series:
if self.side in self.LEFT_SIDES:
minutes = self.break_ends.copy()
else:
minutes = self.break_ends + self.ONE_MIN
minutes.name = "first_pm_minutes"
return minutes
@property
def first_pm_minutes(self) -> pd.Series:
"""First post-break trading minute of each session.
NaT if session does not have a break.
"""
return self._first_pm_minutes()
@property
def first_pm_minutes_plus_one(self) -> pd.Series:
"""First post-break trading minute of each session plus one minute."""
return self.first_pm_minutes + self.ONE_MIN
@property
def first_pm_minutes_less_one(self) -> pd.Series:
"""First post-break trading minute of each session less one minute."""
return self.first_pm_minutes - self.ONE_MIN
# --- Evaluated session sets and ranges that meet a specific condition ---
@property
def _mask_breaks(self) -> pd.Series:
return self.break_starts.notna()
@functools.lru_cache(maxsize=4)
def _sessions_with_break(self) -> pd.DatetimeIndex:
return self.sessions[self._mask_breaks]
@property
def sessions_with_break(self) -> pd.DatetimeIndex:
return self._sessions_with_break()
@functools.lru_cache(maxsize=4)
def _sessions_without_break(self) -> pd.DatetimeIndex:
return self.sessions[~self._mask_breaks]
@property
def sessions_without_break(self) -> pd.DatetimeIndex:
return self._sessions_without_break()
@property
def sessions_without_break_run(self) -> pd.DatetimeIndex:
"""Longest run of consecutive sessions without a break."""
s = self.break_starts.isna()
if s.empty:
return pd.DatetimeIndex([], tz="UTC")
trues_grouped = (~s).cumsum()[s]
group_sizes = trues_grouped.value_counts()
max_run_size = group_sizes.max()
max_run_group_id = group_sizes[group_sizes == max_run_size].index[0]
run_without_break = trues_grouped[trues_grouped == max_run_group_id].index
return run_without_break
@property
def sessions_without_break_range(self) -> tuple[pd.Timestamp, pd.Timestamp] | None:
"""Longest session range that does not include a session with a break.
Returns None if all sessions have a break.
"""
sessions = self.sessions_without_break_run
if sessions.empty:
return None
return sessions[0], sessions[-1]
@property
def _mask_sessions_without_gap_after(self) -> pd.Series:
if self.side == "neither":
# will always have a gap after if neither open nor close is a trading
# minute (assuming sessions cannot overlap)
return pd.Series(False, index=self.sessions)
elif self.side == "both":
# a trading minute cannot be a minute of more than one session.
assert not (self.closes == self.opens.shift(-1)).any()
# there will be no gap if next open is one minute after previous close
closes_plus_min = self.closes + pd.Timedelta(1, "T")
return self.opens.shift(-1) == closes_plus_min
else:
return self.opens.shift(-1) == self.closes
@property
def _mask_sessions_without_gap_before(self) -> pd.Series:
if self.side == "neither":
# will always have a gap before if neither open nor close is a trading
# minute (assuming sessions cannot overlap)
return pd.Series(False, index=self.sessions)
elif self.side == "both":
# a trading minute cannot be a minute of more than one session.
assert not (self.closes == self.opens.shift(-1)).any()
# there will be no gap if previous close is one minute before next open
opens_minus_one = self.opens - pd.Timedelta(1, "T")
return self.closes.shift(1) == opens_minus_one
else:
return self.closes.shift(1) == self.opens
@functools.lru_cache(maxsize=4)
def _sessions_without_gap_after(self) -> pd.DatetimeIndex:
mask = self._mask_sessions_without_gap_after
return self.sessions[mask][:-1]
@property
def sessions_without_gap_after(self) -> pd.DatetimeIndex:
"""Sessions not followed by a non-trading minute.
Rather, sessions immediately followed by first trading minute of
next session.
"""
return self._sessions_without_gap_after()
@functools.lru_cache(maxsize=4)
def _sessions_with_gap_after(self) -> pd.DatetimeIndex:
mask = self._mask_sessions_without_gap_after
return self.sessions[~mask][:-1]
@property
def sessions_with_gap_after(self) -> pd.DatetimeIndex:
"""Sessions followed by a non-trading minute."""
return self._sessions_with_gap_after()
@functools.lru_cache(maxsize=4)
def _sessions_without_gap_before(self) -> pd.DatetimeIndex:
mask = self._mask_sessions_without_gap_before
return self.sessions[mask][1:]
@property
def sessions_without_gap_before(self) -> pd.DatetimeIndex:
"""Sessions not preceeded by a non-trading minute.
Rather, sessions immediately preceeded by last trading minute of
previous session.
"""
return self._sessions_without_gap_before()
@functools.lru_cache(maxsize=4)
def _sessions_with_gap_before(self) -> pd.DatetimeIndex:
mask = self._mask_sessions_without_gap_before
return self.sessions[~mask][1:]
@property
def sessions_with_gap_before(self) -> pd.DatetimeIndex:
"""Sessions preceeded by a non-trading minute."""
return self._sessions_with_gap_before()
# times are changing...
@functools.lru_cache(maxsize=16)
def _get_sessions_with_times_different_to_next_session(
self,
column: str, # typing.Literal["opens", "closes", "break_starts", "break_ends"]
) -> list[pd.DatetimeIndex]:
"""For a given answers column, get session labels where time differs
from time of next session.
Where `column` is a break time ("break_starts" or "break_ends"), return
will not include sessions when next session has a different `has_break`
status. For example, if session_0 has a break and session_1 does not have
a break, or vice versa, then session_0 will not be included in the return. For
sessions followed by a session with a different `has_break` status, see
`_get_sessions_with_has_break_different_to_next_session`.
Returns
-------
list of pd.DatetimeIndex
[0] sessions with earlier next session
[1] sessions with later next session
"""
# column takes string to allow lru_cache (Series not hashable)
is_break_col = column[0] == "b"
column_ = getattr(self, column)
if is_break_col:
if column_.isna().all():
return [pd.DatetimeIndex([], tz="UTC")] * 2
column_ = column_.fillna(method="ffill").fillna(method="bfill")
diff = (column_.shift(-1) - column_)[:-1]
remainder = diff % pd.Timedelta(hours=24)
mask = remainder != pd.Timedelta(0)
sessions = self.sessions[:-1][mask]
next_session_earlier_mask = remainder[mask] > pd.Timedelta(hours=12)
next_session_earlier = sessions[next_session_earlier_mask]
next_session_later = sessions[~next_session_earlier_mask]
if is_break_col:
mask = next_session_earlier.isin(self.sessions_without_break)
next_session_earlier = next_session_earlier.drop(next_session_earlier[mask])
mask = next_session_later.isin(self.sessions_without_break)
next_session_later = next_session_later.drop(next_session_later[mask])
return [next_session_earlier, next_session_later]
@property
def _sessions_with_opens_different_to_next_session(
self,
) -> list[pd.DatetimeIndex]:
return self._get_sessions_with_times_different_to_next_session("opens")
@property
def _sessions_with_closes_different_to_next_session(
self,
) -> list[pd.DatetimeIndex]:
return self._get_sessions_with_times_different_to_next_session("closes")
@property
def _sessions_with_break_start_different_to_next_session(
self,
) -> list[pd.DatetimeIndex]:
return self._get_sessions_with_times_different_to_next_session("break_starts")
@property
def _sessions_with_break_end_different_to_next_session(
self,
) -> list[pd.DatetimeIndex]:
return self._get_sessions_with_times_different_to_next_session("break_ends")
@property
def sessions_next_open_earlier(self) -> pd.DatetimeIndex:
return self._sessions_with_opens_different_to_next_session[0]
@property
def sessions_next_open_later(self) -> pd.DatetimeIndex:
return self._sessions_with_opens_different_to_next_session[1]
@property
def sessions_next_open_different(self) -> pd.DatetimeIndex:
return self.sessions_next_open_earlier.union(self.sessions_next_open_later)
@property
def sessions_next_close_earlier(self) -> pd.DatetimeIndex:
return self._sessions_with_closes_different_to_next_session[0]
@property
def sessions_next_close_later(self) -> pd.DatetimeIndex:
return self._sessions_with_closes_different_to_next_session[1]
@property
def sessions_next_close_different(self) -> pd.DatetimeIndex:
return self.sessions_next_close_earlier.union(self.sessions_next_close_later)
@property
def sessions_next_break_start_earlier(self) -> pd.DatetimeIndex:
return self._sessions_with_break_start_different_to_next_session[0]
@property
def sessions_next_break_start_later(self) -> pd.DatetimeIndex:
return self._sessions_with_break_start_different_to_next_session[1]
@property
def sessions_next_break_start_different(self) -> pd.DatetimeIndex:
earlier = self.sessions_next_break_start_earlier
later = self.sessions_next_break_start_later
return earlier.union(later)
@property
def sessions_next_break_end_earlier(self) -> pd.DatetimeIndex:
return self._sessions_with_break_end_different_to_next_session[0]
@property
def sessions_next_break_end_later(self) -> pd.DatetimeIndex:
return self._sessions_with_break_end_different_to_next_session[1]
@property
def sessions_next_break_end_different(self) -> pd.DatetimeIndex:
earlier = self.sessions_next_break_end_earlier
later = self.sessions_next_break_end_later
return earlier.union(later)
@functools.lru_cache(maxsize=4)
def _get_sessions_with_has_break_different_to_next_session(
self,
) -> tuple[pd.DatetimeIndex, pd.DatetimeIndex]:
"""Get sessions with 'has_break' different to next session.
Returns
-------
tuple[pd.DatetimeIndex, pd.DatetimeIndex]
[0] Sessions that have a break and are immediately followed by
a session which does not have a break.
[1] Sessions that do not have a break and are immediately
followed by a session which does have a break.
"""
mask = (self.break_starts.notna() & self.break_starts.shift(-1).isna())[:-1]
sessions_with_break_next_session_without_break = self.sessions[:-1][mask]
mask = (self.break_starts.isna() & self.break_starts.shift(-1).notna())[:-1]
sessions_without_break_next_session_with_break = self.sessions[:-1][mask]
return (
sessions_with_break_next_session_without_break,
sessions_without_break_next_session_with_break,
)
@property
def sessions_with_break_next_session_without_break(self) -> pd.DatetimeIndex:
return self._get_sessions_with_has_break_different_to_next_session()[0]
@property
def sessions_without_break_next_session_with_break(self) -> pd.DatetimeIndex:
return self._get_sessions_with_has_break_different_to_next_session()[1]
@functools.lru_cache(maxsize=4)
def _sessions_next_time_different(self) -> pd.DatetimeIndex:
return self.sessions_next_open_different.union_many(
[
self.sessions_next_close_different,
self.sessions_next_break_start_different,
self.sessions_next_break_end_different,
self.sessions_with_break_next_session_without_break,
self.sessions_without_break_next_session_with_break,
]
)
@property
def sessions_next_time_different(self) -> pd.DatetimeIndex:
"""Sessions where next session has a different time for any column.
Includes sessions where next session has a different `has_break`
status.
"""
return self._sessions_next_time_different()
# session blocks...
def _create_changing_times_session_block(
self, session: pd.Timestamp
) -> pd.DatetimeIndex:
"""Create block of sessions with changing times.
Given a `session` known to have at least one time (open, close,
break_start or break_end) different from the next session, returns
a block of consecutive sessions ending with the first session after
`session` that has the same times as the session that immediately
precedes it (i.e. the last two sessions of the block will have the
same times), or the last calendar session.
"""
start_idx = self.sessions.get_loc(session)
end_idx = start_idx + 1
while self.sessions[end_idx] in self.sessions_next_time_different:
end_idx += 1
end_idx += 2 # +1 to include session with same times, +1 to serve as end index
return self.sessions[start_idx:end_idx]
def _get_normal_session_block(self) -> pd.DatetimeIndex:
"""Block of 3 sessions with unchanged timings."""
start_idx = len(self.sessions) // 3
end_idx = start_idx + 21
for i in range(start_idx, end_idx):
times_1 = self.answers.iloc[i].dt.time
times_2 = self.answers.iloc[i + 1].dt.time
times_3 = self.answers.iloc[i + 2].dt.time
one_and_two_equal = (times_1 == times_2) | (times_1.isna() & times_2.isna())
one_and_three_equal = (times_1 == times_3) | (
times_1.isna() & times_3.isna()
)
if (one_and_two_equal & one_and_three_equal).all():
break
assert i < (end_idx - 1), "Unable to evaluate a normal session block!"
return self.sessions[i : i + 3]
def _get_session_block(
self, from_session_of: pd.DatetimeIndex, to_session_of: pd.DatetimeIndex
) -> pd.DatetimeIndex:
"""Get session block with bounds defined by sessions of given indexes.
Block will start with middle session of `from_session_of`.
Block will run to the nearest subsequent session of `to_session_of`
(or `self.last_session` if this comes first). Block will end with
the session that immediately follows this session.
"""
i = len(from_session_of) // 2
start_session = from_session_of[i]
start_idx = self.sessions.get_loc(start_session)
end_idx = start_idx + 1
end_session = self.sessions[end_idx]
while end_session not in to_session_of and end_session != self.last_session:
end_idx += 1
end_session = self.sessions[end_idx]
return self.sessions[start_idx : end_idx + 2]
@functools.lru_cache(maxsize=4)
def _session_blocks(self) -> dict[str, pd.DatetimeIndex]:
blocks = {}
blocks["normal"] = self._get_normal_session_block()
blocks["first_three"] = self.sessions[:3]
blocks["last_three"] = self.sessions[-3:]
# blocks here include where:
# session 1 has at least one different time from session 0
# session 0 has a break and session 1 does not (and vice versa)
sessions_indexes = (
("next_open_earlier", self.sessions_next_open_earlier),
("next_open_later", self.sessions_next_open_later),
("next_close_earlier", self.sessions_next_close_earlier),
("next_close_later", self.sessions_next_close_later),
("next_break_start_earlier", self.sessions_next_break_start_earlier),
("next_break_start_later", self.sessions_next_break_start_later),
("next_break_end_earlier", self.sessions_next_break_end_earlier),
("next_break_end_later", self.sessions_next_break_end_later),
(
"with_break_to_without_break",
self.sessions_with_break_next_session_without_break,
),
(
"without_break_to_with_break",
self.sessions_without_break_next_session_with_break,
),
)
for name, index in sessions_indexes:
if index.empty:
blocks[name] = pd.DatetimeIndex([], tz="UTC")
else:
session = index[0]
blocks[name] = self._create_changing_times_session_block(session)
# blocks here move from session with gap to session without gap and vice versa
if (not self.sessions_with_gap_after.empty) and (
not self.sessions_without_gap_after.empty
):
without_gap_to_with_gap = self._get_session_block(
self.sessions_without_gap_after, self.sessions_with_gap_after
)
with_gap_to_without_gap = self._get_session_block(
self.sessions_with_gap_after, self.sessions_without_gap_after
)
else:
without_gap_to_with_gap = pd.DatetimeIndex([], tz="UTC")
with_gap_to_without_gap = pd.DatetimeIndex([], tz="UTC")
blocks["without_gap_to_with_gap"] = without_gap_to_with_gap
blocks["with_gap_to_without_gap"] = with_gap_to_without_gap
# blocks that adjoin or contain a non_session date
follows_non_session = pd.DatetimeIndex([], tz="UTC")
preceeds_non_session = pd.DatetimeIndex([], tz="UTC")
contains_non_session = pd.DatetimeIndex([], tz="UTC")
if len(self.non_sessions) > 1:
diff = self.non_sessions[1:] - self.non_sessions[:-1]
mask = diff != pd.Timedelta(
1, "D"
) # non_session dates followed by a session
valid_non_sessions = self.non_sessions[:-1][mask]
if len(valid_non_sessions) > 1:
slce = self.sessions.slice_indexer(
valid_non_sessions[0], valid_non_sessions[1]
)
sessions_between_non_sessions = self.sessions[slce]
block_length = min(2, len(sessions_between_non_sessions))
follows_non_session = sessions_between_non_sessions[:block_length]
preceeds_non_session = sessions_between_non_sessions[-block_length:]
# take session before and session after non-session
contains_non_session = self.sessions[slce.stop - 1 : slce.stop + 1]
blocks["follows_non_session"] = follows_non_session
blocks["preceeds_non_session"] = preceeds_non_session
blocks["contains_non_session"] = contains_non_session
return blocks
@property
def session_blocks(self) -> dict[str, pd.DatetimeIndex]:
"""Dictionary of session blocks of a particular behaviour.
A block comprises either a single session or multiple contiguous
sessions.
Keys:
"normal" - three sessions with unchanging timings.
"first_three" - answers' first three sessions.
"last_three" - answers's last three sessions.
"next_open_earlier" - session 1 open is earlier than session 0
open.
"next_open_later" - session 1 open is later than session 0
open.
"next_close_earlier" - session 1 close is earlier than session
0 close.
"next_close_later" - session 1 close is later than session 0
close.
"next_break_start_earlier" - session 1 break_start is earlier
than session 0 break_start.
"next_break_start_later" - session 1 break_start is later than
session 0 break_start.
"next_break_end_earlier" - session 1 break_end is earlier than
session 0 break_end.
"next_break_end_later" - session 1 break_end is later than
session 0 break_end.
"with_break_to_without_break" - session 0 has a break, session
1 does not have a break.
"without_break_to_with_break" - session 0 does not have a
break, session 1 does have a break.
"without_gap_to_with_gap" - session 0 is not followed by a
gap, session -2 is followed by a gap, session -1 is
                preceded by a gap.
"with_gap_to_without_gap" - session 0 is followed by a gap,
session -2 is not followed by a gap, session -1 is not
                preceded by a gap.
"follows_non_session" - one or two sessions where session 0
                is preceded by a date that is a non-session.
            "preceeds_non_session" - one or two sessions where session -1
                is followed by a date that is a non-session.
            "contains_non_session" - two sessions with at least one
non-session date in between.
If no such session block exists for any key then value will take an
empty DatetimeIndex (UTC).
"""
return self._session_blocks()
def session_block_generator(self) -> abc.Iterator[tuple[str, pd.DatetimeIndex]]:
"""Generator of session blocks of a particular behaviour."""
for name, block in self.session_blocks.items():
if not block.empty:
yield (name, block)
@functools.lru_cache(maxsize=4)
def _session_block_minutes(self) -> dict[str, pd.DatetimeIndex]:
d = {}
for name, block in self.session_blocks.items():
if block.empty:
d[name] = pd.DatetimeIndex([], tz="UTC")
continue
d[name] = self.get_sessions_minutes(block[0], len(block))
return d
@property
def session_block_minutes(self) -> dict[str, pd.DatetimeIndex]:
"""Trading minutes for each `session_block`.
Key:
Session block name as documented to `session_blocks`.
Value:
Trading minutes of corresponding session block.
"""
return self._session_block_minutes()
@property
def sessions_sample(self) -> pd.DatetimeIndex:
"""Sample of normal and unusual sessions.
Sample comprises set of sessions of all `session_blocks` (see
`session_blocks` doc). In this way sample includes at least one
        sample of every identified unique circumstance.
"""
dtis = list(self.session_blocks.values())
return dtis[0].union_many(dtis[1:])
# non-sessions...
@functools.lru_cache(maxsize=4)
def _non_sessions(self) -> pd.DatetimeIndex:
all_dates = pd.date_range(
start=self.first_session, end=self.last_session, freq="D"
)
return all_dates.difference(self.sessions)
@property
def non_sessions(self) -> pd.DatetimeIndex:
"""Dates (UTC midnight) within answers range that are not sessions."""
return self._non_sessions()
@property
def sessions_range_defined_by_non_sessions(
self,
    ) -> tuple[tuple[pd.Timestamp, pd.Timestamp], pd.DatetimeIndex] | None:
"""Range containing sessions although defined with non-sessions.
Returns
-------
        tuple[tuple[pd.Timestamp, pd.Timestamp], pd.DatetimeIndex]:
[0] tuple[pd.Timestamp, pd.Timestamp]:
[0] range start as non-session date.
[1] range end as non-session date.
[1] pd.DatetimeIndex:
Sessions in range.
"""
non_sessions = self.non_sessions
if len(non_sessions) <= 1:
return None
limit = len(self.non_sessions) - 2
i = 0
start, end = non_sessions[i], non_sessions[i + 1]
while (end - start) < pd.Timedelta(4, "D"):
i += 1
start, end = non_sessions[i], non_sessions[i + 1]
if i == limit:
# Unable to evaluate range from consecutive non-sessions
# that covers >= 3 sessions. Just go with max range...
start, end = non_sessions[0], non_sessions[-1]
slice_start, slice_end = self.sessions.searchsorted((start, end))
return (start, end), self.sessions[slice_start:slice_end]
@property
def non_sessions_run(self) -> pd.DatetimeIndex:
"""Longest run of non_sessions."""
ser = self.sessions.to_series()
diff = ser.shift(-1) - ser
max_diff = diff.max()
if max_diff == pd.Timedelta(1, "D"):
return pd.DatetimeIndex([])
session_before_run = diff[diff == max_diff].index[-1]
run = pd.date_range(
start=session_before_run + pd.Timedelta(1, "D"),
periods=(max_diff // pd.Timedelta(1, "D")) - 1,
freq="D",
)
assert run.isin(self.non_sessions).all()
assert run[0] > self.first_session
assert run[-1] < self.last_session
return run
@property
def non_sessions_range(self) -> tuple[pd.Timestamp, pd.Timestamp] | None:
"""Longest range covering a period without a session."""
non_sessions_run = self.non_sessions_run
if non_sessions_run.empty:
return None
else:
return self.non_sessions_run[0], self.non_sessions_run[-1]
# --- Evaluated sets of minutes ---
@functools.lru_cache(maxsize=4)
def _evaluate_trading_and_break_minutes(self) -> tuple[tuple, tuple]:
sessions = self.sessions_sample
first_mins = self.first_minutes[sessions]
first_mins_plus_one = first_mins + self.ONE_MIN
last_mins = self.last_minutes[sessions]
last_mins_less_one = last_mins - self.ONE_MIN
trading_mins = []
break_mins = []
for session, mins_ in zip(
sessions,
zip(first_mins, first_mins_plus_one, last_mins, last_mins_less_one),
):
trading_mins.append((mins_, session))
if self.has_a_session_with_break:
last_am_mins = self.last_am_minutes[sessions]
last_am_mins = last_am_mins[last_am_mins.notna()]
first_pm_mins = self.first_pm_minutes[last_am_mins.index]
last_am_mins_less_one = last_am_mins - self.ONE_MIN
last_am_mins_plus_one = last_am_mins + self.ONE_MIN
last_am_mins_plus_two = last_am_mins + self.TWO_MIN
first_pm_mins_plus_one = first_pm_mins + self.ONE_MIN
first_pm_mins_less_one = first_pm_mins - self.ONE_MIN
first_pm_mins_less_two = first_pm_mins - self.TWO_MIN
for session, mins_ in zip(
last_am_mins.index,
zip(
last_am_mins,
last_am_mins_less_one,
first_pm_mins,
first_pm_mins_plus_one,
),
):
trading_mins.append((mins_, session))
for session, mins_ in zip(
last_am_mins.index,
zip(
last_am_mins_plus_one,
last_am_mins_plus_two,
first_pm_mins_less_one,
first_pm_mins_less_two,
),
):
break_mins.append((mins_, session))
return (tuple(trading_mins), tuple(break_mins))
@property
def trading_minutes(self) -> tuple[tuple[tuple[pd.Timestamp], pd.Timestamp]]:
"""Edge trading minutes of `sessions_sample`.
Returns
-------
tuple of tuple[tuple[trading_minutes], session]
tuple[trading_minutes] includes:
first two trading minutes of a session.
last two trading minutes of a session.
If breaks:
last two trading minutes of session's am subsession.
first two trading minutes of session's pm subsession.
session
Session of trading_minutes
"""
return self._evaluate_trading_and_break_minutes()[0]
def trading_minutes_only(self) -> abc.Iterator[pd.Timestamp]:
"""Generator of trading minutes of `self.trading_minutes`."""
for mins, _ in self.trading_minutes:
for minute in mins:
yield minute
@property
def trading_minute(self) -> pd.Timestamp:
"""A single trading minute."""
return self.trading_minutes[0][0][0]
@property
def break_minutes(self) -> tuple[tuple[tuple[pd.Timestamp], pd.Timestamp]]:
"""Sample of break minutes of `sessions_sample`.
Returns
-------
tuple of tuple[tuple[break_minutes], session]
tuple[break_minutes]:
first two minutes of a break.
last two minutes of a break.
session
Session of break_minutes
"""
return self._evaluate_trading_and_break_minutes()[1]
def break_minutes_only(self) -> abc.Iterator[pd.Timestamp]:
"""Generator of break minutes of `self.break_minutes`."""
for mins, _ in self.break_minutes:
for minute in mins:
yield minute
@functools.lru_cache(maxsize=4)
def _non_trading_minutes(
self,
) -> tuple[tuple[tuple[pd.Timestamp], pd.Timestamp, pd.Timestamp]]:
non_trading_mins = []
sessions = self.sessions_sample
sessions = prev_sessions = sessions[sessions.isin(self.sessions_with_gap_after)]
next_sessions = self.sessions[self.sessions.get_indexer(sessions) + 1]
last_mins_plus_one = self.last_minutes[sessions] + self.ONE_MIN
first_mins_less_one = self.first_minutes[next_sessions] - self.ONE_MIN
for prev_session, next_session, mins_ in zip(
prev_sessions, next_sessions, zip(last_mins_plus_one, first_mins_less_one)
):
non_trading_mins.append((mins_, prev_session, next_session))
return tuple(non_trading_mins)
@property
def non_trading_minutes(
self,
) -> tuple[tuple[tuple[pd.Timestamp], pd.Timestamp, pd.Timestamp]]:
"""non_trading_minutes that edge `sessions_sample`.
NB. Does not include break minutes.
Returns
-------
tuple of tuple[tuple[non-trading minute], previous session, next session]
tuple[non-trading minute]
Two non-trading minutes.
[0] first non-trading minute to follow a session.
[1] last non-trading minute prior to the next session.
previous session
                Session that precedes non-trading minutes.
next session
Session that follows non-trading minutes.
See Also
--------
break_minutes
"""
return self._non_trading_minutes()
def non_trading_minutes_only(self) -> abc.Iterator[pd.Timestamp]:
"""Generator of non-trading minutes of `self.non_trading_minutes`."""
for mins, _, _ in self.non_trading_minutes:
for minute in mins:
yield minute
# --- method-specific inputs/outputs ---
def prev_next_open_close_minutes(
self,
) -> abc.Iterator[
tuple[
pd.Timestamp,
tuple[
pd.Timestamp | None,
pd.Timestamp | None,
pd.Timestamp | None,
pd.Timestamp | None,
],
]
]:
"""Generator of test parameters for prev/next_open/close methods.
Inputs include following minutes of each session:
open
one minute prior to open (not included for first session)
one minute after open
close
one minute before close
one minute after close (not included for last session)
NB Assumed that minutes prior to first open and after last close
will be handled via parse_timestamp.
Yields
------
2-tuple:
            [0] Input a minute as pd.Timestamp
[1] 4 tuple of expected output of corresponding method:
[0] previous_open as pd.Timestamp | None
[1] previous_close as pd.Timestamp | None
[2] next_open as pd.Timestamp | None
[3] next_close as pd.Timestamp | None
NB None indicates that corresponding method is expected to
raise a ValueError for this input.
"""
close_is_next_open_bv = self.closes == self.opens.shift(-1)
open_was_prev_close_bv = self.opens == self.closes.shift(+1)
close_is_next_open = close_is_next_open_bv[0]
# minutes for session 0
minute = self.opens[0]
yield (minute, (None, None, self.opens[1], self.closes[0]))
minute = minute + self.ONE_MIN
yield (minute, (self.opens[0], None, self.opens[1], self.closes[0]))
minute = self.closes[0]
next_open = self.opens[2] if close_is_next_open else self.opens[1]
yield (minute, (self.opens[0], None, next_open, self.closes[1]))
minute += self.ONE_MIN
prev_open = self.opens[1] if close_is_next_open else self.opens[0]
yield (minute, (prev_open, self.closes[0], next_open, self.closes[1]))
minute = self.closes[0] - self.ONE_MIN
yield (minute, (self.opens[0], None, self.opens[1], self.closes[0]))
# minutes for sessions over [1:-1] except for -1 close and 'close + one_min'
opens = self.opens[1:-1]
closes = self.closes[1:-1]
prev_opens = self.opens[:-2]
prev_closes = self.closes[:-2]
next_opens = self.opens[2:]
next_closes = self.closes[2:]
opens_after_next = self.opens[3:]
# add dummy row to equal lengths (won't be used)
_ = pd.Series(pd.Timestamp("2200-01-01", tz="UTC"))
opens_after_next = opens_after_next.append(_)
stop = closes[-1]
for (
open_,
close,
prev_open,
prev_close,
next_open,
next_close,
open_after_next,
close_is_next_open,
open_was_prev_close,
) in zip(
opens,
closes,
prev_opens,
prev_closes,
next_opens,
next_closes,
opens_after_next,
close_is_next_open_bv[1:-2],
open_was_prev_close_bv[1:-2],
):
if not open_was_prev_close:
# only include open minutes if not otherwise duplicating
# evaluations already made for prior close.
yield (open_, (prev_open, prev_close, next_open, close))
yield (open_ - self.ONE_MIN, (prev_open, prev_close, open_, close))
yield (open_ + self.ONE_MIN, (open_, prev_close, next_open, close))
yield (close - self.ONE_MIN, (open_, prev_close, next_open, close))
if close != stop:
next_open_ = open_after_next if close_is_next_open else next_open
yield (close, (open_, prev_close, next_open_, next_close))
open_ = next_open if close_is_next_open else open_
yield (close + self.ONE_MIN, (open_, close, next_open_, next_close))
# close and 'close + one_min' for session -2
minute = self.closes[-2]
next_open = None if close_is_next_open_bv[-2] else self.opens[-1]
yield (minute, (self.opens[-2], self.closes[-3], next_open, self.closes[-1]))
minute += self.ONE_MIN
prev_open = self.opens[-1] if close_is_next_open_bv[-2] else self.opens[-2]
yield (minute, (prev_open, self.closes[-2], next_open, self.closes[-1]))
# minutes for session -1
if not open_was_prev_close_bv[-1]:
open_ = self.opens[-1]
prev_open = self.opens[-2]
prev_close = self.closes[-2]
next_open = None
close = self.closes[-1]
yield (open_, (prev_open, prev_close, next_open, close))
yield (open_ - self.ONE_MIN, (prev_open, prev_close, open_, close))
yield (open_ + self.ONE_MIN, (open_, prev_close, next_open, close))
minute = self.closes[-1]
next_open = self.opens[2] if close_is_next_open_bv[-1] else self.opens[1]
yield (minute, (self.opens[-1], self.closes[-2], None, None))
minute -= self.ONE_MIN
yield (minute, (self.opens[-1], self.closes[-2], None, self.closes[-1]))
# dunder
def __repr__(self) -> str:
return f"<Answers: calendar {self.name}, side {self.side}>"
def no_parsing(f: typing.Callable):
"""Wrap a method under test so that it skips input parsing."""
return lambda *args, **kwargs: f(*args, _parse=False, **kwargs)
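# Illustrative use only (names below are hypothetical): wrapping a calendar
# method lets a test pass already-parsed Timestamps straight through, e.g.
#     session_open = no_parsing(calendar.session_open)
#     session_open(session)  # forwards as calendar.session_open(session, _parse=False)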
class ExchangeCalendarTestBaseNew:
"""Test base for an ExchangeCalendar.
Notes
-----
=== Fixtures ===
    In accordance with the pytest framework, whilst methods are required to
    have `self` as their first argument, no method should use `self`.
    All required inputs should come by way of fixtures received by the
    test method's arguments.
Methods that are directly or indirectly dependent on the evaluation of
trading minutes should be tested against the parameterized
`all_calendars_with_answers` fixture. This fixture will execute the
test against multiple calendar instances, one for each viable `side`.
The following methods directly evaluate trading minutes:
all_minutes
_last_minute_nanos()
_last_am_minute_nanos()
_first_minute_nanos()
_first_pm_minute_nanos()
    NB this list does not include methods that indirectly evaluate trading
    minutes by way of calling (directly or indirectly) one of the above methods.
Methods that are not dependent on the evaluation of trading minutes
    should be tested against only the `default_calendar_with_answers`
or `default_calendar` fixture.
Calendar instances provided by fixtures should be used exclusively to
call the method being tested. NO TEST INPUT OR EXPECTED OUTPUT SHOULD
BE EVALUATED BY WAY OF CALLING A CALENDAR METHOD. Rather, test
inputs and expected output should be taken directly, or evaluated from,
properties/methods of the corresponding Answers fixture.
Subclasses are required to override a limited number of fixtures and
may be required to override others. Refer to the block comments.
"""
# subclass must override the following fixtures
@pytest.fixture(scope="class")
def calendar_cls(self) -> abc.Iterator[typing.Type[ExchangeCalendar]]:
"""ExchangeCalendar class to be tested.
Examples:
XNYSExchangeCalendar
AlwaysOpenCalendar
"""
raise NotImplementedError("fixture must be implemented on subclass")
@pytest.fixture
def max_session_hours(self) -> abc.Iterator[int | float]:
"""Largest number of hours that can comprise a single session.
Examples:
8
6.5
"""
raise NotImplementedError("fixture must be implemented on subclass")
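    # A concrete subclass would typically supply the two fixtures above along
    # these lines (sketch only; the calendar class and hours are illustrative):
    #
    #     class TestXNYSCalendar(ExchangeCalendarTestBaseNew):
    #         @pytest.fixture(scope="class")
    #         def calendar_cls(self):
    #             yield XNYSExchangeCalendar
    #
    #         @pytest.fixture
    #         def max_session_hours(self):
    #             yield 6.5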
# if subclass has a 24h session then subclass must override this fixture.
# Define on subclass as is here with only difference being passing
# ["left", "right"] to decorator's 'params' arg (24h calendars cannot
# have a side defined as 'both' or 'neither'.).
@pytest.fixture(scope="class", params=["both", "left", "right", "neither"])
def all_calendars_with_answers(
self, request, calendars, answers
) -> abc.Iterator[tuple[ExchangeCalendar, Answers]]:
"""Parameterized calendars and answers for each side."""
yield (calendars[request.param], answers[request.param])
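    # A 24h-calendar subclass would redefine this fixture exactly as above but
    # with params=["left", "right"], i.e. (sketch of the override):
    #
    #     @pytest.fixture(scope="class", params=["left", "right"])
    #     def all_calendars_with_answers(self, request, calendars, answers):
    #         yield (calendars[request.param], answers[request.param])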
# subclass should override the following fixtures in the event that the
# default defined here does not apply.
@pytest.fixture
def start_bound(self) -> abc.Iterator[pd.Timestamp | None]:
"""Earliest date for which calendar can be instantiated, or None if
there is no start bound."""
yield None
@pytest.fixture
def end_bound(self) -> abc.Iterator[pd.Timestamp | None]:
"""Latest date for which calendar can be instantiated, or None if
there is no end bound."""
yield None
# Subclass can optionally override the following fixtures. By overriding
# a fixture the associated test will be executed with input as yielded
    # by the fixture. Where fixtures are not overridden the associated tests
# will be skipped.
@pytest.fixture
def regular_holidays_sample(self) -> abc.Iterator[list[str]]:
"""Sample of known regular calendar holidays. Empty list if no holidays.
`test_regular_holidays_sample` will check that each date does not
represent a calendar session.
Example return:
["2020-12-25", "2021-01-01", ...]
"""
yield []
@pytest.fixture
def adhoc_holidays_sample(self) -> abc.Iterator[list[str]]:
"""Sample of adhoc calendar holidays. Empty list if no adhoc holidays.
`test_adhoc_holidays_sample` will check that each date does not
represent a calendar session.
Example return:
["2015-04-17", "2021-09-12", ...]
"""
yield []
@pytest.fixture
def non_holidays_sample(self) -> abc.Iterator[list[str]]:
"""Sample of known dates that are not holidays.
`test_non_holidays_sample` will check that each date represents a
calendar session.
        Subclass should use this fixture if it wishes to test edge cases, for
        example where a session is an exception to a rule, or where a session
        precedes/follows a holiday that is an exception to a rule.
Example return:
["2019-12-27", "2020-01-02", ...]
"""
yield []
@pytest.fixture
def late_opens_sample(self) -> abc.Iterator[list[str]]:
"""Sample of known calendar sessions with late opens.
`test_late_opens_sample` will check that each date represents a
session with a late open.
Example returns:
["2022-01-03", "2022-04-22", ...]
"""
yield []
@pytest.fixture
def early_closes_sample(self) -> abc.Iterator[list[str]]:
"""Sample of known calendar sessions with early closes.
`test_early_closes_sample` will check that each date represents a
session with an early close.
Example returns:
["2019-12-24", "2019-12-31", ...]
"""
yield []
@pytest.fixture
def early_closes_sample_time(self) -> abc.Iterator[pd.Timedelta | None]:
"""Local close time of sessions of `early_closes_sample` fixture.
`test_early_closes_sample_time` will check all sessions of
`early_closes_sample` have this close time.
Only override fixture if:
            - `early_closes_sample` is overridden by subclass
- ALL sessions of `early_closes_sample` have the same local
close time (if sessions of `early_closes_sample` have
different local close times then the subclass should
instead check close times with a test defined on the
subclass).
Example returns:
pd.Timedelta(14, "H") # 14:00 local time
pd.Timedelta(hours=13, minutes=15) # 13:15 local time
"""
yield None
@pytest.fixture
def non_early_closes_sample(self) -> abc.Iterator[list[str]]:
"""Sample of known calendar sessions with normal close times.
`test_non_early_closes_sample` will check each date does not
represent a calendar session with an early close.
Subclass should use this fixture to test edge cases, for example
where an otherwise early close is an exception to a rule.
Example return:
["2022-12-23", "2022-12-30]
"""
yield []
@pytest.fixture
def non_early_closes_sample_time(self) -> abc.Iterator[pd.Timedelta | None]:
"""Local close time of sessions of `non_early_closes_sample` fixture.
`test_non_early_closes_sample_time` will check all sessions of
`non_early_closes_sample` have this close time.
Only override fixture if:
            - `non_early_closes_sample` is overridden by subclass.
- ALL sessions of `non_early_closes_sample` have the same local
close time (if sessions of `non_early_closes_sample` have
different local close times then the subclass should
instead check close times with a test defined on the
subclass).
Example returns:
pd.Timedelta(17, "H") # 17:00 local time
pd.Timedelta(hours=16, minutes=30) # 16:30 local time
"""
yield None
    # --- NO FIXTURE BELOW THIS LINE SHOULD BE OVERRIDDEN ON A SUBCLASS ---
def test_testbase_integrity(self):
"""Ensure integrity of TestBase.
        Raises error if a reserved fixture is overridden by the subclass.
"""
cls = self.__class__
for fixture in [
"test_testbase_integrity",
"name",
"has_24h_session",
"default_side",
"sides",
"answers",
"default_answers",
"calendars",
"default_calendar",
"calendars_with_answers",
"default_calendar_with_answers",
"one_minute",
"today",
"all_directions",
"valid_overrides",
"non_valid_overrides",
"daylight_savings_dates",
"late_opens",
"early_closes",
]:
if getattr(cls, fixture) != getattr(ExchangeCalendarTestBaseNew, fixture):
                raise RuntimeError(f"fixture '{fixture}' should not be overridden!")
# Base class fixtures
@pytest.fixture(scope="class")
def name(self, calendar_cls) -> abc.Iterator[str]:
"""Calendar name."""
yield calendar_cls.name
@pytest.fixture(scope="class")
def has_24h_session(self, name) -> abc.Iterator[bool]:
df = get_csv(name)
yield (df.market_close == df.market_open.shift(-1)).any()
@pytest.fixture(scope="class")
def default_side(self, has_24h_session) -> abc.Iterator[str]:
"""Default calendar side."""
if has_24h_session:
yield "left"
else:
yield "both"
@pytest.fixture(scope="class")
def sides(self, has_24h_session) -> abc.Iterator[list[str]]:
"""All valid sides options for calendar."""
if has_24h_session:
yield ["left", "right"]
else:
yield ["both", "left", "right", "neither"]
# Calendars and answers
@pytest.fixture(scope="class")
    def answers(self, name, sides) -> abc.Iterator[dict[str, Answers]]:
        """Dict of answers, key as side, value as corresponding answers."""
yield {side: Answers(name, side) for side in sides}
@pytest.fixture(scope="class")
def default_answers(self, answers, default_side) -> abc.Iterator[Answers]:
yield answers[default_side]
@pytest.fixture(scope="class")
def calendars(
self, calendar_cls, default_answers, sides
    ) -> abc.Iterator[dict[str, ExchangeCalendar]]:
        """Dict of calendars, key as side, value as corresponding calendar."""
start = default_answers.first_session
end = default_answers.last_session
yield {side: calendar_cls(start, end, side) for side in sides}
@pytest.fixture(scope="class")
def default_calendar(
self, calendars, default_side
) -> abc.Iterator[ExchangeCalendar]:
yield calendars[default_side]
@pytest.fixture(scope="class")
def calendars_with_answers(
self, calendars, answers, sides
) -> abc.Iterator[dict[str, tuple[ExchangeCalendar, Answers]]]:
"""Dict of calendars and answers, key as side."""
yield {side: (calendars[side], answers[side]) for side in sides}
@pytest.fixture(scope="class")
def default_calendar_with_answers(
self, calendars_with_answers, default_side
) -> abc.Iterator[tuple[ExchangeCalendar, Answers]]:
yield calendars_with_answers[default_side]
# General use fixtures.
@pytest.fixture(scope="class")
def one_minute(self) -> abc.Iterator[pd.Timedelta]:
yield pd.Timedelta(1, "T")
@pytest.fixture(scope="class")
    def today(self) -> abc.Iterator[pd.Timestamp]:
yield pd.Timestamp.now(tz="UTC").floor("D")
@pytest.fixture(scope="class", params=["next", "previous", "none"])
def all_directions(self, request) -> abc.Iterator[str]:
"""Parameterised fixture of direction to go if minute is not a trading minute"""
yield request.param
@pytest.fixture(scope="class")
    def valid_overrides(self) -> abc.Iterator[list[str]]:
        """Names of methods that can be overridden by a subclass."""
yield [
"name",
"bound_start",
"bound_end",
"_bound_start_error_msg",
"_bound_end_error_msg",
"default_start",
"default_end",
"tz",
"open_times",
"break_start_times",
"break_end_times",
"close_times",
"weekmask",
"open_offset",
"close_offset",
"regular_holidays",
"adhoc_holidays",
"special_opens",
"special_opens_adhoc",
"special_closes",
"special_closes_adhoc",
"special_weekmasks",
"special_offsets",
"special_offsets_adhoc",
]
@pytest.fixture(scope="class")
    def non_valid_overrides(self, valid_overrides) -> abc.Iterator[list[str]]:
        """Names of methods that cannot be overridden by a subclass."""
yield [
name
for name in dir(ExchangeCalendar)
if name not in valid_overrides
and not name.startswith("__")
and not name == "_abc_impl"
]
@pytest.fixture(scope="class")
def daylight_savings_dates(
self, default_calendar
) -> abc.Iterator[list[pd.Timestamp]]:
"""All dates in a specific year that mark the first day of a new
time regime.
        Yields empty list if timezone's UTC offset does not change.
Notes
-----
        NB Any test that employs this fixture assumes the accuracy of the
default calendar's `tz` property.
"""
cal = default_calendar
year = cal.last_session.year - 1
days = pd.date_range(str(year), str(year + 1), freq="D")
tzinfo = pytz.timezone(cal.tz.zone)
prev_offset = tzinfo.utcoffset(days[0])
dates = []
for day in days[1:]:
try:
offset = tzinfo.utcoffset(day)
except pytz.NonExistentTimeError:
offset = tzinfo.utcoffset(day + pd.Timedelta(1, "H"))
if offset != prev_offset:
dates.append(day)
if len(dates) == 2:
break
prev_offset = offset
yield dates
@pytest.fixture(scope="class")
def late_opens(
self, default_calendar_with_answers
) -> abc.Iterator[pd.DatetimeIndex]:
"""Calendar sessions with a late open.
Late opens evaluated as those that are later than the prevailing
open time as defined by `default_calendar.open_times`.
Notes
-----
        NB Any test that employs this fixture ASSUMES the accuracy of the
following calendar properties:
`open_times`
`tz`
"""
cal, ans = default_calendar_with_answers
d = dict(cal.open_times)
d[pd.Timestamp.min] = d.pop(None)
        s = pd.Series(d)
#Preliminaries
import numpy as np
import numpy
import pandas as pd
import random
import statsmodels.api as sm
import math
from sklearn.utils import resample
from numpy import percentile  # scipy's top-level percentile alias has been removed; numpy's is equivalent
from scipy import stats
from matplotlib import pyplot as plt
import requests
import io
import seaborn as sns
from matplotlib.patches import Rectangle
import time
sns.set()
#-------------------------------------------------------------------------------
# FUNCTIONS
#-------------------------------------------------------------------------------
def title_print(title,sep='-',*args,**kwargs):
print(sep*len(title))
print(title,*args,**kwargs)
print(sep*len(title))
# Create function to calculate the lag selection parameter for the standard HAC Newey-West
# (1994) plug-in procedure
def mLag(no_obs):
'''Calculates the lag selection parameter for the standard HAC Newey-West
(1994) plug-in procedure.
INPUT
-----
no_obs: int
- number of observations in the endogenous variable for a regression
OUTPUT
------
lag_select: int
- max number of lags
'''
return np.floor((4*no_obs/100)**(2/9)).astype(int)
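# Quick illustrative check of the plug-in rule (easy to verify by hand):
# mLag(250) == 1 and mLag(5000) == 3, i.e. the selected lag grows very slowly
# with the sample size.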
# Heteroskedasticity and Autocorrelation Newey-West standard errors
def HAC_BSE(y,x,b,maxLag=mLag):
'''Calculates Heteroskedasticity and Autocorrelation (HAC) Newey-West
standard errors. Default lag procedure is below:
maxLags = np.floor((4*no_obs/100)**(2/9)).astype(int)
If you want a different lag procedure, pass in a new
function under mLag=func, and make sure that the function
only takes 'n_obs', an int for number of observations,
as an input.
INPUT
-----
y: n x 1 ndarray
- dependent variable array
x: n x k ndarray
- independent variables array (include constant in x)
b: k x 1 ndarray
- OLS regression coefficients
OUTPUT
------
hac_bse: k x 1 ndarray
- HAC coefficient standard errors
For more info on HAC Newey-West check out this link:
https://www.stata.com/manuals13/tsnewey.pdf
'''
n,k = x.shape
    m = maxLag(n)  # honour the lag-selection function passed in (defaults to mLag)
r = (y - x.dot(b)).reshape(n,)
XXI = np.linalg.inv(x.T.dot(x))
w = np.diag(r**2)
XWX = x.T.dot(w).dot(x)
for l in range(1,m+1):
w = np.diag(r[l:]*r[:-l])
XWX_l = np.zeros((k,k))
XWX_l += x[:-l,:].T.dot(w).dot(x[l:,:])
XWX_l += x[l:,:].T.dot(w).dot(x[:-l,:])
XWX_l *= (1-l/(m+1))
XWX += XWX_l
XWX *= n/(n-k)
var_B = XXI.dot(XWX).dot(XXI)
return np.sqrt(abs(np.diag(var_B)))
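# For reference, the function above implements the usual sandwich estimator
#     Var(b) = (X'X)^-1 * S * (X'X)^-1, with
#     S = (n/(n-k)) * [ sum_t e_t^2 x_t x_t'
#           + sum_{l=1}^{m} (1 - l/(m+1)) * sum_t e_t e_{t-l} (x_t x_{t-l}' + x_{t-l} x_t') ]
# i.e. Bartlett-weighted autocovariance terms as in Newey-West.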
# Set up regression function with Newey-West Standard Errors (HAC)
def OLS_HAC(Y, X, add_const=True,maxLag=mLag):
'''Runs OLS regression with a the standard HAC Newey-West (1994) plug-in
procedure.
INPUT
-----
y: ndarray, (n,)
- dependent variable in regression
x: ndarray, (no_obs,k)
- k regressors (including constant)
add_const: bool, Default = True
- If True, this function adds constant to regressors. If False,
this function doesn't add the constant.
maxLag: func, Default = lambda x: numpy.floor((4*x/100)**(2/9)).astype(int)
- Lag selection function for HAC-NW SE's
    NOTE: rows where y is NaN are dropped before fitting; x must not contain NaN values.
OUTPUT: (beta,hac_bse)
------
beta: ndarray, (k,1)
- OLS coefficients
hac_bse: ndarray, (k,1)
- HAC-NW standard errors
'''
# drop missing values
exist = ~np.isnan(Y)
y,x = Y[exist],X[exist,:]
# add constant if necessary
if add_const:
x = sm.add_constant(x)
# Get Results
beta = np.linalg.inv(x.T.dot(x)).dot(x.T.dot(y)) # OLS coefficients
hac_bse = HAC_BSE(y=y,x=x,b=beta,maxLag=maxLag) # HAC standard errors
t_stats = beta/hac_bse
return beta, hac_bse#, t_stats
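# Minimal usage sketch on synthetic data (all names and values below are
# illustrative only; they are not taken from any dataset used in this script):
def _demo_ols_hac(seed=0, n=200):
    """Fit OLS with HAC-NW standard errors on simulated data."""
    rng = np.random.RandomState(seed)
    x_demo = rng.normal(size=(n, 2))
    y_demo = 0.5 + x_demo.dot(np.array([1.0, -2.0])) + rng.normal(size=n)
    beta, hac_bse = OLS_HAC(y_demo, x_demo)  # constant added internally
    return beta, hac_bse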
def ecdf(sample):
sample = np.atleast_1d(sample)
quantiles, counts = np.unique(sample, return_counts=True)
cumprob = np.cumsum(counts).astype(np.double) / sample.size
return quantiles, cumprob
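# Example: ecdf(np.array([3, 1, 2, 2])) -> (array([1, 2, 3]), array([0.25, 0.75, 1.0])),
# i.e. the sorted unique values and their cumulative probabilities.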
#-------------------------------------------------------------------------------
# Useful Classes
#-------------------------------------------------------------------------------
class AlphaEvaluator:
'''
A class used to evaluate the alpha of funds. Calculates alpha of funds
from provided datasets, runs simulations of fund returns, and compares
actual observations to simulated fund returns using a KAPM model.
'''
# initialize object
def __init__(self,fund_data=None,factor_data=None,
parse_dates=[['Dates'],['Dates']],fund_names=None,factor_names=None):
'''AlphaEvaluator is a class used to evaluate the alpha of funds.
Calculates alpha of funds from provided datasets, runs simulations of
fund returns, and compares actual observations to simulated fund returns
using a KAPM model.
INPUT
-----
fund_data, factor_data: None, str, np.array, pd.DataFrame
- must be same type. If None, load data later with fit() method or
load_data() method.
fund_names, factor_names: list, iterable
- contains names of funds and factors as strings
parse_dates: [fund_data.date,factor_data.date]
- colname as string for each dataset that corresponds to datetime
'''
self._is_fit = False
self._has_sim = False
self._has_percentiles = None
self.parse_dates = parse_dates
self.maxLag = mLag
if (fund_data is None) and (factor_data is None):
self.X_raw = None
self.Y_raw = None
else:
self.load_data(fund_data=fund_data,factor_data=factor_data,
fund_names=fund_names,factor_names=factor_names,
parse_dates=parse_dates)
# load dataframe into object
def load_data(self,fund_data,factor_data,fund_names=None,factor_names=None,
parse_dates=[['Dates'],['Dates']]):
'''Function for loading observed fund and factor data into the AlphaEvaluator
INPUT
-----
fund_data: str, numpy.array, or pandas.DataFrame
- str is a path to a csv file
factor_data: str, numpy.array, or pandas.DataFrame
- str is a path to a csv file
fund_names, factor_names: None or list
- only needed if np.array data is passed in
One of the factor names must be 'RF'
'''
# check for updates to parameters
self.parse_dates = parse_dates
self._is_fit = False
self._has_sim = False
self._has_percentiles = None
# check for data
if (fund_data is None) or (factor_data is None):
raise ValueError("Funds data AND factor data must be submitted!")
elif type(fund_data) is str:
self.Y_raw = pd.read_csv(fund_data,parse_dates=parse_dates[0])
self.X_raw = pd.read_csv(factor_data,parse_dates=parse_dates[1])
elif type(fund_data) is pd.DataFrame:
if fund_data.shape[0] != factor_data.shape[0]:
raise ValueError("Both datasets should have same number of observations")
self.Y_raw = fund_data
self.X_raw = factor_data
elif type(fund_data) is type(np.array([])):
if (fund_names is None) or (factor_names is None):
raise ValueError("Must input fund names and factor names")
elif fund_data.shape[0] != factor_data.shape[0]:
raise ValueError("Both datasets should have same number of observations")
else:
self.Y_raw = pd.DataFrame(data=fund_data,columns=fund_names)
                self.X_raw = pd.DataFrame(data=factor_data,columns=factor_names)
import pytest
import numpy as np
import pandas as pd
from pandas import Categorical, Series, CategoricalIndex
from pandas.core.dtypes.concat import union_categoricals
from pandas.util import testing as tm
class TestUnionCategoricals(object):
def test_union_categorical(self):
# GH 13361
data = [
(list('abc'), list('abd'), list('abcabd')),
([0, 1, 2], [2, 3, 4], [0, 1, 2, 2, 3, 4]),
([0, 1.2, 2], [2, 3.4, 4], [0, 1.2, 2, 2, 3.4, 4]),
(['b', 'b', np.nan, 'a'], ['a', np.nan, 'c'],
['b', 'b', np.nan, 'a', 'a', np.nan, 'c']),
(pd.date_range('2014-01-01', '2014-01-05'),
pd.date_range('2014-01-06', '2014-01-07'),
pd.date_range('2014-01-01', '2014-01-07')),
(pd.date_range('2014-01-01', '2014-01-05', tz='US/Central'),
pd.date_range('2014-01-06', '2014-01-07', tz='US/Central'),
pd.date_range('2014-01-01', '2014-01-07', tz='US/Central')),
(pd.period_range('2014-01-01', '2014-01-05'),
pd.period_range('2014-01-06', '2014-01-07'),
pd.period_range('2014-01-01', '2014-01-07')),
]
for a, b, combined in data:
for box in [Categorical, CategoricalIndex, Series]:
result = union_categoricals([box(Categorical(a)),
box(Categorical(b))])
expected = Categorical(combined)
tm.assert_categorical_equal(result, expected,
check_category_order=True)
# new categories ordered by appearance
s = Categorical(['x', 'y', 'z'])
s2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([s, s2])
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['x', 'y', 'z', 'a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
s = Categorical([0, 1.2, 2], ordered=True)
s2 = Categorical([0, 1.2, 2], ordered=True)
result = union_categoricals([s, s2])
expected = Categorical([0, 1.2, 2, 0, 1.2, 2], ordered=True)
tm.assert_categorical_equal(result, expected)
# must exactly match types
s = Categorical([0, 1.2, 2])
s2 = Categorical([2, 3, 4])
msg = 'dtype of categories must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([s, s2])
msg = 'No Categoricals to union'
with tm.assert_raises_regex(ValueError, msg):
union_categoricals([])
def test_union_categoricals_nan(self):
# GH 13759
res = union_categoricals([pd.Categorical([1, 2, np.nan]),
pd.Categorical([3, 2, np.nan])])
exp = Categorical([1, 2, np.nan, 3, 2, np.nan])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical(['A', 'B']),
pd.Categorical(['B', 'B', np.nan])])
exp = Categorical(['A', 'B', 'B', 'B', np.nan])
tm.assert_categorical_equal(res, exp)
val1 = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-03-01'),
pd.NaT]
val2 = [pd.NaT, pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-02-01')]
res = union_categoricals([pd.Categorical(val1), pd.Categorical(val2)])
exp = Categorical(val1 + val2,
categories=[pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-03-01'),
pd.Timestamp('2011-02-01')])
tm.assert_categorical_equal(res, exp)
# all NaN
res = union_categoricals([pd.Categorical([np.nan, np.nan]),
pd.Categorical(['X'])])
exp = Categorical([np.nan, np.nan, 'X'])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical([np.nan, np.nan]),
pd.Categorical([np.nan, np.nan])])
exp = Categorical([np.nan, np.nan, np.nan, np.nan])
tm.assert_categorical_equal(res, exp)
def test_union_categoricals_empty(self):
# GH 13759
res = union_categoricals([pd.Categorical([]),
pd.Categorical([])])
exp = Categorical([])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical([]),
pd.Categorical([1.0])])
exp = Categorical([1.0])
tm.assert_categorical_equal(res, exp)
# to make dtype equal
nanc = pd.Categorical(np.array([np.nan], dtype=np.float64))
res = union_categoricals([nanc,
pd.Categorical([])])
tm.assert_categorical_equal(res, nanc)
def test_union_categorical_same_category(self):
# check fastpath
c1 = Categorical([1, 2, 3, 4], categories=[1, 2, 3, 4])
c2 = Categorical([3, 2, 1, np.nan], categories=[1, 2, 3, 4])
res = union_categoricals([c1, c2])
exp = Categorical([1, 2, 3, 4, 3, 2, 1, np.nan],
categories=[1, 2, 3, 4])
tm.assert_categorical_equal(res, exp)
c1 = Categorical(['z', 'z', 'z'], categories=['x', 'y', 'z'])
c2 = Categorical(['x', 'x', 'x'], categories=['x', 'y', 'z'])
res = union_categoricals([c1, c2])
exp = Categorical(['z', 'z', 'z', 'x', 'x', 'x'],
categories=['x', 'y', 'z'])
tm.assert_categorical_equal(res, exp)
def test_union_categoricals_ordered(self):
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], ordered=False)
msg = 'Categorical.ordered must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2])
res = union_categoricals([c1, c1])
exp = Categorical([1, 2, 3, 1, 2, 3], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3, np.nan], ordered=True)
c2 = Categorical([3, 2], categories=[1, 2, 3], ordered=True)
res = union_categoricals([c1, c2])
exp = Categorical([1, 2, 3, np.nan, 3, 2], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], categories=[3, 2, 1], ordered=True)
msg = "to union ordered Categoricals, all categories must be the same"
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2])
def test_union_categoricals_ignore_order(self):
# GH 15219
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], ordered=False)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
tm.assert_categorical_equal(res, exp)
msg = 'Categorical.ordered must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2], ignore_order=False)
res = union_categoricals([c1, c1], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([c1, c1], ignore_order=False)
exp = Categorical([1, 2, 3, 1, 2, 3],
categories=[1, 2, 3], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3, np.nan], ordered=True)
c2 = Categorical([3, 2], categories=[1, 2, 3], ordered=True)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, np.nan, 3, 2])
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], categories=[3, 2, 1], ordered=True)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([c2, c1], ignore_order=True,
sort_categories=True)
exp = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([4, 5, 6], ordered=True)
result = union_categoricals([c1, c2], ignore_order=True)
expected = Categorical([1, 2, 3, 4, 5, 6])
tm.assert_categorical_equal(result, expected)
msg = "to union ordered Categoricals, all categories must be the same"
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2], ignore_order=False)
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2])
def test_union_categoricals_sort(self):
# GH 13846
c1 = Categorical(['x', 'y', 'z'])
c2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['a', 'b', 'c', 'x', 'y', 'z'])
tm.assert_categorical_equal(result, expected)
# fastpath
c1 = Categorical(['a', 'b'], categories=['b', 'a', 'c'])
c2 = Categorical(['b', 'c'], categories=['b', 'a', 'c'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['a', 'b'], categories=['c', 'a', 'b'])
c2 = Categorical(['b', 'c'], categories=['c', 'a', 'b'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
# fastpath - skip resort
c1 = Categorical(['a', 'b'], categories=['a', 'b', 'c'])
c2 = Categorical(['b', 'c'], categories=['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['x', np.nan])
c2 = Categorical([np.nan, 'b'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['x', np.nan, np.nan, 'b'],
categories=['b', 'x'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical([np.nan])
c2 = Categorical([np.nan])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical([np.nan, np.nan], categories=[])
tm.assert_categorical_equal(result, expected)
c1 = Categorical([])
c2 = Categorical([])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical([])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['b', 'a'], categories=['b', 'a', 'c'], ordered=True)
c2 = Categorical(['a', 'c'], categories=['b', 'a', 'c'], ordered=True)
with pytest.raises(TypeError):
union_categoricals([c1, c2], sort_categories=True)
def test_union_categoricals_sort_false(self):
# GH 13846
c1 = Categorical(['x', 'y', 'z'])
c2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['x', 'y', 'z', 'a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
# fastpath
c1 = Categorical(['a', 'b'], categories=['b', 'a', 'c'])
c2 = Categorical(['b', 'c'], categories=['b', 'a', 'c'])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['b', 'a', 'c'])
tm.assert_categorical_equal(result, expected)
# fastpath - skip resort
c1 = Categorical(['a', 'b'], categories=['a', 'b', 'c'])
c2 = Categorical(['b', 'c'], categories=['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['x', np.nan])
c2 = Categorical([np.nan, 'b'])
        result = union_categoricals([c1, c2], sort_categories=False)
import os
import re
import numpy as np
import pandas as pd
from pyd3d.utils import formatSci
from pyd3d.mdf import read
from IPython.display import Markdown as md
# from https://github.com/Carlisle345748/Delft3D-Toolbox/blob/master/delft3d/TimeSeriesFile.py
class TimeSeries(object):
"""Read, modify and export Delft3D time series."""
def __init__(self, time_series: list):
self.time_series = None
self.header = None
self.load_header(time_series)
self.load_time_series(time_series)
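    # Hedged usage sketch (the file name is illustrative): the class is built
    # from the raw lines of a Delft3D time-series file, e.g.
    #     with open('example.bct') as fh:
    #         ts = TimeSeries(fh.readlines())
    #     ts.time_series.head()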
def load_header(self, time_series: list):
"""Read and interpret the header of a time series."""
header_dict = {}
parameter = {}
records_in_table = None
header_re = re.compile(r"^([^-][\w-]+)\s+('?[\w\d (),./:-]+'?)")
unit_re = re.compile(r"([\s]+unit '\[[\w/]+\]')")
for line in time_series:
matches = header_re.search(line) # search for header
if matches:
if matches[1] == 'parameter':
# parameters have the same header name. So store all parameters
# in one dict
unit_match = unit_re.search(line) # search for unit
key_name = matches[2].strip('\'') # reformat unit
key_name = key_name.strip(' ')
parameter[key_name] = Parameter(matches[2], unit_match[1])
elif matches[1] == 'records-in-table':
# records-in-table should be the last header. Store it hera and
# then put it at the end of headers by the end.
records_in_table = Parameter(matches[2])
else:
# regular header
header_dict[matches[1]] = Parameter(matches[2])
else: # end of the header
header_dict['parameter'] = parameter
header_dict['records-in-table'] = records_in_table
break
self.header = header_dict
def load_time_series(self, time_series: list):
"""Read and interpret time series"""
is_header = True # whether the pointer at the header
reference_time = pd.to_datetime(self.header['reference-time'].value)
# read the time series data
time, relative_time, parm1, parm2 = [], [], [], []
for line in time_series:
if not is_header:
# prepossess
data = [float(i) for i in line.split()]
time.append(reference_time + pd.to_timedelta(data[0], unit="minutes"))
# store the data
relative_time.append(data[0])
parm1.append(data[1])
parm2.append(data[2])
if 'records-in-table' in line:
is_header = False
else:
# converts lists to DataFrame
colname = list(self.header['parameter'].keys())
time_series = pd.DataFrame(
{colname[0]: relative_time, colname[1]: parm1, colname[2]: parm2}, index=time)
self.time_series = time_series
def set_header(self, data: dict, unit=False) -> None:
"""Set new content of header. Called by TimeSeriesFile.set_header()"""
header = self.header.copy()
for key, new_parm in data.items():
if key != 'parameter':
# regular header
header[key].value = str(new_parm)
else:
# parameter
for key_, new_parm_ in new_parm.items():
if unit:
header[key][key_].unit = str(new_parm_)
else:
header[key][key_].value = str(new_parm_)
self.header = header
def set_time_series(self, reference_time: str,
data1: pd.core.frame.Series,
data2: pd.core.frame.Series):
"""
Replace the old time series with the new one. Called by TimeSeriesFile.set_time_series()
"""
time_series = pd.concat([data1, data2], axis=1)
# calculate the absolute time and relative time
reference_time = pd.to_datetime(reference_time)
relative_time = time_series.index - reference_time
        relative_time = [time.total_seconds() / 60 for time in relative_time]  # unit: minutes
relative_time = pd.Series(relative_time, index=time_series.index, name='time')
# combine time absolute time, relative time and data
        time_series = pd.concat([relative_time, time_series], axis=1)
# Copyright 2016 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas
import numpy.testing
from pyfora.algorithms.logistic.TrustRegionConjugateGradientSolver \
import TrustRegionConjugateGradientSolver
class TrustRegionTests(object):
def trustRegionData(self):
X = pandas.DataFrame({'A': [-1,0,1], 'B': [0,1,1]})
y = pandas.Series([1,-1,-1])
return X, y
def solve(self, X, y, classZeroLabel, C=1):
def f(X, y, C):
return TrustRegionConjugateGradientSolver(
X, y, classZeroLabel, C).solve()
return self.evaluateWithExecutor(f, X, y, C)
def test_trust_region_1(self):
X, y = self.trustRegionData()
res = self.solve(X, y, 1)
# results checked against liblinear, scikit themselves.
scikit_weights = [-0.59106183, -0.59106183]
numpy.testing.assert_allclose(res.weights, scikit_weights,
atol=1e-4, rtol=1e-4)
self.assertEqual(res.iterations, 2)
def test_trust_region_2(self):
X, y = self.trustRegionData()
C = 1.0 / len(y)
res = self.solve(X, y, 1, C)
numpy.testing.assert_allclose(
res.weights,
[-0.26760031, -0.26760031]
)
self.assertEqual(res.iterations, 2)
def test_trust_region_3(self):
# corresponds to the fora test: logisticregressionTests.basic_2
X = pandas.DataFrame(
[[-0.25091976, 0.90142861],
[ 0.46398788, 0.19731697],
[-0.68796272, -0.68801096],
[-0.88383278, 0.73235229],
[ 0.20223002, 0.41614516]]
)
y = pandas.Series([1,-1,-1,-1,1])
C = 1.0 / len(X) / 0.01
res = self.solve(X, y, 1, C)
numpy.testing.assert_allclose(
res.weights,
[1.55340616, 1.28486523]
)
def test_trust_region_4(self):
# corresponds to the fora test: logisticregressionTests.basic_3
X = pandas.DataFrame(
{'A': [-0.25091976, 0.46398788, -0.68796272],
'B': [0.90142861, 0.19731697, -0.68801096]})
y = pandas.Series([1, -1, -1])
C = 1.0 / len(X) / 0.01
res = self.solve(X, y, 1, C)
numpy.testing.assert_allclose(
res.weights,
[-1.78096818, 3.42088899]
)
def test_trust_region_5(self):
# corresponds to the fora test: logisticregressionTests.basic_4
X = pandas.DataFrame(
[[-0.25091976, 0.90142861],
[ 0.46398788, 0.19731697],
[-0.68796272, -0.68801096],
[-0.88383278, 0.73235229],
[ 0.20223002, 0.41614516],
[-0.95883101, 0.9398197 ],
[ 0.66488528, -0.57532178],
[-0.63635007, -0.63319098],
[-0.39151551, 0.04951286],
[-0.13610996, -0.41754172]]
)
        y = pandas.Series([1, -1, -1, 1, 1, 1, -1, 1, 1, -1])
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.fftpack import rfft, irfft, rfftfreq
# Fast Fourier Transform (FFT)
def fast_fourier_transform(data_measured, data_desired, n, t):
# Define function on which the FFT shall be executed
dm_pos = data_measured["pos"]
dm_vel = data_measured["vel"]
dm_tau = data_measured["torque"]
dml = [dm_pos, dm_vel, dm_tau] # dml is of type list and will later be reconverted to type dataframe
dd_pos = data_desired["pos"]
dd_vel = data_desired["vel"]
dd_tau = data_desired["torque"]
ddl = [dd_pos, dd_vel, dd_tau] # ddl is of type list and will later be reconverted to type dataframe
# Compute the FFT
dm_hat = np.fft.fft(dml, n)
# Compute the power spectrum density for pos, vel, tau
psd_measured = dm_hat * np.conj(dm_hat) / n # power spectrum density
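    # NB dm_hat * conj(dm_hat) is real-valued in theory but keeps a complex
    # dtype here; np.abs(dm_hat) ** 2 / n gives the same values as plain floats.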
# Use the PSD to filter out noise
bounds = [30, 100, 300] # frequencies with lower PSD than bounds get cut off
indices = np.empty_like(psd_measured)
for i in range(len(indices)):
indices[i] = psd_measured[i] > bounds[i] # find all freqs with large power
psd_filtered = psd_measured * indices # zero out all others
dm_hat = indices * dm_hat # zero out small Fourier coeffs. in Y
f = np.fft.ifft(dm_hat) # inverse FFT for filtered time signal
# Convert lists back to dataframes
dm_t = pd.DataFrame(dml) # convert lists dml, ddl and f back to type dataframe
    dd_t = pd.DataFrame(ddl)
'''
Urban-PLUMBER processing code
Associated with the manuscript: Harmonized, gap-filled dataset from 20 urban flux tower sites
Copyright (c) 2021 <NAME>
Licensed under the Apache License, Version 2.0 (the "License").
You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0
'''
__title__ = "site-specific processing wrapper"
__version__ = "2021-09-20"
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__description__ = 'Wrapper for processing individual sites. Includes setting site-specific information, importing raw site data, calling pipeline functions, creating site plots and webpages etc.'
import numpy as np
import xarray as xr
import pandas as pd
import matplotlib.pyplot as plt
import os
import sys
import argparse
import importlib
import glob
# paths
oshome=os.getenv('HOME')
projpath = f'{oshome}/git/urban-plumber_pipeline' # root of repository
datapath = f'{oshome}/git/urban-plumber_pipeline/input_data' # raw data path (site data, global data)
sys.path.append(projpath)
import pipeline_functions
importlib.reload(pipeline_functions)
##########################################################################
# MANUAL USER INPUTS
##########################################################################
# these are overridden with --existing flag (i.e. python create_dataset_XX.py --existing)
create_raw_obs_nc = True # create obs nc from original format
create_rain_file = True # find and load nearest GHCND
qcplotdetail = True # plot quality control diurnal and timeseries
forcingplots = True # plot forcing and analysis period obs and gap-filling
create_outofsample_obs = True # for testing bias-correction on half of available obs
fullpipeline = True # undertake full pipeline e.g. cleaning, bias correction, data creation
##########################################################################
# COMMAND LINE ARGUMENT PARSING
##########################################################################
# Initiate the parser
parser = argparse.ArgumentParser()
parser.add_argument('--log', help='log print statements to file', action='store_true')
parser.add_argument('--projpath', help='replaces projpath with new path')
parser.add_argument('--datapath', help='replaces datapath with new path')
parser.add_argument('--existing', help='use existing outputs (processing already run)', action='store_true')
parser.add_argument('--globaldata',help='output site characteristics from global dataset (if available)', action='store_true')
args = parser.parse_args()
log_stout = False
if args.log:
log_stout = True
if args.projpath:
    print(f'updating projpath to {args.projpath}')
projpath = args.projpath
if args.datapath:
    print(f'updating datapath to {args.datapath}')
datapath = args.datapath
if args.existing:
print('using existing files')
create_raw_obs_nc = False
create_rain_file = False
qcplotdetail = False
forcingplots = False
create_outofsample_obs = False
fullpipeline = False
##########################################################################
# SITE SPECIFIC INFORMATION
##########################################################################
sitename = 'JP-Yoyogi'
out_suffix = 'v0.9'
sitedata_suffix = 'v1'
local_utc_offset_hours = 9.0
long_sitename = 'Yoyogi, Tokyo, Japan'
obs_contact = '<NAME> (<EMAIL>)'
obs_reference = 'Hirano, Sugawara, Murayama, Kondo (2015): https://doi.org/10.2151/sola.2015-024; Sugawara, Shimizu, Hirano, Murayama and Kondo (2014): https://doi.org/10.14887/kazekosymp.23.0_49; Ishidoya, Sugawara,Terao, Kaneyasu, Aoki, Tsuboi and Kondo. (2020): https://doi.org/10.5194/acp-20-5293-2020'
obs_comment = 'Observations for turbulent fluxes removed from sectors 170°-260° because of land surface inhomogeneities. Gap-filling for the forcing data using data at nearby observatories (less than 8 km distance).'
photo_source='<NAME>'
history = 'v0.9 (2021-09-08): beta issue'
##########################################################################
# MAIN
##########################################################################
def main():
sitepath = f'{projpath}/sites/{sitename}'
print('preparing site data and attributes')
sitedata, siteattrs = pipeline_functions.prep_site(
sitename, sitepath, out_suffix, sitedata_suffix, long_sitename,
local_utc_offset_hours, obs_contact, obs_reference, obs_comment,
history, photo_source, args.globaldata, datapath)
print('getting observation netcdf\n')
if create_raw_obs_nc:
print(f'creating observational NetCDF in ALMA format\n')
raw_ds = import_obs(sitedata,siteattrs)
raw_ds = pipeline_functions.set_raw_attributes(raw_ds, siteattrs)
else:
fpath = f'{sitepath}/timeseries/{sitename}_raw_observations_{siteattrs["out_suffix"]}.nc'
raw_ds = xr.open_dataset(fpath)
if create_rain_file:
syear, eyear = raw_ds.time.dt.year.values[0] - 10, raw_ds.time.dt.year.values[-1]
nearest = pipeline_functions.find_ghcnd_closest_stations(syear,eyear,sitedata,datapath,nshow=3)
print('nearest stations, see: https://www.ncdc.noaa.gov/cdo-web/search:\n',nearest)
rain_sites = ['JA000047662', # TOKYO, JA 35.683 139.767
'Tokyo_monthly_fill'] # fill gaps with monthly totals difference from https://www.data.jma.go.jp/obd/stats/etrn/view/monthly_s3_en.php?block_no=47662&view=13
rain_obs = pipeline_functions.get_ghcnd_precip(sitepath,datapath,syear,eyear,rain_sites)
pipeline_functions.write_ghcnd_precip(sitepath,sitename,rain_obs)
############################################
############ pipeline MAIN call ############
raw_ds, clean_ds, watch_ds, era_ds, corr_ds, lin_ds, forcing_ds = pipeline_functions.main(
datapath = datapath,
sitedata = sitedata,
siteattrs = siteattrs,
raw_ds = raw_ds,
fullpipeline = fullpipeline,
qcplotdetail = qcplotdetail)
############################################
print('post processing, plotting and checking')
pipeline_functions.post_process_site(sitedata,siteattrs,datapath,
raw_ds,forcing_ds,clean_ds,era_ds,watch_ds,corr_ds,lin_ds,
forcingplots,create_outofsample_obs)
print(f'{sitename} done!')
return raw_ds, clean_ds, watch_ds, era_ds, corr_ds, forcing_ds
##########################################################################
# specific functions
##########################################################################
def import_obs(sitedata,siteattrs):
# read data csv
print('reading raw data file')
raw1 = pd.read_csv(f'{datapath}/{sitename}/yoyogi_forcing_rev.dat', na_values=['NaN',-9999.], delim_whitespace=True)
raw2 = pd.read_csv(f'{datapath}/{sitename}/yoyogi_evaluation.dat', na_values=['NaN',-9999.], delim_whitespace=True)
# get times from data and reindex
times = pd.date_range(start='2016-04-01 00:00', end='2020-03-31 23:00', freq='60Min')
raw1.index = times
raw2.index = times
# create dataframe in ALMA format
df = | pd.DataFrame(index=times) | pandas.DataFrame |
'''
Illustration of the uncertainty surrounding point estimations of the decay value (with and without stationarity breaks) in a Hawkes process.
This code produces normalized decay distributions which deviate from the standard Gaussian, as exemplified in Fig. 1 in the paper.
'''
import functools
import os
import sys
import time
import hyperopt
import lmfit
import numpy as np
import pandas as pd
from scipy import stats
from tick.hawkes import HawkesExpKern, SimuHawkesExpKernels, SimuHawkesMulti
import constants
const = constants.Constants(n_dims=1)
SAMPLE_SIZE = 100
BETA_INCREMENT = 1
SPLIT_INDEX = int(const.n_realizations / 2)
# fct to optimize
def __loglik_wrapper(params, event_times_dict_list):
learner = HawkesExpKern(decays=[[params["beta"]] * const.n_dims] * const.n_dims)
learner.fit(event_times_dict_list)
return -learner.score()
df = []
df_splitbeta = []
intensity_parameters = {
"mu": [0.1],
"alpha": [[0.5]],
"beta": [[1.2]]
}
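# With these parameters the adjacency passed to the simulator below is
# alpha / beta = 0.5 / 1.2 (about 0.42), i.e. a branching ratio below 1,
# so the simulated Hawkes process is sub-critical (stable).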
print('true beta is', intensity_parameters["beta"])
for i, initial_beta_i in enumerate(np.linspace(0.01, 10, num=SAMPLE_SIZE)):
print('round', i)
# simulation
hawkes_exp_kernels = SimuHawkesExpKernels(adjacency=list(intensity_parameters["alpha"] / np.array(intensity_parameters["beta"])),
decays=intensity_parameters["beta"],
baseline=intensity_parameters["mu"],
end_time=const.simulation_end,
verbose=False)
multi = SimuHawkesMulti(hawkes_exp_kernels, n_simulations=const.n_realizations, n_threads=8)
multi.simulate()
# LBFGSB
parameters = lmfit.Parameters()
parameters.add("beta", min=0, value=1.)
minimizer = lmfit.Minimizer(__loglik_wrapper, parameters,
fcn_args=(multi.timestamps, ))
start_time = time.time()
result = minimizer.minimize(method="lbfgsb")
end_time = time.time()
df.append(result.params["beta"].value)
# simulation but with split beta
hawkes_exp_simu = SimuHawkesExpKernels(adjacency=list(intensity_parameters['alpha'] / np.array(intensity_parameters['beta'])),
decays=intensity_parameters['beta'],
baseline=intensity_parameters['mu'],
max_jumps=const.max_jumps,
verbose=False)
multi = SimuHawkesMulti(hawkes_exp_simu, n_simulations=const.n_realizations, n_threads=8)
multi.simulate()
hawkes_exp_simu = SimuHawkesExpKernels(adjacency=list(intensity_parameters['alpha'] / np.array(intensity_parameters['beta'])),
decays=(np.array(intensity_parameters['beta']) + BETA_INCREMENT).tolist(),
baseline=intensity_parameters['mu'],
max_jumps=const.max_jumps,
verbose=False)
multi_other_beta = SimuHawkesMulti(hawkes_exp_simu, n_simulations=const.n_realizations, n_threads=8)
multi_other_beta.simulate()
combined_timestamps = multi.timestamps[SPLIT_INDEX:] + multi_other_beta.timestamps[SPLIT_INDEX:]
# LBFGSB
parameters = lmfit.Parameters()
parameters.add("beta", min=0, value=1.)
minimizer = lmfit.Minimizer(__loglik_wrapper, parameters,
fcn_args=(combined_timestamps, ))
start_time = time.time()
result = minimizer.minimize(method="lbfgsb")
end_time = time.time()
df_splitbeta.append(result.params["beta"].value)
rounded_beta = np.round(intensity_parameters["beta"][0][0], 4)
| pd.DataFrame({"Beta": df, "SplitBeta": df_splitbeta}) | pandas.DataFrame |
import pandas as pd
import inspect
import functools
# ============================================ DataFrame ============================================ #
# Decorates a generator function that yields rows (v,...)
def pd_dfrows(columns=None):
def dec(fn):
def wrapper(*args,**kwargs):
return pd.DataFrame([*fn(*args,**kwargs)],columns=columns)
return functools.update_wrapper(wrapper,fn)
return dec
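# Minimal usage sketch (hypothetical generator, for illustration only):
#
#   @pd_dfrows(columns=['n', 'square'])
#   def squares(stop):
#       for i in range(stop):
#           yield (i, i * i)
#
#   squares(3)   # -> DataFrame with columns ['n', 'square'] and rows (0, 0), (1, 1), (2, 4)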
# Decorates a generator function that yields k,(v,...) pairs
def pd_dataframe(index=None,columns=None):
def dec(fn):
def wrapper(*args,**kwargs):
i,d = (list(x) for x in zip(*fn(*args,**kwargs)))
return pd.DataFrame(d,pd.Index(i,name=index),columns=columns)
return functools.update_wrapper(wrapper,fn)
return dec
# Decorates a generator function that yields (k,...),(v,...) pairs
def pd_multiframe(index=None,columns=None):
def dec(fn):
def wrapper(*args,**kwargs):
i,d = (list(x) for x in zip(*fn(*args,**kwargs)))
return pd.DataFrame(d,index=pd.MultiIndex.from_tuples(i,names=index),columns=columns)
return functools.update_wrapper(wrapper,fn)
return dec
# ============================================ Series ============================================ #
# Decorates a generator function that yields k,v pairs
def pd_series(index=None,name=None):
def dec(fn):
def wrapper(*args,**kwargs):
i,d = (list(x) for x in zip(*fn(*args,**kwargs)))
return pd.Series(d,index=pd.Index(i,name=index),name=name)
return functools.update_wrapper(wrapper,fn)
return dec
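# Minimal usage sketch (hypothetical generator, for illustration only):
#
#   @pd_series(index='word', name='length')
#   def word_lengths(words):
#       for w in words:
#           yield w, len(w)
#
#   word_lengths(['a', 'bc'])   # -> Series [1, 2] named 'length' with index named 'word'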
# Decorates a generator function that yields (k,...),v pairs
def pd_multiseries(index=None,name=None):
def dec(fn):
def wrapper(*args,**kwargs):
i,d = [[*x] for x in zip(*fn(*args,**kwargs))]
return pd.Series(d,index=pd.MultiIndex.from_tuples(i,names=index),name=name)
return functools.update_wrapper(wrapper,fn)
return dec
# ============================================ Index ============================================ #
# Decorates a generator function that yields (k,...)
def pd_multi_index(names=None):
def dec(fn):
def wrapper(*args,**kwargs):
return pd.MultiIndex.from_tuples([*fn(*args,**kwargs)],names=names)
return functools.update_wrapper(wrapper,fn)
return dec
# Decorates a generator function that yields k
def pd_index(name=None):
def dec(fn):
def wrapper(*args,**kwargs):
return pd.Index([*fn(*args,**kwargs)],name=name)
return functools.update_wrapper(wrapper,fn)
return dec
# ============================================ Joins ============================================ #
# decorates either a generator function that yields dataframes, or an iterable containing dataframes.
def pd_concat(axis=0,**catargs):
def dec(fn):
def wrapper(*args,**kwargs):
return pd.concat([*fn(*args,**kwargs)],axis=axis,**catargs)
return functools.update_wrapper(wrapper,fn)
return dec
# ============================================ Transforms ============================================ #
# decorates a function that reindexes dataframes
def pd_reindex(name=None):
def dec(fn):
def wrapper(df):
inx = pd.Index([*map(fn,df.index)],name=(name if name!=None else df.index.names if df.index.ndim>1 else df.index.name))
return pd.DataFrame(df.values,index=inx,columns=df.columns)
return wrapper
return dec
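# Sketch (illustrative, assuming an existing DataFrame df): shift every index label by one
# while keeping values and columns unchanged
#
#   @pd_reindex()
#   def shift_index(label):
#       return label + 1
#
#   shift_index(df)   # same data, index values incremented by 1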
# decorates a function that transforms both the index values and column values of an inputted dataframe
def pd_transform(inx=None,col=None):
def dec(fn):
def wrapper(df,*args,**kwargs):
i,d = [[*x] for x in zip(*fn(df,*args,**kwargs))]
index = pd.Index(i,name=(inx if inx!=None else df.index.names if df.index.ndim>1 else df.index.name))
return pd.DataFrame(d,index,columns=(col if col!=None else df.columns))
return wrapper
return dec
# ============================================ GroupBy ============================================ #
def pd_groupby_agg(by,columns=None):
def dec(fn):
if inspect.isgeneratorfunction(fn):
def wrapper(df,*args,**kwargs):
i,d = [[*x] for x in zip(*(a for b in (((g,r) for r in fn(data,*args,**kwargs)) for g,data in df.groupby(by)) for a in b))]
inx = pd.Index(i,name=by) if type(by) == str else pd.MultiIndex.from_tuples(i,names=by)
return pd.DataFrame(d,inx,columns=columns)
return wrapper
else:
def wrapper(df,*args,**kwargs):
i,d = [[*x] for x in zip(*((g,fn(data,*args,**kwargs)) for g,data in df.groupby(by)))]
inx = pd.Index(i,name=by) if type(by) == str else pd.MultiIndex.from_tuples(i,names=by)
return | pd.DataFrame(d,inx,columns=columns) | pandas.DataFrame |
#!/usr/bin/env python3
import argparse
from collections import Counter
from itertools import combinations
from math import lgamma, log, factorial
import numpy as np
import operator
import os
import pandas as pd
from functools import reduce
import sys
import time
import warnings
###############################
##### AUXILIARY FUNCTIONS #####
###############################
def nCr(n, r):
"""
Returns the number of combinations of r elements out a total of n.
"""
f = factorial
return f(n) // f(r) // f(n-r)
def lg(n,x):
    """
    Log of the rising factorial x*(x+1)*...*(x+n-1), i.e. lgamma(n+x) - lgamma(x).
    """
    return lgamma(n+x) - lgamma(x)
def onepositive(dist):
"""
True if there only one positive count in `dist`.
"""
npos = 0
for n in dist:
if n > 0:
npos += 1
if npos > 1:
return False
return True
def ml(dist):
"""
Compute the maximum likelihood given full instantiation counts in dist.
"""
tot = sum(dist)
res = 0.0
for n in dist:
if n > 0:
res += n*log(n/tot)
return res
def ml_sum(distss):
"""
Compute the total maximum likelihood of the full instantiation counts
in distss.
"""
res = 0.0
for dists in distss:
res += sum([ml(d) for d in dists])
return res
###############################
####### BOUND FUNCTIONS #######
###############################
def diffa(dist, alpha, r):
"""
Compute the derivative of local-local BDeu score.
"""
res = 0.0
for n in dist:
for i in range(n):
res += 1.0/(i*r+alpha)
for i in range(sum(dist)):
res -= 1.0/(i+alpha)
return res
def g(dist, aq):
"""
Compute function g (Lemma 5) for a given full parent isntantiation.
Parameters
----------
dists: list ints
Counts of the child variable for a given full parent instantiation.
aq: float
Equivalent sample size divided by the product of parents arities.
"""
res = log(2*min(dist)/aq + 1)
for d in dist:
res += - log(2*d/aq + 1)
return res
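# Illustrative expansion (made-up counts): for dist = [2, 3] and aq = 0.5,
#   g([2, 3], 0.5) = log(2*2/0.5 + 1) - log(2*2/0.5 + 1) - log(2*3/0.5 + 1) = -log(13)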
def h(dist, alpha, r):
"""
Computes function h (Lemma 8).
"""
res = -lg(sum(dist), alpha)
alphar = alpha/r
for n in dist:
if n > 0:
res += lg(n, alphar)
return res
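# Illustrative expansion (made-up counts): for dist = [3, 1], alpha = 1.0 and child arity r = 2,
#   h([3, 1], 1.0, 2) = -lg(4, 1.0) + lg(3, 0.5) + lg(1, 0.5)
# i.e. the parent-instantiation term uses alpha while each positive child count uses alpha/r.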
def ubh_js(dists, alpha, r, counters=None):
"""
Compute bound h for each instantiation js of set of parents S.
See Theorem 3 for the definition of the bound.
Parameters
----------
dists: list of lists
Counts of the child variable for each full parent instantiation j
that is compatible with js.
alpha: float
The equivalent sample size (ESS).
r: int
Arity (number of possible values) of the child variable.
counters: dict (optional)
Dictionary used to store the number of times each function
(ml, f + g, h) was the minimum in the last part of the equation in
Theorem 3.
Returns
-------
Upper bound h for a given isntantiation of parent set S.
"""
is_g, is_h, is_ml = 0, 0, 1
mls = 0.0
best_diff = 0.0
for dist in dists:
ml_j = ml(dist)
mls += ml_j
ubg_plus_f = -len(dist)*log(r) + g(dist, alpha)
iffirst_ub = min(ubg_plus_f, ml_j)
ubh = 0
if not onepositive(dist) and diffa(dist, alpha, r) >= 0 and alpha <= 1:
ubh = h(dist, alpha/2, r)
iffirst_ub = min(iffirst_ub, ubh)
diff = iffirst_ub - ml_j
if diff < best_diff:
best_diff = diff
is_g, is_h, is_ml = 0, 0, 0
if iffirst_ub == ubg_plus_f:
is_g = 1
if iffirst_ub == ubh:
is_h = 1
if counters is not None:
counters['inner_ml'] += is_ml
counters['inner_g'] += is_g
counters['inner_h'] += is_h
counters['inner_total'] += 1
return best_diff + mls
###############################
######### DATA CLASS ##########
### main code ###
###############################
class Data:
"""
A dataset of complete discrete data.
This class holds all the information used during the experiments.
"""
def __init__(self, data, name):
""""
Attributes
----------
data: pandas dataframe or path to csv file.
The data to be used for the experiments.
name: str
Name used to save results (usually matching dataset name).
It is assumed that:
1. All values are separated by whitespace
2. Comment lines start with a '#'
3. The first line is a header stating the names of the variables
4. The second line states the arities of the variables
5. All other lines contain the actual data
"""
        if not isinstance(data, pd.DataFrame):
data = pd.read_csv(data,
delim_whitespace=True,
comment='#')
arities = [int(x) for x in data.iloc[0]]
self._name = name
self._data = data[1:]
self._arities = dict(zip(list(self._data), arities))
self._variables = list(data.columns)
self._varidx = {}
# Initialize all the counters to zero
self.counters = {}
self.reset_counters()
for i, v in enumerate(self._variables):
self._varidx[v] = i
self.get_atoms()
def reset_counters(self):
"""
There are a number of counters to keep track of the number of
scores and bounds computed. This function resets them to zero.
"""
# 'min' counters are used to keep track the number of times each of
# bound g and h is the tightest.
self.counters['min_ubg'] = 0
self.counters['min_ubh'] = 0
# 'inner' counters are used inside bound h. See ubh_js function in
# utils.py or Theorem 3 in the paper.
self.counters['inner_ml'] = 0
self.counters['inner_g'] = 0
self.counters['inner_h'] = 0
self.counters['inner_total'] = 0
def upper_bound_f(self, child, posfamilyinsts):
"""
Compute a weak upper bound on supersets of a given parent set.
Parameters
----------
child: int
Index of the child of the family.
posfamilyinsts: int
The number of instantiations of the family which occur at least
once in the data
Returns
-------
Upper bound h (float).
"""
return -posfamilyinsts * log(self._arities[child])
def upper_bound_g(self, child, parents, aq, posfamilyinsts, atoms_for_parents):
"""
Compute an upper bound on supersets of parents
Parameters
----------
child: int
Index of the child of the family.
parents: list
The parents of the family (an iterable of indices)
aq: float
Equivalent sample size divided by the product of parents arities.
posfamilyinsts: int
The number of instantiations of the family which occur at least
once in the data
atoms_for_parents: dict
For each instantiation of `parents` (keys in the dictionary), a
list of list of counts the child variable. Each of the inner
lists corresponds to a full instantion of all variables (but
the child) that is compatible with the instantiation of the
parents in the key. See atoms_for_parents function.
Returns
-------
Upper bound g (float).
"""
m_final = 0
for dists in atoms_for_parents:
pens = []
# Compute g for each full instantiation.
for dist in dists:
pens.append(g(dist, aq))
m_min = min(pens)
m_final += m_min
if len(pens) > 1:
pens[pens.index(m_min)] = float('inf')
m_final += min(pens)
return -posfamilyinsts*log(self._arities[child]) + m_final
def upper_bound_h(self, child, parents, alpha, atoms_for_parents):
"""
Compute an upper bound on supersets of parents.
Parameters
----------
child: int
Index of the child of the family.
parents: list
The parents of the family (an iterable of indices)
alpha: float
Equivalent sample size.
atoms_for_parents: dict
For each instantiation of `parents` (keys in the dictionary), a
list of list of counts the child variable. Each of the inner
lists corresponds to a full instantion of all variables (but
the child) that is compatible with the instantiation of the
parents in the key. See atoms_for_parents function.
Returns
-------
Upper bound h (float).
"""
for pa in parents:
alpha /= self._arities[pa]
r = self._arities[child]
this_ub = 0.0
# Compute ubh for each instantiation of parent set S
for dists in atoms_for_parents:
this_ub += ubh_js(dists, alpha, r, self.counters)
return this_ub
def upper_bound_min_min(self, child, parents, aq, counts, atoms_for_parents):
"""
Returns the best (min) of the two bounds (g and h).
Parameters
----------
child: int
Index of the child of the family.
parents: list
The parents of the family (an iterable of indices)
aq: float
Equivalent sample size divided by the product of parents arities.
counts: pandas series
The counts for each of the full instantiations.
(Only the number of full instantations is actually needed).
atoms_for_parents: dict
For each instantiation of `parents` (keys in the dictionary), a
list of list of counts the child variable. Each of the inner
lists corresponds to a full instantion of all variables (but
the child) that is compatible with the instantiation of the
parents in the key. See atoms_for_parents function.
Returns
-------
Upper bound min(g, h) (float).
"""
r = self._arities[child]
this_ub = 0.0
m_final = 0
for child_counts in atoms_for_parents:
# Upper bound h
this_ub += ubh_js(child_counts, aq, r)
# Upper bound g
pens = []
for cc in child_counts:
pen = + log(2*min(cc)/aq + 1)
for c in cc:
pen += - log(2*c/aq + 1)
pens.append(pen)
m_min = min(pens)
m_final += m_min
if len(pens) > 1:
pens[pens.index(m_min)] = float('inf')
m_final += min(pens)
ubg = -len(counts)*log(self._arities[child]) + m_final
if this_ub < ubg:
self.counters['min_ubh'] += 1
elif this_ub > ubg:
self.counters['min_ubg'] += 1
else:
self.counters['min_ubh'] += 1
self.counters['min_ubg'] += 1
return min(this_ub, -len(counts)*log(self._arities[child]) + m_final)
def bdeu_score(self, child, parents, alpha=None, bound=None):
"""
Computes the (local) score of a given child and a parent set.
Parameters
----------
child: int
Index of the child of the family.
parents: list
The parents of the family (an iterable of indices)
alpha: float
Equivalent sample size.
Returns
-------
A tuple (score, ubs) where
- score is the BDeu score of a particular child and parent set
- ubs is a dictionary of mapping the names of upper bounds to
upper bounds on the BDeu scores of supersets of the parent set.
"""
if alpha is None:
alpha = 1.0
warnings.warn('ESS (alpha) not defined. Defaulting to alpha=1.0.')
aq = alpha
for parent in parents:
aq /= self._arities[parent]
aqr = aq / self._arities[child]
counts = self._data.groupby(list(parents)+[child], sort=True).size()
posfamilyinsts = len(counts)
bdeu_score = 0.0
if len(parents) == 0:
nij = 0
for nijk in counts:
bdeu_score += lg(nijk,aqr)
nij += nijk
bdeu_score -= lg(nij,aq)
else:
cnt = Counter()
for idx, nijk in counts.iteritems():
cnt[idx[:-1]] += nijk
bdeu_score += lg(nijk,aqr)
for nij in cnt.values():
bdeu_score -= lg(nij,aq)
atoms_for_parents = self.atoms_for_parents(child, parents).values()
if bound == 'f':
bounds = {'f': self.upper_bound_f(child, posfamilyinsts)}
elif bound == 'g':
bounds = {'g': self.upper_bound_g(child, parents, aq, posfamilyinsts, atoms_for_parents)}
elif bound == 'h':
bounds = {'h': self.upper_bound_h(child, parents, alpha, atoms_for_parents)}
elif bound == 'min':
bounds = {'min': self.upper_bound_min_min(child, parents, aq, counts, atoms_for_parents)}
elif bound == 'all':
bounds = {'f': self.upper_bound_f(child, posfamilyinsts),
'g': self.upper_bound_g(child, parents, aq, posfamilyinsts, atoms_for_parents),
'h': self.upper_bound_h(child, parents, alpha, atoms_for_parents),
'min': self.upper_bound_min_min(child, parents, aq, counts, atoms_for_parents)}
elif bound is None:
return bdeu_score
return bdeu_score, bounds
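    # Usage sketch (assumed file and column names, for illustration only):
    #   d = Data('discrete.dat', name='example')
    #   d.bdeu_score('X1', ['X2', 'X3'], alpha=1.0)               # score only (bound=None)
    #   d.bdeu_score('X1', ['X2', 'X3'], alpha=1.0, bound='min')  # (score, {'min': upper_bound})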
def pen_ll_score(self, child, parents, pen_type):
"""
Returns a the AIC score of a particular child and parent set
Parameters
----------
child: int
Index of the child of the family.
parents: list
The parents of the family (an iterable of indices)
pen_type: str or float
Either a type of score ('BIC' or 'AIC') or a penalisation
coefficient.
"""
counts = self._data.groupby(list(parents)+[child],sort=True).size()
posfamilyinsts = len(counts)
LL = 0
if len(parents) == 0:
nij = counts.sum()
for nijk in counts:
LL += nijk*np.log(nijk/nij)
pen = (self._arities[child] -1)
else:
cnt = Counter()
# Compute nij for each parent configuration
for idx, nijk in counts.iteritems():
cnt[idx[:-1]] += nijk
# Compute the loglikelihood
for idx, nijk in counts.iteritems():
LL += nijk*np.log(nijk/cnt[idx[:-1]])
# Compute the penalization for AIC
pen = 1
for parent in parents:
pen *= self._arities[parent]
pen *= self._arities[child] -1
if pen_type == 'AIC':
score = LL - pen
elif pen_type == 'BIC':
pen *= 0.5*np.log(counts.sum())
score = LL - pen
elif isinstance(pen_type, (int, float)):
score = LL - pen_type*pen
else:
            raise Exception(pen_type + ' is not supported yet. Please use BIC or AIC.')
return score
def all_bdeu_scores(self, alpha=None, palim=None, bound=None, filepath=None):
"""
Exhaustively compute all BDeu scores and upper bounds for all families
up to `palim`
Parameters
----------
child: int
Index of the child of the family.
alpha: float
Equivalent sample size.
palim: int
The maximum number of parents.
bound: str
The bound to compute. Either 'f', 'g', 'h', 'min'.
If bound == 'all' computes all bounds.
filepath: str
Path to file where to save the scores. If left to None, the scores
are not saved.
Returns
-------
score_dict: dict
A dictionary dkt where dkt[child][parents] = bdeu_score
"""
if palim is None:
palim = 3
warnings.warn('Maximum number of parents (palim) not defined. Defaulting to palim=3.')
if alpha is None:
alpha = 1.0
warnings.warn('ESS (alpha) not defined. Defaulting to alpha=1.0.')
score_dict = {}
for i, child in enumerate(self._variables):
potential_parents = frozenset(self._variables[:i]+self._variables[i+1:])
child_dkt = {}
for pasize in range(palim+1):
for parents in combinations(potential_parents,pasize):
child_dkt[frozenset(parents)] = self.bdeu_score(child,parents,alpha,bound=bound)
score_dict[child] = child_dkt
if filepath is not None:
self.write_scores(filepath, score_dict)
return score_dict
def all_pen_ll_scores(self, score_type, filepath=None, palim=None):
"""
Exhaustively compute all BDeu scores and upper bounds for all families
up to `palim`
Parameters
----------
score_type: str or float
Either a type of score ('BIC' or 'AIC') or a penalisation
coefficient.
filepath: str
Path to file where to save the scores. If left to None, the scores
are not saved.
palim: int
Maximum number of parents.
Returns
-------
score_dict: dict
A dictionary dkt where dkt[child][parents] = bdeu_score
"""
if palim is None:
palim = 3
warnings.warn('Maximum number of parents (palim) not defined. Defaulting to palim=3.')
score_dict = {}
for i, child in enumerate(self._variables):
potential_parents = frozenset(self._variables[:i]+self._variables[i+1:])
child_dkt = {}
for pasize in range(palim+1):
for parents in combinations(potential_parents,pasize):
child_dkt[frozenset(parents)] = self.pen_ll_score(child, parents, score_type)
score_dict[child] = child_dkt
if filepath is not None:
self.write_scores(filepath, score_dict)
return score_dict
def write_scores(self, filepath, score_dict):
"""
Saves a dictionary of scores to filepath.
See all_pen_ll_scores or all_bdeu_scores.
"""
score_info = '{}\n'.format(len(self._variables))
for child, parents in score_dict.items():
score_info += child + ' {}\n'.format(len(score_dict[child]))
for parent, score in parents.items():
score_info += str(score) + ' ' + str(len(parent)) + ' ' + ' '.join(parent) + '\n'
with open(filepath, 'w') as w:
w.write(score_info)
def atoms_for_parents(self, child, parents):
"""
Return a dictionary whose keys are instantiations of `parents`
with positive counts in the data and whose values are lists of lists
of child counts.
Parameters
----------
child: int
The (index of) child variable.
parents: list of ints
The list of indices of the parent variables.
Returns
-------
dkt: dict
[parents instantiations] = [[child counts full intantiation 1],
[child counts full intantiation 2],
...
[child_counts full intantiation n]]
Example
-------
If dkt is the returned dictionary and dkt[0,1,0] = [[1,2], [0,4]],
then there are 3 variables in `parents` and there are 2 full parent
instantiations for the instantiation (0,1,0): one with child counts
[1,2] and one with child counts [0,4].
A full instantiation means that all variables (but the child) have
a value assigned to them. The full instantatiations in each key are
the ones compatible with the corresponding instantiation of `parents`
in that key. In the example, if we have 4 variables (plus the child)
that means there are two possible instantiation of the 4th variable:
one where the child is distributed as [1, 2], and other where it is
distributed as [0, 4]. The 4th variable might have more than 2
states, but those are not observed (zero counts) in this example.
"""
# Get indices of parents in vector of all possible parents for child
child_idx = self._varidx[child]
parentset = frozenset(parents)
pa_idxs = []
for i, parent in enumerate(self._variables[:child_idx]+self._variables[child_idx+1:]):
if parent in parentset:
pa_idxs.append(i)
# As we open the tree of variables following the index order, we only
# look at full instantations of parents and variables of higher index.
upper_pa_idxs = list(range(max(pa_idxs + [-1]) + 1, len(self._variables)-1))
upper_dkt = {}
for fullinst, childcounts in self._atoms[child].items():
inst = tuple([fullinst[i] for i in pa_idxs + upper_pa_idxs])
try:
upper_dkt[inst] = list(np.array(upper_dkt[inst]) + np.array(childcounts))
except KeyError:
upper_dkt[inst] = childcounts
# The counts for instantations that differ only on variables of lower
# index can be safely summed to improve the bounds.
dkt = {}
posfamilyinsts = 0
for fullinst, childcounts in upper_dkt.items():
inst = tuple([fullinst[i] for i in range(len(pa_idxs))])
# In this step, we have to remove the zero counts!
non_zeros = [x for x in childcounts if x>0]
posfamilyinsts += len(non_zeros)
try:
dkt[inst].append(non_zeros)
except KeyError:
dkt[inst] = [non_zeros]
return dkt
def get_atoms(self):
"""
Compute a dictionary whose keys are child variables and whose values
are dictionaries mapping instantiations of all the other parents to a
list of counts for the child variable for that instantiation.
Only parent set instantations with a positive count in the data are
included.
The dictionary is stored as the value of self._atoms
"""
# Create the counts as a pandas DataFrame with a new column 'counts'
counts = pd.DataFrame({'counts' : self._data.groupby(self._variables).size()}).reset_index()
# Save the counts inside a list to facilitate concatenation
listfy = lambda x : [x]
counts['counts'] = counts['counts'].apply(listfy)
dktall = {}
for child in self._variables:
all_but_child = [var for var in self._variables if var != child]
# The sum operation concatenate the lists of counts
# for rows that differ only on the child variable
# The unstack operation fill in the full instantations
# which do not have all possible values of child in the data
# so that we can keep the zeros in place
child_counts = counts.groupby(by=self._variables).agg({'counts': 'sum'}).unstack(child, fill_value=[0]).stack().reset_index()
child_counts = child_counts.groupby(by=all_but_child).agg({'counts': 'sum'}).reset_index()
dkt_child = child_counts.set_index(all_but_child).to_dict('index')
for cc in dkt_child:
dkt_child[cc] = dkt_child[cc]['counts']
dktall[child] = dkt_child
self._atoms = dktall
def pruned_bdeu_scores_per_child(self, child, bound, timeout, alpha=1.0, palim=None, verbose=False, save_step=False):
"""
Return a dictionary for the child variable mapping parent sets to
BDeu scores.
Not all parent sets are included. Only those parent set of cardinality
at most `palim` can be included. Also, if it can be established that
a parent set can not be a parent set for the child in an optimal Bayesian
network, then it is not included.
Also, outputs a pandas DataFrame with the number of scores computed.
The DataFrame is saved to memory every iteration over palim so not to
miss results if the process is terminated.
Parameters
----------
child: int
The (index of) child variable.
bound: str
The type of bound to use.
timeout: int
The maximum amount of time the function has to run (secs).
alpha: float
The effective sample size (prior parameter).
palim: int
The maximum number of parents.
verbose: boolean
Whether messages on progress should be printed.
save_step: boolean
Whether to save a csv per child
"""
if palim is None:
palim = 3
warnings.warn('Maximum number of parents (palim) not defined. Defaulting to palim=3.')
if bound == 'h':
scores_per_palim = pd.DataFrame(columns=['child', 'palim', 'alpha', 'all_scores', 'n_scores_' + bound, 'time_' + bound, 'inf_n_scores', 'inner_ml', 'inner_g', 'inner_h', 'inner_total', 'best_pa'])
elif bound == 'min':
scores_per_palim = pd.DataFrame(columns=['child', 'palim', 'alpha', 'all_scores', 'n_scores_' + bound, 'time_' + bound, 'inf_n_scores', 'min_ubg', 'min_ubh', 'best_pa'])
else:
scores_per_palim = | pd.DataFrame(columns=['child', 'palim', 'alpha', 'all_scores', 'n_scores_' + bound, 'time_' + bound, 'inf_n_scores', 'best_pa']) | pandas.DataFrame |
#! /usr/bin/env python3
import pandas as pd
import os
from steves_utils.summary_utils import (
get_experiments_from_path
)
from steves_utils.utils_v2 import (
get_experiments_base_path
)
class tuned_1v2_Helper:
def __init__(self, series_path = os.path.join(get_experiments_base_path(), "tuned_1v2")):
self.independent_vars = ["Dataset", "x_transform"]
self.series_path = series_path
self.series_name = "tuned_1v2"
def get_all_trials(self):
all_trials = []
trials = get_experiments_from_path(
self.series_path
)
for t in trials:
t["series_name"] = self.series_name
for key, value in t["parameters"].items():
t[key] = value
del t["parameters"]
for key, value in t["results"].items():
t[key] = value
del t["results"]
for key, value in t.items():
if type(value) == list:
t[key] = tuple(value)
all_trials.append(t)
p = | pd.DataFrame(all_trials) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least, shallow copied)
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
# Labels doesn't matter which way copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
# Names doesn't matter which way copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# this blows up
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
            pytest.skip("testing for legacy pickles not "
                        "supported on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
        # 0.7.3 -> 0.8.0 format change
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
[0, 0, 0, 0]), np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1),
np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('dtypes', [[int, float], [float, int]])
def test_get_loc_implicit_cast(self, level, dtypes):
# GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool(self):
# GH 19086 : int is casted to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype='int64')]
idx = MultiIndex.from_product(levels)
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
pytest.raises(KeyError, idx.get_loc, (False, True))
pytest.raises(KeyError, idx.get_loc, (True, False))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs, (1, 3))
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs,
df.index[5] + timedelta(
seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than "
"MultiIndex lexsort depth",
index.slice_locs, (1, 0, 1), (2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
# should there be a test case here???
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
assert result == (1, 5)
result = sorted_idx.slice_locs(None, ('qux', 'one'))
assert result == (0, 5)
result = sorted_idx.slice_locs(('foo', 'two'), None)
assert result == (1, len(sorted_idx))
result = sorted_idx.slice_locs('bar', 'baz')
assert result == (2, 4)
def test_slice_locs_not_contained(self):
# some searchsorted action
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
assert result == (3, 6)
result = index.slice_locs(1, 5)
assert result == (3, 6)
result = index.slice_locs((2, 2), (5, 2))
assert result == (3, 6)
result = index.slice_locs(2, 5)
assert result == (3, 6)
result = index.slice_locs((1, 0), (6, 3))
assert result == (3, 8)
result = index.slice_locs(-1, 10)
assert result == (0, len(index))
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
        # the fact that it works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not index.is_unique
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
assert 'foo' not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(after=1)
assert 2 not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(before=1, after=2)
assert len(result.levels[0]) == 2
# after < before
pytest.raises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2.values)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
msg = "Reindexing only valid with uniquely valued Index objects"
with tm.assert_raises_regex(InvalidIndexError, msg):
idx1.get_indexer(idx2)
def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='nearest')
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='pad', tolerance=2)
def test_hash_collisions(self):
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
names=['one', 'two'])
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(
len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]])
result = index.format()
assert result[3] == '1 0 0 0'
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
assert result[1] == 'foo two'
tm.reset_display_options()
warnings.filters = warn_filters
def test_to_frame(self):
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
result = index.to_frame(index=False)
expected = DataFrame(tuples)
expected.columns = ['first', 'second']
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame(index=False)
expected = DataFrame(
{0: np.repeat(np.arange(5, dtype='int64'), 3),
1: np.tile(pd.date_range('20130101', periods=3), 5)})
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
def test_to_hierarchical(self):
index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
result = index.to_hierarchical(3)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# K > 1
result = index.to_hierarchical(3, 2)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# non-sorted
index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
(2, 'a'), (2, 'b')],
names=['N1', 'N2'])
result = index.to_hierarchical(2)
expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'),
(1, 'b'),
(2, 'a'), (2, 'a'),
(2, 'b'), (2, 'b')],
names=['N1', 'N2'])
tm.assert_index_equal(result, expected)
assert result.names == index.names
def test_bounds(self):
self.index._bounds
def test_equals_multi(self):
assert self.index.equals(self.index)
assert not self.index.equals(self.index.values)
assert self.index.equals(Index(self.index.values))
assert self.index.equal_levels(self.index)
assert not self.index.equals(self.index[:-1])
assert not self.index.equals(self.index[-1])
# different number of levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1])
assert not index.equals(index2)
assert not index.equal_levels(index2)
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
assert not self.index.equal_levels(index)
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
def test_equals_missing_values(self):
# make sure take is not using -1
i = pd.MultiIndex.from_tuples([(0, pd.NaT),
(0, pd.Timestamp('20130101'))])
result = i[0:1].equals(i[0])
assert not result
result = i[1:2].equals(i[1])
assert not result
def test_identical(self):
mi = self.index.copy()
mi2 = self.index.copy()
assert mi.identical(mi2)
mi = mi.set_names(['new1', 'new2'])
assert mi.equals(mi2)
assert not mi.identical(mi2)
mi2 = mi2.set_names(['new1', 'new2'])
assert mi.identical(mi2)
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
assert mi.identical(mi3)
assert not mi.identical(mi4)
assert mi.equals(mi4)
def test_is_(self):
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
assert mi.is_(mi)
assert mi.is_(mi.view())
assert mi.is_(mi.view().view().view().view())
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
assert mi2.is_(mi)
assert mi.is_(mi2)
assert mi.is_(mi.set_names(["C", "D"]))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
assert mi.is_(mi2)
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
assert not mi3.is_(mi2)
# shouldn't change
assert mi2.is_(mi)
mi4 = mi3.view()
# GH 17464 - Remove duplicate MultiIndex levels
mi4.set_levels([lrange(10), lrange(10)], inplace=True)
assert not mi4.is_(mi3)
mi5 = mi.view()
mi5.set_levels(mi5.levels, inplace=True)
assert not mi5.is_(mi)
def test_union(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_union = piece1 | piece2
tups = sorted(self.index.values)
expected = MultiIndex.from_tuples(tups)
assert the_union.equals(expected)
# corner case, pass self or empty thing:
the_union = self.index.union(self.index)
assert the_union is self.index
the_union = self.index.union(self.index[:0])
assert the_union is self.index
# won't work in python 3
# tuples = self.index.values
# result = self.index[:4] | tuples[4:]
# assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(self.index)
# assert ('foo', 'one') in result
# assert 'B' in result
# result2 = self.index.union(other)
# assert result.equals(result2)
def test_intersection(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_int = piece1 & piece2
tups = sorted(self.index[3:5].values)
expected = MultiIndex.from_tuples(tups)
assert the_int.equals(expected)
# corner case, pass self
the_int = self.index.intersection(self.index)
assert the_int is self.index
# empty intersection: disjoint
empty = self.index[:2] & self.index[2:]
expected = self.index[:0]
assert empty.equals(expected)
# can't do in python 3
# tuples = self.index.values
# result = self.index & tuples
# assert result.equals(tuples)
def test_sub(self):
first = self.index
# - now raises (previously was set op difference)
with pytest.raises(TypeError):
first - self.index[-3:]
with pytest.raises(TypeError):
self.index[-3:] - first
with pytest.raises(TypeError):
self.index[-3:] - first.tolist()
with pytest.raises(TypeError):
first.tolist() - self.index[-3:]
def test_difference(self):
first = self.index
result = first.difference(self.index[-3:])
expected = MultiIndex.from_tuples(sorted(self.index[:-3].values),
sortorder=0,
names=self.index.names)
assert isinstance(result, MultiIndex)
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: reflexive
result = self.index.difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: superset
result = self.index[-3:].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: degenerate
result = self.index[:0].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# names not the same
chunklet = self.index[-3:]
chunklet.names = ['foo', 'baz']
result = first.difference(chunklet)
assert result.names == (None, None)
# empty, but non-equal
result = self.index.difference(self.index.sortlevel(1)[0])
assert len(result) == 0
        # difference with a non-MultiIndex (array of tuples) should not raise
result = first.difference(first.values)
assert result.equals(first[:0])
# name from empty array
result = first.difference([])
assert first.equals(result)
assert first.names == result.names
# name from non-empty array
result = first.difference([('foo', 'one')])
expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), (
'foo', 'two'), ('qux', 'one'), ('qux', 'two')])
expected.names = first.names
assert first.names == result.names
tm.assert_raises_regex(TypeError, "other must be a MultiIndex "
"or a list of tuples",
first.difference, [1, 2, 3, 4, 5])
def test_from_tuples(self):
tm.assert_raises_regex(TypeError, 'Cannot infer number of levels '
'from empty list',
MultiIndex.from_tuples, [])
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
# input tuples
result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_iterator(self):
# GH 18434
# input iterator for tuples
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b'])
tm.assert_index_equal(result, expected)
# input non-iterables
with tm.assert_raises_regex(
TypeError, 'Input must be a list / sequence of tuple-likes.'):
MultiIndex.from_tuples(0)
def test_from_tuples_empty(self):
# GH 16777
result = MultiIndex.from_tuples([], names=['a', 'b'])
expected = MultiIndex.from_arrays(arrays=[[], []],
names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_argsort(self):
result = self.index.argsort()
expected = self.index.values.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_sortlevel(self):
import random
tuples = list(self.index)
random.shuffle(tuples)
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_sortlevel_not_sort_remaining(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
sorted_idx, _ = mi.sortlevel('A', sort_remaining=False)
assert sorted_idx.equals(mi)
def test_sortlevel_deterministic(self):
tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'),
('foo', 'one'), ('baz', 'two'), ('qux', 'one')]
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_dims(self):
pass
def test_drop(self):
dropped = self.index.drop([('foo', 'two'), ('qux', 'one')])
index = MultiIndex.from_tuples([('foo', 'two'), ('qux', 'one')])
dropped2 = self.index.drop(index)
expected = self.index[[0, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
tm.assert_index_equal(dropped2, expected)
dropped = self.index.drop(['bar'])
expected = self.index[[0, 1, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop('foo')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
index = MultiIndex.from_tuples([('bar', 'two')])
pytest.raises(KeyError, self.index.drop, [('bar', 'two')])
pytest.raises(KeyError, self.index.drop, index)
pytest.raises(KeyError, self.index.drop, ['foo', 'two'])
# partially correct argument
mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')])
pytest.raises(KeyError, self.index.drop, mixed_index)
        # errors='ignore'
dropped = self.index.drop(index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(['foo', 'two'], errors='ignore')
expected = self.index[[2, 3, 4, 5]]
| tm.assert_index_equal(dropped, expected) | pandas.util.testing.assert_index_equal |
import pandas as pd
import numpy as np
from datetime import date
"""
dataset split:
(date_received)
dataset3: 20160701~20160731 (113640), features3 from 20160315~20160630 (off_test)
dataset2: 20160515~20160615 (258446), features2 from 20160201~20160514
dataset1: 20160414~20160514 (138303), features1 from 20160101~20160413
1.merchant related:
      total_sales. sales_use_coupon. total_coupon
      coupon_rate = sales_use_coupon/total_sales.
      transfer_rate = sales_use_coupon/total_coupon.
      merchant_min_distance, merchant_max_distance, merchant_mean_distance, merchant_median_distance of purchases that used a coupon
2.coupon related:
discount_rate. discount_man. discount_jian. is_man_jian
day_of_week,day_of_month. (date_received)
3.user related:
distance.
user_avg_distance, user_min_distance,user_max_distance.
buy_use_coupon. buy_total. coupon_received.
buy_use_coupon/coupon_received.
avg_diff_date_datereceived. min_diff_date_datereceived. max_diff_date_datereceived.
count_merchant.
4.user_merchant:
times_user_buy_merchant_before.
5. other feature:
this_month_user_receive_all_coupon_count
this_month_user_receive_same_coupon_count
this_month_user_receive_same_coupon_lastone
this_month_user_receive_same_coupon_firstone
this_day_user_receive_all_coupon_count
this_day_user_receive_same_coupon_count
day_gap_before, day_gap_after (receive the same coupon)
"""
#1754884 record,1053282 with coupon_id,9738 coupon. date_received:20160101~20160615,date:20160101~20160630, 539438 users, 8415 merchants
off_train = pd.read_csv('data/ccf_offline_stage1_train.csv',header=None)
off_train.columns = ['user_id','merchant_id','coupon_id','discount_rate','distance','date_received','date']
#2050 coupon_id. date_received:20160701~20160731, 76309 users(76307 in trainset, 35965 in online_trainset), 1559 merchants(1558 in trainset)
off_test = pd.read_csv('data/ccf_offline_stage1_test_revised.csv',header=None)
off_test.columns = ['user_id','merchant_id','coupon_id','discount_rate','distance','date_received']
#11429826 record(872357 with coupon_id),762858 user(267448 in off_train)
on_train = pd.read_csv('data/ccf_online_stage1_train.csv',header=None)
on_train.columns = ['user_id','merchant_id','action','coupon_id','discount_rate','date_received','date']
dataset3 = off_test
feature3 = off_train[((off_train.date>='20160315')&(off_train.date<='20160630'))|((off_train.date=='null')&(off_train.date_received>='20160315')&(off_train.date_received<='20160630'))]
dataset2 = off_train[(off_train.date_received>='20160515')&(off_train.date_received<='20160615')]
feature2 = off_train[(off_train.date>='20160201')&(off_train.date<='20160514')|((off_train.date=='null')&(off_train.date_received>='20160201')&(off_train.date_received<='20160514'))]
dataset1 = off_train[(off_train.date_received>='20160414')&(off_train.date_received<='20160514')]
feature1 = off_train[(off_train.date>='20160101')&(off_train.date<='20160413')|((off_train.date=='null')&(off_train.date_received>='20160101')&(off_train.date_received<='20160413'))]
############# other feature #############
"""
5. other feature:
this_month_user_receive_all_coupon_count
this_month_user_receive_same_coupon_count
this_month_user_receive_same_coupon_lastone
this_month_user_receive_same_coupon_firstone
this_day_user_receive_all_coupon_count
this_day_user_receive_same_coupon_count
day_gap_before, day_gap_after (receive the same coupon)
"""
#for dataset3
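# The block below builds the "other" features for dataset3; the same logic is repeated
# (with minor dtype casts) for dataset2 and dataset1 further down.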
t = dataset3[['user_id']]
t['this_month_user_receive_all_coupon_count'] = 1
t = t.groupby('user_id').agg('sum').reset_index()
t1 = dataset3[['user_id','coupon_id']]
t1['this_month_user_receive_same_coupon_count'] = 1
t1 = t1.groupby(['user_id','coupon_id']).agg('sum').reset_index()
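# t2: for users who received the same coupon more than once, keep the earliest and
# latest date_received of that coupon within the window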
t2 = dataset3[['user_id','coupon_id','date_received']]
t2.date_received = t2.date_received.astype('str')
t2 = t2.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t2['receive_number'] = t2.date_received.apply(lambda s:len(s.split(':')))
t2 = t2[t2.receive_number>1]
t2['max_date_received'] = t2.date_received.apply(lambda s:max([int(d) for d in s.split(':')]))
t2['min_date_received'] = t2.date_received.apply(lambda s:min([int(d) for d in s.split(':')]))
t2 = t2[['user_id','coupon_id','max_date_received','min_date_received']]
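# t3: flag whether each record is the last / first time the user received this coupon
# (-1 when the coupon was received only once)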
t3 = dataset3[['user_id','coupon_id','date_received']]
t3 = pd.merge(t3,t2,on=['user_id','coupon_id'],how='left')
t3['this_month_user_receive_same_coupon_lastone'] = t3.max_date_received - t3.date_received
t3['this_month_user_receive_same_coupon_firstone'] = t3.date_received - t3.min_date_received
def is_firstlastone(x):
if x==0:
return 1
elif x>0:
return 0
else:
        return -1  # -1 for coupons the user received only once (no earlier/later receipt)
t3.this_month_user_receive_same_coupon_lastone = t3.this_month_user_receive_same_coupon_lastone.apply(is_firstlastone)
t3.this_month_user_receive_same_coupon_firstone = t3.this_month_user_receive_same_coupon_firstone.apply(is_firstlastone)
t3 = t3[['user_id','coupon_id','date_received','this_month_user_receive_same_coupon_lastone','this_month_user_receive_same_coupon_firstone']]
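# t4 / t5: how many coupons (any coupon / this exact coupon) the user received on that day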
t4 = dataset3[['user_id','date_received']]
t4['this_day_user_receive_all_coupon_count'] = 1
t4 = t4.groupby(['user_id','date_received']).agg('sum').reset_index()
t5 = dataset3[['user_id','coupon_id','date_received']]
t5['this_day_user_receive_same_coupon_count'] = 1
t5 = t5.groupby(['user_id','coupon_id','date_received']).agg('sum').reset_index()
t6 = dataset3[['user_id','coupon_id','date_received']]
t6.date_received = t6.date_received.astype('str')
t6 = t6.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t6.rename(columns={'date_received':'dates'},inplace=True)
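# t6 joins all receive dates of a (user_id, coupon_id) pair into one ':'-separated string;
# the two helpers below use it to compute the gap in days to the nearest earlier / later
# receipt of the same coupon (-1 if there is none)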
def get_day_gap_before(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))-date(int(d[0:4]),int(d[4:6]),int(d[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
def get_day_gap_after(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(d[0:4]),int(d[4:6]),int(d[6:8]))-date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
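# e.g. date_received_date = '20160710-20160701:20160710:20160715'
#      -> day_gap_before = 9, day_gap_after = 5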
t7 = dataset3[['user_id','coupon_id','date_received']]
t7 = pd.merge(t7,t6,on=['user_id','coupon_id'],how='left')
t7['date_received_date'] = t7.date_received.astype('str') + '-' + t7.dates
t7['day_gap_before'] = t7.date_received_date.apply(get_day_gap_before)
t7['day_gap_after'] = t7.date_received_date.apply(get_day_gap_after)
t7 = t7[['user_id','coupon_id','date_received','day_gap_before','day_gap_after']]
other_feature3 = pd.merge(t1,t,on='user_id')
other_feature3 = pd.merge(other_feature3,t3,on=['user_id','coupon_id'])
other_feature3 = pd.merge(other_feature3,t4,on=['user_id','date_received'])
other_feature3 = pd.merge(other_feature3,t5,on=['user_id','coupon_id','date_received'])
other_feature3 = pd.merge(other_feature3,t7,on=['user_id','coupon_id','date_received'])
other_feature3.to_csv('data/other_feature3.csv',index=None)
print(other_feature3.shape)
#for dataset2
t = dataset2[['user_id']]
t['this_month_user_receive_all_coupon_count'] = 1
t = t.groupby('user_id').agg('sum').reset_index()
t1 = dataset2[['user_id','coupon_id']]
t1['this_month_user_receive_same_coupon_count'] = 1
t1 = t1.groupby(['user_id','coupon_id']).agg('sum').reset_index()
t2 = dataset2[['user_id','coupon_id','date_received']]
t2.date_received = t2.date_received.astype('str')
t2 = t2.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t2['receive_number'] = t2.date_received.apply(lambda s:len(s.split(':')))
t2 = t2[t2.receive_number>1]
t2['max_date_received'] = t2.date_received.apply(lambda s:max([int(d) for d in s.split(':')]))
t2['min_date_received'] = t2.date_received.apply(lambda s:min([int(d) for d in s.split(':')]))
t2 = t2[['user_id','coupon_id','max_date_received','min_date_received']]
t3 = dataset2[['user_id','coupon_id','date_received']]
t3 = pd.merge(t3,t2,on=['user_id','coupon_id'],how='left')
t3['this_month_user_receive_same_coupon_lastone'] = t3.max_date_received - t3.date_received.astype('int')
t3['this_month_user_receive_same_coupon_firstone'] = t3.date_received.astype('int') - t3.min_date_received
def is_firstlastone(x):
if x==0:
return 1
elif x>0:
return 0
else:
        return -1  # -1 for coupons the user received only once (no earlier/later receipt)
t3.this_month_user_receive_same_coupon_lastone = t3.this_month_user_receive_same_coupon_lastone.apply(is_firstlastone)
t3.this_month_user_receive_same_coupon_firstone = t3.this_month_user_receive_same_coupon_firstone.apply(is_firstlastone)
t3 = t3[['user_id','coupon_id','date_received','this_month_user_receive_same_coupon_lastone','this_month_user_receive_same_coupon_firstone']]
t4 = dataset2[['user_id','date_received']]
t4['this_day_user_receive_all_coupon_count'] = 1
t4 = t4.groupby(['user_id','date_received']).agg('sum').reset_index()
t5 = dataset2[['user_id','coupon_id','date_received']]
t5['this_day_user_receive_same_coupon_count'] = 1
t5 = t5.groupby(['user_id','coupon_id','date_received']).agg('sum').reset_index()
t6 = dataset2[['user_id','coupon_id','date_received']]
t6.date_received = t6.date_received.astype('str')
t6 = t6.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t6.rename(columns={'date_received':'dates'},inplace=True)
def get_day_gap_before(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))-date(int(d[0:4]),int(d[4:6]),int(d[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
def get_day_gap_after(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(d[0:4]),int(d[4:6]),int(d[6:8]))-date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
t7 = dataset2[['user_id','coupon_id','date_received']]
t7 = pd.merge(t7,t6,on=['user_id','coupon_id'],how='left')
t7['date_received_date'] = t7.date_received.astype('str') + '-' + t7.dates
t7['day_gap_before'] = t7.date_received_date.apply(get_day_gap_before)
t7['day_gap_after'] = t7.date_received_date.apply(get_day_gap_after)
t7 = t7[['user_id','coupon_id','date_received','day_gap_before','day_gap_after']]
other_feature2 = pd.merge(t1,t,on='user_id')
other_feature2 = pd.merge(other_feature2,t3,on=['user_id','coupon_id'])
other_feature2 = pd.merge(other_feature2,t4,on=['user_id','date_received'])
other_feature2 = pd.merge(other_feature2,t5,on=['user_id','coupon_id','date_received'])
other_feature2 = pd.merge(other_feature2,t7,on=['user_id','coupon_id','date_received'])
other_feature2.to_csv('data/other_feature2.csv',index=None)
print(other_feature2.shape)
#for dataset1
t = dataset1[['user_id']]
t['this_month_user_receive_all_coupon_count'] = 1
t = t.groupby('user_id').agg('sum').reset_index()
t1 = dataset1[['user_id','coupon_id']]
t1['this_month_user_receive_same_coupon_count'] = 1
t1 = t1.groupby(['user_id','coupon_id']).agg('sum').reset_index()
t2 = dataset1[['user_id','coupon_id','date_received']]
t2.date_received = t2.date_received.astype('str')
t2 = t2.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t2['receive_number'] = t2.date_received.apply(lambda s:len(s.split(':')))
t2 = t2[t2.receive_number>1]
t2['max_date_received'] = t2.date_received.apply(lambda s:max([int(d) for d in s.split(':')]))
t2['min_date_received'] = t2.date_received.apply(lambda s:min([int(d) for d in s.split(':')]))
t2 = t2[['user_id','coupon_id','max_date_received','min_date_received']]
t3 = dataset1[['user_id','coupon_id','date_received']]
t3 = pd.merge(t3,t2,on=['user_id','coupon_id'],how='left')
t3['this_month_user_receive_same_coupon_lastone'] = t3.max_date_received - t3.date_received.astype('int')
t3['this_month_user_receive_same_coupon_firstone'] = t3.date_received.astype('int') - t3.min_date_received
def is_firstlastone(x):
if x==0:
return 1
elif x>0:
return 0
else:
        return -1  # -1 for coupons the user received only once (no earlier/later receipt)
t3.this_month_user_receive_same_coupon_lastone = t3.this_month_user_receive_same_coupon_lastone.apply(is_firstlastone)
t3.this_month_user_receive_same_coupon_firstone = t3.this_month_user_receive_same_coupon_firstone.apply(is_firstlastone)
t3 = t3[['user_id','coupon_id','date_received','this_month_user_receive_same_coupon_lastone','this_month_user_receive_same_coupon_firstone']]
t4 = dataset1[['user_id','date_received']]
t4['this_day_user_receive_all_coupon_count'] = 1
t4 = t4.groupby(['user_id','date_received']).agg('sum').reset_index()
t5 = dataset1[['user_id','coupon_id','date_received']]
t5['this_day_user_receive_same_coupon_count'] = 1
t5 = t5.groupby(['user_id','coupon_id','date_received']).agg('sum').reset_index()
t6 = dataset1[['user_id','coupon_id','date_received']]
t6.date_received = t6.date_received.astype('str')
t6 = t6.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t6.rename(columns={'date_received':'dates'},inplace=True)
def get_day_gap_before(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))-date(int(d[0:4]),int(d[4:6]),int(d[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
def get_day_gap_after(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(d[0:4]),int(d[4:6]),int(d[6:8]))-date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
t7 = dataset1[['user_id','coupon_id','date_received']]
t7 = pd.merge(t7,t6,on=['user_id','coupon_id'],how='left')
t7['date_received_date'] = t7.date_received.astype('str') + '-' + t7.dates
t7['day_gap_before'] = t7.date_received_date.apply(get_day_gap_before)
t7['day_gap_after'] = t7.date_received_date.apply(get_day_gap_after)
t7 = t7[['user_id','coupon_id','date_received','day_gap_before','day_gap_after']]
other_feature1 = pd.merge(t1,t,on='user_id')
other_feature1 = pd.merge(other_feature1,t3,on=['user_id','coupon_id'])
other_feature1 = pd.merge(other_feature1,t4,on=['user_id','date_received'])
other_feature1 = pd.merge(other_feature1,t5,on=['user_id','coupon_id','date_received'])
other_feature1 = pd.merge(other_feature1,t7,on=['user_id','coupon_id','date_received'])
other_feature1.to_csv('data/other_feature1.csv',index=None)
print(other_feature1.shape)
############# coupon related feature #############
"""
2.coupon related:
discount_rate. discount_man. discount_jian. is_man_jian
day_of_week,day_of_month. (date_received)
"""
def calc_discount_rate(s):
    s = str(s)
s = s.split(':')
if len(s)==1:
return float(s[0])
else:
return 1.0-float(s[1])/float(s[0])
def get_discount_man(s):
    s = str(s)
s = s.split(':')
if len(s)==1:
return 'null'
else:
return int(s[0])
def get_discount_jian(s):
    s = str(s)
s = s.split(':')
if len(s)==1:
return 'null'
else:
return int(s[1])
def is_man_jian(s):
    s = str(s)
s = s.split(':')
if len(s)==1:
return 0
else:
return 1
#dataset3
dataset3['day_of_week'] = dataset3.date_received.astype('str').apply(lambda x:date(int(x[0:4]),int(x[4:6]),int(x[6:8])).weekday()+1)
dataset3['day_of_month'] = dataset3.date_received.astype('str').apply(lambda x:int(x[6:8]))
dataset3['days_distance'] = dataset3.date_received.astype('str').apply(lambda x:(date(int(x[0:4]),int(x[4:6]),int(x[6:8]))-date(2016,6,30)).days)
dataset3['discount_man'] = dataset3.discount_rate.apply(get_discount_man)
dataset3['discount_jian'] = dataset3.discount_rate.apply(get_discount_jian)
dataset3['is_man_jian'] = dataset3.discount_rate.apply(is_man_jian)
dataset3['discount_rate'] = dataset3.discount_rate.apply(calc_discount_rate)
d = dataset3[['coupon_id']]
d['coupon_count'] = 1
d = d.groupby('coupon_id').agg('sum').reset_index()
dataset3 = pd.merge(dataset3,d,on='coupon_id',how='left')
dataset3.to_csv('data/coupon3_feature.csv',index=None)
#dataset2
dataset2['day_of_week'] = dataset2.date_received.astype('str').apply(lambda x:date(int(x[0:4]),int(x[4:6]),int(x[6:8])).weekday()+1)
dataset2['day_of_month'] = dataset2.date_received.astype('str').apply(lambda x:int(x[6:8]))
dataset2['days_distance'] = dataset2.date_received.astype('str').apply(lambda x:(date(int(x[0:4]),int(x[4:6]),int(x[6:8]))-date(2016,5,14)).days)
dataset2['discount_man'] = dataset2.discount_rate.apply(get_discount_man)
dataset2['discount_jian'] = dataset2.discount_rate.apply(get_discount_jian)
dataset2['is_man_jian'] = dataset2.discount_rate.apply(is_man_jian)
dataset2['discount_rate'] = dataset2.discount_rate.apply(calc_discount_rate)
d = dataset2[['coupon_id']]
d['coupon_count'] = 1
d = d.groupby('coupon_id').agg('sum').reset_index()
dataset2 = pd.merge(dataset2,d,on='coupon_id',how='left')
dataset2.to_csv('data/coupon2_feature.csv',index=None)
#dataset1
dataset1['day_of_week'] = dataset1.date_received.astype('str').apply(lambda x:date(int(x[0:4]),int(x[4:6]),int(x[6:8])).weekday()+1)
dataset1['day_of_month'] = dataset1.date_received.astype('str').apply(lambda x:int(x[6:8]))
dataset1['days_distance'] = dataset1.date_received.astype('str').apply(lambda x:(date(int(x[0:4]),int(x[4:6]),int(x[6:8]))-date(2016,4,13)).days)
dataset1['discount_man'] = dataset1.discount_rate.apply(get_discount_man)
dataset1['discount_jian'] = dataset1.discount_rate.apply(get_discount_jian)
dataset1['is_man_jian'] = dataset1.discount_rate.apply(is_man_jian)
dataset1['discount_rate'] = dataset1.discount_rate.apply(calc_discount_rate)
d = dataset1[['coupon_id']]
d['coupon_count'] = 1
d = d.groupby('coupon_id').agg('sum').reset_index()
dataset1 = pd.merge(dataset1,d,on='coupon_id',how='left')
dataset1.to_csv('data/coupon1_feature.csv',index=None)
############# merchant related feature #############
"""
1.merchant related:
total_sales. sales_use_coupon. total_coupon
coupon_rate = sales_use_coupon/total_sales.
transfer_rate = sales_use_coupon/total_coupon.
      merchant_min_distance, merchant_max_distance, merchant_mean_distance, merchant_median_distance of purchases that used a coupon
"""
#for dataset3
merchant3 = feature3[['merchant_id','coupon_id','distance','date_received','date']]
t = merchant3[['merchant_id']]
t.drop_duplicates(inplace=True)
t1 = merchant3[merchant3.date!='null'][['merchant_id']]
t1['total_sales'] = 1
t1 = t1.groupby('merchant_id').agg('sum').reset_index()
t2 = merchant3[(merchant3.date!='null')&(merchant3.coupon_id!='null')][['merchant_id']]
t2['sales_use_coupon'] = 1
t2 = t2.groupby('merchant_id').agg('sum').reset_index()
t3 = merchant3[merchant3.coupon_id!='null'][['merchant_id']]
t3['total_coupon'] = 1
t3 = t3.groupby('merchant_id').agg('sum').reset_index()
t4 = merchant3[(merchant3.date!='null')&(merchant3.coupon_id!='null')][['merchant_id','distance']]
t4.replace('null',-1,inplace=True)
t4.distance = t4.distance.astype('int')
t4.replace(-1,np.nan,inplace=True)
t5 = t4.groupby('merchant_id').agg('min').reset_index()
t5.rename(columns={'distance':'merchant_min_distance'},inplace=True)
t6 = t4.groupby('merchant_id').agg('max').reset_index()
t6.rename(columns={'distance':'merchant_max_distance'},inplace=True)
t7 = t4.groupby('merchant_id').agg('mean').reset_index()
t7.rename(columns={'distance':'merchant_mean_distance'},inplace=True)
t8 = t4.groupby('merchant_id').agg('median').reset_index()
t8.rename(columns={'distance':'merchant_median_distance'},inplace=True)
merchant3_feature = pd.merge(t,t1,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t2,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t3,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t5,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t6,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t7,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t8,on='merchant_id',how='left')
merchant3_feature.sales_use_coupon = merchant3_feature.sales_use_coupon.replace(np.nan,0) #fillna with 0
merchant3_feature['merchant_coupon_transfer_rate'] = merchant3_feature.sales_use_coupon.astype('float') / merchant3_feature.total_coupon
merchant3_feature['coupon_rate'] = merchant3_feature.sales_use_coupon.astype('float') / merchant3_feature.total_sales
merchant3_feature.total_coupon = merchant3_feature.total_coupon.replace(np.nan,0) #fillna with 0
merchant3_feature.to_csv('data/merchant3_feature.csv',index=None)
#for dataset2
merchant2 = feature2[['merchant_id','coupon_id','distance','date_received','date']]
t = merchant2[['merchant_id']]
t.drop_duplicates(inplace=True)
t1 = merchant2[merchant2.date!='null'][['merchant_id']]
t1['total_sales'] = 1
t1 = t1.groupby('merchant_id').agg('sum').reset_index()
t2 = merchant2[(merchant2.date!='null')&(merchant2.coupon_id!='null')][['merchant_id']]
t2['sales_use_coupon'] = 1
t2 = t2.groupby('merchant_id').agg('sum').reset_index()
t3 = merchant2[merchant2.coupon_id!='null'][['merchant_id']]
t3['total_coupon'] = 1
t3 = t3.groupby('merchant_id').agg('sum').reset_index()
t4 = merchant2[(merchant2.date!='null')&(merchant2.coupon_id!='null')][['merchant_id','distance']]
t4.replace('null',-1,inplace=True)
t4.distance = t4.distance.astype('int')
t4.replace(-1,np.nan,inplace=True)
t5 = t4.groupby('merchant_id').agg('min').reset_index()
t5.rename(columns={'distance':'merchant_min_distance'},inplace=True)
t6 = t4.groupby('merchant_id').agg('max').reset_index()
t6.rename(columns={'distance':'merchant_max_distance'},inplace=True)
t7 = t4.groupby('merchant_id').agg('mean').reset_index()
t7.rename(columns={'distance':'merchant_mean_distance'},inplace=True)
t8 = t4.groupby('merchant_id').agg('median').reset_index()
t8.rename(columns={'distance':'merchant_median_distance'},inplace=True)
merchant2_feature = pd.merge(t,t1,on='merchant_id',how='left')
merchant2_feature = pd.merge(merchant2_feature,t2,on='merchant_id',how='left')
merchant2_feature = pd.merge(merchant2_feature,t3,on='merchant_id',how='left')
import logging as log
import typing
from takco.linkedstring import LinkedString
try:
import pandas as pd
except:
log.error(f"Cannot import pandas")
@pd.api.extensions.register_dataframe_accessor("takco")
class TakcoAccessor:
def __init__(self, df):
self._df = df
self.provenance = {}
@staticmethod
def try_html(obj):
return obj._repr_html_() if hasattr(obj, "_repr_html_") else obj
@property
def head(self):
return tuple(zip(*list(self._df.columns)))
@classmethod
def from_header(cls, head):
columns = pd.MultiIndex.from_frame(pd.DataFrame(head).T)
return pd.DataFrame([], columns=columns)
@property
def style(self):
head = self._df.columns.to_frame().applymap(self.try_html)
head.insert(0, -1, range(len(self._df.columns)))
return pd.DataFrame(
self._df.applymap(self.try_html).values,
columns=pd.MultiIndex.from_arrays(head.values.T),
).style.set_table_styles(
[{"selector": f"thead tr:first-child", "props": [("display", "none")]}]
)
def highlight_cells(self, body=(), head=(), color=None, props=()):
props = props or [("background-color", color or "#ff0")]
st = self.style
for ci, ri in body:
st.table_styles.append(
{"selector": f".data.row{ri}.col{ci}", "props": props}
)
for ci, li in head:
st.table_styles.append(
{"selector": f".col_heading.level{li+1}.col{ci}", "props": props}
)
return st
def highlight_pivot(self, level, colfrom, colto, color=None, props=(), **kwargs):
head = [(c, level) for c in range(colfrom, colto + 1)]
return self.highlight_cells(head=head, color=color, props=props)
def to_html(self):
return self.style.render()
def _repr_html_(self):
return self._df.head().takco.to_html()
TABEL_PROBLEMS = [
"Error: This is not a valid number. Please refer to the documentation at for correct input.",
"[[|]]",
]
def get_tabel_rows(matrix, linked=True):
urltemplate = "http://{lang}.wikipedia.org/wiki/{page}"
newmatrix = []
for row in matrix:
newrow = []
for cell in row:
text = str(cell.get("text", "") or "")
for p in TABEL_PROBLEMS:
text = text.replace(p, "")
if linked:
links = []
for link in cell.get("surfaceLinks", []):
target = link.get("target")
if not target:
continue
start = link.get("offset", 0)
end = link.get("endOffset", len(text))
if link.get("linkType") in ["INTERNAL", "INTERNAL_RED"]:
try:
lang = target.get("language", "en")
page = target.setdefault("href", target.get("title", ""))
page = page.replace(" ", "_")
url = urltemplate.format(lang=lang, page=page)
if target.get("id", 0) > 0:
url += "?curid=" + str(target.get("id"))
except:
raise Exception(f"bad target {target}")
if start == 0 and end == 1:
end = len(text) # HACK
links.append((start, end, url))
newrow.append(LinkedString(text, links))
else:
newrow.append(text)
newmatrix.append(newrow)
return newmatrix
def to_tabel_rows(matrix):
return [
[
{
"text": getattr(c, "text", str(c)),
"surfaceLinks": [
{
"offset": start,
"endOffset": end,
"linkType": "INTERNAL",
"target": {"url": url, "title": url.split("/")[-1],},
}
for start, end, url in getattr(c, "links", [])
],
}
for c in row
]
for row in matrix
]
def from_tabel(obj, linked=True):
body = get_tabel_rows(obj.get("tableData", []), linked=linked)
head = get_tabel_rows(obj.get("tableHeaders", []), linked=linked)
try:
df = pd.DataFrame(body, columns=head or None)
except:
df = pd.DataFrame(body)
provenance = {}
for key in ["tableCaption", "sectionTitle", "pgTitle", "tableId", "pgId"]:
provenance[key] = obj.get(key)
df.attrs["provenance"] = provenance
return df
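# Minimal usage sketch for from_tabel (the input dict below is a hand-made
# assumption of the TABEL row format, not a real dump):
#
#   obj = {
#       "pgTitle": "Example page",
#       "tableHeaders": [[{"text": "city"}, {"text": "country"}]],
#       "tableData": [[{"text": "Berlin"}, {"text": "Germany"}]],
#   }
#   df = from_tabel(obj, linked=False)      # plain strings instead of LinkedString cells
#   df.attrs["provenance"]["pgTitle"]       # -> "Example page"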
class Table(dict):
""" A takco table object
>>> Table(head=[['foo','bar']], body=[['1','2']]).head
(('foo', 'bar'),)
"""
_id: str
head: typing.Tuple[typing.Tuple[str, ...], ...]
body: typing.Tuple[typing.Tuple[str, ...], ...]
provenance: typing.Dict[str, typing.Any]
annotations: typing.Dict[str, typing.Any]
headerId: int
_old_keys = {
"_id": lambda self: self._id,
"tableData": lambda self: to_tabel_rows(self.body),
"tableHeaders": lambda self: to_tabel_rows(self.head),
"headerId": lambda self: self.headerId,
"numCols": lambda self: len(next(iter(self.body), ())),
"numDataRows": lambda self: len(self.body),
"numHeaderRows": lambda self: len(self.head),
"numericColumns": lambda self: [],
}
_default_annotations = ["entities", "properties", "classes"]
def __init__(
self, obj=None, _id=None, head=(), body=(), provenance=(), annotations=(),
linked=True
):
if isinstance(obj, Table):
_id = obj._id
head, body = obj.head, obj.body
provenance = dict(obj.provenance)
annotations = dict(obj.annotations)
for key in self._default_annotations:
if key in obj:
annotations[key] = obj.get(key)
for key in list(obj.keys()):
if (key not in self._old_keys) and (
key not in self._default_annotations
):
provenance[key] = obj.get(key)
elif obj is not None:
_id = obj.get("_id")
body = get_tabel_rows(obj.get("tableData", []), linked=linked)
head = get_tabel_rows(obj.get("tableHeaders", []), linked=linked)
annotations = {}
for key in self._default_annotations:
if key in obj:
annotations[key] = obj.get(key)
provenance = {}
for key in list(obj.keys()):
if (key not in self._old_keys) and (
key not in self._default_annotations
):
provenance[key] = obj.get(key)
self.head, self.body = tuple(map(tuple, head)), tuple(map(tuple, body))
self._id = _id or str(hash(self.head + self.body))
self.provenance = dict(provenance)
self.annotations = dict(annotations)
self.headerId = self.get_headerId(self.head)
@staticmethod
def get_headerId(header):
import hashlib
# header is a tuple of tuples.
header = tuple(map(tuple, header))
h = hashlib.sha224(str(header).encode()).hexdigest()
return int(h[:16], 16) // 2 # integer between 0 and SQLite MAX_VAL
def to_dict(self):
return {
**{k: self[k] for k in self._old_keys},
**self.provenance,
**self.annotations,
}
def __repr__(self):
return self.df.head(1).__repr__()
def _repr_html_(self):
return self.df.takco._repr_html_()
def __bool__(self):
return bool(self.body)
def __len__(self):
return len(self.body)
@property
def df(self):
        columns = pd.MultiIndex.from_arrays(self.head)
#some of these imports are extraneous and left over from the flask megatutorial
from flask import render_template, flash, redirect, url_for, request, Flask, jsonify, send_from_directory
from app import app, db, DataWizardTools, HousingToolBox
from app.models import User, Post
from app.forms import PostForm
from werkzeug.urls import url_parse
from datetime import datetime
from datetime import date
import pandas as pd
import os
@app.route("/WaiverMaker", methods=['GET', 'POST'])
def WaiverMaker():
#upload file from computer
if request.method == 'POST':
print(request.files['file'])
f = request.files['file']
#turn the excel file into a dataframe, but skip the top 2 rows if they are blank
test = pd.read_excel(f)
test.fillna('',inplace=True)
if test.iloc[0][0] == '':
df = pd.read_excel(f,skiprows=2)
print("Skipped top two rows")
else:
            df = pd.read_excel(f)
"""figures of merit is a collection of financial calculations for energy.
This module contains financial calculations based on solar power and batteries
in a given network. The networks used are defined as network objects (see evolve parsers).
TODO: Add inverters (inverters are not considered at the moment) and improve NaN handling
"""
import numpy
import pandas as pd
from c3x.data_cleaning import unit_conversion
#Todo:
#Add inverters: Inverters are not considered at the moment
#Improve NaN handling
def meter_power(meas_dict: dict, meter: int, axis: int = 0, column: int = 0) -> pd.Series:
"""
    Calculates the total power at a meter by summing the load, solar and battery
    power of its individual measurement points.
Args:
meas_dict (dict): dict with measurement for one or multiple nodes.
meter(int): Id for a meter
axis (int): how data is concatenated for results
column (int): column index to be used
return:
meter_p (pd.Series): combined power (solar, battery, load)
"""
meter_p = pd.DataFrame()
if meas_dict[meter]:
meter_p = pd.DataFrame()
for meas in meas_dict[meter]:
if 'load' in meas:
meter_p = pd.concat([meter_p, meas_dict[meter][meas].iloc[:,column]], axis=axis)
elif 'solar' in meas:
meter_p = pd.concat([meter_p, meas_dict[meter][meas].iloc[:,column]], axis=axis)
elif 'batteries' in meas:
meter_p = pd.concat([meter_p, meas_dict[meter][meas].iloc[:,column]], axis=axis)
meter_p = meter_p.sum(axis=1)
return meter_p
def financial(meter_p: pd.Series, import_tariff: pd.Series, export_tariff: pd.Series) -> pd.Series:
"""
Evaluate the financial outcome for a customer.
A conversion from kW to kWh is handled internally
Note: assumes constant step size in timestamps (use forth index beforehand)
Args:
meter_p (pd.Series ): Power of a node
import_tariff (pd.Series): Expects this to be in $/kWh.
export_tariff (pd.Series): Expects this to be in $/kWh.
Returns:
cost (pd.Series): cost per measurement point, using import and export tariffs
"""
# Note: need to ensure meter data is converted to kWh
timestep = numpy.timedelta64(meter_p.index[1] - meter_p.index[0])
meter = unit_conversion.convert_watt_to_watt_hour(meter_p, timedelta=timestep)
import_power_cost = meter.where(meter >= 0).fillna(value=0.0)
export_power_revenue = meter.where(meter < 0).fillna(value=0.0)
cost = import_power_cost * import_tariff + export_power_revenue*export_tariff
return cost
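# Minimal usage sketch (all values and the 30-minute index are illustrative
# assumptions; units must match what unit_conversion.convert_watt_to_watt_hour
# and the tariffs expect):
#
#   idx = pd.date_range("2020-01-01", periods=4, freq="30T")
#   meter_p = pd.Series([2000.0, -1000.0, 500.0, -250.0], index=idx)   # node power
#   import_tariff = pd.Series(0.25, index=idx)                         # $/kWh
#   export_tariff = pd.Series(0.10, index=idx)                         # $/kWh
#   cost = financial(meter_p, import_tariff, export_tariff)            # cost per timestep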
def customer_financial(meas_dict: dict, node_keys: list = None, tariff: dict = None) -> dict:
"""
Evaluate the financial outcome for a selected customer or for all customers.
Note: not currently setup to handle missing data (eg NANs)
#TODO: consider inverters and how to avoid double counting with solar, batteries
Args:
meas_dict (dict): dict with measurement for one or multiple nodes.
node_keys (list): list nodes for which financials are calculated.
tariff (dict): nodes tariff data. Expects this to be in $/kWh.
Returns:
results_dict: cost per node and the average cost over all nodes
"""
results_dict = {}
average = []
nodes = node_keys if node_keys else meas_dict.keys()
for key in nodes:
if type(key) == int:
key = str(key)
if meas_dict[key]:
if key in tariff:
meter_p = meter_power(meas_dict, key, axis=1)
meter_p_cost = financial(meter_p,
tariff[key]['import_tariff'],
tariff[key]['export_tariff'])
results_dict[key] = meter_p_cost
initiate = 0
for node in results_dict.values():
average = node if initiate == 0 else average.append(node)
initiate = 1
average = numpy.nanmean(average)
results_dict["average"] = average
return results_dict
def customer_cost_financial(tariff: dict, energy_grid_load: pd.Series, energy_solar_grid: pd.Series,
energy_battery_load: pd.Series, energy_solar_battery: pd.Series,
energy_solar_load: pd.Series) -> pd.Series:
"""
    evaluates the customer's cost
Args:
tariff: specifies tariffs to be applied to aggregation of customers.
energy_grid_load: specifies the energy flow between grid and load
energy_solar_grid: specifies the energy flow between solar and gird
energy_battery_load: specifies the energy flow between battery and load
energy_solar_battery: specifies the energy flow between solar and battery
energy_solar_load: specifies the energy flow between solar and load
Returns:
customer_cost (pd.Series):
"""
customer_cost = financial(energy_grid_load, tariff['re_import_tariff'], 0)
customer_cost += financial(energy_grid_load, tariff['rt_import_tariff'], 0)
customer_cost += financial(energy_battery_load, tariff['le_import_tariff'], 0)
customer_cost += financial(energy_battery_load, tariff['lt_import_tariff'], 0)
customer_cost -= financial(energy_solar_grid, tariff['re_export_tariff'], 0)
customer_cost += financial(energy_solar_grid, tariff['rt_export_tariff'], 0)
customer_cost -= financial(energy_solar_battery, tariff['le_export_tariff'], 0)
customer_cost += financial(energy_solar_battery, tariff['lt_export_tariff'], 0)
customer_cost -= financial(energy_solar_battery, tariff['le_export_tariff'], 0)
customer_cost += financial(energy_solar_load, tariff['lt_import_tariff'], 0)
customer_cost += financial(energy_solar_load, tariff['lt_export_tariff'], 0)
return customer_cost
def battery_cost_financial(tariff: dict, energy_grid_battery: pd.Series,
energy_battery_grid: pd.Series, energy_battery_load: pd.Series,
energy_solar_battery: pd.Series) -> pd.Series:
"""
evaluates the battery cost
Args:
tariff (dict): specifies tariffs to be applied to aggregation of customers.
energy_grid_battery (pd.Series): specifies the energy flow between grid and battery
energy_battery_grid (pd.Series): specifies the energy flow between battery and gird
energy_battery_load (pd.Series): specifies the energy flow between battery and load
energy_solar_battery (pd.Series): specifies the energy flow between solar and battery
Returns:
battery_cost (pd.Series):
"""
battery_cost = financial(energy_solar_battery, tariff['le_import_tariff'], 0)
battery_cost += financial(energy_solar_battery, tariff['lt_import_tariff'], 0)
battery_cost -= financial(energy_battery_load, tariff['le_export_tariff'], 0)
battery_cost += financial(energy_battery_load, tariff['lt_export_tariff'], 0)
battery_cost += financial(energy_grid_battery, tariff['re_import_tariff'], 0)
battery_cost += financial(energy_grid_battery, tariff['rt_import_tariff'], 0)
battery_cost -= financial(energy_battery_grid, tariff['re_export_tariff'], 0)
battery_cost += financial(energy_battery_grid, tariff['rt_export_tariff'], 0)
return battery_cost
def network_cost_financial(tariff: dict, energy_grid_load: pd.Series,
energy_grid_battery: pd.Series, energy_battery_grid: pd.Series,
energy_battery_load: pd.Series, energy_solar_battery: pd.Series,
energy_solar_load: pd.Series) -> pd.Series:
"""
evaluates the network cost
Args:
tariff (dict): specifies tariffs to be applied to aggregation of customers.
energy_grid_load (pd.Series): specifies the energy flow between grid and load
energy_grid_battery (pd.Series): specifies the energy flow between grid and battery
energy_battery_grid (pd.Series): specifies the energy flow between battery and grid
energy_battery_load (pd.Series): specifies the energy flow between battery and solar
energy_solar_battery (pd.Series) : specifies the energy flow between solar and battery
energy_solar_load (pd.Series): specifies the energy flow between solar and load
Returns:
network_cost(pd.Series)
"""
network_cost = -financial(energy_grid_load, tariff['rt_import_tariff'], 0)
network_cost -= financial(energy_battery_load, tariff['lt_import_tariff'], 0)
network_cost -= financial(energy_battery_load, tariff['lt_export_tariff'], 0)
network_cost -= financial(energy_solar_battery, tariff['lt_import_tariff'], 0)
network_cost -= financial(energy_solar_battery, tariff['lt_export_tariff'], 0)
network_cost -= financial(energy_grid_battery, tariff['rt_import_tariff'], 0)
network_cost -= financial(energy_battery_grid, tariff['rt_export_tariff'], 0)
network_cost -= financial(energy_solar_load, tariff['lt_import_tariff'], 0)
network_cost -= financial(energy_solar_load, tariff['lt_export_tariff'], 0)
return network_cost
def lem_financial(customer_tariffs, energy_grid_load, energy_grid_battery, energy_solar_grid,
energy_battery_grid, energy_battery_load, energy_solar_battery,
energy_solar_load, battery_tariffs=None):
"""
evaluate the cost for the local energy model
Args:
customer_tariffs: specifies tariffs to be applied to aggregation of customers.
energy_grid_load (pd.series): specifies the energy flow between grid and load
energy_grid_battery: specifies the energy flow between grid and battery
energy_solar_grid: specifies the energy flow between solar and grid
energy_battery_grid: specifies the energy flow between battery and grid
energy_battery_load: specifies the energy flow between battery and solar
energy_solar_battery: specifies the energy flow between solar and battery
energy_solar_load: specifies the energy flow between solar and load
battery_tariffs: specifies tariffs to be applied to aggregation of battery.
(if none given customer_tariffs ware used)
Returns:
customer_cost, battery_cost, network_cost
"""
customer_cost = customer_cost_financial(customer_tariffs, energy_grid_load, energy_solar_grid,
energy_battery_load, energy_solar_battery,
energy_solar_load)
bt_choice = battery_tariffs if battery_tariffs else customer_tariffs
battery_cost = battery_cost_financial(bt_choice, energy_grid_battery, energy_battery_grid,
energy_battery_load, energy_solar_battery)
network_cost = network_cost_financial(customer_tariffs, energy_grid_load, energy_grid_battery,
energy_battery_grid, energy_battery_load,
energy_solar_battery, energy_solar_load)
return customer_cost, battery_cost, network_cost
def peak_powers(meas_dict: dict, node_keys: list = None) -> dict:
"""
Calculate the peak power flows into and out of the network.
#TODO: consider selecting peak powers per phase
#TODO: consider inverters and how to avoid double counting with solar, batteries
Args:
meas_dict (dict): dict with measurement for one or multiple nodes.
node_keys (list): list of Node.names in Network.nodes.
Returns:
results_dict (dict): dictionary of peak power into and out of network in kW,
and in kW/connection point.
"""
nodes = node_keys if node_keys else meas_dict.keys()
sum_meter_power = pd.DataFrame([])
for key in nodes:
if type(key) == int:
key = str(key)
if meas_dict[key]:
meter_p = meter_power(meas_dict, key, axis=1)
if sum_meter_power.empty:
sum_meter_power = meter_p.copy()
else:
sum_meter_power = pd.concat([sum_meter_power, meter_p], axis=1, sort=True)
sum_power = sum_meter_power.sum(axis=1)
aver_power = numpy.nanmean(sum_meter_power, axis=1)
return {"peak_power_import": numpy.max(sum_power),
"peak_power_export": numpy.min(sum_power),
"peak_power_import_av": numpy.max(aver_power),
"peak_power_export_av": numpy.min(aver_power),
"peak_power_import_index": sum_power.idxmax(),
"peak_power_export_index": sum_power.idxmax()}
def self_sufficiency(load_p: pd.DataFrame, solar_p: pd.DataFrame, battery_p: pd.DataFrame):
"""
Self-sufficiency = 1 - imports / consumption
Note: the function expects a full index
#TODO: consider inverters and how to avoid double counting with solar, batteries
Args:
        load_p (pd.DataFrame): measurement data for the load of a single node.
        solar_p (pd.DataFrame): measurement data for the solar generation of a single node.
        battery_p (pd.DataFrame): measurement data for the battery of a single node.
    Returns:
        results_dict: self_sufficiency_solar, self_sufficiency_batteries
"""
self_sufficiency_solar = numpy.nan
self_sufficiency_battery = numpy.nan
if not load_p.empty:
net_load_solar = pd.concat((load_p, solar_p), axis=1).sum(axis=1)
net_load_solar_battery = pd.concat((load_p, solar_p, battery_p), axis=1).sum(axis=1)
#create an array that contains which entries are import and which are export
mask_import_solar = (net_load_solar >= 0)
mask_import_solar_battery = (net_load_solar_battery >= 0)
net_import_solar = net_load_solar * mask_import_solar
net_import_solar_battery = net_load_solar_battery * mask_import_solar_battery
sum_load = numpy.nansum(load_p)
sum_solar = numpy.nansum(solar_p)
        # it doesn't make sense to calculate this if there is no solar or the load data is missing (0.0)
if sum_solar < 0 and sum_load != 0:
self_sufficiency_solar = 1 - (numpy.nansum(net_import_solar) / sum_load)
self_sufficiency_battery = 1 - (numpy.nansum(net_import_solar_battery) / sum_load)
else:
print("Warning: not enough data to calculate")
return {"self_sufficiency_solar": self_sufficiency_solar,
"self_sufficiency_batteries": self_sufficiency_battery}
def self_consumption(load_p: pd.DataFrame, solar_p: pd.DataFrame, battery_p: pd.DataFrame) -> dict:
"""
Self-consumption = 1 - exports / generation
Note: the function expects a full index
#TODO: consider inverters and how to avoid double counting with solar, batteries
Args:
        load_p (pd.DataFrame): measurement data for the load of a single node.
        solar_p (pd.DataFrame): measurement data for the solar generation of a single node.
        battery_p (pd.DataFrame): measurement data for the battery of a single node.
    Returns:
results_dict: self_consumption_solar, self_consumption_batteries
"""
net_load_solar = pd.concat((load_p, solar_p), axis=1).sum(axis=1)
net_load_solar_battery = pd.concat((load_p, solar_p, battery_p), axis=1).sum(axis=1)
# create an array that contains which entries are import and which are export
mask_export_solar = (net_load_solar < 0)
mask_export_solar_battery = (net_load_solar_battery < 0)
net_export_solar = net_load_solar * mask_export_solar
    net_export_solar_battery = net_load_solar_battery * mask_export_solar_battery
    sum_solar = numpy.nansum(solar_p)
    self_consumption_solar = numpy.nan
    self_consumption_battery = numpy.nan
    if sum_solar < 0:
        self_consumption_solar = 1 - (numpy.nansum(net_export_solar) / sum_solar)
        self_consumption_battery = 1 - (numpy.nansum(net_export_solar_battery) / sum_solar)
return {"self_consumption_solar": self_consumption_solar,
"self_consumption_batteries": self_consumption_battery}
def self_sufficiency_self_consumption_average(self_consumption_self_sufficiency_dict: dict) -> dict:
"""
calculates the average for self sufficiency and consumption over a given measurement.
#TODO: consider inverters and how to avoid double counting with solar, batteries
Args:
self_consumption_self_sufficiency_dict: The dictionary has a node Id as Key and
4 values per node
Returns:
results_dict: dictionary with averages for the given network
"""
self_sufficiency_solar = []
self_sufficiency_batteries = []
self_consumption_solar = []
self_consumption_batteries = []
for node in self_consumption_self_sufficiency_dict.values():
self_sufficiency_solar.append(node["self_sufficiency_solar"])
self_sufficiency_batteries.append(node["self_sufficiency_batteries"])
self_consumption_solar.append(node["self_consumption_solar"])
self_consumption_batteries.append(node["self_consumption_batteries"])
av_self_sufficiency_solar = numpy.nanmean(self_sufficiency_solar)
av_self_sufficiency_batteries = numpy.nanmean(self_sufficiency_batteries)
av_self_consumption_solar = numpy.nanmean(self_consumption_solar)
av_self_consumption_batteries = numpy.nanmean(self_consumption_batteries)
return {"av_self_sufficiency_solar": av_self_sufficiency_solar,
"av_self_sufficiency_batteries": av_self_sufficiency_batteries,
"av_self_consumption_solar": av_self_consumption_solar,
"av_self_consumption_batteries": av_self_consumption_batteries}
def self_sufficiency_self_consumption(meas_dict: dict, node_keys: list = None, column: int = 0) -> dict:
"""
Self-sufficiency = 1 - imports / consumption
Self-consumption = 1 - exports / generation
And average over those
#TODO: consider inverters and how to avoid double counting with solar, batteries
Args:
meas_dict (dict): dict with measurement for one or multiple nodes.
node_keys (list): list of Node.names in Network.nodes.
column (int): Column index used for calculation
Returns:
results_dict: self_sufficiency_solar, self_sufficiency_batteries,
self_consumption_solar, self_consumption_batteries
"""
results_dict = {}
nodes = node_keys if node_keys else meas_dict.keys()
for key in nodes:
if type(key) == int:
key = str(key)
if meas_dict[key]:
load_p = pd.DataFrame()
solar_p = pd.DataFrame()
battery_p = pd.DataFrame()
for meas in meas_dict[key]:
data_df = meas_dict[key][meas]
if not data_df.empty:
if 'loads' in meas:
load_p = pd.concat([load_p, meas_dict[key][meas].iloc[:,column]])
elif 'solar' in meas:
solar_p = pd.concat([solar_p, meas_dict[key][meas].iloc[:,column]])
elif 'batteries' in meas:
battery_p = pd.concat([battery_p, meas_dict[key][meas].iloc[:,column]])
self_sufficiency_dict = self_sufficiency(load_p, solar_p, battery_p)
self_consumption_dict = self_consumption(load_p, solar_p, battery_p)
results_dict[key] = self_sufficiency_dict.copy()
results_dict[key].update(self_consumption_dict)
averages_dict = self_sufficiency_self_consumption_average(results_dict)
results_dict.update(averages_dict)
return results_dict
def network_net_power(meas_dict: dict, node_keys: list = None, column: int = 0) -> dict:
"""
Calculate the net power (kW) of the network on the point of common coupling
(ignoring network structure and losses etc).
Import and Export are the net_load with all values set to zero, which are not matching.
Note: net_load is calculated by using load, solar and battery values for each node at each
time. If your load already has solar factored into it, then you should not pass the solar data
on as a separate column in your measurement dict
#TODO: consider inverters and how to avoid double counting with solar, batteries
Args:
meas_dict (dict): dict with measurement for one or multiple nodes.
node_keys (list): list of Node.names in Network.nodes.
column (int): Column index used for calculation
Returns:
dictionary of net_load, net_import, net_export
"""
nodes = node_keys if node_keys else meas_dict.keys()
load_p = pd.DataFrame()
    solar_p = pd.DataFrame()
# coding: utf-8
"""Extract AA mutations from NT mutations
Author: <NAME> - Vector Engineering Team (<EMAIL>)
"""
import pandas as pd
from scripts.fasta import read_fasta_file
from scripts.util import translate
def extract_aa_mutations(
dna_mutation_file, gene_or_protein_file, reference_file, mode="gene"
):
# Load the reference sequence
with open(reference_file, "r") as fp:
lines = fp.readlines()
ref = read_fasta_file(lines)
ref_seq = list(ref.values())[0]
# JSON to dataframe
gene_or_protein_df = pd.read_json(gene_or_protein_file)
if mode == "gene":
# Only take protein-coding genes
gene_or_protein_df = (
gene_or_protein_df.loc[gene_or_protein_df["protein_coding"] == 1, :]
# set the gene as the index
.set_index("name")
)
else:
gene_or_protein_df = gene_or_protein_df.set_index("name")
    dna_mutation_df = pd.read_csv(dna_mutation_file)
import os
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import pickle
import tensorflow
from tensorflow.keras import metrics
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import Adam
import models
print('GPU', tensorflow.config.experimental.list_physical_devices('GPU'))
physical_devices = tensorflow.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
tensorflow.config.experimental.set_memory_growth(physical_devices[0], True)
epochs = 500
batch_size = 50
shape = (224, 224)
def pickel_logs(log_entry):
log_file = 'logs.pickle'
try:
with open(log_file, 'rb') as f:
logs = pickle.load(f)
except:
logs = []
logs.append(log_entry)
with open(log_file, 'wb') as f:
pickle.dump(logs, f, pickle.HIGHEST_PROTOCOL)
def image_generator(preprocessing_function):
train_datagen = ImageDataGenerator(
preprocessing_function=preprocessing_function,
rotation_range=40,
width_shift_range=0.3,
height_shift_range=0.4,
shear_range=0.2,
zoom_range=0.4,
horizontal_flip=True
)
test_datagen = ImageDataGenerator(
preprocessing_function=preprocessing_function)
return train_datagen, test_datagen
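# Usage sketch (hedged): the generators are typically wired to the dataframe built
# below via Keras' flow_from_dataframe; 'path'/'label' are the columns created in
# prepate_dataset(), while the preprocessing function and the train_df split are
# assumptions of this sketch.
#
#   train_datagen, test_datagen = image_generator(preprocess_input)
#   train_gen = train_datagen.flow_from_dataframe(
#       train_df, x_col='path', y_col='label', target_size=shape,
#       batch_size=batch_size, class_mode='binary')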
def prepate_dataset(train_size=100, val_size=250, test_size=600):
data = []
directory = '/tf/work/data/chest-xray-pneumonia/'
for folder in sorted(os.listdir(directory)):
for file in sorted(os.listdir(directory+folder)):
data.append((folder, file, directory+folder+'/'+file))
    dataframe = pd.DataFrame(data, columns=['label', 'file', 'path'])
"""
This module includes two types of discrete state-space formulations for biogas plants.
The anaerobic digestion model in FlexibleBiogasPlantModel is based on the work in
https://doi.org/10.1016/j.energy.2017.12.073 and ISBN: 978-3-319-16192-1
The module is designed to work with fledge: https://doi.org/10.5281/zenodo.3715873
The code is organized and implemented based on the flexible building model cobmo: https://zenodo.org/record/3523539
"""
import numpy as np
import pandas as pd
import scipy.linalg
import os
import inspect
import sys
import datetime as dt
import pyomo.environ as pyo
import bipmo.utils
class BiogasPlantModel(object):
"""
BiogasPlantModel represents all attributes and functions that all biogas plants have in common. It is the basis for
every model that inherits from it. Caution: It does not work as a standalone model!
"""
model_type: str = None
der_name: str = 'Biogas Plant'
plant_scenarios: pd.DataFrame
states: pd.Index
controls: pd.Index
outputs: pd.Index
switches: pd.Index
chp_schedule: pd.DataFrame
disturbances: pd.Index
state_vector_initial: pd.Series
state_matrix: pd.DataFrame
control_matrix: pd.DataFrame
disturbance_matrix: pd.DataFrame
state_output_matrix: pd.DataFrame
control_output_matrix: pd.DataFrame
disturbance_output_matrix: pd.DataFrame
timestep_start: pd.Timestamp
timestep_end: pd.Timestamp
timestep_interval: pd.Timedelta
timesteps: pd.Index
disturbance_timeseries: pd.DataFrame
output_maximum_timeseries: pd.DataFrame
output_minimum_timeseries: pd.DataFrame
marginal_cost: float
lhv_table: pd.DataFrame
temp_in: float
cp_water: float
feedstock_limit_type: str
available_feedstock: float
def __init__(
self,
scenario_name: str,
timestep_start=None,
timestep_end=None,
timestep_interval=None,
connect_electric_grid=True,
):
# Scenario name.
self.scenario_name = scenario_name
# Define the biogas plant model (change paths accordingly).
base_path = os.path.dirname(os.path.dirname(os.path.normpath(__file__)))
# Load the scenario.
self.plant_scenarios = pd.read_csv(
os.path.join(base_path, 'data/biogas_plant_scenario.csv')
)
self.plant_scenarios = self.plant_scenarios[
self.plant_scenarios['scenario_name'] == self.scenario_name]
self.plant_scenarios.index = pd.Index([self.scenario_name])
# Load marginal cost
self.marginal_cost = self.plant_scenarios.loc[
self.scenario_name, 'marginal_cost_EUR_Wh-1']
# Load feedstock data used in the scenario.
self.plant_feedstock = pd.read_csv(
os.path.join(base_path, 'data/biogas_plant_feedstock.csv')
)
self.plant_feedstock = self.plant_feedstock[
self.plant_feedstock['feedstock_type']
== self.plant_scenarios.loc[self.scenario_name, 'feedstock_type']
]
self.plant_feedstock.index = pd.Index([self.scenario_name])
self.feedstock_limit_type = self.plant_scenarios.loc[
self.scenario_name, 'availability_limit_type']
self.available_feedstock = self.plant_scenarios.loc[
self.scenario_name, 'availability_substrate_ton_per_year']
# Load CHP data used in the scenario.
self.CHP_list = self.plant_scenarios.CHP_name[self.scenario_name].split()
self.number_CHP = len(self.CHP_list)
self.plant_CHP_source = pd.read_csv(
os.path.join(base_path, 'data/biogas_plant_CHP.csv')
)
self.plant_CHP = pd.DataFrame(columns=self.plant_CHP_source.columns)
for i in self.CHP_list:
self.plant_CHP = pd.concat([
self.plant_CHP,
self.plant_CHP_source[self.plant_CHP_source['CHP_name'] == i]
])
self.plant_CHP.index = self.plant_CHP['CHP_name']
self.elec_cap_list = pd.DataFrame([cap for cap in self.plant_CHP.elec_cap_Wel],
index=self.CHP_list,
columns=['elec_cap_Wel'])
self.ramp_rate_list = pd.DataFrame([rate for rate in self.plant_CHP.ramp_capacity_W_min],
index=self.CHP_list,
columns=['ramp_rate_W_min'])
# Load storage data used in the scenario.
self.plant_storage = pd.read_csv(
os.path.join(base_path, 'data/biogas_plant_storage.csv')
)
self.plant_storage = self.plant_storage[
self.plant_storage['storage_name']
== self.plant_scenarios.loc[self.scenario_name, 'storage_name']
]
self.plant_storage.index = pd.Index([self.scenario_name])
# Define useful values.
self.lhv_table = pd.DataFrame(
# Lower heating value of methane in J/m3.
[35.8e6],
pd.Index(['LHV_methane']),
pd.Index(['LHV value (in J/m^3)'])
)
self.temp_in = self.plant_scenarios.loc[
# Temperature of the digestion process in °C.
self.scenario_name, 'digester_temp']
self.cp_water = 4182 # Specific heat of water in J/(K*kg) at 20°C.
# Define CHP coefficients
        self.set_gains = pd.Index([])
# This script analyzes the csv files output by PixDistStats2.py
# Updated Feb 2021.
# PixDistStats2 separates the data into biological replicates instead of aggregating all data for each sample group.
# This script takes those data and does stats and makes plots.
# pixel_distance.py actually performs the measurement of minimum distance
# between tumor and lyve-1 pixels, and outputs the results for each image.
# PixDistStats.py performs stats and makes plots on ALL the data separated by sample group. However,
# this is insufficient because it isn't split up into biological replicates, or normalized.
# PixDistStats2.py separates the data into biological replicates instead of aggregating
# all data for each sample group, and experiments with plots.
# PixDistStats3.py takes data from PixDistStats2, normalizes it to total pixels for each animal,
# does statistical comparisons and makes plots.
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import os
import pixel_distance as pxd
import pandas as pd
from scipy.stats import stats
from statsmodels.stats.multicomp import pairwise_tukeyhsd, MultiComparison
import joypy as jpy
def load_datas(dir):
distbypercentiles = pd.read_csv(dir + 'dist_by_percentiles.csv', index_col='percentiles')
    numpixbydistbins = pd.read_csv(dir + 'numpix_by_dist_bins.csv', index_col='distance bins')
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2015, IBM Corp.
# All rights reserved.
#
# Distributed under the terms of the BSD Simplified License.
#
# The full license is in the LICENSE file, distributed with this software.
#-----------------------------------------------------------------------------
# Python 2 Compatibility
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import dict
from builtins import zip
from builtins import str
from builtins import int
from future import standard_library
standard_library.install_aliases()
from collections import OrderedDict
import itertools
import math
import warnings
from numbers import Number
import pandas as pd
import numpy as np
import six
import ibmdbpy
from ibmdbpy.utils import chunklist
"""
Statistics module for IdaDataFrames
"""
def _numeric_stats(idadf, stat, columns):
"""
Compute various stats from one or several numerical columns of an IdaDataFrame.
Parameters
----------
idadf : IdaDataFrame
Data source.
stat : str
Name of the statistic to be computed.
columns : str or list of str
Name of the columns that belong to the IdaDataFrame.
Returns
-------
Tuple or float64
        One value for each column. For a single-column input a float64 value is returned, except for 'median'.
Notes
-----
Currently, the following functions are supported: count, mean, median, std,
var, min, max, sum. Should return a tuple. Only available for numerical
columns.
"""
# Calculate count, mean, median, std, var, min, max
if isinstance(columns, six.string_types):
columns = [columns]
if isinstance(stat, six.string_types):
if stat == "count":
select_string = 'COUNT(\"' + '\"), COUNT(\"'.join(columns) + '\")'
elif stat == "mean":
select_string = ('AVG(CAST(\"' +
'\" AS FLOAT)), AVG(CAST(\"'.join(columns) +
'\" AS FLOAT))')
elif stat == "median":
return _get_percentiles(idadf, 0.5, columns).values[0]
elif stat == "std":
tuple_count = _numeric_stats(idadf, 'count', columns)
# in case of only one column, ensure tuple_count is iterable
if len(columns) == 1:
tuple_count = [tuple_count]
count_dict = dict((x, y) for x, y in zip(columns, tuple_count))
agg_list = []
for column in columns:
agg_list.append("STDDEV(\"%s\")*(SQRT(%s)/SQRT(%s))"
%(column, count_dict[column], count_dict[column]-1))
select_string = ', '.join(agg_list)
elif stat == "var":
tuple_count = _numeric_stats(idadf, 'count', columns)
if len(columns) == 1:
tuple_count = [tuple_count]
count_dict = dict((x, int(y)) for x, y in zip(columns, tuple_count))
agg_list = []
for column in columns:
agg_list.append("VAR(\"%s\")*(%s.0/%s.0)"
%(column, count_dict[column], count_dict[column]-1))
select_string = ', '.join(agg_list)
elif stat == "min":
select_string = 'MIN(\"' + '\"), MIN(\"'.join(columns) + '\")'
elif stat == "max":
select_string = 'MAX(\"' + '\"), MAX(\"'.join(columns) + '\")'
elif stat == "sum":
select_string = 'SUM(\"' + '\"), SUM(\"'.join(columns) + '\")'
name = idadf.internal_state.current_state
return idadf.ida_query("SELECT %s FROM %s" %(select_string, name)).values[0]
def _get_percentiles(idadf, percentiles, columns):
"""
Return percentiles over all entries of a column or list of columns in the
IdaDataFrame.
Parameters
----------
idadf : IdaDataFrame
percentiles: Float or list of floats.
All values in percentiles must be > 0 and < 1
columns: String or list of string
Name of columns belonging to the IdaDataFrame.
Returns
-------
DataFrame
"""
if isinstance(columns, six.string_types):
columns = [columns]
if isinstance(percentiles, Number):
percentiles = [percentiles]
name = idadf.internal_state.current_state
# Get na values for each columns
tuple_na = _get_number_of_nas(idadf, columns)
nrow = idadf.shape[0]
data = pd.DataFrame()
for index_col, column in enumerate(columns):
nb_not_missing = nrow - tuple_na[index_col]
indexes = [float(x)*float(nb_not_missing-1) + 1 for x in percentiles]
low = [math.floor(x) for x in indexes]
high = [math.ceil(x) for x in indexes]
tuplelist = []
i = 0
for flag in [((x+1) == y) for x, y in zip(low, high)]:
if flag:
tuplelist.append((i, i+1))
i += 2
else:
tuplelist.append((i, i))
i += 1
unique = low + high
unique = set(unique)
unique = sorted(unique)
unique = [str(x) for x in unique]
indexes_string = ",".join(unique)
df = idadf.ida_query("(SELECT \""+column+"\" AS \""+column+"\" FROM (SELECT "+
"ROW_NUMBER() OVER(ORDER BY \""+column+"\") as rn, \""+
column + "\" FROM (SELECT * FROM " + name +
")) WHERE rn in("+ indexes_string +"))")
#indexvalues = list(df[df.columns[0]])
indexvalues = list(df)
#import pdb ; pdb.set_trace()
#print(tuplelist)
#print(indexvalues)
indexfinal = [(float(str(indexvalues[x[0]]))+float(str(indexvalues[x[1]])))/2 for x in tuplelist]
new_data = pd.DataFrame(indexfinal)
data[column] = (new_data.T).values[0]
percentile_names = [x for x in percentiles]
data.index = percentile_names
return data
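# Worked example (illustrative): with 5 non-missing rows and percentiles=[0.25, 0.5]
# the 1-based positions are 0.25*4+1 = 2.0 and 0.5*4+1 = 3.0, so rows 2 and 3 of the
# ordered column are used directly; a fractional position such as 1.4 (low=1, high=2)
# is resolved as the average of rows 1 and 2.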
def _categorical_stats(idadf, stat, columns):
# TODO:
"""
Computes various stats from one or several categorical columns of the IdaDataFrame.
This is not implemented.
Parameters
----------
idadf : IdaDataFrame
stat : str
Name of the statistic function to be computed.
columns : str or list of str
Name of columns belonging to the IdaDataFrame.
Returns
-------
Tuple.
"""
# Calculates count, unique, top, freq
raise NotImplementedError("TODO")
def _get_number_of_nas(idadf, columns):
"""
Return the count of missing values for a list of columns in the IdaDataFrame.
Parameters
----------
idadf : IdaDataFrame
columns : str or list
One column as a string or a list of columns in the idaDataFrame.
Returns
-------
Tuple
"""
if isinstance(columns, six.string_types):
columns = [columns]
name = idadf.internal_state.current_state
query_list = list()
for column in columns:
string = ("(SELECT COUNT(*) AS \"" + column + "\" FROM " +
name + " WHERE \"" + column + "\" IS NULL)")
query_list.append(string)
query_string = ', '.join(query_list)
# TODO: Improvement idea : Get nrow (shape) and substract by count("COLUMN")
return idadf.ida_query("SELECT * FROM " + query_string, first_row_only = True)
def _count_level(idadf, columnlist=None):
"""
Count distinct levels across a list of columns of an IdaDataFrame grouped
by themselves.
Parameters
----------
columnlist : list
List of column names that exist in the IdaDataFrame. By default, these
are all columns in IdaDataFrame.
Returns
-------
Tuple
Notes
-----
    The function assumes the following:
    * The columns given as parameter exist in the IdaDataFrame.
* The parameter columnlist is an optional list.
* Columns are referenced by their own name (character string).
"""
if columnlist is None:
columnlist = idadf.columns
name = idadf.internal_state.current_state
query_list = []
for column in columnlist:
# Here cast ?
query_list.append("(SELECT COUNT(*) AS \"" + column +"\" FROM (" +
"SELECT \"" + column + "\" FROM " + name +
" GROUP BY \"" + column + "\" ))")
#query_list.append("(SELECT CAST(COUNT(*) AS BIGINT) AS \"" + column +"\" FROM (" +
# "SELECT \"" + column + "\" FROM " + name + " ))")
query_string = ', '.join(query_list)
column_string = '\"' + '\", \"'.join(columnlist) + '\"'
return idadf.ida_query("SELECT " + column_string + " FROM " + query_string, first_row_only = True)
def _count_level_groupby(idadf, columnlist=None):
"""
Count distinct levels across a list of columns in the IdaDataFrame grouped
by themselves. This is used to get the dimension of the resulting cross table.
Parameters
----------
columnlist : list
List of column names existing in the IdaDataFrame. By default, these
are columns of self
Returns
-------
Tuple
Notes
-----
    The function assumes the following:
    * The columns given as parameter exist in the IdaDataFrame.
    * The parameter columnlist is an optional list.
* Columns are referenced by their own name (character string).
"""
if columnlist is None:
columnlist = idadf.columns
name = idadf.internal_state.current_state
column_string = '\"' + '\", \"'.join(columnlist) + '\"'
query = (("SELECT COUNT(*) FROM (SELECT %s, COUNT(*) as COUNT "+
"FROM %s GROUP BY %s ORDER BY %s, COUNT ASC)")
%(column_string, name, column_string, column_string))
return idadf.ida_query(query, first_row_only = True)
# TODO: REFACTORING: factors function should maybe return a tuple ?
def _factors_count(idadf, columnlist, valuelist=None):
"""
Count non-missing values for all columns in a list (valuelist) over the
    IdaDataFrame grouped by a list of columns (columnlist).
Parameters
----------
columnlist : list
List of column names that exist in self.
valuelist : list
List of column names that exist in self.
Assumptions
-----------
    * The columns given as parameter exist in the IdaDataFrame
    * The parameter columnlist is an optional list
* Columns are referenced by their own name (character string)
Returns
-------
DataFrame
"""
column_string = '\"' + '\", \"'.join(columnlist) + '\"'
name = idadf.internal_state.current_state
if valuelist is None:
query = (("SELECT %s, COUNT(*) as COUNT FROM %s GROUP BY %s ORDER BY %s, COUNT ASC")
%(column_string, name, column_string, column_string))
else:
agg_list = []
for value in valuelist:
query = "COUNT(\"%s\") as \"%s\""%(value,value)
agg_list.append(query)
agg_string = ', '.join(agg_list)
value_string = '\"' + '", "'.join(valuelist) + '\"'
query = (("SELECT %s,%s FROM %s GROUP BY %s ORDER BY %s,%s ASC")
%(column_string, agg_string, name, column_string, column_string, value_string))
return idadf.ida_query(query)
def _factors_sum(idadf, columnlist, valuelist):
"""
Compute the arithmetic sum over for all columns in a list (valuelist)
over the IdaDataFrame grouped by a list of columns (columnlist).
Parameters
----------
columnlist : list
List of column names that exist in self.
valuelist : list
List of column names that exist in self.
Assumptions
-----------
    * The columns given as parameter exist in the IdaDataFrame
    * The parameter columnlist is an optional list
* Columns are referenced by their own name (character string)
Returns
-------
DataFrame
"""
column_string = '\"' + '\", \"'.join(columnlist) + '\"'
name = idadf.internal_state.current_state
agg_list = []
for value in valuelist:
query = "SUM(\"%s\") as \"%s\""%(value, value)
agg_list.append(query)
agg_string = ', '.join(agg_list)
value_string = '\"' + '", "'.join(valuelist) + '\"'
query = (("SELECT %s,%s FROM %s GROUP BY %s ORDER BY %s,%s ASC")
%(column_string, agg_string, name, column_string, column_string, value_string))
return idadf.ida_query(query)
def _factors_avg(idadf, columnlist, valuelist):
"""
Compute the arithmetic average for all columns in a list (valuelist) over
the IdaDataFrame grouped by a list of columns (columnlist).
Parameters
----------
columnlist : list
List of column names that exist in self.
valuelist : list
List of column names that exist in self.
Assumptions
-----------
    * The columns given as parameter exist in the IdaDataFrame
    * The parameters columnlist and valuelist are array-like
* Columns are referenced by their own name (character string)
Returns
-------
DataFrame
"""
column_string = '\"' + '\", \"'.join(columnlist) + '\"'
name = idadf.internal_state.current_state
agg_list = []
for value in valuelist:
agg = (("CAST(AVG(CAST(\"%s\" AS DECIMAL(10,6))) AS DECIMAL(10,6)) \"%s\"")
%(value, value))
agg_list.append(agg)
agg_string = ', '.join(agg_list)
value_string = '\"' + '", "'.join(valuelist) + '\"'
query = (("SELECT %s,%s FROM %s GROUP BY %s ORDER BY %s,%s ASC")
%(column_string, agg_string, name, column_string, column_string, value_string))
return idadf.ida_query(query)
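# Example of the generated SQL (hypothetical column names): for columnlist=['A'] and
# valuelist=['B'] the query built above has the shape
#   SELECT "A", CAST(AVG(CAST("B" AS DECIMAL(10,6))) AS DECIMAL(10,6)) "B"
#   FROM <current state> GROUP BY "A" ORDER BY "A", "B" ASC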
###############################################################################
### Pivot Table
###############################################################################
def pivot_table(idadf, values=None, columns=None, max_entries=1000, sort=None,
factor_threshold=None, interactive=False, aggfunc='count'):
"""
See IdaDataFrame.pivot_table
"""
# TODO : Support index
if aggfunc.lower() not in ['count', 'sum', 'avg', 'average', 'mean']:
print("For now only 'count' and 'sum' and 'mean' as aggregation function is supported")
return
if (columns is None) & (factor_threshold is None):
print("Please provide parameter factor_threshold for automatic selection of columns")
return
if isinstance(columns, six.string_types):
columns = [columns]
if isinstance(values, six.string_types):
values = [values]
if (values is None) and (aggfunc.lower() != "count"):
raise ValueError("Cannot aggregate using another function than count if" +
"no value(s) was/were given")
####### Identify automatically categorical fields #########
# Load distinct count for each and evaluate categorical or not
data = idadf._table_def(factor_threshold) #
if columns is None:
factors = data.loc[data['VALTYPE'] == "CATEGORICAL", ['TYPENAME', 'FACTORS']]
if len(factors) == 0:
print("No categorical columns to tabulate")
return
else:
factors = data.loc[columns, ['TYPENAME', 'FACTORS']]
if sort == "alpha":
factors.sort_index(inplace=True, ascending=1)
elif sort == "factor":
factors.sort(['FACTORS'], inplace=True, ascending=1)
if columns is None:
print("Automatic selection of columns :", factors.index.values)
columns = factors.index.values
nb_row = _count_level_groupby(idadf, factors.index.values)[0] * len(columns)
nb_col = len(factors.index.values)
nb_entries = nb_row * nb_col
if nb_entries > max_entries: # Overflow risk
print("Number of entries :", nb_entries)
print("Value counts for factors:")
factor_values = factors[['FACTORS']]
factor_values.columns = ['']
print(factor_values.T)
print("WARNING :Attempt to make a table with more than " +
str(max_entries)+ " elements. Either increase max_entries " +
"parameter or remove columns with too many levels.")
return
print("Output dataframe has dimensions", nb_row, "x", (nb_col+1))
if interactive is True:
display_yes = ibmdbpy.utils.query_yes_no("Do you want to download it in memory ?")
if not display_yes:
return
categorical_columns = list(factors.index)
if aggfunc.lower() == 'count':
dataframe = _factors_count(idadf, categorical_columns, values) # Download dataframe
if aggfunc.lower() == 'sum':
dataframe = _factors_sum(idadf, categorical_columns, values) # Download dataframe
if aggfunc.lower() in ['avg', 'average', 'mean']:
dataframe = _factors_avg(idadf, categorical_columns, values) # Download dataframe
if values is not None:
agg_values = values
else: agg_values = aggfunc.upper()
if isinstance(agg_values, six.string_types):
agg_values = [agg_values]
dataframe.columns = categorical_columns + agg_values # Name the aggregate column
# Formatting result
if len(agg_values) == 1:
dataframe[None] = agg_values[0]
else:
catdataframe = dataframe[categorical_columns]
dataframe = catdataframe.join(dataframe[agg_values].stack().reset_index(1))
dataframe['level_1'] = pd.Categorical(dataframe['level_1'], agg_values)
dataframe = dataframe.rename(columns={'level_1':None})
dataframe = dataframe.sort([None] + categorical_columns)
dataframe.set_index([None] + categorical_columns, inplace=True)
dataframe = dataframe.astype(float)
result = pd.Series(dataframe[dataframe.columns[0]])
result.name = None
return result
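# Usage sketch (hypothetical connection, table and column names; it needs an open
# IdaDataBase connection, so it is shown as a comment only):
#
#   idadb = ibmdbpy.IdaDataBase(dsn="BLUDB")
#   idadf = ibmdbpy.IdaDataFrame(idadb, "IRIS")
#   pivot_table(idadf, values=["sepal_length"], columns=["species"], aggfunc="mean")
#
# The result is a pandas Series indexed by the aggregated value name and the levels
# of the categorical column(s).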
###############################################################################
### Descriptive statistics
###############################################################################
def describe(idadf, percentiles=[0.25, 0.50, 0.75]):
"""
See IdaDataFrame.describe
"""
if percentiles is not None:
if isinstance(percentiles, Number):
percentiles = [percentiles]
if True in [(not isinstance(x, Number)) for x in percentiles]:
raise TypeError("Argument 'percentiles' should be either a number or " +
"a list of numbers between 0 and 1")
elif True in [((x >= 1) | (x <= 0)) for x in percentiles]:
raise ValueError("Numbers in argument 'percentiles' should be between 0 and 1")
    # Improvement idea: we could use dtypes instead of calculating this every time
columns = idadf._get_numerical_columns()
data = []
if not columns:
columns = idadf._get_categorical_columns()
if not columns:
raise NotImplementedError("No numerical and no categorical columns")
else:
raise NotImplementedError("Categorical only idaDataFrame are not handled currently")
# TODO : Handle categorical columns
data.append(_categorical_stats(idadf, "count", columns))
data.append(_categorical_stats(idadf, "unique", columns))
data.append(_categorical_stats(idadf, "top", columns))
data.append(_categorical_stats(idadf, "freq", columns))
else:
data.append(_numeric_stats(idadf, "count", columns))
data.append(_numeric_stats(idadf, "mean", columns))
data.append(_numeric_stats(idadf, "std", columns))
data.append(_numeric_stats(idadf, "min", columns))
if percentiles is not None:
perc = (_get_percentiles(idadf, percentiles, columns))
for tup in perc.itertuples(index=False):
data.append(tup)
data.append(_numeric_stats(idadf, "max", columns))
data = pd.DataFrame(data)
data.columns = columns
if percentiles is not None:
percentile_names = [(str(int(x * 100)) + "%") for x in percentiles]
else:
percentile_names = []
data.index = ['count', 'mean', 'std', 'min'] + percentile_names + ['max']
# quick fix -> JDBC problems
#for column in data.columns:
# data[[column]] = data[[column]].astype(float)
if isinstance(idadf, ibmdbpy.IdaSeries):
data = pd.Series(data[data.columns[0]])
return data
def quantile(idadf, q=0.5):
"""
See IdaDataFrame.quantile
"""
if isinstance(q, Number):
q = [q]
# Sanity check
if True in [(not isinstance(x, Number)) for x in q]:
raise TypeError("Argument 'q' should be either a number or " +
"a list of numbers between 0 and 1")
elif True in [((x >= 1) | (x <= 0)) for x in q]:
raise ValueError("Numbers in argument 'percentiles' should be between 0 and 1")
columns = idadf._get_numerical_columns()
if not columns:
print(idadf.name + " has no numeric columns")
return
result = _get_percentiles(idadf, q, columns)
if isinstance(q, list):
if len(q) > 1:
return result
result = result.T
result = result[result.columns[0]]
result.name = q[0]
result = result.astype('float')
if len(result) == 1:
result = result[0]
return result
# Note : Not casting to double can lead to SQL overflow
# TODO: Has to be modified in ibmdbR
def cov(idadf, other = None):
"""
See IdaDataFrame.cov
"""
if isinstance(idadf, ibmdbpy.IdaSeries):
raise TypeError("cov() missing 1 required positional argument: 'other'")
columns = idadf._get_numerical_columns()
if not columns:
print(idadf.name + " has no numeric columns")
return
tuple_count = _numeric_stats(idadf, 'count', columns)
count_dict = dict((x, int(y)) for x, y in zip(columns, tuple_count))
agg_list = []
combinations = [x for x in itertools.combinations_with_replacement(columns, 2)]
columns_set = [{x[0], x[1]} for x in combinations]
for column_pair in combinations:
agg_list.append("COVARIANCE(\"" + column_pair[0] + "\",\"" +
column_pair[1] + "\")*(" +
str(min([count_dict[column_pair[0]],
count_dict[column_pair[1]]])) + ".0/" +
str(min([count_dict[column_pair[0]],
count_dict[column_pair[1]]])-1) + ".0)")
agg_string = ', '.join(agg_list)
name = idadf.internal_state.current_state
data = idadf.ida_query("SELECT %s FROM %s"%(agg_string, name), first_row_only = True)
tuple_list = []
for column1 in columns:
list_value = []
for column2 in columns:
for index, column_set in enumerate(columns_set):
if {column1, column2} == column_set:
list_value.append(data[index])
break
tuple_list.append(tuple(list_value))
result = pd.DataFrame(tuple_list)
result.index = columns
result.columns = columns
if len(result) == 1:
result = result[0]
return result
def corr(idadf, features=None,ignore_indexer=True):
"""
See IdaDataFrame.corr
"""
if isinstance(idadf, ibmdbpy.IdaSeries):
raise TypeError("corr() missing 1 required positional argument: 'other'")
# TODO: catch case n <= 1
numerical_columns = idadf._get_numerical_columns()
if not numerical_columns:
print(idadf.name + " has no numeric columns")
return
if ignore_indexer is True:
if idadf.indexer:
if idadf.indexer in numerical_columns:
numerical_columns.remove(idadf.indexer)
#print(features)
#target, features = ibmdbpy.utils._check_input(target, features)
if features is not None:
for feature in features:
if feature not in numerical_columns:
raise TypeError("Correlation-based measure not available for non-numerical columns %s"%feature)
else:
features = numerical_columns
#if target not in columns:
# raise ValueError("%s is not a column of numerical type in %s"%(target, idadf.name))
values = OrderedDict()
combinations = [x for x in itertools.combinations(features, 2)]
#columns_set = [{x[0], x[1]} for x in combinations]
if len(features) < 64: # the limit on variables in a single SQL statement is 4096, i.e. 64^2
agg_list = []
for column_pair in combinations:
agg = "CORRELATION(\"%s\",\"%s\")"%(column_pair[0], column_pair[1])
agg_list.append(agg)
agg_string = ', '.join(agg_list)
name = idadf.internal_state.current_state
data = idadf.ida_query("SELECT %s FROM %s"%(agg_string, name), first_row_only = True)
for i, element in enumerate(combinations):
if element[0] not in values:
values[element[0]] = {}
if element[1] not in values:
values[element[1]] = {}
values[element[0]][element[1]] = data[i]
values[element[1]][element[0]] = data[i]
result = pd.DataFrame(values).fillna(1)
else:
chunkgen = chunklist(combinations, 100)
for chunk in chunkgen:
agg_list = []
for column_pair in chunk:
agg = "CORRELATION(\"%s\",\"%s\")"%(column_pair[0], column_pair[1])
agg_list.append(agg)
agg_string = ', '.join(agg_list)
name = idadf.internal_state.current_state
data = idadf.ida_query("SELECT %s FROM %s"%(agg_string, name), first_row_only = True)
for i, element in enumerate(chunk):
if element[0] not in values:
values[element[0]] = OrderedDict()
if element[1] not in values:
values[element[1]] = OrderedDict()
values[element[0]][element[1]] = data[i]
values[element[1]][element[0]] = data[i]
result = pd.DataFrame(values).fillna(1)
result = result.reindex(result.columns)
if len(result) == 1:
result = result[0]
return result
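# Illustrative sketch of the chunking behaviour corr() relies on above; the real
# chunklist is imported elsewhere in ibmdbpy and may differ. Conceptually it splits
# the list of column pairs into chunks of at most `size` items so each generated SQL
# statement stays under the database limit on variables:
# def _chunklist(seq, size):
#     for start in range(0, len(seq), size):
#         yield seq[start:start + size]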
### corrwith
def mad(idadf):
"""
See IdaDataFrame.mad
"""
columns = idadf._get_numerical_columns()
if not columns:
print(idadf.name + " has no numeric columns")
return
mean_tuple = _numeric_stats(idadf, "mean", columns)
absmean_dict = dict((x, abs(y)) for x, y in zip(columns, mean_tuple))
tuple_na = _get_number_of_nas(idadf, columns)
agg_list = []
for index_col, column in enumerate(columns):
agg_list.append("SUM(ABS(\"" + column + "\" -" +
str(absmean_dict[column]) + "))/" +
str(idadf.shape[0] - tuple_na[index_col]))
agg_string = ', '.join(agg_list)
name = idadf.internal_state.current_state
mad_tuple = idadf.ida_query("SELECT %s FROM %s"%(agg_string, name))
result = pd.Series(mad_tuple.values[0])
result.index = columns
result = result.astype('float')
if isinstance(idadf, ibmdbpy.IdaSeries):
result = result[0]
return result
def ida_min(idadf):
"""
See IdaDataFrame.min
"""
na_tuple = _get_number_of_nas(idadf, idadf.columns)
min_tuple = _numeric_stats(idadf, "min", idadf.columns)
if not hasattr(min_tuple, "__iter__"): min_tuple = (min_tuple,)  # dirty fix
min_list = [np.nan if ((y > 0) and not isinstance(x, Number))
else x for x, y in zip(min_tuple, na_tuple)]
min_tuple = tuple(min_list)
result = pd.Series(min_tuple)
result.index = idadf.columns
#if isinstance(idadf, ibmdbpy.IdaSeries):
# result = result[0]
return result
def ida_max(idadf):
"""
See IdaDataFrame.max
"""
na_tuple = _get_number_of_nas(idadf, idadf.columns)
max_tuple = _numeric_stats(idadf, "max", idadf.columns)
if not hasattr(max_tuple, "__iter__"): max_tuple = (max_tuple,)  # dirty fix
max_list = [np.nan if ((y > 0) and not isinstance(x, Number))
else x for x, y in zip(max_tuple, na_tuple)]
max_tuple = tuple(max_list)
result = pd.Series(max_tuple)
result.index = idadf.columns
#if isinstance(idadf, ibmdbpy.IdaSeries):
# result = result[0]
return result
def count(idadf):
"""
See IdaDataFrame.count
"""
count_tuple = _numeric_stats(idadf, "count", idadf.columns)
result = pd.Series(count_tuple)
result.index = idadf.columns
result = result.astype(int)
if isinstance(idadf, ibmdbpy.IdaSeries):
result = result[0]
return result
def count_distinct(idadf):
"""
See IdaDataFrame.count_distinct
"""
result = pd.Series(_count_level(idadf))
result.index = idadf.columns
result = result.astype(int)
if isinstance(idadf, ibmdbpy.IdaSeries):
result = result[0]
return result
def std(idadf):
"""
See IdaDataFrame.std
"""
columns = idadf._get_numerical_columns()
if not columns:
warnings.warn("%s has no numeric columns"%idadf.name)
return pd.Series()
std_tuple = _numeric_stats(idadf, "std", columns)
result = pd.Series(std_tuple)
result.index = columns
if isinstance(idadf, ibmdbpy.IdaSeries):
result = result[0]
return result
def var(idadf):
"""
See IdaDataFrame.var
"""
columns = idadf._get_numerical_columns()
if not columns:
warnings.warn("%s has no numeric columns"%idadf.name)
return pd.Series()
var_tuple = _numeric_stats(idadf, "var", columns)
result = pd.Series(var_tuple)
result.index = columns
if isinstance(idadf, ibmdbpy.IdaSeries):
result = result[0]
return result
def mean(idadf):
"""
See IdaDataFrame.mean
"""
columns = idadf._get_numerical_columns()
if not columns:
warnings.warn("%s has no numeric columns"%idadf.name)
return pd.Series()
mean_tuple = _numeric_stats(idadf, "mean", columns)
result = pd.Series(mean_tuple)
result.index = columns
if isinstance(idadf, ibmdbpy.IdaSeries):
result = result[0]
return result
def ida_sum(idadf):
"""
See IdaDataFrame.sum
"""
# Behaves as if the option "numeric_only" were set to True
columns = idadf._get_numerical_columns()
if not columns:
warnings.warn("%s has no numeric columns"%idadf.name)
return | pd.Series() | pandas.Series |
import pandas as pd
import numpy as np
from pandas.api.types import is_numeric_dtype
import re
from nltk.tokenize import word_tokenize
import joblib
import pickle
def func(ser):
nans = np.count_nonzero( | pd.isnull(ser) | pandas.isnull |
"""Class for intent operations - training, predict"""
import os
import re
import json
import datetime
import joblib
import numpy as np
import pandas as pd
from typing import List, Union
from sklearn.model_selection import GridSearchCV
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import StandardScaler, MultiLabelBinarizer
from sklearn.ensemble import RandomForestClassifier
from .base import DatasetBunch, RuleBunch, Classifier
from .utils import get_intent_labels, make_dir
from .transformer import PercentSVD
DEFAULT_FOLDER = os.path.join(os.getcwd(), "models")
class OneClassClassifier(Classifier):
"""Classifier used for dataset which has only one class."""
def __init__(self, intent: str):
self._intent = intent
def predict(self, words: str="", context: Union[str, dict]=None) -> List[str]:
return [self._intent]
class RuleClassifier(Classifier):
"""Rule-based classifier"""
def __init__(self, rule_bunch: RuleBunch):
self._patterns = [re.compile(r) if r else None
for r in rule_bunch.words_rules]
try:
self._context_rules = \
[json.loads(r) if r else {} for r in rule_bunch.context_rules] \
if rule_bunch.context_rules else []
except AttributeError:
self._context_rules = []
self._intent_labels = rule_bunch.intent_labels
def predict(self, words: str="", context: Union[str, dict]=None) -> List[str]:
"""
Predict intent labels according to words patterns and comparision
between context and context_rule.
Parameters
----------
words: user input
context: context information
Returns
-------
List of predicted labels or empty list if failed to the matches.
"""
def context_match(context: dict, rule_context: dict) -> bool:
if not rule_context:
return True
else:
return False if not context else \
all(rule_context.get(k) == v for k, v in context.items())
# make sure the context to be a dict
if not context:
context = {}
else:
if isinstance(context, str):
context = json.loads(context)
if not words and not context:
return []
intent_labels = []
for i, pattern in enumerate(self._patterns):
if not self._context_rules:
if pattern.match(words):
for label in self._intent_labels[i]:
intent_labels.append(label)
else:
if pattern.match(words) and \
context_match(context, self._context_rules[i]):
for label in self._intent_labels[i]:
intent_labels.append(label)
return intent_labels
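# Minimal usage sketch, assuming only the RuleBunch attributes read above
# (words_rules, context_rules, intent_labels); any namespace object exposing them
# would do:
#   bunch = SimpleNamespace(words_rules=[r"^(hi|hello)\b"],
#                           context_rules=['{"channel": "chat"}'],
#                           intent_labels=[["greeting"]])
#   RuleClassifier(bunch).predict("hello there", {"channel": "chat"})  # -> ["greeting"]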
class ModelClassifier(Classifier):
def __init__(self, folder: str=DEFAULT_FOLDER, customer: str="common",
lang="en", n_jobs=None):
"""
Parameters
----------
folder: The folder to save the final models.
customer: Name used to distinguish different customers.
lang: Language, "en" for English or "cn" for Chinese.
n_jobs : n_jobs in GridSearchCV, int or None, optional (default=None)
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
"""
self._folder = folder
self._customer = customer
self._lang = lang
self._n_jobs = n_jobs
self._classifiers = {}
self._mlbs = {}
self._reports = {}
def fit(self, data_bunch: DatasetBunch):
"""
Fit with GridSearchCV method to find the optimal parameters.
Disassemble the intents in form of multi-levels to get sub-datasets
and train models using these sub-datasets.
Parameters
----------
data_bunch: Data bunch instance with texts, extended_features, intents.
"""
def make_choice(labels: str, prefixs: set) -> bool:
for label in labels.replace(" ", "").split(","):
for prefix in prefixs:
if label.startswith(prefix):
return True
else:
return False
def make_labels(labels_data: np.array, label_set: set) -> List[List[str]]:
labels = []
for labels_str in labels_data:
lbls = []
for label in labels_str.replace(" ", "").split(","):
lbls += [lbl for lbl in label_set if label.startswith(lbl)]
labels.append(lbls)
return labels
make_choice_vect = np.vectorize(make_choice)
for clf_name, label_set in get_intent_labels(data_bunch.intents).items():
if len(label_set) == 1:
self._classifiers[clf_name] = \
OneClassClassifier(list(label_set)[0])
self._reports[clf_name] = {"clf_type": "OneClassClassifier"}
else:
choices = make_choice_vect(data_bunch.intents, label_set)
mlb = MultiLabelBinarizer(classes=list(label_set))
search = self._fit(
X=pd.DataFrame({
"words": data_bunch.words[choices],
"contexts": [json.loads(c) if c else {}
for c in data_bunch.contexts[choices]]}),
y=mlb.fit_transform(
make_labels(data_bunch.intents[choices], label_set))
)
self._classifiers[clf_name] = search.best_estimator_
self._mlbs[clf_name] = mlb
self._reports[clf_name] = {
"clf_type": "sklearn-classifier",
"scoring": search.scoring,
"cv": search.cv,
"best_params": search.best_params_,
"best_score": search.best_score_,
}
def _fit(self, X: pd.DataFrame, y: np.array):
"""Fit classifier
Parameters
----------
# X: pd.DataFrame with columns "words" and "contexts".
X: tuple of "words" and "contexts".
y: intent labels
Returns
-------
Instance of sklearn classifier or OneClassClassifier.
"""
def has_context(contexts):
if contexts.empty:
return False
for context in contexts:
if not context:
continue
if json.loads(context):
return True
else:
return False
if has_context(X["contexts"]):
vectorizer = ColumnTransformer([
# words to vectors
("words2vect",
TfidfVectorizer(token_pattern=r"(?u)(\{\w+\}|\w+)"),
"words"),
# contexts to vectors
("contexts2vect", DictVectorizer(), "contexts")
])
else:
vectorizer = ColumnTransformer([
# words to vectors
("words2vect",
TfidfVectorizer(token_pattern=r"(?u)(\{\w+\}|\w+)"),
"words")
])
pipeline = Pipeline([
# transform words and contexts to vectors
("vectorizer", vectorizer),
# feature values standardization
("scaler", StandardScaler(with_mean=False)),
# dimensionality reduction
# ("svd", PercentSVD()),
# classifier
("clf", RandomForestClassifier())
# ("clf", MLPClassifier(max_iter=1000, hidden_layer_sizes=(50, 50)))
])
params = {
# "svd__percent": np.linspace(0.1, 1, 10), # todo
"clf__n_estimators": range(5, 100, 5),
"clf__max_features": [None, "sqrt", "log2"],
"clf__class_weight": ["balanced", "balanced_subsample", None],
# "clf__hidden_layer_sizes": [(n,) for n in range(10, 110, 10)],
# "clf__activation": ["identity", "logistic", "tanh", "relu"],
# "clf__solver": ["lbfgs", "sgd", "adam"],
# "clf__learning_rate": ["constant", "invscaling", "adaptive"]
}
search = GridSearchCV(estimator=pipeline, param_grid=params, cv=5,
n_jobs=self._n_jobs)
search.fit(X, y)
return search
def predict(self, words: str="", context: Union[str, dict]=None) -> List[str]:
"""
Parameters
----------
words: user input
context: context information
Returns
-------
List of predicted labels.
"""
if not context:
X = | pd.DataFrame({"words": [words], "contexts": ["{}"]}) | pandas.DataFrame |
# Copyright 2017-2020 Lawrence Livermore National Security, LLC and other
# Hatchet Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: MIT
import glob
import struct
import re
import os
import traceback
import numpy as np
import pandas as pd
import multiprocessing as mp
import multiprocessing.sharedctypes
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
# cython imports
try:
import hatchet.cython_modules.libs.subtract_metrics as smc
except ImportError:
print("-" * 80)
print(
"""Error: Shared object (.so) not found for cython module.\n\tPlease run install.sh from the hatchet root directory to build modules."""
)
print("-" * 80)
traceback.print_exc()
raise
import hatchet.graphframe
from hatchet.node import Node
from hatchet.graph import Graph
from hatchet.util.profiler import Timer
from hatchet.frame import Frame
src_file = 0
def init_shared_array(buf_):
"""Initialize shared array."""
global shared_metrics
shared_metrics = buf_
def read_metricdb_file(args):
"""Read a single metricdb file into a 1D array."""
filename, num_nodes, num_threads_per_rank, num_metrics, shape = args
rank = int(
re.search(r"\-(\d+)\-(\d+)\-([\w\d]+)\-(\d+)\-\d.metric-db$", filename).group(1)
)
thread = int(
re.search(r"\-(\d+)\-(\d+)\-([\w\d]+)\-(\d+)\-\d.metric-db$", filename).group(2)
)
with open(filename, "rb") as metricdb:
metricdb.seek(32)
arr1d = np.fromfile(
metricdb, dtype=np.dtype(">f8"), count=num_nodes * num_metrics
)
arr = np.frombuffer(shared_metrics).reshape(shape)
# copy the data in the right place in the larger 2D array of metrics
rank_offset = (rank * num_threads_per_rank + thread) * num_nodes
arr[rank_offset : rank_offset + num_nodes, :2].flat = arr1d.flat
arr[rank_offset : rank_offset + num_nodes, 2] = range(1, num_nodes + 1)
arr[rank_offset : rank_offset + num_nodes, 3] = rank
arr[rank_offset : rank_offset + num_nodes, 4] = thread
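# Layout example for the shared 2-D array filled above: with 2 ranks, 2 threads per
# rank and 100 CCT nodes, the file for rank 1 / thread 0 fills rows
# (1 * 2 + 0) * 100 = 200 .. 299; the leading columns hold the metric values and, as
# written, columns 2, 3 and 4 hold the node id (1..num_nodes), rank and thread.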
class HPCToolkitReader:
"""Read in the various sections of an HPCToolkit experiment.xml file and
metric-db files.
"""
def __init__(self, dir_name):
# this is the name of the HPCToolkit database directory. The directory
# contains an experiment.xml and some metric-db files
self.dir_name = dir_name
root = ET.parse(self.dir_name + "/experiment.xml").getroot()
self.loadmodule_table = next(root.iter("LoadModuleTable"))
self.file_table = next(root.iter("FileTable"))
self.procedure_table = next(root.iter("ProcedureTable"))
self.metricdb_table = next(root.iter("MetricDBTable"))
self.callpath_profile = next(root.iter("SecCallPathProfileData"))
# For a parallel run, there should be one metric-db file per MPI
# process
metricdb_files = glob.glob(self.dir_name + "/*.metric-db")
self.num_metricdb_files = len(metricdb_files)
# We need to know how many threads per rank there are. This counts the
# number of thread 0 metric-db files (i.e., number of ranks), then
# uses this as the divisor to the total number of metric-db files.
metricdb_numranks_files = glob.glob(self.dir_name + "/*-000-*.metric-db")
self.num_ranks = len(metricdb_numranks_files)
self.num_threads_per_rank = int(
self.num_metricdb_files / len(metricdb_numranks_files)
)
# Read one metric-db file to extract the number of nodes in the CCT
# and the number of metrics
with open(metricdb_files[0], "rb") as metricdb:
metricdb.read(18) # skip tag
metricdb.read(5) # skip version TODO: should we?
endian = metricdb.read(1)
if endian == b"b":
self.num_nodes = struct.unpack(">i", metricdb.read(4))[0]
self.num_metrics = struct.unpack(">i", metricdb.read(4))[0]
else:
raise ValueError(
"HPCToolkitReader doesn't support endian '%s'" % endian
)
self.load_modules = {}
self.src_files = {}
self.procedure_names = {}
self.metric_names = {}
# this list of dicts will hold all the node information such as
# procedure name, load module, filename, etc. for all the nodes
self.node_dicts = []
self.timer = Timer()
def fill_tables(self):
"""Read certain sections of the experiment.xml file to create dicts of load
modules, src_files, procedure_names, and metric_names.
"""
for loadm in (self.loadmodule_table).iter("LoadModule"):
self.load_modules[loadm.get("i")] = loadm.get("n")
for filename in (self.file_table).iter("File"):
self.src_files[filename.get("i")] = filename.get("n")
for procedure in (self.procedure_table).iter("Procedure"):
self.procedure_names[procedure.get("i")] = procedure.get("n")
for metric in (self.metricdb_table).iter("MetricDB"):
self.metric_names[metric.get("i")] = metric.get("n")
return (
self.load_modules,
self.src_files,
self.procedure_names,
self.metric_names,
)
def read_all_metricdb_files(self):
"""Read all the metric-db files and create a dataframe with num_nodes X
num_metricdb_files rows and num_metrics columns. Three additional columns
store the node id, MPI process rank, and thread id (if applicable).
"""
metricdb_files = glob.glob(self.dir_name + "/*.metric-db")
metricdb_files.sort()
# All the metric data per node and per process is read into the metrics
# array below. The three additional columns are for storing the implicit
# node id (nid), MPI process rank, and thread id (if applicable).
shape = [self.num_nodes * self.num_metricdb_files, self.num_metrics + 3]
size = int(np.prod(shape))
# shared memory buffer for multiprocessing
shared_buffer = mp.sharedctypes.RawArray("d", size)
pool = mp.Pool(initializer=init_shared_array, initargs=(shared_buffer,))
self.metrics = np.frombuffer(shared_buffer).reshape(shape)
args = [
(
filename,
self.num_nodes,
self.num_threads_per_rank,
self.num_metrics,
shape,
)
for filename in metricdb_files
]
try:
pool.map(read_metricdb_file, args)
finally:
pool.close()
# once all files have been read, create a dataframe of metrics
metric_names = [
self.metric_names[key] for key in sorted(self.metric_names.keys())
]
for idx, name in enumerate(metric_names):
if name == "CPUTIME (usec) (E)" or name == "CPUTIME (sec) (E)":
metric_names[idx] = "time"
if name == "CPUTIME (usec) (I)" or name == "CPUTIME (sec) (I)":
metric_names[idx] = "time (inc)"
self.metric_columns = metric_names
df_columns = self.metric_columns + ["nid", "rank", "thread"]
self.df_metrics = pd.DataFrame(self.metrics, columns=df_columns)
self.df_metrics["nid"] = self.df_metrics["nid"].astype(int, copy=False)
self.df_metrics["rank"] = self.df_metrics["rank"].astype(int, copy=False)
self.df_metrics["thread"] = self.df_metrics["thread"].astype(int, copy=False)
# if number of threads per rank is 1, we do not need to keep the thread ID column
if self.num_threads_per_rank == 1:
del self.df_metrics["thread"]
# used to speedup parse_xml_node
self.np_metrics = self.df_metrics[self.metric_columns].values
# getting the number of execution threads for our stride in
# subtract_exclusive_metric_vals/ num nodes is already calculated
self.total_execution_threads = self.num_threads_per_rank * self.num_ranks
def read(self):
"""Read the experiment.xml file to extract the calling context tree and create
a dataframe out of it. Then merge the two dataframes to create the final
dataframe.
Return:
(GraphFrame): new GraphFrame with HPCToolkit data.
"""
with self.timer.phase("fill tables"):
self.fill_tables()
with self.timer.phase("read metric db"):
self.read_all_metricdb_files()
list_roots = []
# parse the ElementTree to generate a calling context tree
for root in self.callpath_profile.findall("PF"):
global src_file
nid = int(root.get("i"))
src_file = root.get("f")
# start with the root and create the callpath and node for the root
# also a corresponding node_dict to be inserted into the dataframe
graph_root = Node(
Frame(
{"type": "function", "name": self.procedure_names[root.get("n")]}
),
None,
)
node_dict = self.create_node_dict(
nid,
graph_root,
self.procedure_names[root.get("n")],
"PF",
self.src_files[src_file],
int(root.get("l")),
self.load_modules[root.get("lm")],
)
self.node_dicts.append(node_dict)
list_roots.append(graph_root)
# start graph construction at the root
with self.timer.phase("graph construction"):
self.parse_xml_children(root, graph_root)
# put updated metrics back in dataframe
for i, column in enumerate(self.metric_columns):
if "(inc)" not in column:
self.df_metrics[column] = self.np_metrics.T[i]
with self.timer.phase("graph construction"):
graph = Graph(list_roots)
graph.enumerate_traverse()
# create a dataframe for all the nodes in the graph
self.df_nodes = | pd.DataFrame.from_dict(data=self.node_dicts) | pandas.DataFrame.from_dict |
import numpy as np
import pandas as pd
from numba import njit
import pytest
from vectorbt import defaults
from vectorbt.utils import checks, config, decorators, math, array
from tests.utils import hash
# ############# config.py ############# #
class TestConfig:
def test_config(self):
conf = config.Config({'a': 0, 'b': {'c': 1}}, frozen=False)
conf['b']['d'] = 2
conf = config.Config({'a': 0, 'b': {'c': 1}}, frozen=True)
conf['a'] = 2
with pytest.raises(Exception) as e_info:
conf['d'] = 2
# go deeper
conf['b']['c'] = 2
with pytest.raises(Exception) as e_info:
conf['b']['d'] = 2
def test_merge_kwargs(self):
assert config.merge_kwargs({'a': 1}, {'b': 2}) == {'a': 1, 'b': 2}
assert config.merge_kwargs({'a': 1}, {'a': 2}) == {'a': 2}
assert config.merge_kwargs({'a': {'b': 2}}, {'a': {'c': 3}}) == {'a': {'b': 2, 'c': 3}}
assert config.merge_kwargs({'a': {'b': 2}}, {'a': {'b': 3}}) == {'a': {'b': 3}}
# ############# decorators.py ############# #
class TestDecorators:
def test_class_or_instancemethod(self):
class G:
@decorators.class_or_instancemethod
def g(self_or_cls):
if isinstance(self_or_cls, type):
return True # class
return False # instance
assert G.g()
assert not G().g()
def test_custom_property(self):
class G:
@decorators.custom_property(some='key')
def cache_me(self): return np.random.uniform()
assert 'some' in G.cache_me.kwargs
assert G.cache_me.kwargs['some'] == 'key'
def test_custom_method(self):
class G:
@decorators.custom_method(some='key')
def cache_me(self): return np.random.uniform()
assert 'some' in G.cache_me.kwargs
assert G.cache_me.kwargs['some'] == 'key'
def test_cached_property(self):
class G:
@decorators.cached_property(some='key')
def cache_me(self): return np.random.uniform()
assert 'some' in G.cache_me.kwargs
assert G.cache_me.kwargs['some'] == 'key'
class G:
@decorators.cached_property
def cache_me(self): return np.random.uniform()
g = G()
# general caching
cached_number = g.cache_me
assert g.cache_me == cached_number
# clear_cache method
G.cache_me.clear_cache(g)
cached_number2 = g.cache_me
assert cached_number2 != cached_number
assert g.cache_me == cached_number2
# disabled locally
G.cache_me.disabled = True
cached_number3 = g.cache_me
assert cached_number3 != cached_number2
assert g.cache_me != cached_number3
G.cache_me.disabled = False
# disabled globally
defaults.caching = False
cached_number4 = g.cache_me
assert cached_number4 != cached_number3
assert g.cache_me != cached_number4
defaults.caching = True
def test_cached_method(self):
class G:
@decorators.cached_method(some='key')
def cache_me(self): return np.random.uniform()
assert 'some' in G.cache_me.kwargs
assert G.cache_me.kwargs['some'] == 'key'
class G:
@decorators.cached_method
def cache_me(self, b=10): return np.random.uniform() * 10
g = G()
# general caching
cached_number = g.cache_me()
assert g.cache_me() == cached_number
# clear_cache method
G.cache_me.clear_cache(g)
cached_number2 = g.cache_me()
assert cached_number2 != cached_number
assert g.cache_me() == cached_number2
# disabled locally
G.cache_me.disabled = True
cached_number3 = g.cache_me()
assert cached_number3 != cached_number2
assert g.cache_me() != cached_number3
G.cache_me.disabled = False
# disabled globally
defaults.caching = False
cached_number4 = g.cache_me()
assert cached_number4 != cached_number3
assert g.cache_me() != cached_number4
defaults.caching = True
# disabled by non-hashable args
cached_number5 = g.cache_me(b=np.zeros(1))
assert cached_number5 != cached_number4
assert g.cache_me(b=np.zeros(1)) != cached_number5
def test_traverse_attr_kwargs(self):
class A:
@decorators.custom_property(some_key=0)
def a(self): pass
class B:
@decorators.cached_property(some_key=0, child_cls=A)
def a(self): pass
@decorators.custom_method(some_key=1)
def b(self): pass
class C:
@decorators.cached_method(some_key=0, child_cls=B)
def b(self): pass
@decorators.custom_property(some_key=1)
def c(self): pass
assert hash(str(decorators.traverse_attr_kwargs(C))) == 16728515581653529580
assert hash(str(decorators.traverse_attr_kwargs(C, key='some_key'))) == 16728515581653529580
assert hash(str(decorators.traverse_attr_kwargs(C, key='some_key', value=1))) == 703070484833749378
assert hash(str(decorators.traverse_attr_kwargs(C, key='some_key', value=(0, 1)))) == 16728515581653529580
# ############# checks.py ############# #
class TestChecks:
def test_is_pandas(self):
assert not checks.is_pandas(0)
assert not checks.is_pandas(np.array([0]))
assert checks.is_pandas(pd.Series([1, 2, 3]))
assert checks.is_pandas(pd.DataFrame([1, 2, 3]))
def test_is_series(self):
assert not checks.is_series(0)
assert not checks.is_series(np.array([0]))
assert checks.is_series(pd.Series([1, 2, 3]))
assert not checks.is_series(pd.DataFrame([1, 2, 3]))
def test_is_frame(self):
assert not checks.is_frame(0)
assert not checks.is_frame(np.array([0]))
assert not checks.is_frame(pd.Series([1, 2, 3]))
assert checks.is_frame(pd.DataFrame([1, 2, 3]))
def test_is_array(self):
assert not checks.is_array(0)
assert checks.is_array(np.array([0]))
assert checks.is_array(pd.Series([1, 2, 3]))
assert checks.is_array(pd.DataFrame([1, 2, 3]))
def test_is_numba_func(self):
def test_func(x):
return x
@njit
def test_func_nb(x):
return x
assert not checks.is_numba_func(test_func)
assert checks.is_numba_func(test_func_nb)
def test_is_hashable(self):
assert checks.is_hashable(2)
assert not checks.is_hashable(np.asarray(2))
def test_is_index_equal(self):
assert checks.is_index_equal(
pd.Index([0]),
pd.Index([0])
)
assert not checks.is_index_equal(
pd.Index([0]),
pd.Index([1])
)
assert not checks.is_index_equal(
pd.Index([0], name='name'),
pd.Index([0])
)
assert not checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]]),
pd.Index([0])
)
assert checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]]),
pd.MultiIndex.from_arrays([[0], [1]])
)
assert checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]], names=['name1', 'name2']),
pd.MultiIndex.from_arrays([[0], [1]], names=['name1', 'name2'])
)
assert not checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]], names=['name1', 'name2']),
pd.MultiIndex.from_arrays([[0], [1]], names=['name3', 'name4'])
)
def test_is_default_index(self):
assert checks.is_default_index(pd.DataFrame([[1, 2, 3]]).columns)
assert checks.is_default_index(pd.Series([1, 2, 3]).to_frame().columns)
assert checks.is_default_index(pd.Index([0, 1, 2]))
assert not checks.is_default_index(pd.Index([0, 1, 2], name='name'))
def test_is_equal(self):
assert checks.is_equal(np.arange(3), np.arange(3), np.array_equal)
assert not checks.is_equal(np.arange(3), None, np.array_equal)
assert not checks.is_equal(None, np.arange(3), np.array_equal)
assert checks.is_equal(None, None, np.array_equal)
def test_assert_value_in(self):
checks.assert_value_in(0, (0, 1))
with pytest.raises(Exception) as e_info:
checks.assert_value_in(2, (0, 1))
def test_assert_numba_func(self):
def test_func(x):
return x
@njit
def test_func_nb(x):
return x
checks.assert_numba_func(test_func_nb)
with pytest.raises(Exception) as e_info:
checks.assert_numba_func(test_func)
def test_assert_not_none(self):
checks.assert_not_none(0)
with pytest.raises(Exception) as e_info:
checks.assert_not_none(None)
def test_assert_type(self):
checks.assert_type(0, int)
checks.assert_type(np.zeros(1), (np.ndarray, pd.Series))
checks.assert_type( | pd.Series([1, 2, 3]) | pandas.Series |
'''
Python reducer function
Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
'''
'''
Modified by <EMAIL> for AWS lambda map-reduce test.
This reducer function takes in multiple files which are mapper-phase outputs and writes them back to one parquet file in S3.
'''
import boto3
import json
import random
import resource
from io import StringIO
import time
import awswrangler as wr
import pandas as pd
# create an S3 & Dynamo session
s3 = boto3.resource('s3')
s3_client = boto3.client('s3')
# constants
TASK_MAPPER_PREFIX = "task/mapper/";
TASK_REDUCER_PREFIX = "task/reducer/";
def write_to_s3(bucket, key, data, metadata):
# Write to S3 Bucket
s3.Bucket(bucket).put_object(Key=key, Body=data, Metadata=metadata)
def write_pandas_parquet_to_s3(df, bucketName,fname, keyName):
path = "s3://" + str(bucketName) + "/parquet/" + str(keyName)
# dummy dataframe
wr.s3.to_parquet(
df=df,
path=path,
compression='gzip'
)
def lambda_handler(event, context):
start_time = time.time()
job_bucket = event['jobBucket']
bucket = event['bucket']
reducer_keys = event['keys']
job_id = event['jobId']
r_id = event['reducerId']
step_id = event['stepId']
n_reducers = event['nReducers']
# aggr
results = {}
line_count = 0
final_df = pd.DataFrame()
# INPUT CSV => OUTPUT PARQUET
# Download and process all keys
for key in reducer_keys:
response = s3_client.get_object(Bucket=job_bucket, Key=key)
contents = response['Body'].read().decode('utf-8')
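# Skip the first line of each mapper output (assumed to be a header row) and the
# empty trailing element left by the final newline before re-parsing the CSV rows.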
data = contents.split('\n')[1:-1]
df = pd.DataFrame(data,columns=['row'])
#print(df.shape)
df[['row_number','VendorID','tpep_pickup_datetime','tpep_dropoff_datetime','passenger_count','trip_distance','pickup_longitude','pickup_latitude','RatecodeID','store_and_fwd_flag','dropoff_longitude','dropoff_latitude','payment_type','fare_amount','extra','mta_tax','tip_amount','tolls_amount','improvement_surcharge','total_amount','LocationID']] = df.row.str.split(",",expand=True)
df.drop(['row','row_number'],axis=1,inplace=True)
#type convert
df['VendorID'] = pd.to_numeric(df['VendorID'])
df['passenger_count'] = pd.to_numeric(df['passenger_count'])
df['trip_distance'] = pd.to_numeric(df['trip_distance'])
df['pickup_latitude'] = pd.to_numeric(df['pickup_latitude'])
df['pickup_longitude'] = pd.to_numeric(df['pickup_longitude'])
df['RatecodeID'] = pd.to_numeric(df['RatecodeID'])
df['dropoff_longitude'] = pd.to_numeric(df['dropoff_longitude'])
df['dropoff_latitude'] = pd.to_numeric(df['dropoff_latitude'])
df['fare_amount'] = pd.to_numeric(df['fare_amount'])
df['extra'] = pd.to_numeric(df['extra'])
df['mta_tax'] = pd.to_numeric(df['mta_tax'])
df['tip_amount'] = pd.to_numeric(df['tip_amount'])
df['tolls_amount'] = | pd.to_numeric(df['tolls_amount']) | pandas.to_numeric |
# Copyright (C) 2014-2017 <NAME>, <NAME>, <NAME>, <NAME> (in alphabetic order)
#
# This file is part of OpenModal.
#
# OpenModal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# OpenModal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenModal. If not, see <http://www.gnu.org/licenses/>.
'''
Created on 20 May 2014
TODO: That mnums at the beginning of every function, that's bad!
TODO: A lot of refactoring.
TODO: Put tables in a dictionary; that way you have a nice overview
of what is inside and it is also much better :)
@author: Matjaz
'''
import time
import os
import itertools
from datetime import datetime
import pandas as pd
from pandas import ExcelWriter
from OpenModal.anim_tools import zyx_euler_to_rotation_matrix
import numpy as np
import pyuff
import OpenModal.utils as ut
# import _transformations as tr
# Uff fields definitions (human-readable).
types = dict()
types[15] = 'Geometry'
types[82] = 'Lines'
types[151] = 'Header'
types[2411] = 'Geometry'
types[164] = 'Units'
types[58] = 'Measurement'
types[55] = 'Analysis'
types[2420] = 'Coor. sys.'
types[18] = 'Coor. sys.'
# Function type definition.
FUNCTION_TYPE = dict()
FUNCTION_TYPE['General'] = 0 # also: unknown
FUNCTION_TYPE['Time Response'] = 1
FUNCTION_TYPE['Auto Spectrum'] = 2
FUNCTION_TYPE['Cross Spectrum'] = 3
FUNCTION_TYPE['Frequency Response Function'] = 4
FUNCTION_TYPE['Transmissibility'] = 5
FUNCTION_TYPE['Coherence'] = 6
FUNCTION_TYPE['Auto Correlation'] = 7
FUNCTION_TYPE['Cross Correlation'] = 8
FUNCTION_TYPE['Power Spectral Density (PSD)'] = 9
FUNCTION_TYPE['Energy Spectral Density (ESD)'] = 10
FUNCTION_TYPE['Probability Density Function'] = 11
FUNCTION_TYPE['Spectrum'] = 12
FUNCTION_TYPE['Cumulative Frequency Distribution'] = 13
FUNCTION_TYPE['Peaks Valley'] = 14
FUNCTION_TYPE['Stress/Cycles'] = 15
FUNCTION_TYPE['Strain/Cycles'] = 16
FUNCTION_TYPE['Orbit'] = 17
FUNCTION_TYPE['Mode Indicator Function'] = 18
FUNCTION_TYPE['Force Pattern'] = 19
FUNCTION_TYPE['Partial Power'] = 20
FUNCTION_TYPE['Partial Coherence'] = 21
FUNCTION_TYPE['Eigenvalue'] = 22
FUNCTION_TYPE['Eigenvector'] = 23
FUNCTION_TYPE['Shock Response Spectrum'] = 24
FUNCTION_TYPE['Finite Impulse Response Filter'] = 25
FUNCTION_TYPE['Multiple Coherence'] = 26
FUNCTION_TYPE['Order Function'] = 27
FUNCTION_TYPE['Phase Compensation'] = 28
# Specific data type for abscisa/ordinate
SPECIFIC_DATA_TYPE = dict()
SPECIFIC_DATA_TYPE['unknown'] = 0
SPECIFIC_DATA_TYPE['general'] = 1
SPECIFIC_DATA_TYPE['stress'] = 2
SPECIFIC_DATA_TYPE['strain'] = 3
SPECIFIC_DATA_TYPE['temperature'] = 5
SPECIFIC_DATA_TYPE['heat flux'] = 6
SPECIFIC_DATA_TYPE['displacement'] = 8
SPECIFIC_DATA_TYPE['reaction force'] = 9
SPECIFIC_DATA_TYPE['velocity'] = 11
SPECIFIC_DATA_TYPE['acceleration'] = 12
SPECIFIC_DATA_TYPE['excitation force'] = 13
SPECIFIC_DATA_TYPE['pressure'] = 15
SPECIFIC_DATA_TYPE['mass'] = 16
SPECIFIC_DATA_TYPE['time'] = 17
SPECIFIC_DATA_TYPE['frequency'] = 18
SPECIFIC_DATA_TYPE['rpm'] = 19
SPECIFIC_DATA_TYPE['order'] = 20
SPECIFIC_DATA_TYPE['sound pressure'] = 21
SPECIFIC_DATA_TYPE['sound intensity'] = 22
SPECIFIC_DATA_TYPE['sound power'] = 23
# TODO: Fast get and set. Check setting with enlargement.
class ModalData(object):
"""The data object holds all measurement, results and geometry data
"""
def __init__(self):
"""
Constructor
"""
self.create_empty()
def create_empty(self):
"""Create an empty data container."""
# Tables
self.tables = dict()
# Holds the tables, populated by importing a uff file.
# TODO: This is temporary? Maybe, maybe not, might be
# a good idea to have some reference of imported data!
self.uff_import_tables = dict()
self.create_info_table()
self.create_geometry_table()
self.create_measurement_table()
self.create_analysis_table()
self.create_lines_table()
self.create_elements_table()
# Set model id
self.model_id = 0
def create_info_table(self):
"""Creates an empty info table."""
self.tables['info'] = pd.DataFrame(columns=['model_id', 'model_name', 'description', 'units_code', 'length',
'force', 'temp', 'temp_offset'])
# self.tables['info'] = pd.DataFrame(columns=['model_id', 'uffid', 'value'])
def create_geometry_table(self):
"""Creates an empty geometry table."""
self.tables['geometry'] = pd.DataFrame(columns=['model_id', 'uffid', 'node_nums',
'x', 'y', 'z', 'thx', 'thy', 'thz',
'disp_cs', 'def_cs',
'color','clr_r','clr_g','clr_b','clr_a',
'r','phi','cyl_thz'])
def create_measurement_table(self):
"""Creates an empty measurement table."""
self.tables['measurement_index'] = pd.DataFrame(columns=['model_id', 'measurement_id', 'uffid', 'field_type', 'excitation_type',
'func_type', 'rsp_node', 'rsp_dir', 'ref_node',
'ref_dir', 'abscissa_spec_data_type',
'ordinate_spec_data_type', 'orddenom_spec_data_type', 'zero_padding'], dtype=int)
self.tables['measurement_values'] = pd.DataFrame(columns=['model_id', 'measurement_id', 'frq', 'amp'])
self.tables['measurement_values'].amp = self.tables['measurement_values'].amp.astype('complex')
self.tables['measurement_values_td'] = pd.DataFrame(columns=['model_id', 'measurement_id', 'n_avg', 'x_axis',
'excitation', 'response'])
def create_analysis_table(self):
"""Creates an empty analysis table."""
self.tables['analysis_index'] = pd.DataFrame(columns=['model_id', 'analysis_id', 'analysis_method', 'uffid',
'field_type', 'analysis_type', 'data_ch',
'spec_data_type', 'load_case', 'mode_n', 'eig', 'freq',
'freq_step_n', 'node_nums', 'rsp_node', 'rsp_dir',
'ref_node', 'ref_dir', 'data_type',
'eig_real', 'eig_xi', 'spots'])
self.tables['analysis_values'] = pd.DataFrame(columns=['model_id', 'analysis_id', 'analysis_method', 'mode_n',
'node_nums', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6'])
self.tables['analysis_settings'] = pd.DataFrame(columns=['model_id', 'analysis_id', 'analysis_method',
'f_min','f_max', 'nmax', 'err_fn', 'err_xi', ])
self.tables['analysis_stabilisation'] = pd.DataFrame(columns=['model_id', 'analysis_id', 'analysis_method',
'pos', 'size', 'pen_color', 'pen_width',
'symbol', 'brush', 'damp'])
self.tables['analysis_index'].eig = self.tables['analysis_index'].eig.astype('complex')
self.tables['analysis_values'].r1 = self.tables['analysis_values'].r1.astype('complex')
self.tables['analysis_values'].r2 = self.tables['analysis_values'].r2.astype('complex')
self.tables['analysis_values'].r3 = self.tables['analysis_values'].r3.astype('complex')
self.tables['analysis_values'].r4 = self.tables['analysis_values'].r4.astype('complex')
self.tables['analysis_values'].r5 = self.tables['analysis_values'].r5.astype('complex')
self.tables['analysis_values'].r6 = self.tables['analysis_values'].r6.astype('complex')
def create_lines_table(self):
"""Creates an empty lines table."""
self.tables['lines'] = pd.DataFrame(columns=['model_id', 'uffid', 'id', 'field_type', 'trace_num',
'color', 'n_nodes', 'trace_id', 'pos', 'node'])
def create_elements_table(self):
"""Creates an empty elements table."""
# TODO: Missing 'physical property table number' and 'material property ...'
# TODO: Missing 'fe descriptor id', chosen from a list of 232(!) types!!?
# TODO: Missing beam support.
self.tables['elements_index'] = pd.DataFrame(columns=['model_id', 'element_id', 'element_descriptor', 'color',
'nr_of_nodes','clr_r','clr_g','clr_b','clr_a'])
self.tables['elements_values'] = pd.DataFrame(columns=['model_id', 'element_id', 'node_id', 'node_pos'])
def new_model(self, model_id=-1, entries=dict()):
"""Set new model id. Values can be set through entries dictionary, for each
value left unset, default will be used."""
if model_id == -1:
# Create a new model_id. First check if table is empty.
current_models = self.tables['info'].model_id
if current_models.size == 0:
model_id = 0
else:
model_id = current_models.max() + 1
fields = {'db_app': 'ModalData', 'time_db_created': time.strftime("%d-%b-%y %H:%M:%S"),
'time_db_saved': time.strftime("%d-%b-%y %H:%M:%S"), 'program': 'OpenModal',
'model_name': 'DefaultName', 'description': 'DefaultDecription', 'units_code': 9,
'temp': 1, 'temp_mode': 1, 'temp_offset': 1, 'length': 1, 'force': 1,
'units_description': 'User unit system'}
for key in entries:
fields[key] = entries[key]
# TODO: Check if model_id already exists.
input = [model_id, fields['model_name'], fields['description'], fields['units_code'], fields['length'],
fields['force'], fields['temp'], fields['temp_offset']]
new_model = pd.DataFrame([input], columns=['model_id', 'model_name', 'description', 'units_code', 'length',
'force', 'temp', 'temp_offset'])
self.tables['info'] = pd.concat([self.tables['info'], new_model], ignore_index=True)
return model_id
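# Minimal usage sketch under the defaults above (frq and frf stand for a measured
# frequency vector and FRF array):
#   md = ModalData()
#   model_id = md.new_model(entries={'model_name': 'beam', 'description': 'impact test'})
#   md.new_measurement(model_id, excitation_type='impulse', frequency=frq, h=frf,
#                      reference=[1, 3], response=[2, 3])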
def new_measurement(self, model_id, excitation_type, frequency, h, reference=[0, 0], response=[0, 0],
function_type='Frequency Response Function', abscissa='frequency', ordinate='acceleration',
denominator='excitation force', zero_padding=0, td_x_axis=np.array([]),
td_excitation=None, td_response=None):
"""Add a new measurement."""
# Check if model id exists.
if self.tables['info'].model_id.size == 0:
raise ValueError
elif not any(self.tables['info'].model_id == model_id):
raise ValueError
# Prepare a new measurement_id.
if self.tables['measurement_index'].measurement_id.size == 0:
measurement_id = 0
else:
measurement_id = self.tables['measurement_index'].measurement_id.max() + 1
newentry_idx = pd.DataFrame([[model_id, measurement_id, excitation_type, FUNCTION_TYPE[function_type], response[0],
response[1], reference[0], reference[1], SPECIFIC_DATA_TYPE[abscissa],
SPECIFIC_DATA_TYPE[ordinate], SPECIFIC_DATA_TYPE[denominator], zero_padding]],
columns=['model_id', 'measurement_id', 'excitation_type', 'func_type', 'rsp_node', 'rsp_dir',
'ref_node', 'ref_dir', 'abscissa_spec_data_type',
'ordinate_spec_data_type', 'orddenom_spec_data_type', 'zero_padding'])
self.tables['measurement_index'] = pd.concat([ self.tables['measurement_index'],
newentry_idx], ignore_index=True)
# Add entry with measured frf.
newentry_val = pd.DataFrame(columns=['model_id', 'measurement_id', 'frq', 'amp'])
newentry_val['frq'] = frequency
newentry_val['amp'] = h
newentry_val['model_id'] = model_id
newentry_val['measurement_id'] = measurement_id
self.tables['measurement_values'] = pd.concat([self.tables['measurement_values'],
newentry_val], ignore_index=True)
# if td_x_axis.size > 0:
# # TODO: Create it with size you already know. Should be faster?
# newentry_val_td = pd.DataFrame(columns=['model_id', 'measurement_id', 'x_axis', 'excitation', 'response'])
# newentry_val_td['x_axis'] = td_x_axis
# newentry_val_td['excitation'] = td_excitation
# newentry_val_td['response'] = td_response
# newentry_val_td['model_id'] = model_id
# newentry_val_td['measurement_id'] = measurement_id
#
# self.tables['measurement_values_td'] = pd.concat([self.tables['measurement_values_td'], newentry_val_td],
# ignore_index=True)
if td_x_axis.size > 0:
n_averages = len(td_response)
i = 0
# TODO: Optimize here.
for td_excitation_i, td_response_i in zip(td_excitation, td_response):
# TODO: Create it with size you already know. Should be faster?
newentry_val_td = pd.DataFrame(columns=['model_id', 'measurement_id', 'n_avg',
'x_axis', 'excitation', 'response'])
newentry_val_td['x_axis'] = td_x_axis
newentry_val_td['excitation'] = td_excitation_i
newentry_val_td['response'] = td_response_i
newentry_val_td['model_id'] = model_id
newentry_val_td['measurement_id'] = measurement_id
newentry_val_td['n_avg'] = i
i += 1
self.tables['measurement_values_td'] = pd.concat([self.tables['measurement_values_td'], newentry_val_td],
ignore_index=True)
def remove_model(self, model_id):
"""Remove all data connected to the supplied model id."""
try:
el_idx = self.tables['elements_index']
el_vals = self.tables['elements_values']
elements_id = el_idx[el_idx.model_id == model_id].element_id
self.tables['elements_values'] = self.tables['elements_values'][~el_vals.element_id.isin(elements_id)]
self.tables['elements_index'] = self.tables['elements_index'][el_idx.model_id != model_id]
except AttributeError:
print('There is no element data to delete.')
try:
lines = self.tables['lines']
self.tables['lines'] = self.tables['lines'][lines.model_id != model_id]
except AttributeError:
print('There is no line data to delete.')
try:
an_idx = self.tables['analysis_index']
an_vals = self.tables['analysis_values']
analysis_id = an_idx[an_idx.model_id == model_id].analysis_id
self.tables['analysis_values'] = self.tables['analysis_values'][~an_vals.element_id.isin(analysis_id)]
self.tables['analysis_index'] = self.tables['analysis_index'][an_idx.model_id != model_id]
except AttributeError:
print('There is no analysis data to delete.')
try:
me_idx = self.tables['measurement_index']
me_vals = self.tables['measurement_values']
me_vals_td = self.tables['measurement_values_td']
measurement_id = me_idx[me_idx.model_id == model_id].measurement_id
self.tables['measurement_values_td'] = self.tables['measurement_values_td'][~me_vals_td.measurement_id.isin(measurement_id)]
self.tables['measurement_values'] = self.tables['measurement_values'][~me_vals.measurement_id.isin(measurement_id)]
self.tables['measurement_index'] = self.tables['measurement_index'][me_idx.model_id != model_id]
except AttributeError:
print('There is no measurement data to delete.')
try:
geometry = self.tables['geometry']
self.tables['geometry'] = self.tables['geometry'][geometry.model_id != model_id]
except AttributeError:
print('There is no geometry data to delete.')
try:
info = self.tables['info']
self.tables['info'] = self.tables['info'][info.model_id != model_id]
except AttributeError:
print('There is no info data to delete.')
def import_uff(self, fname):
"""Pull data from uff."""
# Make sure you start with new model ids at the appropriate index.
if self.tables['info'].model_id.size > 0:
base_key = self.tables['info'].model_id.max() + 1
else:
base_key=0
uffdata = ModalDataUff(fname, base_key=base_key)
for key in self.tables.keys():
if key in uffdata.tables:
# uffdata.tables[key].model_id += 100
self.tables[key] = pd.concat([self.tables[key], uffdata.tables[key]], ignore_index=True)
self.uff_import_tables[key] = ''
self.file_structure = uffdata.file_structure
def export_to_uff(self, fname, model_ids=[], data_types=[], separate_files_flag=False):
"""Export data to uff."""
model_ids = self.tables['info'].model_id.unique()
if len(model_ids) == 0:
model_ids = self.tables['info'].model_id.unique()
if len(data_types) == 0:
data_types = ['nodes', 'lines', 'elements', 'measurements', 'analyses']
if len(model_ids) == 0:
print('Warning: Empty tables. (No model_ids found).')
return False
t = datetime.now()
folder_timestamp = 'OpenModal Export UFF -- {:%Y %d-%m %H-%M-%S}'.format(t)
export_folder = os.path.join(fname, folder_timestamp)
try:
os.mkdir(export_folder)
except:
print('Warning: File exists. Try again later ...')
return False
for model_id in model_ids:
# -- Write info.
dfi = self.tables['info']
dfi = dfi[dfi.model_id == model_id]
# TODO: Do not overwrite this dfi
model_name = dfi.model_name.values[0]
if not separate_files_flag:
uffwrite=pyuff.UFF(os.path.join(export_folder, '{0}_{1:.0f}.uff'.format(model_name, model_id)))
if len(dfi) != 0:
dset_info = {'db_app': 'modaldata v1',
'model_name': dfi.model_name.values[0],
'description': dfi.description.values[0],
'program': 'Open Modal'}
dset_units = {'units_code': dfi.units_code.values[0],
# TODO: Maybe implement other data.
# 'units_description': dfi.units_description,
# 'temp_mode': dfi.temp_mode,
'length': dfi.length.values[0],
'force': dfi.force.values[0],
'temp': dfi.temp.values[0],
'temp_offset': dfi.temp_offset.values[0]}
# for key in dset_info.keys():
# dset_info[key] = dset_info[key].value.values[0]
dset_info['type'] = 151
# for key in dset_units.keys():
# dset_units[key] = dset_units[key].value.values[0]
dset_units['type'] = 164
if separate_files_flag:
uffwrite=pyuff.UFF(os.path.join(export_folder, '{0}_{1:.0f}_info.uff'.format(model_name, model_id)))
uffwrite._write_set(dset_info, mode='add')
uffwrite._write_set(dset_units, mode='add')
# -- Write Geometry.
if 'nodes' in data_types:
dfg = self.tables['geometry']
#dfg = dfg[dfg.model_id==model_id]
#drop nan lines defined in geometry
model_id_mask=dfg.model_id==model_id
nan_mask = dfg[['node_nums','x', 'y', 'z','thz', 'thy', 'thx' , 'model_id']].notnull().all(axis=1)
comb_mask = model_id_mask & nan_mask
dfg = dfg[comb_mask]
if len(dfg) != 0:
# .. First the coordinate systems. Mind the order of angles (ZYX)
size = len(dfg)
local_cs = np.zeros((size * 4, 3), dtype=float)
th_angles = dfg[['thz', 'thy', 'thx']].values
for i in range(size):
#local_cs[i*4:i*4+3, :] = ut.zyx_euler_to_rotation_matrix(th_angles[i, :])
local_cs[i*4:i*4+3, :] = zyx_euler_to_rotation_matrix(th_angles[i, :]*np.pi/180.)
local_cs[i*4+3, :] = 0.0
dset_cs = {'local_cs': local_cs, 'nodes': dfg[['node_nums']].values, 'type': 2420}
uffwrite._write_set(dset_cs, mode='add')
# .. Then points.
dset_geometry = {'grid_global': dfg[['node_nums', 'x', 'y', 'z']].values,
'export_cs_number': 0,
'cs_color': 8,
'type': 2411}
if separate_files_flag:
uffwrite=pyuff.UFF(os.path.join(export_folder, '{0}_{1:.0f}_nodes.uff'.format(model_name, model_id)))
uffwrite._write_set(dset_geometry, mode='add')
# -- Write Measurements.
if 'measurements' in data_types:
dfi = self.tables['measurement_index']
dfi = dfi[dfi.model_id == model_id]
dfi.field_type = 58
if len(dfi) != 0:
dfv = self.tables['measurement_values']
dfv = dfv[dfv.model_id == model_id]
for id, measurement in dfi.iterrows():
data = dfv[dfv.measurement_id == measurement.measurement_id]
dsets={'type': measurement['field_type'],
'func_type': measurement['func_type'],
'data': data['amp'].values.astype('complex'),
'x': data['frq'].values,
'rsp_node': measurement['rsp_node'],
'rsp_dir': measurement['rsp_dir'],
'ref_node': measurement['ref_node'],
'ref_dir': measurement['ref_dir'],
'rsp_ent_name':model_name, 'ref_ent_name':model_name}
# TODO: Make rsp_ent_name and ref_ent_name fields in measurement_index table.
if pd.isnull(measurement['abscissa_spec_data_type']):
dsets['abscissa_spec_data_type'] = 0
else:
dsets['abscissa_spec_data_type'] = measurement['abscissa_spec_data_type']
if pd.isnull(measurement['ordinate_spec_data_type']):
dsets['ordinate_spec_data_type'] = 0
else:
dsets['ordinate_spec_data_type'] = measurement['ordinate_spec_data_type']
if pd.isnull(measurement['orddenom_spec_data_type']):
dsets['orddenom_spec_data_type'] = 0
else:
dsets['orddenom_spec_data_type'] = measurement['orddenom_spec_data_type']
if separate_files_flag:
uffwrite=pyuff.UFF(os.path.join(export_folder, '{0}_{1:.0f}_measurements.uff'.format(model_name, model_id)))
uffwrite._write_set(dsets, mode='add')
def export_to_csv(self, fname, model_ids=[], data_types=[]):
"""Export data to uff."""
if len(model_ids) == 0:
model_ids = self.tables['info'].model_id.unique()
if len(data_types) == 0:
data_types = ['nodes', 'lines', 'elements', 'measurements', 'analyses']
if len(model_ids) == 0:
print('Warning: Empty tables. (No model_ids found).')
return False
t = datetime.now()
folder_timestamp = 'OpenModal Export CSV -- {:%Y %d-%m %H-%M-%S}'.format(t)
export_folder = os.path.join(fname, folder_timestamp)
try:
os.mkdir(export_folder)
except:
print('Warning: File exists. Try again later ...')
return False
for model_id in model_ids:
# -- Write info.
dfi = self.tables['info']
dfi = dfi[dfi.model_id == model_id]
model_name = '{0}_{1:.0f}'.format(dfi.model_name.values[0], model_id)
model_dir = os.path.join(export_folder, model_name)
os.mkdir(model_dir)
df_ = self.tables['info']
df_[df_.model_id == model_id].to_csv(os.path.join(model_dir, 'info.csv'))
if 'nodes' in data_types:
df_ = self.tables['geometry']
df_[df_.model_id == model_id].to_csv(os.path.join(model_dir, 'geometry.csv'))
# -- Special treatment for measurements
if 'measurements' in data_types:
measurements_dir = os.path.join(model_dir, 'measurements')
os.mkdir(measurements_dir)
df_ = self.tables['measurement_index']
df_[df_.model_id == model_id].to_csv(os.path.join(measurements_dir, 'measurements_index.csv'))
df_ = self.tables['measurement_values']
grouped_measurements = df_[df_.model_id == model_id].groupby('measurement_id')
for id, measurement in grouped_measurements:
measurement['amp_real'] = measurement.amp.real
measurement['amp_imag'] = measurement.amp.imag
measurement[['frq', 'amp_real', 'amp_imag']].to_csv(os.path.join(measurements_dir,
'measurement_{0:.0f}.csv'.format(id)),
index=False)
class ModalDataUff(object):
'''
Reads the uff file and populates the following pandas tables:
-- ModalData.measurement_index : index of all measurements from field 58
-- ModalData.geometry : index of all points with CS from fields 2411 and 15
-- ModalData.info : info about measurements
Based on the position of the field in the uff file, uffid is assigned to each field in the following
manner: first field, uffid = 0, second field, uffid = 1 and so on. Columns are named based on keys
from the UFF class if possible. Fields uffid and field_type (type of field, e.g. 58) are added.
Geometry table combines nodes and their respective CSs, column names are altered.
'''
def __init__(self, fname='../../unvread/data/shield.uff', base_key=0):
'''
Constructor
'''
self.uff_object = pyuff.UFF(fname)
# Start above base_key.
self.base_key = base_key
self.uff_types = self.uff_object.get_set_types()
# print(self.uff_types)
# Models
self.models = dict()
# Tables
self.tables = dict()
# Coordinate-system tables
self.localcs = pd.DataFrame(columns=['model_id', 'uffidcs', 'node_nums', 'x1', 'x2', 'x3',
'y1', 'y2', 'y3',
'z1', 'z2', 'z3'])
self.localeul = pd.DataFrame(columns=['model_id', 'uffidcs', 'node_nums', 'thx', 'thy', 'thz'])
# File structure.
self.file_structure = ['%5d %-10s' % (field, types[field]) for field in self.uff_types]
self.create_model()
def create_model(self):
"""Scans the uff file and creates a model from
geometries and data, which is then populated. The models
are grouped based on the field 151!"""
# -- Scan geometries, each geometry is one model.
mnums = list(np.nonzero(self.uff_types==151)[0])
if len(mnums) == 0:
mnums = list(np.nonzero(self.uff_types==164)[0])
# -- What if there is no geometry? Only one model then I guess ...
if len(mnums) == 0:
print('Warning: There is no INFO or UNITS field!')
self.models[0] = range(len(self.uff_types))
# .. TODO: You have to pass this warning on.
else:
# .. Define intervals, by sequential order, for each model.
for model_id, num in enumerate(mnums):
if model_id == (len(mnums)-1):
self.models[model_id] = range(num, len(self.uff_types))
else:
# .. Last model has special treatment ([x:] instead of [x:y])
self.models[model_id] = range(num, mnums[model_id+1])
for model_id, model in self.models.items():
self.populate_model(model_id+self.base_key, model)
# print(self.models)
# print(self.uff_types)
def populate_model(self, model_id, model):
"""Read all data for each model."""
model = list(model)
self.gen_measurement_table(model_id, model)
self.gen_geometry_table(model_id, model)
self.gen_analysis_table(model_id, model)
self.gen_lines_table(model_id, model)
self.gen_info_table(model_id, model)
# .. TODO: Here is the place to check for connections between
# fields, other than by sequential order. Check if LMS
# writes anything. (It does not!)
def gen_measurement_table(self, model_id, model):
"""Read measurements."""
mnums = np.nonzero(self.uff_types[model] == 58)[0]
mnums += model[0]
if len(mnums) == 0:
return False
mlist = []
#dlist = pd.DataFrame()
# .. Create field list.
sdata = self.uff_object.read_sets(mnums[0])
fields = ['model_id', 'measurement_id', 'uffid', 'field_type']
fields.extend([key for key in sdata.keys() if not ('x' in key or 'data' in key)])
concat_list = []
for mnum in list(mnums):
dlist_ = pd.DataFrame()
sdata = self.uff_object.read_sets(mnum)
# .. Setup a new line in measurement index table.
line = [model_id, mnum, mnum, 58]
line.extend([sdata[key] for key in fields if not ('uffid' in key or 'field_type' in key or 'model_id' in key or 'measurement_id' in key)])
mlist.append(line)
# TODO: Sort out support for complex values here. It is still not quite right!
dlist_['frq'] = sdata['x']
dlist_['amp'] = sdata['data']
dlist_['amp'] = dlist_['amp'].astype('complex')
dlist_['amp'] = sdata['data']
dlist_['uffid'] = mnum
dlist_['measurement_id'] = mnum
dlist_['model_id'] = model_id
concat_list.append(dlist_)
dlist = pd.concat(concat_list, ignore_index=True)
concat_list = []
if 'measurement_index' in self.tables:
self.tables['measurement_index'] = pd.concat([self.tables['measurement_index'], pd.DataFrame(mlist, columns=fields)], ignore_index=True)
self.tables['measurement_values'] = | pd.concat([self.tables['measurement_values'], dlist], ignore_index=True) | pandas.concat |
import pandas as pd
import re
from bs4 import BeautifulSoup
import os.path
def main():
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
#filepath = input('txt file: ')
filepath = 'goodreads.txt'
html_path = os.path.join(BASE_DIR, filepath)
with open(html_path, encoding='utf-8') as namafile:
soup = BeautifulSoup(namafile.read(), features="html.parser")
judul = soup.findAll('td',{'class':'field title'})
penulis = soup.findAll('td',{'class':'field author'})
tanggal = soup.findAll('td',{'class':'field date_read'})
title = []
author = []
date = []
for baris in judul:
title.append(baris.find('a').get_text().strip())
for baris in penulis:
full = re.split(',',baris.find('a').get_text().strip())
try: fullname = full[1].strip()+' '+full[0]
except: fullname = baris.find('a').get_text().strip()
author.append(fullname)
for baris in tanggal:
date.append(baris.find('span').get_text())
tabel = {'Title':title, 'Author':author, 'Status':'Finished', 'Progress':'', 'Book type':'', 'Highlight':'','Year':date}
df = | pd.DataFrame(tabel) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""Copy of Lab4Rn.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Okb2MBZEdgXtPXNqA-zqZkXR5dfNqz28
"""
# !pip install wikipedia
import wikipedia
wikipedia.set_lang("en")
corpus = []
def search(topic):
summaries = []
pages = wikipedia.search(topic)
for page in pages:
content = wikipedia.summary(page)
summaries.append(content)
return summaries
topics = ["travel", "music", "food", "technology", "finance", "law", "politics", "car"]
corpus = {}
for t in topics:
try:
summaries = search(t)
if t in corpus:
corpus[t].extend(summaries)
else:
corpus[t] = summaries
  except Exception:
    # Skip topics whose Wikipedia lookups fail (e.g. disambiguation or missing pages).
    pass
corpus
finalCorpus = []
topics = []
for a, b in corpus.items():
print(a)
print(b)
topics.append(a)
finalCorpus.extend(b)
data = finalCorpus
finalCorpus
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
from sklearn.decomposition import NMF, LatentDirichletAllocation, TruncatedSVD
from sklearn.feature_extraction.text import CountVectorizer
NUM_TOPICS = 3
vectorizer = CountVectorizer(min_df=5, max_df=0.9,
stop_words='english', lowercase=True,
                             token_pattern=r'[a-zA-Z\-][a-zA-Z\-]{2,}')
data_vectorized = vectorizer.fit_transform(data)
# Build a Latent Dirichlet Allocation Model
lda_model = LatentDirichletAllocation(n_components=NUM_TOPICS, max_iter=10, learning_method='online')
lda_Z = lda_model.fit_transform(data_vectorized)
print(lda_Z.shape) # (NO_DOCUMENTS, NO_TOPICS)
# Build a Non-Negative Matrix Factorization Model
nmf_model = NMF(n_components=NUM_TOPICS)
nmf_Z = nmf_model.fit_transform(data_vectorized)
print(nmf_Z.shape) # (NO_DOCUMENTS, NO_TOPICS)
# Build a Latent Semantic Indexing Model
lsi_model = TruncatedSVD(n_components=NUM_TOPICS)
lsi_Z = lsi_model.fit_transform(data_vectorized)
print(lsi_Z.shape) # (NO_DOCUMENTS, NO_TOPICS)
# Let's see how the first document in the corpus looks like in different topic spaces
print(lda_Z[0])
print(nmf_Z[0])
print(lsi_Z[0])
def print_topics(model, vectorizer, top_n=10):
for idx, topic in enumerate(model.components_):
print("Topic %d:" % (idx))
print([(vectorizer.get_feature_names()[i], topic[i])
for i in topic.argsort()[:-top_n - 1:-1]])
print("LDA Model:")
print_topics(lda_model, vectorizer)
print("=" * 50)
print("NMF Model:")
print_topics(nmf_model, vectorizer)
print("=" * 50)
print("LSI Model:")
print_topics(lsi_model, vectorizer)
print("=" * 50)
print(topics)
import pandas as pd
from bokeh.io import push_notebook, show, output_notebook
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, LabelSet
output_notebook()
svd = TruncatedSVD(n_components=2)
documents_2d = svd.fit_transform(data_vectorized)
df = | pd.DataFrame(columns=['x', 'y', 'document']) | pandas.DataFrame |
"""Main module."""
import csv, json, pandas as pd
import os, sys, requests, datetime, time
import zipfile, io
import lxml.html as lhtml
import lxml.html.clean as lhtmlclean
import warnings
from pandas.core.common import SettingWithCopyWarning
warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)
class QualClient:
"""
    QualClient is a python wrapper that provides convenient access to data
    exports directly from Qualtrics into Pandas for further manipulation.
    The client is initiated with an API token and an API URL.
    It provides 3 primary functions:
QualClient.pull_survey_meta():
        Pulls down a complete list of your surveys and additional parameters
such as isActive, Creation Date, Mod Date, Name, and IDs
QualClient.pull_definition(survey_id):
survey_id : str
Takes the supplied survey_id and returns a df with the
            survey's definition info, which identifies things like the
questions asked, question text, question order, and IDs
QualClient.pull_results(survey_id):
survey_id : str
        Takes the supplied survey_id and returns a df of all of the responses
to the survey, with both the raw text and encoding of the response.
        This functionality actually downloads and unzips files from Qualtrics, so be
aware that it might take a moment to return the finalized data.
DF takes the shape of a long table with one response per row.
Example Usage:
client = QualClient(API_Token, API_url)
    definitions = client.pull_definition(survey_id)
print(definitions.head())
"""
def __init__(self, api_token, api_url):
self.api_token = api_token
self.headers = {
'x-api-token': self.api_token,
'content-type': "application/json",
'cache-control': "no-cache"
}
self.api_url = api_url
self.survey_url = self.api_url + 'surveys/'
self.definition_url = self.api_url + 'survey-definitions/'
self.response_url = self.api_url + 'responseexports/'
self.failed_responses = ["cancelled", "failed"]
def pull_survey_meta(self):
arrQualtricsSurveys = []
arrSurveyName = []
arrSurveyActive = []
arrCreation = []
arrMod = []
def GetQualtricsSurveys(qualtricsSurveysURL):
response = requests.get(url=qualtricsSurveysURL,
headers=self.headers)
jsonResponse = response.json()
nextPage = jsonResponse['result']['nextPage']
arrQualtricsSurveys.extend(
[srv['id'] for srv in jsonResponse['result']['elements']])
arrSurveyName.extend(
[srv['name'] for srv in jsonResponse['result']['elements']])
arrSurveyActive.extend([
srv['isActive'] for srv in jsonResponse['result']['elements']
])
arrCreation.extend([
srv['creationDate']
for srv in jsonResponse['result']['elements']
])
arrMod.extend([
srv['lastModified']
for srv in jsonResponse['result']['elements']
])
#Contains nextPage
if (nextPage is not None):
GetQualtricsSurveys(nextPage)
GetQualtricsSurveys(self.survey_url)
df = pd.DataFrame({
'SurveyID': arrQualtricsSurveys,
'Survey_Name': arrSurveyName,
'IsActive': arrSurveyActive,
'Created': arrCreation,
'LastModified': arrMod
})
return df
def pull_definition(self, survey_id):
response = json.loads(
requests.get(
url=self.definition_url + survey_id,
headers=self.headers).content.decode("utf-8"))['result']
question = pd.json_normalize(response['Questions']).melt()
flow = pd.json_normalize(response['SurveyFlow']['Flow'])
if ("EmbeddedData" in flow.columns or "Flow" in flow.columns):
flow.rename(columns={
'ID': 'BlockID',
'Type': 'FlowType'
},
inplace=True)
if not 'BlockID' in flow.columns:
flow['BlockID'] = ""
flow = flow[[
'EmbeddedData', 'FlowID', 'BlockID', 'Flow', 'FlowType'
]].reset_index()
flow.rename(columns={'index': 'FlowSort'}, inplace=True)
flow_block = flow[(
flow.EmbeddedData.isnull() == True)].EmbeddedData.apply(
pd.Series).merge(
flow, right_index=True,
left_index=True).drop(["EmbeddedData"], axis=1).melt(
id_vars=[
'FlowSort', 'FlowID', 'BlockID', 'FlowType'
],
value_name="EmbeddedData")
embed = flow[(
flow.EmbeddedData.isnull() == False)].EmbeddedData.apply(
pd.Series).merge(
flow, right_index=True,
left_index=True).drop(["EmbeddedData"], axis=1).melt(
id_vars=[
'FlowSort', 'FlowID', 'BlockID', 'FlowType'
],
value_name="EmbeddedData")
embed = embed.EmbeddedData.apply(pd.Series).merge(
embed, right_index=True,
left_index=True).drop(["EmbeddedData"],
axis=1).dropna(subset=['Field', 'Type'])
embed = embed[[
'FlowSort', 'FlowID', 'BlockID', 'FlowType', 'Field', 'Type',
'Value'
]]
embed = embed.sort_values(by=['FlowSort'])
combined = flow_block.merge(
embed,
how='outer',
on=['FlowSort', 'FlowID', 'BlockID',
'FlowType']).sort_values(by=['FlowSort'])
combined.drop(["variable", "EmbeddedData"], axis=1, inplace=True)
combined.drop_duplicates(inplace=True)
else:
flow = flow[['FlowID', 'Type']].reset_index()
flow.columns = ['FlowSort', 'FlowID', 'BlockID', 'FlowType']
flow['Field'] = ''
flow['Type'] = ''
flow['Value'] = ''
combined = flow
blocks = pd.json_normalize(response['Blocks']).melt()
blocks[["BlockID",
"BlockSettings"]] = blocks.variable.str.split('.',
1,
expand=True)
blocks = blocks[~blocks['BlockSettings'].str.contains('Options')
& ~blocks['BlockSettings'].str.contains('SubType')]
blocks = blocks.pivot(index='BlockID',
columns='BlockSettings',
values='value')
blocks = blocks['BlockElements'].apply(pd.Series).merge(
blocks, right_index=True,
left_index=True).drop(['BlockElements'], axis=1).melt(
id_vars=['ID', 'Type', 'Description'],
value_name="BlockElement").dropna()
blocks.rename(columns={'ID': 'BlockID'}, inplace=True)
blocks['ElementType'] = blocks['BlockElement']
blocks['ElementType'] = blocks['ElementType'].apply(
lambda x: x['Type'])
blocks['QID'] = blocks['BlockElement'].apply(
lambda x: x['QuestionID'] if 'QuestionID' in x else "")
blocks = blocks.drop(['BlockElement'], axis=1)
blocks.rename(
columns=(lambda x: 'BlockElementSort' if x == 'variable' else
('Block' + x
if (('Block' in x) == False and x != 'QID') else x)),
inplace=True)
blocks = combined.merge(blocks, on='BlockID', how='right')
extract = question[(
question.variable.str.contains('.Language.') == False)]
extract[["QID", "QPath"]] = extract.variable.str.split('.',
1,
expand=True)
extract[["QPath",
"ChoiceSetting"]] = extract.QPath.str.rsplit('.',
1,
expand=True)
extract['value'] = extract.apply(
lambda x: response['Questions'][x.QID]['Labels']
if (x.QPath.startswith("Labels.") == True) else x['value'],
axis=1)
extract['ChoiceSetting'] = extract.apply(
lambda x: None
if (x.QPath.startswith("Labels.") == True) else x.ChoiceSetting,
axis=1)
extract['QPath'] = extract.apply(
lambda x: "Labels"
if (x.QPath.startswith("Labels.") == True) else x.QPath,
axis=1)
question_pvt = extract[(extract.ChoiceSetting.isnull() == True)]
question_pvt = question_pvt.pivot_table(index=['QID'],
columns=['QPath'],
values='value',
aggfunc='first').reset_index()
question_settings = extract[
(extract.QPath.str.contains("Choices.") == False)
& (extract.QPath.str.contains("Answers.") == False)]
choice_settings = question_settings[(
question_settings.ChoiceSetting.str.replace(
'-', '').str.isnumeric() == True)]
question_settings = question_settings[(
question_settings.ChoiceSetting.str.replace(
'-', '').str.isnumeric() == False)]
question_settings['QPath'] = question_settings.apply(
lambda x: x['QPath'] + "." + x['ChoiceSetting'], axis=1)
question_settings['QPath'] = question_settings.apply(
lambda x: x['QPath'].split('.', 2)[0] + "." + x['QPath'].split(
'.', 2)[2]
if "AdditionalQuestions" in x['QPath'] else x['QPath'],
axis=1)
question_settings = question_settings.drop(
columns=['variable', 'ChoiceSetting'])
question_settings = question_settings.pivot_table(
index=['QID'], columns=['QPath'], values='value',
aggfunc='first').reset_index()
question_pvt = question_pvt.merge(question_settings,
how='left',
on='QID')
if (choice_settings.empty == False):
choice_settings['CQID'] = choice_settings.apply(
lambda x: x['QID'] + '-' + x['ChoiceSetting']
if ((x['ChoiceSetting'] is not None) & (
(x['ChoiceSetting']).isnumeric())) else x['QID'],
axis=1)
choice_settings.drop(columns=['variable', 'QID'])
choice_settings = choice_settings.pivot_table(
index=['CQID'],
columns=['QPath'],
values='value',
aggfunc='first').reset_index()
answers = extract[(extract.QPath.str.contains("Answers.") == True)]
if (answers.empty == False):
answers[["QPath",
"CRecode"]] = answers.QPath.str.split('.', 1, expand=True)
answers['CRecode'] = answers['CRecode'].apply(
lambda x: '#' + x.split('.')[0] + '-' + x.split('.')[2]
if "Answers" in x else x)
answers['AnswerSort'] = 1
answers['AnswerSort'] = answers.groupby(
'QID')['AnswerSort'].cumsum()
answers = answers.drop(columns=['variable', 'ChoiceSetting'])
choices_pvt = extract[(extract.QPath.str.contains("Choices.") == True)]
choices_pvt[["QPath",
"CRecode"]] = choices_pvt.QPath.str.split('.',
1,
expand=True)
choices_pvt["IChoiceSetting"] = choices_pvt["CRecode"].apply(
lambda x: None if x is None else (x.split('.', 1)[1]
if x.count('.') > 0 else ""))
choices_pvt["ChoiceSetting"] = choices_pvt.apply(
lambda x: x['IChoiceSetting'] + "." + x['ChoiceSetting']
if "Image" in str(x['IChoiceSetting']) else x['ChoiceSetting'],
axis=1)
choices_pvt["PGRGrpIdx"] = choices_pvt["CRecode"].apply(
lambda x: None if x is None else x.split('.', 1)[0]
if 'Choices' in x else None)
choices_pvt["PGRChoiceIdx"] = choices_pvt["CRecode"].apply(
lambda x: None if x is None else x.rsplit('.', 1)[1]
if "Choices" in x else None)
choices_pvt["CRecode"] = choices_pvt["CRecode"].apply(
lambda x: None if x is None else (x.split('.', 1)[0]
if x.count('.') > 0 else x))
choices_pvt["CRecode"] = choices_pvt.apply(
lambda x: x["CRecode"] if x["PGRChoiceIdx"] is None else "#" + x[
"CRecode"] + "-" + x["PGRChoiceIdx"],
axis=1)
choices_pvt["CQID"] = choices_pvt.apply(
lambda x: x["QID"]
if x["CRecode"] is None else x["QID"] + x["CRecode"]
if "#" in x["CRecode"] else x["QID"] + "-" + x["CRecode"],
axis=1)
choices_pvt = choices_pvt.pivot_table(index=['CQID', 'QID'],
columns=['ChoiceSetting'],
values='value',
aggfunc='first').reset_index()
if (choice_settings.empty == False):
choices_pvt = choices_pvt.merge(choice_settings,
on='CQID',
how='left')
choices_order = extract[(extract.QPath == "ChoiceOrder")]
choices_order = choices_order.value.apply(pd.Series).merge(
choices_order, right_index=True, left_index=True).drop(
["value", "QPath", "variable", "ChoiceSetting"],
axis=1).melt(id_vars=['QID'], value_name="CRecode").dropna()
choices_order.columns = ['QID', 'ChoiceOrder', 'CRecode']
choices_order['CQID'] = choices_order['QID'] + "-" + choices_order[
'CRecode'].astype(str)
### Combine SVF - Blocks - Questions - Choices - ChoiceOrder
svFlattened = choices_pvt.merge(choices_order, how='left', on='CQID')
svFlattened = svFlattened.drop(columns="QID_y")
svFlattened = svFlattened.rename(columns={'QID_x': 'QID'})
svFlattened = question_pvt.merge(svFlattened, how='outer', on='QID')
svFlattened = blocks.merge(svFlattened, how='left', on='QID')
svFlattened['QuestionText'] = svFlattened['QuestionText_Unsafe'].apply(
lambda x: "" if x == "" else lhtmlclean.Cleaner(
style=True).clean_html(lhtml.fromstring(str(x))).text_content(
).replace("nan", "").strip())
svFlattened['Display'] = svFlattened['Display'].apply(
lambda x: "" if x == "" else lhtmlclean.Cleaner(
style=True).clean_html(lhtml.fromstring(str(x))).text_content(
).replace("nan", "").strip())
svFlattened['CQID'] = svFlattened.apply(
lambda x: x.CQID if "QID" in str(x.CQID) else x.Field
if pd.isnull(x.Field) == False else x.QID
if pd.isnull(x.QID) == False else "",
axis=1)
svFlattened = svFlattened.drop(
columns=['AnswerOrder', 'ChoiceOrder_x'], errors='ignore')
csvfilteredColumns = [
'FlowSort', 'FlowID', 'BlockElementSort', 'BlockDescription',
'QID', 'CQID', 'QuestionText', 'QuestionType', 'Selector',
'SubSelector', 'DataExportTag', 'ChoiceDataExportTags_y',
'Display', 'Image.Display', 'Image.ImageLocation',
'VariableNaming', 'ChoiceOrder_y', 'CRecode'
]
for x in csvfilteredColumns:
if (x not in svFlattened.columns):
svFlattened[x] = ''
svFlattenedFiltered = svFlattened[csvfilteredColumns].drop_duplicates(
subset='CQID', ignore_index=True)
# only return filtered, do we need to return the result unfiltered?
return svFlattenedFiltered
def pull_results(self, survey_id):
def pull_file(label, survey_id):
file_type = lambda x: "With Labels" if label == True else "Without Labels"
parameters = "{\"format\": \"csv\", \"useLabels\": "\
+ (str(label)).lower() + ", \"surveyId\": \""\
+ survey_id + "\"" + ", \"endDate\":\"" \
+ str(datetime.datetime.utcnow().isoformat()[0:19]) + "Z\"}"
response = requests.post(url=self.response_url,
headers=self.headers,
data=parameters)
responseFileID = response.json()["result"]["id"]
if (responseFileID is not None):
response = requests.get(url=self.response_url +
responseFileID,
headers=self.headers)
responseFileStatus = response.json()["result"]["status"]
while (responseFileStatus == "in progress"):
time.sleep(5)
response = requests.get(url=self.response_url +
responseFileID,
headers=self.headers)
responseFileStatus = response.json()["result"]["status"]
completion_rate = response.json(
)['result']['percentComplete']
print(
f"File Request ({file_type(label)}) - {completion_rate}%"
)
if (responseFileStatus in self.failed_responses):
print("Error Network Issue / Failed Request : " + survey_id)
responseFileDownload = response.json()["result"]["file"]
response = requests.get(url=responseFileDownload,
headers=self.headers)
else:
print('No Response file ID, please check the survey ID')
with zipfile.ZipFile(io.BytesIO(response.content),
mode='r') as file:
download = file.read(list(file.NameToInfo.keys())[0]).decode()
df = pd.read_csv(io.StringIO(download), low_memory=False)
return df
wlExport = pull_file(True, survey_id)
nlExport = pull_file(False, survey_id)
mdQID = pd.melt(wlExport.iloc[[1]])
mdQID.columns = ["QRecode", "QID"]
mdQID["QID"] = mdQID["QID"].apply(
lambda x: json.loads(x.replace("'", "\""))["ImportId"])
wlExport = wlExport.iloc[2:]
nlExport = nlExport.iloc[2:]
print("Exports are finished - Working on combining them...")
wlExport = wlExport.rename(
columns=lambda x: "ResponseID" if x == "ResponseId" else x)
mdTxtResp = pd.melt(wlExport, id_vars=["ResponseID"])
mdTxtResp.columns = ["ResponseID", "QRecode", "TxtRespAnswer"]
#Join Back ResponseID Values
mdRespIDs = | pd.melt(wlExport, value_vars=["ResponseID"]) | pandas.melt |
import pandas as pd
import numpy as np
from torch.utils.data import Dataset, DataLoader
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel
from transformers import AdamW, get_cosine_with_hard_restarts_schedule_with_warmup
import gc
from tqdm import tqdm
class MyDataset(Dataset):
def __init__(self, df):
super().__init__()
self.data_list = []
self.end_of_text_token = " <|endoftext|> "
for index, row in df.iterrows():
data_str = f"{row[0]}{self.end_of_text_token}"
self.data_list.append(data_str)
def __len__(self):
return len(self.data_list)
def __getitem__(self, item):
return self.data_list[item]
def train(epochs, data_loader, batch_size, tokenizer, model, device):
batch_counter = 0
sum_loss = 0.0
for epoch in range(epochs):
print (f'Running {epoch+1} epoch')
for idx, txt in enumerate(data_loader):
txt = torch.tensor(tokenizer.encode(txt[0]))
txt = txt.unsqueeze(0).to(device)
outputs = model(txt, labels=txt)
loss, _ = outputs[:2]
loss.backward()
sum_loss += loss.data
if idx%batch_size==0:
batch_counter += 1
optimizer.step()
scheduler.step()
optimizer.zero_grad()
model.zero_grad()
if batch_counter == 10:
print(f"Total Loss is {sum_loss}")
batch_counter = 0
sum_loss = 0.0
return model
def save_model(model, name):
torch.save(model.state_dict(), f"{name}.pt")
def choose_from_top_k_top_n(probs, k=50, p=0.8):
ind = np.argpartition(probs, -k)[-k:]
top_prob = probs[ind]
top_prob = {i: top_prob[idx] for idx,i in enumerate(ind)}
sorted_top_prob = {k: v for k, v in sorted(top_prob.items(), key=lambda item: item[1], reverse=True)}
t=0
f=[]
pr = []
for k,v in sorted_top_prob.items():
t+=v
f.append(k)
pr.append(v)
if t>=p:
break
top_prob = pr / np.sum(pr)
token_id = np.random.choice(f, 1, p = top_prob)
return int(token_id)
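# Illustrative sketch (not part of the original script): applies the same top-k /
# nucleus filtering to a made-up probability vector instead of real GPT-2 softmax
# output, to show how k and p interact.
def _choose_from_top_k_top_n_example():
    toy_probs = np.array([0.05, 0.4, 0.05, 0.3, 0.1, 0.1])
    # Keeps the 3 most probable ids, trims them to the smallest set whose cumulative
    # mass reaches p=0.8, renormalises, and samples a single token id from that set.
    return choose_from_top_k_top_n(toy_probs, k=3, p=0.8)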
def generate(tokenizer, model, sentences, label):
result = []
with torch.no_grad():
for idx in tqdm(range(sentences)):
finished = False
cur_ids = torch.tensor(tokenizer.encode(label)).unsqueeze(0).to(device)
for i in range(100):
outputs = model(cur_ids, labels=cur_ids)
loss, logits = outputs[:2]
softmax_logits = torch.softmax(logits[0,-1], dim=0)
if i < 5:
n = 10
else:
n = 5
next_token_id = choose_from_top_k_top_n(softmax_logits.to(device).cpu().numpy())
cur_ids = torch.cat([cur_ids, torch.ones((1,1)).long().to(device) * next_token_id], dim = 1)
if next_token_id in tokenizer.encode('<|endoftext|>'):
finished = True
break
if finished:
output_list = list(cur_ids.squeeze().to(device).cpu().numpy())
output_text = tokenizer.decode(output_list)
result.append(output_text)
else:
output_list = list(cur_ids.squeeze().to(device).cpu().numpy())
output_text = tokenizer.decode(output_list)
result.append(output_text)
return result
if __name__ == '__main__':
#dataset address
dataset_path = '../Data/Train_Dataset.csv'
df = | pd.read_csv(dataset_path) | pandas.read_csv |
import xgboost as xgb
import graphviz
import numpy as np
import pandas as pd
import random
import matplotlib
import textwrap
import scipy.spatial.distance as ssd
from scipy.stats import ks_2samp
from scipy.stats import entropy
import warnings
from sklearn import tree
from sklearn.manifold import TSNE
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import export_graphviz
from sklearn.cluster import KMeans
from scipy.cluster.hierarchy import dendrogram, linkage
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, label_binarize
from sklearn.metrics import precision_score, precision_recall_curve, average_precision_score
from matplotlib import pyplot as plt
import re
from decimal import Decimal  # used by truncateNumber for scientific notation
import pydot  # used by DT_RF_models when isplot=True
import math
from os import listdir
from bokeh.layouts import gridplot
from bokeh.models import LinearAxis,FactorRange,Range1d,LabelSet,Label,ColumnDataSource,HoverTool,WheelZoomTool,PanTool,BoxZoomTool,ResetTool,SaveTool,BasicTicker,ColorBar,LinearColorMapper,PrintfTickFormatter,DataSource
from bokeh.palettes import brewer,inferno,magma,viridis,grey,Category10
from bokeh.plotting import figure, show, output_file
from bokeh.transform import transform,factor_cmap
from bokeh.io import export_png
from graphviz import Source
from itertools import cycle
from sklearn.decomposition import PCA
import rpy2.robjects.packages as rpackages
import rpy2.robjects as robjects
import statsmodels.api as sm
from PIL import Image
Image.warnings.simplefilter('ignore', Image.DecompressionBombWarning)
warnings.filterwarnings('ignore', category=PendingDeprecationWarning)
warnings.filterwarnings('ignore', category=ResourceWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
rstats = rpackages.importr('stats')
warnings.simplefilter("error")
#####print table#####
def ppt(table,lines=30,maxWidth = 18,minWidth=10,keepDecimal=2,lineWidth=170):
if table.shape[0]<1000:
lines=table.shape[0]
if(minWidth<5):
minWidth=5
if(maxWidth<minWidth):
print("minWidth cannot large than maxWidth")
if(lines == -1):
lines = table.shape[0]
dicType = generateTypeDic(table)
dictWidth = generateWidthDic(table,lines,maxWidth,keepDecimal,minWidth)
strTemp='|'
for col in table:
if(dictWidth[col]<minWidth):
strTemp = strTemp + truncateString(str(col).upper(),minWidth)
else:
strTemp = strTemp + truncateString(str(col).upper(),dictWidth[col])
string = textwrap.fill(text=strTemp,width=lineWidth)
print(string)
count = 0
for index in range(0,table.shape[0]):
if(count<lines):
strTemp='|'
for col in table:
                if(dicType[col] == 'str'):
strTemp = strTemp + truncateString(str(table[col].iloc[index]),dictWidth[col])
else:
strTemp = strTemp + truncateNumber(decimalIdentical(table[col].iloc[index],keepDecimal),dictWidth[col])
string = textwrap.fill(text=strTemp,width=lineWidth)
print(string)
else:
break
count +=1
def generateTypeDic(table):
dicType={}
for col in table:
if(type(table[col].iloc[0]) is np.float64):#if type is np.float64 and the element is NaN, the type is defined as np.float64
dicType[col]='float'
elif(type(table[col].iloc[0]) is np.int32):
dicType[col]='int'
else:
dicType[col]='str' #if type is str and the element is NaN, the type is defined as float, so that it will be asigned value as str.
return dicType
def generateWidthDic(table,lines,maxWidth,keepDecimal=2,minWidth=5):
dicWidth={}
for col in table:
width = minWidth
colType = generateTypeDic(table)[col]
        if(colType == 'str'):
for index in range(0,lines):
lenth = len(str(table[col].iloc[index]))
if(lenth > width):
width = lenth
if(width > maxWidth):
width = maxWidth
        elif(colType == 'float' or colType == 'int'):
for index in range(0,lines):
lenth = len(decimalIdentical(table[col].iloc[index],keepDecimal))
if(lenth > width):
width = lenth
if(width > max(9,minWidth)):
width = max(9,minWidth)
dicWidth[col] = width
return dicWidth
def decimalIdentical(flt,kd):
if(np.isnan(flt)):
return str(flt)
flStr = str(round(flt,kd))
try:
dotIndex = len(flStr)-flStr.index('.')-1
except ValueError as ve:
flStr = flStr+'.'
dotIndex = 0
for i in range(dotIndex,kd):
flStr = flStr + '0'
return flStr
def truncateNumber(string, length):
strTemp=''
if(len(string)<9):
strTemp=string
for _ in range(len(string),length+2):
strTemp=" "+strTemp
strTemp=strTemp
else:
scientificNotation='%.2E' % Decimal(string)
strTemp=str(scientificNotation)
for _ in range(len(strTemp),length+2):
strTemp=" "+strTemp
if(len(strTemp)!=length+2):
print('truncateNumber is wrong!'+str(strTemp)+' '+str(length))
else:
return strTemp+'| '
def truncateString(string, length):
strTemp=''
if(len(string)<=length):
strTemp=string
for _ in range(len(string),length+2):
strTemp=" "+strTemp
strTemp=strTemp
else:
strTemp=string[0:length]
strTemp=strTemp+".."
if(len(strTemp)!=length+2):
print('truncateString is wrong!'+str(strTemp))
else:
return strTemp+'| '
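# Illustrative usage sketch (not part of the original module): prints a fixed-width
# preview of a small, made-up DataFrame with one string and one float column.
def _ppt_usage_example():
    demo_df = pd.DataFrame({'compound': ['aspirin', 'caffeine', 'ibuprofen'],
                            'score': [0.91234, 0.4, 12.5]})
    # Header row is upper-cased; floats are padded to `keepDecimal` places.
    ppt(demo_df, lines=3, maxWidth=12, minWidth=6, keepDecimal=2, lineWidth=80)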
def generate_reordered_features(megadata_validation,numeric_features_validation,basic_info_features,simple_scale):
clustering_data_validation = megadata_validation.copy()
clustering_data_validation = scale_matrix(clustering_data_validation[numeric_features_validation],isrow=False,simple_scale=simple_scale)
corr_validation_DF = pd.DataFrame(clustering_data_validation,columns=numeric_features_validation).corr('spearman')
distance_validation_Matrix = corr_validation_DF.values
for i in range(0,len(distance_validation_Matrix)):
distance_validation_Matrix[i]=1-distance_validation_Matrix[i]
distance_validation_Matrix = ssd.squareform(distance_validation_Matrix)
linked = linkage(distance_validation_Matrix,'ward','euclidean',True)
labelList = corr_validation_DF.index
featureDict= {i:[corr_validation_DF.index[i]] for i in range(0,len(corr_validation_DF.index))}
for i in range(0,len(linked)):
index = i+linked.shape[0]+1
firstList = featureDict[linked[i][0]]
for j in featureDict[linked[i][1]]:
firstList.append(j)
if(len(firstList)!=linked[i][3]):
print("the length is not equal")
featureDict[index]=firstList
featureList=featureDict[linked.shape[0]*2]
for i in range(len(basic_info_features)):
featureList.append(basic_info_features[i])
return featureList
def prepare_scaled_data(layer1_df,layer2_df,layer3_df,reordered_feature_list,simple_scale=False):
layer1_mx = layer1_df.copy()
layer1_mx = layer1_mx.replace(0,1E-7)
layer1_mx = layer1_mx.fillna(1E-8)[reordered_feature_list]
layer2_mx = layer2_df.copy()
layer2_mx = layer2_mx.replace(0,1E-7)
layer2_mx = layer2_mx.fillna(1E-8)[reordered_feature_list]
layer3_mx = layer3_df.copy()
layer3_mx = layer3_mx.replace(0,1E-7)
layer3_mx = layer3_mx.fillna(1E-8)[reordered_feature_list]
layer3_mx = layer3_mx.to_numpy()
layer2_mx = layer2_mx.to_numpy()
layer1_mx = layer1_mx.to_numpy()
layer1_mx = scale_matrix(layer1_mx,isrow=False,simple_scale=simple_scale)
layer2_mx = scale_matrix(layer2_mx,isrow=False,simple_scale=simple_scale)
layer3_mx = scale_matrix(layer3_mx,isrow=False,simple_scale=simple_scale)
return layer1_mx,layer2_mx,layer3_mx
def insert_values_between_original_data(for_image_data_matrix):
new_matrix = []
new_matrix.append(for_image_data_matrix[0])
for i in range(1,len(for_image_data_matrix)):
new_matrix.append((for_image_data_matrix[i-1]+for_image_data_matrix[i])/2)
new_matrix.append(for_image_data_matrix[i])
return np.array(new_matrix)
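# Illustrative usage sketch (not part of the original module): row-wise midpoint
# interpolation, turning an n-row matrix into a (2n - 1)-row matrix.
def _insert_values_example():
    mx = np.array([[0.0, 0.0], [1.0, 1.0]])
    # Returns [[0.0, 0.0], [0.5, 0.5], [1.0, 1.0]]
    return insert_values_between_original_data(mx)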
def plot_colorful_CNN_images(layer1_mx,layer2_mx,layer3_mx,cate,path,interpolation_row=0,interpolation_col=0):
for i in list(set(cate)):
ll=[j for j in range(len(cate)) if cate[j] == i]
for_image_data_matrix1 = layer1_mx[ll]
for_image_data_matrix2 = layer2_mx[ll]
for_image_data_matrix3 = layer3_mx[ll]
category = i
for i in range(interpolation_row):
for_image_data_matrix1 = insert_values_between_original_data(for_image_data_matrix1)
for_image_data_matrix2 = insert_values_between_original_data(for_image_data_matrix2)
for_image_data_matrix3 = insert_values_between_original_data(for_image_data_matrix3)
for i in range(interpolation_col):
for_image_data_matrix1 = insert_values_between_original_data(for_image_data_matrix1.T).T
for_image_data_matrix2 = insert_values_between_original_data(for_image_data_matrix2.T).T
for_image_data_matrix3 = insert_values_between_original_data(for_image_data_matrix3.T).T
colorful = [[[for_image_data_matrix1[j][l],for_image_data_matrix2[j][l],for_image_data_matrix3[j][l]] for l in range(0,len(for_image_data_matrix1[j]))] for j in range(0,len(for_image_data_matrix1))]
matplotlib.image.imsave(path+'/combined_'+category+'.png',colorful)
matplotlib.image.imsave(path+'/layer3_'+category+'.png',for_image_data_matrix3)
matplotlib.image.imsave(path+'/layer2_'+category+'.png',for_image_data_matrix2)
matplotlib.image.imsave(path+'/layer1_'+category+'.png',for_image_data_matrix1)
def plot_colorful_images_wrapper(megadata_temp1,megadata_temp2,megadata_temp3,numeric_cols,image_col,interpolation_row,interpolation_col,path,generate_reordered_indices,simple_scale=True):
reordered_features = generate_reordered_features(megadata_temp1,numeric_cols,[],simple_scale)
reordered_indices = generate_reordered_indices(megadata_temp1,reordered_features)
reordered_df1 = pd.DataFrame(megadata_temp1, index=reordered_indices)[reordered_features+[image_col]]
reordered_df2 = pd.DataFrame(megadata_temp2, index=reordered_indices)[reordered_features+[image_col]]
reordered_df3 = pd.DataFrame(megadata_temp3, index=reordered_indices)[reordered_features+[image_col]]
mx1,mx2,mx3 = prepare_scaled_data(reordered_df1,reordered_df2,reordered_df3,reordered_features,simple_scale)
plot_colorful_CNN_images(mx1,mx2,mx3,reordered_df1[image_col].values.tolist(),path,interpolation_row,interpolation_col)
#####Select certain rows from dataFrame based on the combined conditions related to index1 and index2#####
def combined_conditions_filter(condition_map,data_frame,index1,index2):
dataFrame=data_frame.copy()
dataFrame[index1] = dataFrame[index1].astype(str)
dataFrame[index2] = dataFrame[index2].astype(str)
dataFrame['filter'] = dataFrame[index1] + '***' + dataFrame[index2]
lst = list(str(key)+'***'+str(value) for key,value in condition_map.items())
subComLevData = dataFrame[dataFrame['filter'].isin(lst)]
del subComLevData['filter']
return subComLevData
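# Illustrative usage sketch (not part of the original module): keeps only the rows whose
# (index1, index2) pair appears in condition_map. Column names and values are hypothetical.
def _combined_conditions_filter_example():
    demo_df = pd.DataFrame({'plate': ['P1', 'P1', 'P2'],
                            'well': ['A01', 'A02', 'A01'],
                            'value': [1.0, 2.0, 3.0]})
    condition_map = {'P1': 'A02', 'P2': 'A01'}
    # Returns only the ('P1', 'A02') and ('P2', 'A01') rows.
    return combined_conditions_filter(condition_map, demo_df, 'plate', 'well')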
#Unit vectors transformation of a Matrix.
def generate_unit_modules(mx, isrow=True, is_scale=True, simple_scale=True):
test = mx.copy()
if(mx.min()<0):
is_scale=True
print('there is a negative value, the matrix is auto scaled')
if(is_scale):
test=scale_matrix(test,isrow=isrow,simple_scale=simple_scale)
if(isrow):
for i in range(0,len(test)):
if(test[i].sum()!=0):
test[i] = test[i]/test[i].sum()
else:
test_t = np.transpose(test)
for i in range(0,len(test_t)):
if(test_t[i].sum()!=0):
test_t[i] = test_t[i]/test_t[i].sum()
test = np.transpose(test_t)
return test
#return the top or bottom n indices of a list
def topOrBottomN(lst,n,isabs=False,isbottom=False):
if (isabs):
sortList = []
for i in range(0,len(lst)):
sortList.append(abs(lst[i]))
else:
sortList = lst
sortDF = pd.DataFrame({'sort':sortList})
sortDF['index'] = sortDF.index
sortDF = sortDF.sort_values(by='sort', ascending=isbottom)
indexList = sortDF['index'].tolist()
return indexList[0:n]
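# Illustrative usage sketch (not part of the original module): index positions of the two
# largest values, and of the two smallest values by absolute magnitude.
def _top_or_bottom_n_example():
    scores = [0.2, -0.9, 0.5, 0.1]
    top_two = topOrBottomN(scores, 2)                                      # -> [2, 0]
    smallest_two_abs = topOrBottomN(scores, 2, isabs=True, isbottom=True)  # -> [3, 0]
    return top_two, smallest_two_abs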
#scale matrix based on row or col by simple method or median_transform
def scale_matrix(tst,isrow=True,simple_scale=True):
test=np.copy(tst)
if(simple_scale):
if(isrow):
for i in range(0,len(test)):
if(test[i].max()==test[i].min()):
if(test[i].max()==0):
test[i]=np.zeros(len(test[i]))
else:
test[i]=np.ones(len(test[i]))
else:
test[i] = (test[i]-test[i].min())/(test[i].max() - test[i].min())
else:
test_t = np.transpose(test)
for i in range(0,len(test_t)):
                if(test_t[i].max()==test_t[i].min()):
                    if(test_t[i].max()==0):
                        test_t[i]=np.zeros(len(test_t[i]))
                    else:
                        test_t[i]=np.ones(len(test_t[i]))
else:
test_t[i] = (test_t[i]-test_t[i].min())/(test_t[i].max() - test_t[i].min())
test = np.transpose(test_t)
else:
if(isrow):
for i in range(0,len(test)):
test[i] = median_transform(test[i],1,0)
else:
test_t = np.transpose(test)
for i in range(0,len(test_t)):
test_t[i] = median_transform(test[:,i],1,0)
test = np.transpose(test_t)
return test
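# Illustrative usage sketch (not part of the original module): min-max scales each row of a
# small matrix into [0, 1] with simple_scale=True; constant non-zero rows become all ones.
def _scale_matrix_example():
    mx = np.array([[1.0, 2.0, 3.0],
                   [10.0, 10.0, 10.0]])
    # First row becomes [0.0, 0.5, 1.0]; the second row becomes [1.0, 1.0, 1.0].
    return scale_matrix(mx, isrow=True, simple_scale=True)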
#sort data frame
def rrd(DF,sort_column='ID',asc=True,reidx=True):
new_DF=DF.copy()
    new_DF=DF.sort_values(by=sort_column, ascending=asc)
if(reidx):
new_DF=new_DF.reset_index(drop=True)
return new_DF
# dataframe to matrix
def dtm(data_frame,matrix_features,sort_column='ID',asc=True):
data_frame_copy = data_frame.copy()
data_frame_copy = data_frame_copy.sort_values(by=sort_column, ascending=asc)
mtx = data_frame_copy[matrix_features].values
return mtx
# matrix to dataframe
def mtd(mtx,numeric_features, data_frame=pd.DataFrame(),basic_info_feautes=[], sort_column='ID',asc=True):
DF = pd.DataFrame(mtx,columns=numeric_features)
if((data_frame.size>0)&(len(basic_info_feautes)>0)):
DF[basic_info_feautes] = rrd(data_frame,sort_column,asc).reset_index(drop=True)[basic_info_feautes]
return rrd(DF,sort_column)
def scale_transform1(lst1,lst2,scale,lowerbound):
if(len(lst1)!=len(lst2)):
print('lst1 and lst2 are not in the same size')
return None
if(lst2.min()==lst2.max()):
        if(lst2.min()==0):
            return [0 for _ in range(len(lst1))]
        else:
            return [1 for _ in range(len(lst1))]
return ((lst1-lst2.min())/(lst2.max()-lst2.min()))*scale+lowerbound
def scale_transform2(lst1,lst2,scale,lowerbound):
if(len(lst1)!=len(lst2)):
print('lst1 and lst2 are not in the same size')
return None
if(min(lst1.min(),lst2.min())<0):
print('warning: there is a negative value')
return None
if(lst2.max()==0):
print('error: the max value is equal to zero')
return None
return (lst1/lst2.max())*scale+lowerbound
#scale list
def median_transform(lst,scale,lowerbound):
if(len(set(lst))<2):
return np.full(len(lst), (scale+lowerbound)/2)
if(lst.min()==lst.max()):
if(lst.min()==0):
return [0 for _ in range(len(lst))]
else:
return [1 for _ in range(len(lst))]
else:
scaled_list=scale_transform1(lst,lst,scale,lowerbound)
scaled_list = scaled_list/np.median(scaled_list)
lower_list=np.array([i for i in scaled_list if i<=1]).copy()
upper_list=np.array([i for i in scaled_list if i>1]).copy()
for i in range(len(scaled_list)):
if(scaled_list[i]<=1):
if(np.ptp(lower_list)==0):
scaled_list[i]=0
else:
scaled_list[i]=scale_transform1(scaled_list[i],lower_list,0.5*(scale+lowerbound),lowerbound)
else:
scaled_list[i]=scale_transform2(scaled_list[i],upper_list,0.5*(scale+lowerbound),0.5*(scale+lowerbound))
return scaled_list
def unify_df(rna_df,numeric_features,id_col,other_categorical_features):
rna_scaled_df=rna_df.copy()
rna_scaled_df['temp_indices_within_this_function']=rna_scaled_df[id_col]
temp_mx = dtm(rna_scaled_df,numeric_features,sort_column='temp_indices_within_this_function')
temp_mx = generate_unit_modules(temp_mx, isrow=True, is_scale=True, simple_scale=True)
rna_scaled_df = mtd(temp_mx,numeric_features,rna_scaled_df,other_categorical_features+['temp_indices_within_this_function'], sort_column='temp_indices_within_this_function',asc=True)
rna_scaled_df.index=rna_scaled_df['temp_indices_within_this_function']
rna_scaled_df[id_col]=rna_scaled_df['temp_indices_within_this_function']
del rna_scaled_df['temp_indices_within_this_function']
temp_df=rna_df.copy()
temp_df.index=temp_df[id_col]
#control the scale of the mean by dividing by len(numeric_features)
temp_df = pd.DataFrame(scale_transform1(temp_df[numeric_features].T.describe().T['mean'],temp_df[numeric_features].T.describe().T['mean'],1,0)/len(numeric_features))
temp_df.columns=['mean_from_unify_df']
rna_scaled_df=pd.merge(rna_scaled_df,temp_df,left_index=True,right_index=True)
rna_scaled_df=rna_scaled_df.reset_index(drop=True)
new_numeric_features=numeric_features+['mean_from_unify_df']
return rna_scaled_df,new_numeric_features
#####find distinct items in two lists#####
def findDistinct(ind1,ind2):
return (list(np.setdiff1d(ind1, ind2)),list(np.setdiff1d(ind2, ind1)))
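# Illustrative usage sketch (not part of the original module): splits two id lists into
# (items only in the first, items only in the second).
def _find_distinct_example():
    only_in_a, only_in_b = findDistinct(['g1', 'g2', 'g3'], ['g2', 'g4'])
    # only_in_a -> ['g1', 'g3'], only_in_b -> ['g4']
    return only_in_a, only_in_b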
def handle_unbalanced_dataset(df,numeric_features,label,id_column):
max_count = df.groupby([label], as_index=False).count()[[label,id_column]].sort_values(by=id_column, ascending=False).iloc(0)[0][1]
iter_list = df.groupby([label], as_index=False).count()[[label,id_column]].sort_values(by=id_column, ascending=False).values
add_sample_size_dict={i[0]:max_count-i[1] for i in iter_list}
new_DF=df.copy()
num=0
for k,v in add_sample_size_dict.items():
sample_size = df[df[label]==k].shape[0]
sample_matrix = df[df[label]==k][numeric_features].values
new_matrix=[]
for i in range(v):
two_samples_list = random.sample(range(sample_size),2)
new_sample=(sample_matrix[two_samples_list[0]]+sample_matrix[two_samples_list[1]])/2
new_matrix.append(new_sample)
new_matrix = np.array(new_matrix)
if(len(new_matrix)==0):
continue
temp_DF=pd.DataFrame(new_matrix,columns=numeric_features)
temp_DF[id_column]=np.array(['fakeid'+str(j) for j in range(num,num+temp_DF.shape[0])])
temp_DF[label]=k
num=num+temp_DF.shape[0]
new_DF = new_DF.append(temp_DF, sort=False)
new_DF.index = new_DF[id_column]
return new_DF
def cross_validation_split_with_unbalance_data(df,numeric_features,label='Category',id_column='PlateID',test_size=0.2,handle_unbalance=True):
iter_list = df.groupby([label], as_index=False).count()[[label,id_column]].sort_values(by=id_column, ascending=False).values
select_size_dict={i[0]:int(test_size*i[1]) for i in iter_list}
sample_size_dict={i[0]:i[1] for i in iter_list}
columns_list=df.columns
train_matrix=[]
test_matrix=[]
train_index=[]
test_index=[]
for k,v in select_size_dict.items():
sample_matrix = df[df[label]==k].values
selected_list = random.sample(range(sample_size_dict[k]),v)
unselected_list = findDistinct(selected_list,list(range(sample_size_dict[k])))[1]
for idx in selected_list:
test_matrix.append(sample_matrix[idx])
test_index.append(df[df[label]==k].iloc[idx][id_column])
for idx in unselected_list:
train_matrix.append(sample_matrix[idx])
train_index.append(df[df[label]==k].iloc[idx][id_column])
train_DF=pd.DataFrame(train_matrix,columns=columns_list)
test_DF=pd.DataFrame(test_matrix,columns=columns_list)
train_DF.index=np.array(train_index)
test_DF.index=np.array(test_index)
if(handle_unbalance):
train_DF=handle_unbalanced_dataset(train_DF,numeric_features,label,id_column)
return train_DF[numeric_features],test_DF[numeric_features],train_DF[label],test_DF[label]
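# Illustrative usage sketch (not part of the original module): stratified 80/20 split of a
# tiny, hypothetical screening table; unbalance handling is switched off to keep it minimal.
def _cross_validation_split_example():
    demo_df = pd.DataFrame({'id': ['s%d' % i for i in range(10)],
                            'f1': np.random.rand(10),
                            'f2': np.random.rand(10),
                            'Category': ['Control'] * 5 + ['Treated'] * 5})
    X_train, X_test, Y_train, Y_test = cross_validation_split_with_unbalance_data(
        demo_df, ['f1', 'f2'], label='Category', id_column='id',
        test_size=0.2, handle_unbalance=False)
    # With 5 samples per class and test_size=0.2, one sample per class goes to the test set.
    return X_train.shape, X_test.shape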
def DT_RF_models(dataSet,numeric_features,path,isDT = True,iteration=10,testSize =0.2,readList = ['Compound Name'], label = 'Category',DTdenotion='test',DT_maxdepth=2,numberOfTrees = 50,RF_maxdepth=6,isplot=False,id_column='id',handle_unbalance=True):
if(isDT):
model=tree.DecisionTreeClassifier(max_depth=DT_maxdepth)
else:
model=RandomForestClassifier(n_estimators=numberOfTrees,max_depth=RF_maxdepth)
readableDF = dataSet.copy()
X = readableDF[numeric_features]
Y = readableDF[label]
readableDF[id_column]=readableDF[id_column].astype(str)
accuracy = []
fullWrongList=[]
fullTest=np.array([])
fullPredict=[]
for j in range(0,iteration):
X_train, X_test, Y_train, Y_test = cross_validation_split_with_unbalance_data(readableDF,numeric_features,label=label,id_column=id_column,test_size=testSize,handle_unbalance=handle_unbalance)
model = model.fit(X_train,Y_train)
pre_Y = model.predict(X_test)
pre_Y_pro= model.predict_proba(X_test)
Y_test = pd.DataFrame(Y_test)
Y_test[id_column]=X_test.index
# Y_test['index']=np.array([i for i in range(0,Y_test.shape[0])])
# Only for RF
if(not isDT):
for i in range(0,numberOfTrees):
single_tree = model.estimators_[i]
export_graphviz(single_tree,out_file=path+str(j)+'---tree---'+str(i)+'.dot', feature_names = X.columns,rounded = True, precision = 1)
if(isplot):
(graph, ) = pydot.graph_from_dot_file(path+str(j)+'---tree---'+str(i)+'.dot')
graph.write_png(path+str(j)+'---tree---'+str(i)+'.png')
count=0
for i in range(0,len(pre_Y)):
fullTest=np.append(fullTest,Y_test.iloc[i][label])
fullPredict.append(pre_Y_pro[i])
if(pre_Y[i] != Y_test.iloc[i][label]):
count = count+1
string=''
for l in range(0,len(readList)):
string = string + str(readableDF[readableDF[id_column]==Y_test.iloc[i][id_column]][readList[l]].values[0])+'---'
best_preds = np.argmax(pre_Y_pro[i])
singleWrongList = [pre_Y[i],string+Y_test.iloc[i][label],best_preds,pre_Y_pro[i],str(j)]
fullWrongList.append(singleWrongList)
print('------------------accuracy = '+str(1-count/len(pre_Y))+'------------------')
accuracy.append(1-count/len(pre_Y))
#Only for DT, plot DT
if(isDT & isplot):
newData=handle_unbalanced_dataset(dataSet,numeric_features,label=label,id_column=id_column)
model = model.fit(newData[numeric_features],newData[label])
dot_data = tree.export_graphviz(model,out_file=None,feature_names=X.columns,class_names=dataSet.groupby([label],as_index=False).count()[label].tolist(),filled=True,rounded=True,special_characters=True)
graph = graphviz.Source(dot_data)
graph.render(DTdenotion,view=True)
print(np.array(accuracy).mean(),np.array(accuracy).std())
labelList = list(set(fullTest))
labelList.sort()
labelMap= {labelList[i]:i for i in range(len(labelList))}
newfullTest=[labelMap[fullTest[i]] for i in range(len(fullTest))]
return accuracy,fullWrongList,newfullTest,np.array(fullPredict),labelList
def print_full_wrong_list(full_wrong_list):
s = set()
for i in full_wrong_list:
strings = 'Pre-Label: '+i[0]+' Details: '+i[1]+' Probabilities: '+str(i[3])
s.add(strings)
for i in s:
print(i)
def generate_features_values_dict(file):
f=open(file)
text = f.readline()
edges_dict={}
values_dict={}
features_dict={}
while(text):
regex = re.match(r"(\d+)\ ->\ (\d+)", text)
if regex:
if regex.groups()[0] in edges_dict:
edges_dict[regex.groups()[0]].append(regex.groups()[1])
else:
edges_dict[regex.groups()[0]] = [regex.groups()[1]]
regex2 = re.match(r"(\d+)\ \[label=\".+\[(.+)\]\"\]", text)
if regex2:
values_dict[regex2.groups()[0]] = regex2.groups()[1].split(', ')
regex3 = re.match(r"(\d+)\ \[label=\"(?!gini)(.+)\ <=*", text)
if regex3:
features_dict[regex3.groups()[1]]=regex3.groups()[0]
# print(text)
text = f.readline()
features_values_dict={key:[ values_dict[edges_dict[value][0]],values_dict[edges_dict[value][1]] ] for (key,value) in features_dict.items() }
f.close()
return features_values_dict
def generate_RF_feature_importance(path,df,numeric_features,label):
dfc=df.copy()
categories=len(dfc[label].unique())
regex = re.compile(r"\ +", re.IGNORECASE)
files = [path+f for f in listdir(path) if f.endswith('.dot') if not f.startswith('.')]
all_features_dict = {feature:list(np.zeros(categories+1)) for feature in numeric_features}
for file in files:
features_values_dict = generate_features_values_dict(file)
for (key,value) in features_values_dict.items():
key = regex.sub(' ',key.strip(" "))
tempList=[]
count=0
for i in range(0,len(all_features_dict[key])-1):
tempList.append(all_features_dict[key][i]+int(value[1][i])-int(value[0][i]))
count=count+int(value[1][i])+int(value[0][i])
tempList.append(count+all_features_dict[key][len(all_features_dict[key])-1])
all_features_dict[key]=tempList
matrix = []
for (key,value) in all_features_dict.items():
abscount=0
list_temp=[key]
for i in range(0,len(value)-1):
abscount=abscount+abs(value[i])
for i in range(0,len(value)-1):
if(abscount>0):
list_temp.append(value[i]/abscount)
else:
list_temp.append(0)
list_temp.append(abscount)
matrix.append(list_temp)
DF = pd.DataFrame(matrix)
DF.columns = ['Features']+dfc.groupby([label],as_index=False).count()[label].tolist()+['Sample Size']
DF.fillna(0)
return DF
def transform_feature_importance(fullFeatureImportanceDF,label_list):
feature_importance_DF = fullFeatureImportanceDF.copy()
for i in label_list:
feature_importance_DF[i] = round(feature_importance_DF[i],3)
feature_importance_DF['abs_'+i] = abs(feature_importance_DF[i])
feature_importance_DF['max_value'] = feature_importance_DF[['abs_'+i for i in label_list]].T.max()
feature_importance_DF['median_value'] = feature_importance_DF[['abs_'+i for i in label_list]].T.median()
feature_importance_DF['sampleSize_value']=pow(feature_importance_DF['Sample Size'],0.25)
feature_importance_DF['Ability']=feature_importance_DF['max_value']*feature_importance_DF['median_value']*feature_importance_DF['sampleSize_value']*10+5
feature_importance_DF = feature_importance_DF.sort_values(by='Ability', ascending=False)
return feature_importance_DF[['Features']+label_list+['Sample Size','Ability']]
#####plot histogram based on a list of values#####
def plot_histogram(title, measured,outputFilePath, bins_number = 1000):
output_file(outputFilePath)
hist, edges = np.histogram(measured, density=True, bins=bins_number)
p = figure(title=title, plot_width = 750, plot_height = 750,tools='', background_fill_color="#fafafa")
p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:],
fill_color="navy", line_color="white", alpha=0.5)
p.y_range.start = 0
p.legend.location = "center_right"
p.legend.background_fill_color = "#fefefe"
p.xaxis.axis_label = 'x'
p.yaxis.axis_label = 'Pr(x)'
p.grid.grid_line_color="white"
p.x_range = Range1d(0,1.01)
show(p)
return p
def plotBWScatter(DataFrame ,xvalue,yvalue, sizevalue, outputFilePath, readList,plotWidth = 1200, plotHeight = 900, titleName='features importance'):
hover = HoverTool()
tooltipString = ""
for ele in readList:
readTuple = (ele.lower(),ele)
        tooltipString = tooltipString + """<br><font face="Arial" size="4">%s: @%s</font>""" % readTuple
hover.tooltips = tooltipString
tools= [hover,WheelZoomTool(),PanTool(),BoxZoomTool(),ResetTool(),SaveTool()]
source= ColumnDataSource(DataFrame)
p = figure(plot_width = plotWidth, plot_height = plotHeight, tools=tools,title=titleName,toolbar_location='right',x_axis_label=xvalue.lower(),y_axis_label=yvalue.lower(),background_fill_color='white',title_location = 'above')
p.title.text_font_size='15pt'
p.title.align = 'center'
p.xaxis.axis_label_text_font_size='12pt'
p.yaxis.axis_label_text_font_size='12pt'
p.x_range = Range1d(DataFrame[xvalue].min()*1.1,DataFrame[xvalue].max()*1.1)
p.y_range = Range1d(DataFrame[yvalue].min()*1.1,DataFrame[yvalue].max()*1.1)
p.circle(x = xvalue,y = yvalue,size=sizevalue,source=source,color='grey')
p.toolbar.active_scroll=p.select_one(WheelZoomTool)#set default active to scroll tool
if outputFilePath.endswith('png'):
export_png(p, filename=outputFilePath)
else:
output_file(outputFilePath)
show(p)
#k-means clustering method
def k_means_DF(data_frame,numeric_features,clusters=8,is_row=True):
clustering_data_validation = data_frame[numeric_features].copy()
if(is_row):
corr_validation_DF = clustering_data_validation.T.corr()
else:
corr_validation_DF = clustering_data_validation.corr()
kmeans = KMeans(n_clusters=clusters,random_state=100).fit(corr_validation_DF)
clusterDic = {corr_validation_DF.columns[i]:kmeans.labels_[i] for i in range(0,len(kmeans.labels_))}
npArray = np.array([[key,value] for (key,value) in clusterDic.items() ])
DF = pd.DataFrame(npArray)
DF.columns = ['element','group']
return DF
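# Illustrative usage sketch (not part of the original module): groups the feature columns of a
# hypothetical table into two correlation-based k-means clusters (is_row=False clusters columns).
def _k_means_df_example():
    rng = np.random.RandomState(0)
    base = rng.rand(20)
    demo_df = pd.DataFrame({'f1': base, 'f2': base * 2 + 0.1,
                            'f3': 1 - base, 'f4': rng.rand(20)})
    # Returns a frame with an 'element' column (feature name) and a 'group' column (cluster id).
    return k_means_DF(demo_df, ['f1', 'f2', 'f3', 'f4'], clusters=2, is_row=False)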
def plotHeatMap(corrDF , featureList,path_file):
output_file(path_file)
corrDF.columns.name = 'Features'
df = pd.DataFrame(corrDF[featureList].stack(), columns=['Distance']).reset_index()
df.columns=['level_0','Features','Distance']
source = ColumnDataSource(df)
colors = ["#75968f", "#a5bab7", "#c9d9d3", "#e2e2e2", "#dfccce", "#ddb7b1", "#cc7878", "#933b41", "#550b1d"]
mapper = LinearColorMapper(palette=colors, low=df.Distance.min(), high=df.Distance.max())
p = figure(plot_width=3500, plot_height=3500, title="HeatMap",
x_range=featureList, y_range=featureList,
toolbar_location=None, tools="", x_axis_location="above")
p.rect(x="Features", y="level_0", width=1, height=1, source=source,line_color=None, fill_color=transform('Distance', mapper))
color_bar = ColorBar(color_mapper=mapper, location=(0, 0),
ticker=BasicTicker(desired_num_ticks=len(colors)),
formatter=PrintfTickFormatter(format="%d%%"))
p.add_layout(color_bar, 'right')
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.axis.major_label_text_font_size = "30pt"
p.axis.major_label_standoff = 0
p.xaxis.major_label_orientation = 1.0
show(p)
def heatMap(DF , path_file, x_size=3500, y_size=3500,font_size="15pt"):
featureList=DF.columns.tolist()
indexList=DF.index.tolist()
DF.columns.name = 'Features'
df = pd.DataFrame(DF[featureList].stack(), columns=['Distance']).reset_index()
df.columns=['level_0','Features','Distance']
source = ColumnDataSource(df)
colors = ["#75968f", "#a5bab7", "#c9d9d3", "#e2e2e2", "#dfccce", "#ddb7b1", "#cc7878", "#933b41", "#550b1d"]
mapper = LinearColorMapper(palette=colors, low=df.Distance.min(), high=df.Distance.max())
p = figure(plot_width=x_size, plot_height=y_size, title="HeatMap",
x_range=featureList, y_range=indexList,
toolbar_location=None, tools="", x_axis_location="above")
p.rect(x="Features", y="level_0", width=1, height=1, source=source,line_color=None, fill_color=transform('Distance', mapper))
color_bar = ColorBar(color_mapper=mapper, location=(0, 0),
ticker=BasicTicker(desired_num_ticks=len(colors)),
formatter=PrintfTickFormatter(format="%.2f"))
p.add_layout(color_bar, 'right')
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.axis.major_label_standoff = 0
p.xaxis.major_label_orientation = 1.0
p.axis.major_label_text_font_size = font_size
if path_file.endswith('png'):
export_png(p, filename=path_file)
else:
output_file(path_file)
show(p)
def plot_heatmap_for_kmeans_groups(data_frame,numeric_features,path,clusters=8,is_row=True):
result_DF = k_means_DF(data_frame,numeric_features,clusters,is_row)
for k in range(0,clusters):
group_filter = result_DF['group'].astype(str)==str(k)
subFeatureList = result_DF[group_filter]['element'].values
if(is_row):
subNormalData = data_frame[numeric_features].T[subFeatureList].copy()
else:
subNormalData = data_frame[subFeatureList].copy()
if(subNormalData.shape[1]<2):
continue
subcorrDF = subNormalData.corr()
subcorrDF.columns=[str(i) for i in subcorrDF.columns.tolist()]
assert len(subFeatureList) == subcorrDF.shape[0]
subDistMatrix = subcorrDF.values
for i in range(0,len(subDistMatrix)):
subDistMatrix[i]=1-subDistMatrix[i]
subDistMatrix = ssd.squareform(subDistMatrix)
sublinked = linkage(subDistMatrix,'ward','euclidean',True)
subFeatureDict= {i:[subcorrDF.columns[i]] for i in range(0,len(subcorrDF.columns))}
for i in range(0,len(sublinked)):
index = i+sublinked.shape[0]+1
firstList = subFeatureDict[sublinked[i][0]]
for j in subFeatureDict[sublinked[i][1]]:
firstList.append(j)
if(len(firstList)!=sublinked[i][3]):
print("the length is not equal")
subFeatureDict[index]=firstList
subFeatureList=subFeatureDict[sublinked.shape[0]*2]
strFeatureList = [str(i) for i in subFeatureList]
subcorrDF.index=subcorrDF.columns
subcorrDF=subcorrDF.T[subFeatureList].T
plotHeatMap(subcorrDF[subFeatureList].reset_index(drop=True),strFeatureList,path+'/heatmap-'+str(k)+'.html')
if clusters==1:
return strFeatureList
def plot_precision_recall_curve(full_test,full_predict,label_list,class_num=4,title='ROC curve'):
if(class_num==2):
full_test=label_binarize(full_test,classes=list(range(0,3)))
full_test=np.array([np.array([i[0],i[1]]) for i in full_test])
else:
full_test=label_binarize(full_test,classes=list(range(0,class_num)))
precision = dict()
recall = dict()
average_precision=dict()
for i in range(0,class_num):
precision[i],recall[i],_ = precision_recall_curve(full_test[:,i],full_predict[:,i])
average_precision[i] = average_precision_score(full_test[:,i],full_predict[:,i])
precision['micro'],recall['micro'],_=precision_recall_curve(full_test.ravel(),full_predict.ravel())
colors = cycle(['navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal'])
plt.figure(figsize=(7, 8))
labels = []
lines = []
for i, color in zip(range(class_num), colors):
l, = plt.plot(recall[i], precision[i], color=color, lw=2)
lines.append(l)
labels.append('Precision-recall for class {0} (AUC = {1:0.2f})'
''.format(label_list[i], average_precision[i]))
fig = plt.gcf()
fig.subplots_adjust(bottom=0.25)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title(title)
plt.legend(lines, labels, loc=(0, -.38), prop=dict(size=14))
plt.show()
def print_precision_recall_accuracy(full_test,full_predict,label_list,class_num=4):
right=0
wrong=0
for i in range(len(full_test)):
if(np.argmax(full_predict[i]) == int(full_test[i])):
right =right+1
else:
wrong=wrong+1
print("Overall Accuray: ",right/(right+wrong))
for n in range(class_num):
tp=0
fp=0
fn=0
for i in range(len(full_test)):
if(np.argmax(full_predict[i])==n):
if(np.argmax(full_predict[i]) == int(full_test[i])):
tp=tp+1
else:
fp=fp+1
elif(int(full_test[i])==n):
fn=fn+1
print(label_list[n],"label size:",tp+fn)
print(label_list[n],"Recall: ",tp/(tp+fn))
if((tp+fp)==0):
print(label_list[n],"Precision: ",0)
else:
print(label_list[n],"Precision: ",tp/(tp+fp))
##########Venn-Abers Predictor##########
### This part of codes is taken from https://github.com/ptocca/VennABERS, All credit of this part goes to the author of this repository.###
# Some elementary functions to speak the same language as the paper
# (at some point we'll just replace the occurrence of the calls with the function body itself)
def push(x,stack):
stack.append(x)
def pop(stack):
return stack.pop()
def top(stack):
return stack[-1]
def nextToTop(stack):
return stack[-2]
# perhaps inefficient but clear implementation
def nonleftTurn(a,b,c):
d1 = b-a
d2 = c-b
return np.cross(d1,d2)<=0
def nonrightTurn(a,b,c):
d1 = b-a
d2 = c-b
return np.cross(d1,d2)>=0
def slope(a,b):
ax,ay = a
bx,by = b
return (by-ay)/(bx-ax)
def notBelow(t,p1,p2):
p1x,p1y = p1
p2x,p2y = p2
tx,ty = t
m = (p2y-p1y)/(p2x-p1x)
b = (p2x*p1y - p1x*p2y)/(p2x-p1x)
return (ty >= tx*m+b)
kPrime = None
# Because we cannot have negative indices in Python (they have another meaning), I use a dictionary
def algorithm1(P):
global kPrime
S = []
P[-1] = np.array((-1,-1))
push(P[-1],S)
push(P[0],S)
#put P[0] at the end of S
for i in range(1,kPrime+1):
#nextToTop(S):S[len(S)-2] top(S):S[len(S)-1] pop(S):drop the last element
#cross product for 2 dimension vector return the value of axis z
#cross product vector of vec1 and vec2 is the perpendicular vector with the plane consist by vec1 and vec2
while len(S)>1 and nonleftTurn(nextToTop(S),top(S),P[i]):
pop(S)
push(P[i],S)
return S
def algorithm2(P,S):
global kPrime
Sprime = S[::-1] # reverse the stack
F1 = np.zeros((kPrime+1,))
for i in range(1,kPrime+1):
F1[i] = slope(top(Sprime),nextToTop(Sprime))
P[i-1] = P[i-2]+P[i]-P[i-1]
if notBelow(P[i-1],top(Sprime),nextToTop(Sprime)):
continue
pop(Sprime)
while len(Sprime)>1 and nonleftTurn(P[i-1],top(Sprime),nextToTop(Sprime)):
pop(Sprime)
push(P[i-1],Sprime)
return F1
def algorithm3(P):
global kPrime
P[kPrime+1] = P[kPrime]+np.array((1.0,0.0))
S = []
push(P[kPrime+1],S)
push(P[kPrime],S)
for i in range(kPrime-1,0-1,-1): # k'-1,k'-2,...,0
while len(S)>1 and nonrightTurn(nextToTop(S),top(S),P[i]):
pop(S)
push(P[i],S)
return S
def algorithm4(P,S):
global kPrime
Sprime = S[::-1] # reverse the stack
F0 = np.zeros((kPrime+1,))
for i in range(kPrime,1-1,-1): # k',k'-1,...,1
F0[i] = slope(top(Sprime),nextToTop(Sprime))
P[i] = P[i-1]+P[i+1]-P[i]
if notBelow(P[i],top(Sprime),nextToTop(Sprime)):
continue
pop(Sprime)
while len(Sprime)>1 and nonrightTurn(P[i],top(Sprime),nextToTop(Sprime)):
pop(Sprime)
push(P[i],Sprime)
return F0[1:]
def prepareData(calibrPoints):
global kPrime
#sort score_label_list based on ascending score
ptsSorted = sorted(calibrPoints)
#xs score np.array, ys, label np.array, both sorted
xs = np.fromiter((p[0] for p in ptsSorted),float)
ys = np.fromiter((p[1] for p in ptsSorted),float)
ptsUnique,ptsIndex,ptsInverse,ptsCounts = np.unique(xs,
return_index=True,
return_counts=True,
return_inverse=True)
a = np.zeros(ptsUnique.shape)
#a: for a unique score, how many items labeled 1.
np.add.at(a,ptsInverse,ys)
# now a contains the sums of ys for each unique value of the objects
w = ptsCounts
yPrime = a/w
#yPrime: the purity of label for each unique score
yCsd = np.cumsum(w*yPrime) # Might as well do just np.cumsum(a)
#yCsd accumulation of label1 through unique score list
xPrime = np.cumsum(w)
#xPrime: accumulation of observations through unique score list
kPrime = len(xPrime)
#kPrime: the number of unique scores
return yPrime,yCsd,xPrime,ptsUnique
def computeF(xPrime,yCsd):
P = {0:np.array((0,0))}
P.update({i+1:np.array((k,v)) for i,(k,v) in enumerate(zip(xPrime,yCsd))})
#P is (i->(xPrime[i],yCsd[i]))
S = algorithm1(P)
F1 = algorithm2(P,S)
# P = {}
# P.update({i+1:np.array((k,v)) for i,(k,v) in enumerate(zip(xPrime,yCsd))})
S = algorithm3(P)
F0 = algorithm4(P,S)
return F0,F1
def getFVal(F0,F1,ptsUnique,testObjects):
pos0 = np.searchsorted(ptsUnique[1:],testObjects,side='right')
pos1 = np.searchsorted(ptsUnique[:-1],testObjects,side='left')+1
return F0[pos0],F1[pos1]
def ScoresToMultiProbs(calibrPoints,testObjects):
# sort the points, transform into unique objects, with weights and updated values
yPrime,yCsd,xPrime,ptsUnique = prepareData(calibrPoints)
# compute the F0 and F1 functions from the CSD
F0,F1 = computeF(xPrime,yCsd)
# compute the values for the given test objects
p0,p1 = getFVal(F0,F1,ptsUnique,testObjects)
return p0,p1
def computeF1(yCsd,xPrime):
global kPrime
P = {0:np.array((0,0))}
P.update({i+1:np.array((k,v)) for i,(k,v) in enumerate(zip(xPrime,yCsd))})
S = algorithm1(P)
F1 = algorithm2(P,S)
return F1
def ScoresToMultiProbsV2(calibrPoints,testObjects):
# sort the points, transform into unique objects, with weights and updated values
yPrime,yCsd,xPrime,ptsUnique = prepareData(calibrPoints)
# compute the F0 and F1 functions from the CSD
F1 = computeF1(yCsd,xPrime)
pos1 = np.searchsorted(ptsUnique[:-1],testObjects,side='left')+1
p1 = F1[pos1]
yPrime,yCsd,xPrime,ptsUnique = prepareData((-x,1-y) for x,y in calibrPoints)
F0 = 1 - computeF1(yCsd,xPrime)
pos0 = np.searchsorted(ptsUnique[:-1],testObjects,side='left')+1
p0 = F0[pos0]
return p0,p1
def generate_label_from_probability(p0,p1,testScores,isprint=True):
p = p1/(1-p0+p1)
full_test=np.array([np.array([1-i,i]) for i in p])
t_p=[(int(round(testScores[i])),int(round(p[i])),testScores[i],p[i]) for i in range(0,len(p))]
#label from score, label from probability, score, probability
count=0
for i in range(0,len(t_p)):
if (t_p[i][0]!=t_p[i][1]):
count = count+1
if(isprint):
print("differ",count,t_p[i])
return t_p,full_test
##########End of Venn-Abers Predictor##########
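# Hedged usage sketch (not part of the original pipeline): illustrates the expected inputs
# and outputs of ScoresToMultiProbs with synthetic calibration data; merging p0/p1 into a
# single probability via p1/(1-p0+p1) mirrors generate_label_from_probability above.
def _venn_abers_usage_sketch():
    rng = np.random.RandomState(0)
    scores = rng.rand(50)                                        # classifier scores in [0, 1]
    labels = (scores + 0.1 * rng.randn(50) > 0.5).astype(float)  # noisy binary labels
    calibr_points = list(zip(scores, labels))                    # (score, label) pairs
    test_scores = [0.2, 0.5, 0.8]
    p0, p1 = ScoresToMultiProbs(calibr_points, test_scores)
    return p1 / (1 - p0 + p1)                                    # calibrated probabilities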
def xgboostModel_for_venn(train,test,selectedData_Indices,n_vap=10,label = 'Control',category = 'Category',num_round = 100):
train_temp=train.copy()
test_temp=test.copy()
train_temp['cv_id']=train_temp.index
labelList = [label,'ZZZZZZZ']
train_temp.loc[train_temp[category]!=label,category]='ZZZZZZZ'
test_temp.loc[test_temp[category]!=label,category]='ZZZZZZZ'
score_and_label_list=[]
test_score=[]
regex = re.compile(r"\[|\]|<|\ ", re.IGNORECASE)
param = {'max_depth':2,'eta':0.3,'silent':1,'objective':'binary:logistic','learningrate':0.1} #'binary:logistic' 'multi:softprob' 'num_class':2,
train_temp.columns = [regex.sub('_',col) for col in train_temp.columns.values]
test_temp.columns = [regex.sub('_',col) for col in test_temp.columns.values]
selectedData_Indices = [regex.sub('_',col) for col in selectedData_Indices]
labelEncoder = LabelEncoder()
for _ in range(n_vap):
X_train,X_test,Y_train,Y_test = cross_validation_split_with_unbalance_data(train_temp.reset_index(drop=True).copy(),selectedData_Indices,label=category,id_column='cv_id',test_size=0.2,handle_unbalance=False)
Y_train = labelEncoder.fit_transform(Y_train.values)
Y_test = labelEncoder.fit_transform(Y_test.values)
dtrain = xgb.DMatrix(X_train,label=Y_train)
dtest = xgb.DMatrix(X_test,label=Y_test)
bst = xgb.train(param,dtrain,num_round,feval='map5eval',maximize=True)
preds = bst.predict(dtest)
best_preds = np.asarray([round(value) for value in preds])
Y_test = pd.DataFrame(Y_test).reset_index()
for i in range(0,len(best_preds)):
score_and_label_list.append((preds[i],Y_test.iloc[i][0]))
count=0
X_train=train_temp[selectedData_Indices]
X_test=test_temp[selectedData_Indices]
Y_train=train_temp[category]
Y_test=test_temp[category]
Y_train = labelEncoder.fit_transform(Y_train.values)
Y_test = labelEncoder.fit_transform(Y_test.values)
dtrain = xgb.DMatrix(X_train,label=Y_train)
dtest = xgb.DMatrix(X_test,label=Y_test)
bst = xgb.train(param,dtrain,num_round,feval='map5eval',maximize=True)
preds = bst.predict(dtest)
best_preds = np.asarray([round(value) for value in preds])
Y_test = pd.DataFrame(Y_test).reset_index()
for i in range(0,len(best_preds)):
test_score.append(preds[i])
if(best_preds[i] != Y_test.iloc[i][0]):
count=count+1
fullPredict=[np.array([1-i,i]) for i in list(preds)]
p0,p1 = ScoresToMultiProbs(score_and_label_list,test_score)
label_from_probability,full_predic_venn = generate_label_from_probability(p0,p1,test_score,False)
readable_pre=[i[0] for i in full_predic_venn]
test[label]=readable_pre
return test,Y_test[0].to_numpy(),np.array(fullPredict),labelList
def tSNEPlot(oriData,data_Indices,read_list,color_col,storing_loc,size_col = 5, iters=1000, perp=2, title='tSNE',num_components=2):
tsne = TSNE(n_components=num_components,random_state=0,n_iter=iters,perplexity=perp)
tSNE_DF = oriData.copy()
tSNE_DF=tSNE_DF.reset_index(drop=True)
tSNE_DF_2d = (tSNE_DF[data_Indices] - tSNE_DF[data_Indices].mean()) / (tSNE_DF[data_Indices].max() - tSNE_DF[data_Indices].min()).fillna(0)
tSNE_DF_2d = tsne.fit_transform(tSNE_DF_2d.to_numpy())
tSNE_DF_2d = pd.DataFrame(tSNE_DF_2d).reset_index(drop=True)
tSNE_DF_2d.columns=[str(i) for i in range(1,1+num_components)]
for i in read_list+[color_col]:
tSNE_DF_2d[i] = tSNE_DF[i]
tSNE_DF_2d[color_col]=tSNE_DF_2d[color_col].astype(str)
plotColorScatter(tSNE_DF_2d ,xvalue = '1',yvalue = '2', sizevalue = size_col, outputFilePath=storing_loc,plotWidth = 750, plotHeight = 750, readList = read_list,titleName=title,colorColumn=color_col,colorPattern=viridis)
return tSNE_DF_2d
def plotColorScatter(DataFrame ,xvalue = '0',yvalue = '1', sizevalue = 'size', outputFilePath='/abc/test.html',plotWidth = 750, plotHeight = 750, readList = ['1','2'],titleName='tSNE', colorColumn="Category", colorPattern=viridis):
factors = DataFrame[colorColumn].unique()
if len(factors)<8:
color_map = factor_cmap(colorColumn,factors=factors,palette=['#f03b20','#feb24c','#ffeda0','#636363','#a1d99b','#31a354','#3182bd'])
else:
color_map = factor_cmap(colorColumn,factors=factors,palette=colorPattern(len(factors)))
hover = HoverTool()
tooltipString = ""
for ele in readList:
ele=str(ele)
readTuple = (ele.lower(),ele)
tooltipString = tooltipString + """<br><font face="Arial" size="4">%s: @%s<font>""" % readTuple
hover.tooltips = tooltipString
tools= [hover,WheelZoomTool(),PanTool(),BoxZoomTool(),ResetTool(),SaveTool()]
source= ColumnDataSource(DataFrame)
output_file(outputFilePath)
p = figure(plot_width = plotWidth, plot_height = plotHeight, tools=tools,title=titleName,toolbar_location='right',x_axis_label=xvalue.lower(),y_axis_label=yvalue.lower(),background_fill_color='white',title_location = 'above')
p.title.text_font_size='15pt'
p.title.align = 'center'
p.xaxis.axis_label_text_font_size='12pt'
p.yaxis.axis_label_text_font_size='12pt'
p.x_range = Range1d(DataFrame[xvalue].min()*1.1,DataFrame[xvalue].max()*1.1)
p.y_range = Range1d(DataFrame[yvalue].min()*1.1,DataFrame[yvalue].max()*1.1)
p.circle(x = xvalue,y = yvalue,size=sizevalue,source=source,color=color_map,legend=colorColumn)
p.legend.location = "top_left"
p.toolbar.active_scroll=p.select_one(WheelZoomTool)
if outputFilePath.endswith('png'):
export_png(p, filename=outputFilePath)
else:
output_file(outputFilePath)
show(p)
def print_full_wrong_list(full_wrong_list):
s = set()
for i in full_wrong_list:
strings = 'Pre-Label: '+i[0]+' Original_data: '+i[1]+' Probability: '+str(i[3])
s.add(strings)
for i in s:
print(i)
def xgboost_multi_classification(input_df,numeric_features_validation,iteration=10,test_size=0.2,max_depth=2,num_trees=50,label_column='Category',id_column='PlateID',handle_unbalance=True,readList=['PlateID','Compound Name']):
XGBData = input_df.copy()
selectedData_Indices = numeric_features_validation.copy() # data_Indices
num_class=len(XGBData[label_column].unique().tolist())
regex = re.compile(r"\[|\]|<|\ ", re.IGNORECASE)
param = {'max_depth':max_depth,'eta':0.3,'silent':1,'objective':'multi:softprob','num_class':num_class,'learningrate':0.1}
num_round = num_trees
labelList = XGBData.groupby([label_column],as_index=False).mean()[label_column].tolist()
label = 0
accuracy = []
X = XGBData.reset_index()[selectedData_Indices]
Y = XGBData.reset_index()[label_column]
X.columns = [regex.sub('_',col) for col in X.columns.values]
XGBData.columns = [regex.sub('_',col) for col in XGBData.columns.values]
selectedData_Indices = [regex.sub('_',col) for col in selectedData_Indices]
labelEncoder = LabelEncoder()
labelEncoded = labelEncoder.fit_transform(XGBData.reset_index()[label_column].values)
fullWrongList=[]
fullTest=np.array([])
fullPredict=[]
for j in range(0,iteration):
X_train, X_test, Y_train, Y_test = cross_validation_split_with_unbalance_data(XGBData,selectedData_Indices,label=label_column,id_column=id_column,test_size=test_size,handle_unbalance=handle_unbalance)
Y_train = labelEncoder.fit_transform(Y_train.values)
Y_test = labelEncoder.fit_transform(Y_test.values)
fullTest=np.concatenate((fullTest,Y_test),axis=0)
dtrain = xgb.DMatrix(X_train,label=Y_train)
dtest = xgb.DMatrix(X_test,label=Y_test)
bst = xgb.train(param,dtrain,num_round,feval='map5eval',maximize=True)
preds = bst.predict(dtest)
fullPredict=fullPredict+list(preds)
best_preds = np.asarray([np.argmax(line) for line in preds])
precision = precision_score(Y_test,best_preds,average='macro')
Y_test = pd.DataFrame(Y_test).reset_index()
count=0
for i in range(0,len(best_preds)):
if(best_preds[i] != Y_test.iloc[i][label]):
count=count+1
string=''
for l in range(0,len(readList)):
string = string + str(XGBData.reset_index().iloc[X_test.reset_index(drop=True).index[i]][readList[l]])+'---'
singleWrongList = [labelList[best_preds[i]],string+labelList[Y_test.iloc[i][label]],str(j),preds[i]]
fullWrongList.append(singleWrongList)
print('------------------accuracy = '+str(1-count/len(best_preds))+'------------------')
accuracy.append(1-count/len(best_preds))
#bst.dump_model(storePath)
pArray = np.array(accuracy)
print(pArray.mean(),pArray.std())
return pArray,fullWrongList,fullTest,np.array(fullPredict),labelList
def combined_eXGBT_classifier(training_set,numeric_features_validation,testing_set,n_vap=10,label_column = 'Category',max_depth=2,num_trees=50):
num_class=len(training_set[label_column].unique().tolist())
df_te = testing_set.copy()
for i in training_set[label_column].unique().tolist():
df_te,full_test,full_predict,label_list =xgboostModel_for_venn(training_set,df_te,numeric_features_validation,n_vap,label =i,category = label_column,num_round = num_trees)
XGBData = training_set.copy()
selectedData_Indices = numeric_features_validation # data_Indices
regex = re.compile(r"\[|\]|<|\ ", re.IGNORECASE)
param = {'max_depth':max_depth,'eta':0.3,'silent':1,'objective':'multi:softprob','num_class':num_class,'learningrate':0.1}
labelList = XGBData.groupby([label_column],as_index=False).mean()[label_column].tolist()
X = XGBData.reset_index()[selectedData_Indices]
Y = XGBData.reset_index()[label_column]
Z = df_te[selectedData_Indices]
X.columns = [regex.sub('_',col) for col in X.columns.values]
Z.columns = [regex.sub('_',col) for col in Z.columns.values]
labelEncoder = LabelEncoder()
labelEncoded = labelEncoder.fit_transform(Y.values)
dtrain = xgb.DMatrix(X,label=labelEncoded)
bst = xgb.train(param,dtrain,num_trees,feval='map5eval',maximize=True)
dtest = xgb.DMatrix(Z)
preds = bst.predict(dtest)
best_preds = np.asarray([np.argmax(line) for line in preds])
readable_pre=[labelList[i] for i in best_preds]
df_te['multi_eXGBT_pre_label']=readable_pre
return df_te
def transform_predict_result_DF(predict_result_DF,label_col,threshold=0.1):
id_col='predict_result_DF_indices'
predict_result_DF[id_col]=predict_result_DF.index
    #Try the true label set first; fall back to the predicted labels if some labels are missing as probability columns.
try:
label_list = predict_result_DF[label_col].unique().tolist()
predict_result_DF['max']=predict_result_DF[label_list].T.max()
except KeyError as keyE:
label_list = predict_result_DF['multi_eXGBT_pre_label'].unique().tolist()
predict_result_DF['max']=predict_result_DF[label_list].T.max()
print('Notice: The predicted labels were used instead of full labels')
min_Filter = predict_result_DF['max']<threshold
predict_result_DF.loc[min_Filter,'F_label']=predict_result_DF.loc[min_Filter,'multi_eXGBT_pre_label']
max_Filter = predict_result_DF['max']>=threshold
for i in label_list:
analogue_filter = predict_result_DF['max']==predict_result_DF[i]
predict_result_DF.loc[analogue_filter&max_Filter,'F_label']=i
predict_result_DF = predict_result_DF.rename({'max': 'probability'}, axis='columns')
temp1= predict_result_DF.groupby([id_col,'F_label'], as_index=False).mean()[[id_col,'F_label','probability']]
temp1['ID']=temp1[id_col].astype(str)+temp1['probability'].astype(str)
temp2= temp1.groupby([id_col], as_index=False).max()[[id_col,'probability']]
temp2['ID']=temp2[id_col].astype(str)+temp2['probability'].astype(str)
temp3=temp2.merge(temp1, on='ID', how='left')[[id_col+'_x','probability_x','F_label']]
temp3.columns=[id_col,'confidence','predicted_label']
temp3.groupby(['predicted_label'], as_index=False).count()[['predicted_label','confidence']]
temp3=temp3.merge(predict_result_DF, on=id_col, how='left')[[id_col,'confidence','predicted_label',label_col]]
fake_filter = temp3[id_col].astype(str).str.startswith('fake')
return predict_result_DF,temp3[~fake_filter]
def generate_expressed_matrix(gene_dfc,select_gene ,group_col = 'group',id_col='cell'):
np_temp = gene_dfc[select_gene].to_numpy()
lst=[]
for i in np_temp:
lst_t=[]
for j in i:
if j==0:
lst_t.append(0)
else:
lst_t.append(1)
lst.append(lst_t)
lst=np.array(lst)
zero_test_df = pd.DataFrame(lst,columns=select_gene)
zero_test_df[[id_col,group_col]]=gene_dfc[[id_col,group_col]]
return zero_test_df
def Drode_DE_gene_detection(gene_dfc,select_gene,feature='gene',id_col='cell',group_col='group',is_unify=True):
if(is_unify):
gene_dfc_unify,_ = unify_df(gene_dfc,select_gene,id_col,[group_col])
else:
gene_dfc_unify=gene_dfc.copy()
gene_dfc_unify= gene_dfc_unify.reset_index(drop=True)
zero_test_df = generate_expressed_matrix(gene_dfc = gene_dfc_unify,select_gene = select_gene,group_col = group_col,id_col=id_col)
differentiated_df = pd.DataFrame()
for gene in select_gene:
print(gene)
temp = zero_test_df.groupby(by=[group_col,gene],as_index=False)[[id_col]].count().copy()
temp = pd.pivot_table(temp, index=group_col, columns=gene, values=id_col)
if zero_test_df[zero_test_df[gene]==0].shape[0]==0:
temp[0]=np.zeros(temp.shape[0])
temp['non_0_rate']=np.ones(temp.shape[0])
elif zero_test_df[zero_test_df[gene]==1].shape[0]==0:
temp[1]=np.zeros(temp.shape[0])
temp['non_0_rate']=np.zeros(temp.shape[0])
else:
temp=temp.fillna(0)
temp['non_0_rate']=temp[1]/(temp[0]+temp[1])
temp['size']=temp[0]+temp[1]
temp[group_col]=temp.index
temp_np = temp.to_numpy()
lst=[]
for i in range(len(temp_np)):
for j in range(i+1,len(temp_np)):
x = gene_dfc_unify[(gene_dfc_unify[group_col]==str(temp_np[i][4])) & (gene_dfc_unify[gene]!=0)][gene].to_numpy()
y = gene_dfc_unify[(gene_dfc_unify[group_col]==str(temp_np[j][4])) & (gene_dfc_unify[gene]!=0)][gene].to_numpy()
true_mean_x = gene_dfc[(gene_dfc[group_col]==str(temp_np[i][4])) & (gene_dfc[gene]!=0)][gene].to_numpy()
true_mean_y = gene_dfc[(gene_dfc[group_col]==str(temp_np[j][4])) & (gene_dfc[gene]!=0)][gene].to_numpy()
if((len(x)<3) & (len(y)<3)):
continue
elif len(x)<3:
mean_x=0
mean_y=y.mean()
same_distribution_pval = 1
elif len(y)<3:
mean_x=x.mean()
mean_y=0
same_distribution_pval = 1
else:
mean_x=x.mean()
mean_y=y.mean()
same_distribution_pval = (ks_2samp(x, y)[1])
if(len(true_mean_x)>0):
true_mean_x=true_mean_x.mean()
else:
true_mean_x=0
if(len(true_mean_y)>0):
true_mean_y=true_mean_y.mean()
else:
true_mean_y=0
if temp_np[i][0]+temp_np[j][0]==0: #all zeros
same_0rate_pval=1
elif temp_np[i][0]+temp_np[j][0]==temp_np[i][3]+temp_np[j][3]: #all ones
same_0rate_pval=1
else:
zscore, same_0rate_pval = sm.stats.proportions_ztest([temp_np[i][0], temp_np[j][0]], [temp_np[i][3], temp_np[j][3]], alternative='two-sided')
if same_0rate_pval<=0.05:
differentiated_pval=1-same_0rate_pval
elif same_distribution_pval<=0.05:
differentiated_pval=0.95-same_distribution_pval
else:
differentiated_pval=0.9-same_0rate_pval*same_distribution_pval
lst.append([gene,temp_np[i][4],temp_np[j][4],differentiated_pval,1-same_0rate_pval,1-same_distribution_pval,temp_np[i][2],temp_np[j][2],temp_np[i][1],temp_np[j][1],true_mean_x,true_mean_y])
temp_df = pd.DataFrame(lst,columns=[feature,'group_1','group_2','deprob','d0prob','d1prob','posrate_1','posrate_2','pos_1','pos_2','posmean_1','posmean_2'])
differentiated_df=differentiated_df.append(temp_df)
return differentiated_df
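# Hedged usage sketch (illustrative only; the gene names are placeholders, real input is the
# per-cell expression dataframe used throughout this module):
#   de_df = Drode_DE_gene_detection(gene_dfc, select_gene=['GeneA', 'GeneB'],
#                                   id_col='cell', group_col='group', is_unify=False)
#   de_df holds one row per gene and group pair, with 'deprob' as the combined
#   differential-expression score consumed by select_de_gene below.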
def select_de_gene(drode_de_gene_df,score_col='deprob',feature='gene',num=20):
drode_de_gene_df = drode_de_gene_df[drode_de_gene_df[score_col]>0.9].copy()
drode_de_gene_df['group']=drode_de_gene_df['group_1']+'***'+drode_de_gene_df['group_2']
groups=drode_de_gene_df['group'].unique().tolist()
group_df=pd.DataFrame()
for group in groups:
temp_df = drode_de_gene_df[drode_de_gene_df['group']==group][['gene',score_col,'group']].copy()
temp_df = temp_df.sort_values(by=score_col,ascending=False).reset_index(drop=True)
group_df = group_df.append(temp_df.iloc[0:num])
de_gene_dWT4 = group_df[feature].unique().tolist()
return de_gene_dWT4
def cluster_cells(gene_dfc,select_gene,group='CD4SP',group_col='group',id_col='cell',path='/home/ivan/Desktop/Project2/MyData/pipeline'):
cluster_df = gene_dfc[gene_dfc[group_col]==group][select_gene+[id_col,group_col]]
cluster_df = cluster_df.reset_index(drop=True)
cluster_df = rrd(cluster_df,id_col,True,False)
cluster_df = cluster_df.replace(0,1e-7)
mx=cluster_df[select_gene].to_numpy()
scaled_matrix = generate_unit_modules(mx, isrow=True, is_scale=False, simple_scale=True)
dis_mx=[]
for i in range(len(scaled_matrix)):
lst=[]
for j in range(len(scaled_matrix)):
lst.append(entropy(scaled_matrix[i], scaled_matrix[j], base=None))
dis_mx.append(lst)
dis_mx = np.array(dis_mx)
dis_df = pd.DataFrame(dis_mx,columns=cluster_df[id_col].tolist())
dis_df.index = cluster_df[id_col].tolist()
lst = plot_heatmap_for_kmeans_groups(data_frame=dis_df,numeric_features=cluster_df[id_col].tolist(),path=path, clusters=1, is_row=True)
return lst
def de_gene_description(drode_de_gene_df):
drode_de_gene_df_2=drode_de_gene_df.copy()
drode_de_gene_df_2['group_1']=drode_de_gene_df['group_2']
drode_de_gene_df_2['group_2']=drode_de_gene_df['group_1']
drode_de_gene_df_2['posmean_1']=drode_de_gene_df['posmean_2']
drode_de_gene_df_2['posmean_2']=drode_de_gene_df['posmean_1']
drode_de_gene_df_2['posrate_1']=drode_de_gene_df['posrate_2']
drode_de_gene_df_2['posrate_2']=drode_de_gene_df['posrate_1']
drode_de_gene_df_2=drode_de_gene_df_2.append(drode_de_gene_df)
cate_lst = drode_de_gene_df_2['group_1'].unique().tolist()
    temp_mean_df = pd.pivot_table(drode_de_gene_df_2, index='gene', columns='group_2', values='posmean_2')
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import pytest
from recordlinkage.preprocessing import clean
from recordlinkage.preprocessing import phonenumbers
from recordlinkage.preprocessing import phonetic
from recordlinkage.preprocessing import phonetic_algorithms
from recordlinkage.preprocessing import value_occurence
class TestCleaningStandardise(object):
def test_clean(self):
values = pd.Series([
'Mary-ann', 'Bob :)', 'Angel', 'Bob (alias Billy)', 'Mary ann',
'John', np.nan
])
expected = pd.Series(
['mary ann', 'bob', 'angel', 'bob', 'mary ann', 'john', np.nan])
clean_series = clean(values)
# Check if series are identical.
        pdt.assert_series_equal(clean_series, expected)
import json
import logging
import socketio
from .constants import *
_LOGGER = logging.getLogger(__name__)
ringalarm_devices_list = []
required_columns = [DEVICE_ZID, DEVICE_NAME, DEVICE_BATTERY_STATUS, DEVICE_BATTERY_LEVEL, DEVICE_TYPE, \
DEVICE_ROOM_ID, DEVICE_TAMPER_STATUS, \
DEVICE_ON, DEVICE_MOTION_STATUS, DEVICE_FAULTED, \
DEVICE_CONTROLLER, DEVICE_MAPPED_TYPE, DEVICE_SOURCE, DEVICE_LOCKED, DEVICE_CALLBACK]
custom_columns = [DEVICE_CONTROLLER, DEVICE_MAPPED_TYPE, DEVICE_SOURCE, DEVICE_CALLBACK]
def get_oauth_token(username, password):
import requests
import socketio
data = {'username': username, 'grant_type': 'password', 'scope': 'client',
'client_id': 'ring_official_android', 'password': password}
response = requests.post(OATH_ENDPOINT, data=data)
statusCode = response.status_code
oauth_token = None
responseJSON = response.json()
if statusCode == 200:
oauth_token = responseJSON.get('access_token', None)
_LOGGER.info("Oauth Token obtained")
return oauth_token
def get_locations(oauth_token):
import requests
data = {}
headers = {'content-type': 'application/x-www-form-urlencoded', 'authorization': 'Bearer ' + oauth_token}
response = requests.get(LOCATIONS_ENDPOINT, data=data, headers=headers).json()
user_locations = response.get('user_locations', None)
_LOGGER.info("User locations are " + str(user_locations))
return user_locations
class RingLocation(object):
def __init__(self, oauth_token):
self.is_connected = False
self.token = None
self.params = None
self.asset_devices_present = None
self.total_hubs = 0
self.hubs_devices_obtained = 0
self.locations = []
self.location_id = None
self.token = oauth_token
#self.token = self.get_oauth_token()
self.sio=None
if (self.token):
self.is_connected = True
def set_callbacks(self, **kwargs):
self.async_add_device_callback = kwargs.get('async_add_device')
self.async_update_device_callback = kwargs.get('async_update_device')
def send_command_ring(self, zid, dst, cmd, data={}):
_payload = {
"body": [
{
"zid": zid,
"command": {
"v1": [
{
"commandType": cmd,
"data": data
}
]
}
}
],
"datatype": "DeviceInfoSetType",
"dst": dst,
"msg": "DeviceInfoSet",
"seq": 3
}
self.sio.emit("message", _payload)
def get_devices(self, location_id):
#import socketio
hubs = RingHubs(location_id, self.token)
self.sio = socketio.Client()
self.sio.connect(hubs.wss_url, transports='websocket')
for hub in hubs.assets:
asset = hub.get('uuid', None)
self.total_hubs = self.total_hubs + 1
initial_request_get_device_list = {"msg": "DeviceInfoDocGetList", "dst": asset, "seq": 2}
self.sio.emit("message", initial_request_get_device_list)
_LOGGER.info("Total hubs " + str(self.total_hubs))
@self.sio.event
def SessionInfo(data):
# print(json.dumps(data, indent=4))
pass
@self.sio.event
def DeviceInfoSet(data):
pass
@self.sio.event
def DataUpdate(data):
if data["datatype"] == 'HubDisconnectionEventType':
print ("Hub is is disconnected")
try:
if data["datatype"] == "DeviceInfoDocType":
entity_dict = {}
if self.async_update_device_callback:
updated_entities = _build_update_entity_list(data)
self.async_update_device_callback(updated_entities)
except KeyError:
pass
@self.sio.event
def message(data):
import pandas as pd
if data['msg'] == 'DeviceInfoSet':
pass
else:
try:
if data["datatype"] == "DeviceInfoDocType":
global ringalarm_devices_list
_build_initial_entity_list(data)
self.hubs_devices_obtained = self.hubs_devices_obtained + 1
if self.hubs_devices_obtained == self.total_hubs:
r = pd.DataFrame()
for i in ringalarm_devices_list:
                                r = pd.concat([r, i], ignore_index=True, sort=False)
import os
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
import numpy as np
import json
from sklearn.metrics import accuracy_score
from src.data.strava_data_load_preprocess import (
load_week_start_times_data,
load_lgbm_model_results,
load_logreg_model_results,
load_lgbm_heatmap,
load_logreg_heatmap,
load_lgbm_model,
load_logreg_model,
)
ACTIVITY_COLOR_MAP = {
"AlpineSki": "#636EFA",
"Ride": "#00CC96",
"NordicSki": "#EF553B",
"Run": "#AB63FA",
"Swim": "#FFA15A",
"Ski": "#19D3F3",
"Workout": "#FF6692",
"WeightTraining": "#B6E880",
}
def activity_pie_plot(df, val, label, title):
"""Builds a plotly pie plot broken by activity.
Args:
df (DataFrame): raw data to include in the pie plot
val (string): which column to plot.
label (string): what the lables should display in the legend,
title (string): Title to be displayed on the plot
Returns:
[Plotly Express Figure]: The complete pie plot of activity types
"""
fig = px.pie(
df,
values=val,
names=label,
# color_discrete_sequence= kaha_color_list,
title=title,
labels={"type": "Activity Type"},
)
fig.update_traces(textposition="inside", textinfo="label+percent", sort=False)
fig.update_layout(
showlegend=True,
title_x=0.5,
title_font_size=25,
font_size=15,
legend={"traceorder": "normal"},
)
return fig
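# Hedged usage note (the column names below are assumptions for illustration; the real
# dataframe comes from the Strava data loaders imported above):
#   fig = activity_pie_plot(activity_summary_df, val='count', label='type',
#                           title='Activities by Type')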
def plot_lgbm_model_predictions(
train_data, plot_data, labels, data_set, lgbm_model=None
):
"""Generates the LGBM results plot using plotly express and prediction region mask.
Returns:
Plotly Express Figure: the LGBM model results plot including prediction region mask.
"""
try:
lgbm_results = load_lgbm_model_results(data_set)
except FileNotFoundError:
print("No LGBM " + data_set + " Results Found, generating")
lgbm_model = load_lgbm_model()
prediction = lgbm_model.predict(
train_data, num_iteration=lgbm_model.best_iteration
)
lgbm_results = pd.DataFrame({"label": labels, "prediction": prediction})
acc = accuracy_score(lgbm_results.label, lgbm_results.prediction.round())
# results = pd.DataFrame({'label': labels, 'prediction': prediction}, columns = ['label','prediction']).to_csv(os.path.join('data', 'lgbm_model_' + f'{acc*100:.1f}_acc'))
print(f"Accuracy: {acc:.3f}")
x_min, x_max = plot_data["morn"].min() - 0.02, plot_data["morn"].max() + 0.02
y_min, y_max = plot_data["aft"].min() - 0.02, plot_data["aft"].max() + 0.02
data_plot = px.scatter(
x=plot_data["morn"],
y=plot_data["aft"],
color=np.where(lgbm_results.prediction.round() == 1, "unemployed", "employed"),
color_discrete_map={
"unemployed": "#009FFD",
"employed": "#FFA400",
},
symbol=np.where(
lgbm_results.prediction.round() == np.array(lgbm_results.label),
"Correct",
"Incorrect",
),
symbol_map={"Correct": "circle", "Incorrect": "x"},
labels={"color": "Label", "symbol": "Model Prediction"},
).update_traces(marker=dict(size=10, line_width=2, line_color="black"))
data_plot.update_layout(
# title=f'Employment Predictions from Weekday Activities Started During Work Hours<br>Model Accuracy: {acc*100:.0f}%',
title=f"LightGBM Model Accuracy: {acc*100:.0f}%",
xaxis=dict(
title="Activities During Morning Work Hours",
tickformat=",.0%",
range=[x_min, x_max],
),
yaxis=dict(
title="Activities During Afternoon Work Hours",
tickformat=",.0%",
range=[y_min, y_max],
),
title_x=0.5,
title_font_size=15,
font_size=10,
margin=dict(l=20, r=20, t=50, b=20),
legend=dict(
# orientation="h",
yanchor="top",
y=1.0,
xanchor="right",
x=1,
),
)
# ensure both train and plot data are matched to prevent breaking of the plotly figure.
if len(train_data.iloc[0]) == len(plot_data.iloc[0]):
try:
lgbm_heatmap = load_lgbm_heatmap(data_set)
except IndexError:
            # no existing heatmap, create classification confidence heat map
print("No Heatmap values for " + data_set + " found, generating...")
h = 0.005
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
y_ = np.arange(y_min, y_max, h)
Z = lgbm_model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# save off heatmap values
heat_values = []
for x in range(0, len(xx[0])):
for y in range(0, len(y_)):
heat_values.append([xx[0][x], y_[y], Z[y][x]])
lgbm_heatmap = pd.DataFrame(heat_values, columns=["X", "Y", "Z"])
lgbm_heatmap.to_csv(
os.path.join("data", "processed", data_set + "_lgbm_model_heatmap.csv")
)
data_plot.add_trace(
go.Heatmap(
x=lgbm_heatmap.X,
y=lgbm_heatmap.Y,
z=lgbm_heatmap.Z,
colorscale=[
[0, "#FFA400"],
[0.4, "#FFA400"],
[0.6, "#009FFD"],
[1, "#009FFD"],
], # [[0,'#FFA400'], [1,'#009FFD']], #[0.45, '#009FFD'], [0.55,'#FFA400'],
opacity=0.3,
showscale=False,
)
)
return data_plot
def plot_logreg_model_predictions(data, labels, data_set):
"""Generate the plotly express plot of the logisitc regression model results including prediction
confidence mask values.
Args:
data (DataFrame): dataset including months and working/not working info
labels (list): classification labels to be displayed in the plots legend
data_set (string): which dataset to pull for display.
Returns:
Plotly Express Figure: The final formatted plot ready for display.
"""
# first try to load already processed data, if not use the models to generate
try:
logreg_results = load_logreg_model_results(data_set)
except FileNotFoundError:
print("No logreg " + data_set + " Results Found, generating")
logreg_model = load_logreg_model()
prediction = logreg_model.predict(data)
logreg_results = pd.DataFrame({"label": labels, "prediction": prediction})
acc = accuracy_score(logreg_results.label, logreg_results.prediction)
x_min, x_max = data["morn"].min() - 0.02, data["morn"].max() + 0.02
y_min, y_max = data["aft"].min() - 0.02, data["aft"].max() + 0.02
print(f"Accuracy: {acc:.3f}")
# results = pd.DataFrame({'label': labels, 'prediction': prediction}, columns = ['label','prediction']).to_csv(os.path.join('data', 'logreg_model_' + f'{acc*100:.1f}_acc'))
data_plot = px.scatter(
x=data["morn"],
y=data["aft"],
color=np.where(logreg_results.label == 1, "unemployed", "employed"),
color_discrete_map={
"unemployed": "#009FFD",
"employed": "#FFA400",
},
symbol=np.where(
logreg_results.prediction == np.array(logreg_results.label),
"Correct",
"Incorrect",
),
symbol_map={"Correct": "circle", "Incorrect": "x"},
labels={"color": "Label", "symbol": "Model Prediction"},
).update_traces(marker=dict(size=10, line_width=2, line_color="black"))
data_plot.update_layout(
# title=f'Logistic Regression Model<br>Model Accuracy: {acc*100:.0f}%',
title=f"Logistic Regression Model Accuracy: {acc*100:.0f}%",
xaxis=dict(
title="Activities During Morning Work Hours",
tickformat=",.0%",
range=[x_min, x_max],
),
yaxis=dict(
title="Activities During Afternoon Work Hours",
tickformat=",.0%",
range=[y_min, y_max],
),
title_x=0.5,
title_font_size=15,
font_size=10,
margin=dict(l=20, r=20, t=50, b=20),
legend=dict(
# orientation="h",
yanchor="top",
y=1.0,
xanchor="right",
x=1,
),
)
# first try to load already processed data, if not use the models to generate
try:
logreg_heatmap = load_logreg_heatmap(data_set)
except IndexError:
        # no existing heatmap, create classification confidence heat map
logreg_model = load_logreg_model()
print("No Heatmap values for " + data_set + " found, generating...")
h = 0.005
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
y_ = np.arange(y_min, y_max, h)
Z = logreg_model.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 0]
Z = Z.reshape(xx.shape)
# save off heatmap values
heat_values = []
for x in range(0, len(xx[0])):
for y in range(0, len(y_)):
heat_values.append([xx[0][x], y_[y], Z[y][x]])
logreg_heatmap = pd.DataFrame(heat_values, columns=["X", "Y", "Z"])
logreg_heatmap.to_csv(
os.path.join("data", "processed", data_set + "_logreg_model_heatmap.csv")
)
data_plot.add_trace(
go.Heatmap(
x=logreg_heatmap.X,
y=logreg_heatmap.Y,
z=logreg_heatmap.Z,
colorscale=[
[0, "#009FFD"],
[0.45, "#009FFD"],
[0.55, "#FFA400"],
[1, "#FFA400"],
], # [[0,'#FFA400'], [1,'#009FFD']], #[0.45, '#009FFD'], [0.55,'#FFA400'],
opacity=0.3,
showscale=False,
)
)
return data_plot
def plot_training_data(train_data, train_labels, test_data, test_labels):
combined_data = pd.concat([train_data, test_data])
combined_labels = pd.concat([train_labels, test_labels])
x_min, x_max = (
combined_data["morn"].min() - 0.02,
combined_data["morn"].max() + 0.02,
)
y_min, y_max = combined_data["aft"].min() - 0.02, combined_data["aft"].max() + 0.02
data_plot = px.scatter(
x=combined_data["morn"],
y=combined_data["aft"],
color=combined_labels,
color_discrete_map={"unemployed": "#009FFD", "employed": "#FFA400"},
labels={
"color": "Label",
},
).update_traces(marker=dict(size=10, line_width=2, line_color="black"))
data_plot.update_layout(
title=f"Data Set of Employed & Unemployed Months",
xaxis=dict(
title="Activities During Morning Work Hours",
tickformat=",.0%",
range=[x_min, x_max],
),
yaxis=dict(
title="Activities During Afternoon Work Hours",
tickformat=",.0%",
range=[y_min, y_max],
),
title_x=0.5,
title_font_size=17,
font_size=12,
margin=dict(l=20, r=20, t=50, b=20),
legend=dict(
# orientation="h",
yanchor="top",
y=1.0,
xanchor="right",
x=1,
),
)
return data_plot
def plot_weekly_start_times(activity_df, start_year, end_year, work_hours, title_descr):
start_time_fig = go.Figure()
work_perc = 0
# Load or generate the data for activity start times across an entire year
try:
summary_week_start_times = load_week_start_times_data()
except FileNotFoundError:
print("No Yearly Week Summary Data Found, generating it now...")
summary_week_start_times = {}
for year in range(
        pd.DatetimeIndex(activity_df.start_date_local)
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# You can run this test by first running `nPython.exe` (with mono or otherwise):
# $ ./nPython.exe ReportChartTests.py
import numpy as np
import pandas as pd
from datetime import datetime
from ReportCharts import ReportCharts
charts = ReportCharts()
## Test GetReturnsPerTrade
backtest = list(np.random.normal(0, 1, 1000))
live = list(np.random.normal(0.5, 1, 400))
result = charts.GetReturnsPerTrade([], [])
result = charts.GetReturnsPerTrade(backtest, [])
result = charts.GetReturnsPerTrade(backtest, live)
## Test GetCumulativeReturnsPlot
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2012-10-01T00:00:00', periods=365)]
strategy = np.linspace(1, 25, 365)
benchmark = np.linspace(2, 26, 365)
backtest = [time, strategy, time, benchmark]
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2013-10-01T00:00:00', periods=50)]
strategy = np.linspace(25, 29, 50)
benchmark = np.linspace(26, 30, 50)
live = [time, strategy, time, benchmark]
result = charts.GetCumulativeReturns()
result = charts.GetCumulativeReturns(backtest)
result = charts.GetCumulativeReturns(backtest, live)
## Test GetDailyReturnsPlot
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2012-10-01T00:00:00', periods=365)]
data = list(np.random.normal(0, 1, 365))
backtest = [time, data]
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2013-10-01T00:00:00', periods=120)]
data = list(np.random.normal(0.5, 1.5, 120))
live = [time, data]
empty = [[], []]
result = charts.GetDailyReturns(empty, empty)
result = charts.GetDailyReturns(backtest, empty)
result = charts.GetDailyReturns(backtest, live)
## Test GetMonthlyReturnsPlot
backtest = {'2016': [0.5, 0.7, 0.2, 0.23, 1.3, 1.45, 1.67, -2.3, -0.5, 1.23, 1.23, -3.5],
'2017': [0.5, 0.7, 0.2, 0.23, 1.3, 1.45, 1.67, -2.3, -0.5, 1.23, 1.23, -3.5][::-1]}
live = {'2018': [0.5, 0.7, 0.2, 0.23, 1.3, 1.45, 1.67, -2.3, -0.5, 1.23, 1.23, -3.5],
'2019': [1.5, 2.7, -3.2, -0.23, 4.3, -2.45, -1.67, 2.3, np.nan, np.nan, np.nan, np.nan]}
result = charts.GetMonthlyReturns({}, {})
result = charts.GetMonthlyReturns(backtest, pd.DataFrame())
result = charts.GetMonthlyReturns(backtest, live)
## Test GetAnnualReturnsPlot
time = ['2012', '2013', '2014', '2015', '2016']
strategy = list(np.random.normal(0, 1, 5))
backtest = [time, strategy]
time = ['2017', '2018']
strategy = list(np.random.normal(0.5, 1.5, 2))
live = [time, strategy]
empty = [[], []]
result = charts.GetAnnualReturns()
result = charts.GetAnnualReturns(backtest)
result = charts.GetAnnualReturns(backtest, live)
## Test GetDrawdownPlot
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2012-10-01', periods=365)]
data = list(np.random.uniform(-5, 0, 365))
backtest = [time, data]
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2013-10-01', periods=100)]
data = list(np.random.uniform(-5, 0, 100))
live = [time, data]
worst = [{'Begin': datetime(2012, 10, 1), 'End': datetime(2012, 10, 11)},
{'Begin': datetime(2012, 12, 1), 'End': datetime(2012, 12, 11)},
{'Begin': datetime(2013, 3, 1), 'End': datetime(2013, 3, 11)},
{'Begin': datetime(2013, 4, 1), 'End': datetime(2013, 4, 1)},
{'Begin': datetime(2013, 6, 1), 'End': datetime(2013, 6, 11)}]
empty = [[], []]
result = charts.GetDrawdown(empty, empty, {})
result = charts.GetDrawdown(backtest, empty, worst)
result = charts.GetDrawdown(backtest, live, worst)
## Test GetCrisisPlots (backtest only)
equity = list(np.linspace(1, 25, 365))
benchmark = list(np.linspace(2, 26, 365))
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2012-10-01 00:00:00', periods=365)]
backtest = [time, equity, benchmark]
empty = [[], [], []]
result = charts.GetCrisisEventsPlots(empty, 'empty_crisis')
result = charts.GetCrisisEventsPlots(backtest, 'dummy_crisis')
## Test GetRollingBetaPlot
empty = [[], [], [], []]
twelve = [np.nan for x in range(180)] + list(np.random.uniform(-1, 1, 185))
six = list(np.random.uniform(-1, 1, 365))
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2012-10-01 00:00:00', periods=365)]
backtest = [time, six, twelve]
result = charts.GetRollingBeta([time, six, time, twelve], empty)
result = charts.GetRollingBeta([time, six, [], []], empty)
result = charts.GetRollingBeta(empty, empty)
twelve = [np.nan for x in range(180)] + list(np.random.uniform(-1, 1, 185))
six = list(np.random.uniform(-1, 1, 365))
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2013-10-01 00:00:00', periods=365)]
live = [time, six, time, twelve]
result = charts.GetRollingBeta(live)
## Test GetRollingSharpeRatioPlot
data = list(np.random.uniform(1, 3, 365 * 2))
time = [pd.Timestamp(x)
from flask import request, url_for
from flask_api import FlaskAPI, status, exceptions
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from surprise import NMF
from surprise import KNNWithMeans
from surprise import accuracy
from surprise.model_selection import KFold
from surprise import SVD, SVDpp
from surprise import Dataset
from surprise.model_selection import cross_validate, train_test_split
from surprise import Reader
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from surprise import KNNBasic
import matplotlib.pyplot as plt
from sklearn.neighbors import NearestNeighbors
import os
import json
from flask import Flask
from flask_cors import CORS
app = FlaskAPI(__name__)
CORS(app)
notes = {
0: 'do the shopping',
1: 'build the codez',
2: 'paint the door',
}
algo = SVD()
def note_repr(key):
return {
'url': request.host_url.rstrip('/') + url_for('notes_detail', key=key),
'text': notes[key]
}
@app.route("/", methods=['GET', 'POST'])
def notes_list():
"""
List or create notes.
"""
if request.method == 'POST':
note = str(request.data.get('text', ''))
idx = max(notes.keys()) + 1
notes[idx] = note
return note_repr(idx), status.HTTP_201_CREATED
# request.method == 'GET'
return [note_repr(idx) for idx in sorted(notes.keys())]
@app.route("/<int:key>/", methods=['GET', 'PUT', 'DELETE'])
def notes_detail(key):
"""
Retrieve, update or delete note instances.
"""
if request.method == 'PUT':
note = str(request.data.get('text', ''))
notes[key] = note
return note_repr(key)
elif request.method == 'DELETE':
notes.pop(key, None)
return '', status.HTTP_204_NO_CONTENT
# request.method == 'GET'
if key not in notes:
raise exceptions.NotFound()
return note_repr(key)
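# Hedged usage sketch of the two note endpoints defined above (paths come from the
# registered routes; the payload key 'text' matches request.data.get('text', '')):
#   GET    /          -> list all notes
#   POST   /          -> create a note from {"text": "..."}
#   GET    /<key>/    -> retrieve a single note
#   PUT    /<key>/    -> update the note text
#   DELETE /<key>/    -> delete the note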
@app.route("/getFutureSubjects/", methods=['GET', 'POST'])
def getFutureSubjects():
data = pd.read_csv('hack.csv')
l_data = request.json
for l in l_data:
learner_id = l['learnerID']
learner_data = data[data["LearnerID"] == learner_id].head(1)
learner_schoolid = learner_data['Schoolid'].values[0]
learner_curr_year = learner_data['CurretYearName'].values[0]
data['year'] = data['MasterYearName'].str.slice(5)
data['year'] = pd.to_numeric(data['year'], errors='coerce').fillna(0).astype(np.int64)
data['current_year'] = data['CurretYearName'].str.slice(5)
data['current_year'] = pd.to_numeric(data['current_year'], errors='coerce').fillna(0).astype(np.int64)
current_yr = data[data["LearnerID"] == learner_id].sort_values(['year'], ascending=[0]).iloc[[0],:]["year"].values[0] + 1
past_data = data[(data['year'] <= current_yr) & (data['year'] != 0) & (data['current_year'] > current_yr) & (data['LearnerID'] != learner_id)]
df = past_data.pivot_table(index = ['LearnerID'], values = 'Points.1', columns = 'MasterSubjectName').fillna(0).reset_index()
learner_data = data[data["LearnerID"] == learner_id]
learner_pivot = learner_data.pivot_table(index = ['LearnerID'], values = 'Points.1', columns = 'MasterSubjectName').fillna(0).reset_index()
for l in l_data:
        # overwrite the learner's pivot entry with the marks submitted in the request
        print('Applying submitted mark:', l['subject'], l['marks'])
learner_pivot[l['subject']] = l['marks']
final_pivot = pd.concat([learner_pivot, df], ignore_index=False, sort=True).fillna(0)
final_pivot[final_pivot["LearnerID"] == learner_id]
model_knn = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=5, n_jobs=-1).fit(df)
values, indexes = model_knn.kneighbors(final_pivot[final_pivot["LearnerID"] == learner_id].values.reshape(1,-1))
similar_leaners = dict(zip(indexes[0], values[0]))
final_learner_pivot = final_pivot[final_pivot["LearnerID"] == learner_id]
sorted_leaners = [(k, similar_leaners[k]) for k in sorted(similar_leaners, key=similar_leaners.get, reverse=True)]
index_arr = []
values_arr = []
for l in sorted_leaners:
index_arr.append(l[0])
values_arr.append(l[1])
similar_learners_df = df.loc[index_arr, :]
similar_learners_df['similarity'] = values_arr
final_learner_pivot['similarity'] = 1
similar_learners_without_self = similar_learners_df
similar_learners_df = pd.concat([similar_learners_df,final_learner_pivot], ignore_index=False, sort=False).fillna(0)
learners = data[(data['year'] == (current_yr + 2)) & (data['LearnerID'].isin(similar_learners_without_self['LearnerID'].values))]
#similar_learners_df
subjects = learners.MasterSubjectName.unique()
learners = learners[['LearnerID', 'Points.1', 'SchoolSubjectName']].rename(columns={"Points.1": "Marks"})
arr = []
# arr.append(learners.to_json(orient='records'))
# grid_obj = {
# "grid":str(learners.to_json(orient='records'))
# }
# arr.append(grid_obj)
#return similar_learners_df.to_json(orient='records')
for s in subjects:
id = str(learner_id) + '-' + str(learner_schoolid)
subject = learner_curr_year + '-' + s
obj = {
"learnerID": learner_id,
"subject": subject,
"marks": (algo.predict(id, subject)[3] * 10)
}
arr.append(obj)
return json.dumps(arr)
@app.route("/getSubjects/<int:learner_id>/", methods=['GET', 'PUT', 'DELETE'])
def getSubjects(learner_id):
data = pd.read_csv('hack.csv')
learner_data = data[data["LearnerID"] == learner_id].head(1)
learner_schoolid = learner_data['Schoolid'].values[0]
learner_curr_year = learner_data['CurretYearName'].values[0]
data['year'] = data['MasterYearName'].str.slice(5)
data['year'] = pd.to_numeric(data['year'], errors='coerce').fillna(0).astype(np.int64)
data['current_year'] = data['CurretYearName'].str.slice(5)
    data['current_year'] = pd.to_numeric(data['current_year'], errors='coerce')
"""
This script reads all the bootstrap performance result files, plots histograms, and calculates averages.
t-tests are performed to compute p-values, and confidence intervals are computed.
"""
import pandas as pd
import os
import matplotlib.pyplot as plt
import matplotlib
from scipy import stats
matplotlib.rcParams.update({'font.size': 8})
# specify folder locations
rnn_results_folder = "C:/Users/<NAME>/PycharmProjects/Tensorflow/mmps043_results_storm_bootstrap_rnn/"
lstm_results_folder = "C:/Users/<NAME>/PycharmProjects/Tensorflow/mmps043_results_storm_bootstrap_lstm/"
# extract forecast data for RNN
rnn_rmse_t1_list, rnn_rmse_t9_list, rnn_rmse_t18_list = [], [], []
rnn_nse_t1_list, rnn_nse_t9_list, rnn_nse_t18_list = [], [], []
rnn_mae_t1_list, rnn_mae_t9_list, rnn_mae_t18_list = [], [], []
for file in os.listdir(rnn_results_folder):
data = rnn_results_folder + file
if file.endswith("_RMSE.csv"):
# print(file)
rmse_df = pd.read_csv(data)
rmse_t1, rmse_t9, rmse_t18 = rmse_df[["0"]].iloc[0], rmse_df[["0"]].iloc[8], rmse_df[["0"]].iloc[17]
rnn_rmse_t1_list.append(rmse_t1[0])
rnn_rmse_t9_list.append(rmse_t9[0])
rnn_rmse_t18_list.append(rmse_t18[0])
if file.endswith("_NSE.csv"):
nse_df = pd.read_csv(data)
nse_t1, nse_t9, nse_t18 = nse_df[["0"]].iloc[0], nse_df[["0"]].iloc[8], nse_df[["0"]].iloc[17]
rnn_nse_t1_list.append(nse_t1[0])
rnn_nse_t9_list.append(nse_t9[0])
rnn_nse_t18_list.append(nse_t18[0])
if file.endswith("_MAE.csv"):
mae_df = pd.read_csv(data)
mae_t1, mae_t9, mae_t18 = mae_df[["0"]].iloc[0], mae_df[["0"]].iloc[8], mae_df[["0"]].iloc[17]
rnn_mae_t1_list.append(mae_t1[0])
rnn_mae_t9_list.append(mae_t9[0])
rnn_mae_t18_list.append(mae_t18[0])
# write extracted data to data frames
rnn_RMSE_df = pd.DataFrame([rnn_rmse_t1_list, rnn_rmse_t9_list, rnn_rmse_t18_list])
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
NbrOfNodes = 3
key200 = ' TIME: GANDRA STEP: 200.000 FRAME: 1.000'
#--------------------------------------------------------------------------
# File for gain parameter 0.05
#--------------------------------------------------------------------------
file_g01 = open('axisymm_k005.rsn', 'r')
gain01 = file_g01.readlines()
g01 = pd.Series(gain01)
g01 = g01.replace(r'\n','', regex=True)
g01 = g01.replace(r'\r\n','', regex=True)
g01 = g01.replace(r'\r','', regex=True)
index_Time_g01 = g01[g01.str.contains('TIME', case=False, regex=False)]
index_TimeValues_g01 = index_Time_g01.index.values
#--------------------------------------------------------------------------
G01 = {}
for idx in index_Time_g01.index.values:
index_start = idx + 1
index_end = index_start + NbrOfNodes
tmp_df = g01[index_start:index_end].str.strip()
tmp_df = tmp_df.str.split(' ',expand=True)
np.array(tmp_df.values, dtype=float)
G01[g01[idx]]=np.array(tmp_df.values, dtype=float)
Data_g01 = np.array([], dtype=np.int64)
Data_g01.shape = (-1, 7)
for key in sorted(G01.keys()):
Data_g01 = np.append(Data_g01,[G01[key][0,:]], axis=0)
Data_g01.sort(axis=0)
#--------------------------------------------------------------------------
# File for gain parameter 0.07
#--------------------------------------------------------------------------
file_g02 = open('axisymm_k007.rsn', 'r')
gain02 = file_g02.readlines()
g02 = pd.Series(gain02)
g02 = g02.replace(r'\n','', regex=True)
g02 = g02.replace(r'\r\n','', regex=True)
g02 = g02.replace(r'\r','', regex=True)
index_Time_g02 = g02[g02.str.contains('TIME', case=False, regex=False)]
index_TimeValues_g02 = index_Time_g02.index.values
#--------------------------------------------------------------------------
G02 = {}
for idx in index_Time_g02.index.values:
index_start = idx + 1
index_end = index_start + NbrOfNodes
tmp_df = g02[index_start:index_end].str.strip()
tmp_df = tmp_df.str.split(' ',expand=True)
np.array(tmp_df.values, dtype=float)
G02[g02[idx]]=np.array(tmp_df.values, dtype=float)
Data_g02 = np.array([], dtype=np.int64)
Data_g02.shape = (-1, 7)
for key in sorted(G02.keys()):
Data_g02 = np.append(Data_g02,[G02[key][0,:]], axis=0)
Data_g02.sort(axis=0)
#--------------------------------------------------------------------------
# File for gain parameter 0.09
#--------------------------------------------------------------------------
file_g03 = open('axisymm_k009.rsn', 'r')
gain03 = file_g03.readlines()
g03 = pd.Series(gain03)
g03 = g03.replace(r'\n','', regex=True)
g03 = g03.replace(r'\r\n','', regex=True)
g03 = g03.replace(r'\r','', regex=True)
index_Time_g03 = g03[g03.str.contains('TIME', case=False, regex=False)]
index_TimeValues_g03 = index_Time_g03.index.values
#--------------------------------------------------------------------------
G03 = {}
for idx in index_Time_g03.index.values:
index_start = idx + 1
index_end = index_start + NbrOfNodes
tmp_df = g03[index_start:index_end].str.strip()
tmp_df = tmp_df.str.split(' ',expand=True)
np.array(tmp_df.values, dtype=float)
G03[g03[idx]]=np.array(tmp_df.values, dtype=float)
Data_g03 = np.array([], dtype=np.int64)
Data_g03.shape = (-1, 7)
for key in sorted(G03.keys()):
Data_g03 = np.append(Data_g03,[G03[key][0,:]], axis=0)
Data_g03.sort(axis=0)
#--------------------------------------------------------------------------
# File for gain parameter 0.11
#--------------------------------------------------------------------------
file_g04 = open('axisymm_k011.rsn', 'r')
gain04 = file_g04.readlines()
g04 = pd.Series(gain04)
g04 = g04.replace(r'\n','', regex=True)
g04 = g04.replace(r'\r\n','', regex=True)
g04 = g04.replace(r'\r','', regex=True)
index_Time_g04 = g04[g04.str.contains('TIME', case=False, regex=False)]
index_TimeValues_g04 = index_Time_g04.index.values
#--------------------------------------------------------------------------
G04 = {}
for idx in index_Time_g04.index.values:
index_start = idx + 1
index_end = index_start + NbrOfNodes
tmp_df = g04[index_start:index_end].str.strip()
tmp_df = tmp_df.str.split(' ',expand=True)
np.array(tmp_df.values, dtype=float)
G04[g04[idx]]=np.array(tmp_df.values, dtype=float)
Data_g04 = np.array([], dtype=np.int64)
Data_g04.shape = (-1, 7)
for key in sorted(G04.keys()):
Data_g04 = np.append(Data_g04,[G04[key][0,:]], axis=0)
Data_g04.sort(axis=0)
#--------------------------------------------------------------------------
# File for gain parameter 0.13
#--------------------------------------------------------------------------
file_g05 = open('axisymm_k013.rsn', 'r')
gain05 = file_g05.readlines()
g05 = pd.Series(gain05)
g05 = g05.replace(r'\n','', regex=True)
g05 = g05.replace(r'\r\n','', regex=True)
g05 = g05.replace(r'\r','', regex=True)
index_Time_g05 = g05[g05.str.contains('TIME', case=False, regex=False)]
index_TimeValues_g05 = index_Time_g05.index.values
#--------------------------------------------------------------------------
G05 = {}
for idx in index_Time_g05.index.values:
index_start = idx + 1
index_end = index_start + NbrOfNodes
tmp_df = g05[index_start:index_end].str.strip()
tmp_df = tmp_df.str.split(' ',expand=True)
np.array(tmp_df.values, dtype=float)
G05[g05[idx]]=np.array(tmp_df.values, dtype=float)
Data_g05 = np.array([], dtype=np.int64)
Data_g05.shape = (-1, 7)
for key in sorted(G05.keys()):
Data_g05 = np.append(Data_g05,[G05[key][0,:]], axis=0)
Data_g05.sort(axis=0)
#--------------------------------------------------------------------------
# File for gain parameter 0.15
#--------------------------------------------------------------------------
file_g06 = open('axisymm_k015.rsn', 'r')
gain06 = file_g06.readlines()
g06 = pd.Series(gain06)
g06 = g06.replace(r'\n','', regex=True)
g06 = g06.replace(r'\r\n','', regex=True)
g06 = g06.replace(r'\r','', regex=True)
index_Time_g06 = g06[g06.str.contains('TIME', case=False, regex=False)]
index_TimeValues_g06 = index_Time_g06.index.values
#--------------------------------------------------------------------------
G06 = {}
for idx in index_Time_g06.index.values:
index_start = idx + 1
index_end = index_start + NbrOfNodes
tmp_df = g06[index_start:index_end].str.strip()
tmp_df = tmp_df.str.split(' ',expand=True)
np.array(tmp_df.values, dtype=float)
G06[g06[idx]]=np.array(tmp_df.values, dtype=float)
Data_g06 = np.array([], dtype=np.int64)
Data_g06.shape = (-1, 7)
for key in sorted(G06.keys()):
Data_g06 = np.append(Data_g06,[G06[key][0,:]], axis=0)
Data_g06.sort(axis=0)
#--------------------------------------------------------------------------
# File for gain parameter 0.05
#--------------------------------------------------------------------------
file_g07 = open('Braeu2017_005.png.dat', 'r')
gain07 = file_g07.readlines()
g07 = pd.Series(gain07)
g07 = g07.replace(r'\n','', regex=True)
g07 = g07.replace(r'\r\n','', regex=True)
g07 = g07.replace(r'\r','', regex=True)
#--------------------------------------------------------------------------
for line in g07:
tmp_df = g07.str.strip()
tmp_df = tmp_df.str.split(' ',expand=True)
np.array(tmp_df.values, dtype=float)
Data_g07 = np.array(tmp_df.values, dtype=float)
#--------------------------------------------------------------------------
# File for gain parameter 0.07
#--------------------------------------------------------------------------
file_g08 = open('Braeu2017_007.png.dat', 'r')
gain08 = file_g08.readlines()
g08 = pd.Series(gain08)
g08 = g08.replace(r'\n','', regex=True)
g08 = g08.replace(r'\r\n','', regex=True)
g08 = g08.replace(r'\r','', regex=True)
#--------------------------------------------------------------------------
for line in g08:
tmp_df = g08.str.strip()
tmp_df = tmp_df.str.split(' ',expand=True)
np.array(tmp_df.values, dtype=float)
Data_g08 = np.array(tmp_df.values, dtype=float)
#--------------------------------------------------------------------------
# File for gain parameter 0.09
#--------------------------------------------------------------------------
file_g09 = open('Braeu2017_009.png.dat', 'r')
gain09 = file_g09.readlines()
g09 = pd.Series(gain09)
g09 = g09.replace(r'\n','', regex=True)
g09 = g09.replace(r'\r\n','', regex=True)
g09 = g09.replace(r'\r','', regex=True)
#--------------------------------------------------------------------------
for line in g09:
tmp_df = g09.str.strip()
tmp_df = tmp_df.str.split(' ',expand=True)
np.array(tmp_df.values, dtype=float)
Data_g09 = np.array(tmp_df.values, dtype=float)
#--------------------------------------------------------------------------
# File for gain parameter 0.11
#--------------------------------------------------------------------------
file_g10 = open('Braeu2017_011.png.dat', 'r')
gain10 = file_g10.readlines()
g10 = pd.Series(gain10)
#!/usr/bin/env python
# coding: utf-8
# ## Analyze A/B Test Results
#
#
# ## Table of Contents
# - [Introduction](#intro)
# - [Part I - Probability](#probability)
# - [Part II - A/B Test](#ab_test)
# - [Part III - Regression](#regression)
#
#
# <a id='intro'></a>
# ### Introduction
#
# A/B tests are very commonly performed by data analysts and data scientists. It is important that you get some practice working with the difficulties of these.
#
# For this project, I will be working to understand the results of an A/B test run by an e-commerce website. My goal is to work through this notebook to help the company understand if they should implement the new page, keep the old page, or perhaps run the experiment longer to make their decision.
#
# ### *Step-by-step solution*
#
# <a id='probability'></a>
# #### Part I - Probability
#
# To get started, let's import our libraries.
# In[1]:
import pandas as pd
import numpy as np
import random
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
random.seed(42)
# `1.` Now, read in the `ab_data.csv` data. Store it in `df`.
#
# a. Read in the dataset and take a look at the top few rows here:
# In[2]:
#read csv file and show firsts rows
df=pd.read_csv('ab_data.csv')
df.head()
# b. Use the cell below to find the number of rows in the dataset.
# In[3]:
df.shape[0]
# c. The number of unique users in the dataset.
# In[4]:
df.user_id.nunique()
# d. The proportion of users converted.
# In[5]:
df.converted.value_counts() #checking total number of converted users
# In[6]:
# proportion of users converted
df.converted.sum()/df.shape[0]
# e. The number of times the `new_page` and `treatment` don't match.
# In[7]:
len(df[(df['group']=="treatment")!=(df['landing_page']=="new_page")])
# f. Do any of the rows have missing values?
# In[8]:
df.info()
# **None of the rows have missing values**
# `2.` For the rows where **treatment** does not match with **new_page** or **control** does not match with **old_page**, we cannot be sure if this row truly received the new or old page.
#
# a. Now use the answer to the quiz to create a new dataset that meets the specifications from the quiz. Store your new dataframe in **df2**.
# In[9]:
# Create a new dataframe without the rows in which treatment does not match with new_page
# or control does not match with old_page
df2 = df[(df['group']=="treatment")==(df['landing_page']=="new_page")]
df2.head()
# In[10]:
# Double Check all of the correct rows were removed - this should be 0
df2[((df2['group'] == 'treatment') == (df2['landing_page'] == 'new_page')) == False].shape[0]
# In[11]:
#Get information about the new dataframe
df2.info()
# `3.` Use **df2** to answer questions:
# a. How many unique **user_id**s are in **df2**?
# In[12]:
df2.user_id.nunique()
# b. There is one **user_id** repeated in **df2**. What is it?
# In[13]:
# Print the duplicated user_id
duplicated_id=df2.loc[df2.user_id.duplicated(), 'user_id']
duplicated_id
# The user_id repeated is: 773192
# c. What is the row information for the repeat **user_id**?
# In[14]:
# Show the duplicated user_id information
df2[df2.duplicated(['user_id'], keep=False)]
# d. Remove **one** of the rows with a duplicate **user_id**, but keep your dataframe as **df2**.
# In[15]:
# Delete duplicated id_user
df2 = df2.drop_duplicates(['user_id'], keep='first')
# In[16]:
df2.user_id.nunique()
# In[17]:
df2.info()
# `4.` Use **df2** to answer the questions:
#
# a. What is the probability of an individual converting regardless of the page they receive?
# In[18]:
df2.converted.mean()
# b. Given that an individual was in the `control` group, what is the probability they converted?
# In[19]:
df2[df2['group']=='control']['converted'].mean()
# c. Given that an individual was in the `treatment` group, what is the probability they converted?
# In[20]:
df2[df2['group']=='treatment']['converted'].mean()
# d. What is the probability that an individual received the new page?
# In[21]:
len(df2[df2['landing_page']=='new_page']) / df2.shape[0]
# e. Consider the results from a. through d. above, and explain below whether you think there is sufficient evidence to say that the new treatment page leads to more conversions.
# By simple observations of our data and using probabilities, we came to obtain these results:
#
# 1. The **conversion probability** for a user who sees the old page (**control group**) is **12.04%**
# 2. The **conversion probability** for a user who sees the new page (**treatment group**) is **11.88%**
# 3. 50% of total users see the new page and 50% the old page, so **the sample is not unbalanced**.
#
# ### Therefore, based solely on the observed data, we conclude that the conversion rate is slightly higher with the old page.
# <a id='ab_test'></a>
# ### Part II - A/B Test
#
# Because of the time stamp associated with each event, I could technically run a hypothesis test continuously as each observation was observed.
#
# However, then the hard question is do you stop as soon as one page is considered significantly better than another or does it need to happen consistently for a certain amount of time? How long do you run to render a decision that neither page is better than another?
#
#
# `1.` For now, I will consider I need to make the decision just based on all the data provided. If I assume that the old page is better unless the new page proves to be definitely better at a Type I error rate of 5%, what should the null and alternative hypotheses be?
# $$H_0: p_{old} - p_{new} \geq 0$$
# $$H_1: p_{old} - p_{new} < 0$$
# `2.` I will now assume under the null hypothesis, $p_{new}$ and $p_{old}$ both have "true" success rates equal to the **converted** success rate regardless of page - that is $p_{new}$ and $p_{old}$ are equal. Furthermore, I also assume they are equal to the **converted** rate in **ab_data.csv** regardless of the page. <br><br>
#
# By using a sample size for each page equal to the ones in **ab_data.csv**. <br><br>
#
# I perform the sampling distribution for the difference in **converted** between the two pages over 10,000 iterations of calculating an estimate from the null. <br><br>
# a. What is the **conversion rate** for $p_{new}$ under the null?
# In[22]:
p_new=df2.converted.mean()
p_new
# b. What is the **conversion rate** for $p_{old}$ under the null? <br><br>
# In[23]:
p_old=df2.converted.mean()
p_old
# In[24]:
p_new-p_old
# c. What is $n_{new}$, the number of individuals in the treatment group?
# In[25]:
n_new=len(df2.query("group == 'treatment'"))
n_new
# d. What is $n_{old}$, the number of individuals in the control group?
# In[26]:
n_old=len(df2.query("group == 'control'"))
n_old
# e. Simulate $n_{new}$ transactions with a conversion rate of $p_{new}$ under the null. Store these $n_{new}$ 1's and 0's in **new_page_converted**.
# In[27]:
# simulation of n_new transactions with a conversion rate of p_new under the null.
new_page_converted = np.random.choice([0, 1], size=n_new, p=[1-p_new, p_new])  # P(converted=1) = p_new
# f. Simulate $n_{old}$ transactions with a conversion rate of $p_{old}$ under the null. Store these $n_{old}$ 1's and 0's in **old_page_converted**.
# In[28]:
# simulation of n_old transactions with a conversion rate of p_old under the null.
old_page_converted = np.random.choice([0, 1], size=n_old, p=[1-p_old, p_old])  # P(converted=1) = p_old
# g. Find $p_{new}$ - $p_{old}$ for your simulated values from part (e) and (f).
# In[29]:
# Compute the difference observed between the simulation of conversion for both new and old pages
obs_diff= new_page_converted.mean() - old_page_converted.mean()
obs_diff
# h. Create 10,000 $p_{new}$ - $p_{old}$ values using the same simulation process you used in parts (a) through (g) above. Store all 10,000 values in a NumPy array called **p_diffs**.
# In[30]:
# Create a sampling distribution of the difference in proportions with bootstrapping
p_diffs = []
for _ in range(10000):
new_page_converted = np.random.choice([1, 0],n_new,replace = True,p = [p_new, 1-p_new])
old_page_converted = np.random.choice([1, 0],n_old,replace = True,p = [p_old, 1-p_old])
p_new2 = new_page_converted.mean()
p_old2 = old_page_converted.mean()
p_diffs.append(p_new2-p_old2)
# i. Plot a histogram of the **p_diffs**. Does this plot look like what you expected? Use the matching problem in the classroom to assure you fully understand what was computed here.
# In[31]:
plt.hist(p_diffs);
plt.ylabel('Number of Simulations')
plt.xlabel('Difference in Probability (p_new - p_old)')
plt.title('10,000 simulations Plot');
# In[32]:
plt.hist(p_diffs);
plt.ylabel('Number of Simulations')
plt.xlabel('Difference in Probability (p_new - p_old)')
plt.title('10,000 simulations Plot');
plt.axvline(x=obs_diff.mean(), color = 'red'); # where our obs_diff mean falls on null dist
# j. What proportion of the **p_diffs** are greater than the actual difference observed in **ab_data.csv**?
# In[33]:
act_obs_diffs = df2[df2['group']=='treatment']['converted'].mean() - df2[df2['group']=='control']['converted'].mean()
print(act_obs_diffs)
(np.array(p_diffs) > act_obs_diffs).mean()  # p_diffs is a Python list, so convert before the elementwise comparison
# k. Explanation of what I just computed in part **j.** answering the questions: What is this value called in scientific studies? What does this value mean in terms of whether or not there is a difference between the new and old pages?
# The value computed in part j., the proportion of `p_diffs` greater than the actual observed difference, is known as the **p-value**.
#
# The formal definition of a p-value is the **probability of observing your statistic (or one more extreme in favor of the alternative) if the null hypothesis is true.**
#
# When the **p-value is large, we have evidence that our statistic was likely to come from the null hypothesis**, but how do we know if our p-value is large enough? By comparing our p-value to our type I error threshold (α), we can make our decision about which hypothesis we will choose as follows:
#
# $$pval \leq \alpha \rightarrow \text{Reject } H_0$$
# $$pval > \alpha \rightarrow \text{Fail to reject } H_0$$
#
# - As we can see our p-value is 0.9072.
# - The alpha data (Type I error rate), has been given in the problem statement: 5% (0.05)
#
# $$ pval = 0.9072 > 0.05 = \alpha$$
#
# Therefore, we fail to reject the null hypothesis. We do not have sufficient evidence to suggest the new page converted rate is better than the old page converted rate.
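# The same decision rule can also be written directly in code. A minimal sketch (using `p_diffs` and `act_obs_diffs` from the cells above; `alpha` is the 5% Type I error threshold stated in the problem):
# In[ ]:
alpha = 0.05
p_value_sim = (np.array(p_diffs) > act_obs_diffs).mean()
'Reject H0' if p_value_sim <= alpha else 'Fail to reject H0'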
# l. We could also use a built-in to achieve similar results. Though using the built-in might be easier to code, the above portions are a walkthrough of the ideas that are critical to correctly thinking about statistical significance. Below, I calculate the number of conversions for each page, as well as the number of individuals who received each page. Let `n_old` and `n_new` refer the the number of rows associated with the old page and new pages, respectively.
# In[34]:
import statsmodels.api as sm
convert_old = len(df2.query('landing_page=="old_page" and converted==1'))
convert_new = len(df2.query('landing_page=="new_page" and converted==1'))
n_old = len(df2.query('landing_page=="old_page"'))
n_new = len(df2.query('landing_page=="new_page"'))
# m. Use `stats.proportions_ztest` to compute your test statistic and p-value. [Here](https://docs.w3cub.com/statsmodels/generated/statsmodels.stats.proportion.proportions_ztest/) is a helpful link on using the built in.
# In[35]:
# description: sm.stats.proportions_ztest(count, nobs, value=None, alternative='two-sided', prop_var=False) where:
# - count = the number of successes for each independent sample --> convert_old, convert_new
# - nobs = the number of trials or observations, with the same length as count --> n_old, n_new
# - alternative = smaller, means that the alternative hypothesis is prop < value
# Returns: A. zstat(z-score): test statistic for the z-test; B. p-value: p-value for the z-test
# Compute zstat and p_value
zstat, p_value = sm.stats.proportions_ztest([convert_old, convert_new], [n_old, n_new], alternative='smaller')
# Display zstat and p_value
print(zstat, p_value)
# n. What do the z-score and p-value computed in the previous question mean for the conversion rates of the old and new pages? Do they agree with the findings in parts **j.** and **k.**?
# In[36]:
from scipy.stats import norm
#p-value implied by the z-score: area to the left of zstat under the null
norm.cdf(zstat)
# In[37]:
#critical value of 95% confidence
norm.ppf(1-(0.05))
# From our results we obtain:
#
# $$ z\text{-}score = 1.310 < 1.645 = \text{critical value}$$
#
# The z-score does not fall in the rejection region, and the p-value (about 0.905) is well above α = 0.05.
#
# **We fail to reject the null hypothesis**, therefore, as we concluded in the questions j. and k., we do not have sufficient evidence to suggest the new page converted rate is better than the old page converted rate. --> **Same conclusion as in j. and k.**
# <a id='regression'></a>
# ### Part III - A regression approach
#
# `1.` In this final part, you will see that the result you achieved in the A/B test in Part II above can also be achieved by performing regression.<br><br>
#
# a. Since each row is either a conversion or no conversion, what type of regression should you be performing in this case?
# **Logistic Regression, because it predicts a probability between 0 and 1.**
# b. The goal is to use **statsmodels** to fit the regression model you specified in part **a.** to see if there is a significant difference in conversion based on which page a customer receives. However, you first need to create in df2 a column for the intercept, and create a dummy variable column for which page each user received. Add an **intercept** column, as well as an **ab_page** column, which is 1 when an individual receives the **treatment** and 0 if **control**.
# In[38]:
# Add an intercept column
df2['intercept'] = 1
# Create a dummy variable column
df2['ab_page'] = | pd.get_dummies(df2['group']) | pandas.get_dummies |
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import dash_table
import pandas as pd
import numpy as np
import plotly.express as px
from viz.app import app
# Data Management Section: import and massage
econ_data = | pd.read_csv('./viz/data/economic/results_summary_bycrop.csv') | pandas.read_csv |
#!/usr/bin/env python
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, ECG Sex Classification"
__credits__ = ["<NAME>"]
__license__ = "GPL"
__version__ = "1.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.impute import SimpleImputer
from src import train as trainer, feature_extraction as fe
def train_features(feature_path):
    """
    Preprocess and train features.

    Parameters
    ----------
    feature_path : str
        Path to the .csv feature file.

    Returns
    -------
    None
    """
print('Start Training...')
features = | pd.read_csv(feature_path, index_col=0) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
# Command line :
# python -m benchmark.S3D2.CALIB-R
import os
import logging
from config import SEED
from config import _ERROR
from config import _TRUTH
import pandas as pd
from utils.log import set_logger
from utils.log import flush
from utils.log import print_line
from utils.evaluation import evaluate_neural_net
from utils.evaluation import evaluate_config
from utils.evaluation import evaluate_regressor
from utils.model import get_model
from utils.model import get_optimizer
from utils.model import train_or_load_neural_net
from visual.misc import plot_params
from problem.synthetic3D import S3D2 as Generator
from problem.synthetic3D import S3D2Config as Config
from problem.synthetic3D import param_generator
from model.regressor import Regressor
# from archi.reducer import A3ML3 as ARCHI
from archi.reducer import A1AR8MR8L1 as ARCHI
# from archi.net import AR9R9 as ARCHI
from ..my_argparser import REG_parse_args
DATA_NAME = 'S3D2'
BENCHMARK_NAME = DATA_NAME+'-calib'
CALIB = "Calib_r"
CALIB_PARAM_NAME = "r"
class TrainGenerator:
def __init__(self, param_generator, data_generator):
self.param_generator = param_generator
self.data_generator = data_generator
def generate(self, n_samples):
n_samples = Config().N_TRAINING_SAMPLES if n_samples is None else n_samples
r, lam, mu = self.param_generator()
X, y, w = self.data_generator.generate(r, lam, mu, n_samples)
return X, r, w, None
def build_model(args, i_cv):
args.net = ARCHI(n_in=3, n_out=2, n_unit=args.n_unit)
args.optimizer = get_optimizer(args)
model = get_model(args, Regressor)
model.base_name = CALIB
model.set_info(DATA_NAME, BENCHMARK_NAME, 0)
return model
def main():
# BASIC SETUP
logger = set_logger()
args = REG_parse_args(main_description="Training launcher for Regressor on S3D2 benchmark")
logger.info(args)
flush(logger)
# Setup model
logger.info("Setup model")
model = build_model(args, 0)
os.makedirs(model.results_directory, exist_ok=True)
# Setup data
logger.info("Setup data")
config = Config()
config_table = evaluate_config(config)
config_table.to_csv(os.path.join(model.results_directory, 'config_table.csv'))
seed = SEED + 99999
train_generator = TrainGenerator(param_generator, Generator(seed))
valid_generator = Generator(seed+1)
test_generator = Generator(seed+2)
i_cv = 0
result_row = {'i_cv': i_cv}
# TRAINING / LOADING
train_or_load_neural_net(model, train_generator, retrain=args.retrain)
# CHECK TRAINING
result_row.update(evaluate_neural_net(model, prefix='valid'))
evaluate_regressor(model, prefix='valid')
print_line()
result_table = [run_iter(model, result_row, i, test_config, valid_generator, test_generator)
for i, test_config in enumerate(config.iter_test_config())]
result_table = | pd.DataFrame(result_table) | pandas.DataFrame |
from time import sleep
from old.src.core import Generator_Shui5
from old.src.Model import DataModel
import multiprocessing
import pandas as pd
url_que = multiprocessing.Queue()
res_list = []
def url_put():
for i in range(1, 602):
url_que.put('https://www.shui5.cn/article/NianDuCaiShuiFaGui/108_' + str(i))
for i in range(1, 962):
url_que.put('https://www.shui5.cn/article/DiFangCaiShuiFaGui/145_' + str(i))
def multi_pages(url_one):
    Page_ = Generator_Shui5.PageReaderForCent(url_one)
    return Page_.getTopic()


def multi_creeper():
    TP = multiprocessing.Pool(8)
    async_results = []
    while not url_que.empty():
        # Pass the callable and its argument; calling multi_pages(...) directly here
        # would run the request in the parent process and defeat the pool.
        async_results.append(TP.apply_async(multi_pages, (url_que.get(),)))
    TP.close()
    TP.join()
    # Worker processes cannot mutate the parent's res_list, so collect results here.
    for ar in async_results:
        res_list.extend(ar.get())
DataModel.IntoSqlite( | pd.DataFrame(res_list) | pandas.DataFrame |
# AUTOGENERATED! DO NOT EDIT! File to edit: 00_core.ipynb (unless otherwise specified).
__all__ = ['makeMixedDataFrame', 'getCrashes', 'is_numeric', 'drop_singletons', 'discretize']
# Cell
import pandas as pd
from pandas.api.types import is_numeric_dtype as isnum
#from matplotlib.pyplot import rcParams
# Cell
def makeMixedDataFrame():
'''Return a constant mixed-type dataframe [float, float, str, datetime]'''
return pd.DataFrame(
{'A': {0: 0.0, 1: 1.0, 2: 2.0, 3: 3.0, 4: 4.0},
'B': {0: 0.0, 1: 1.0, 2: 0.0, 3: 1.0, 4: 0.0},
'C': {0: 'foo1', 1: 'foo2', 2: 'foo3', 3: 'foo4', 4: 'foo5'},
'D': {0: pd.Timestamp('2009-01-01 00:00:00'),
1: pd.Timestamp('2009-01-02 00:00:00'),
2: pd.Timestamp('2009-01-05 00:00:00'),
3: pd.Timestamp('2009-01-06 00:00:00'),
4: pd.Timestamp('2009-01-07 00:00:00')}}
)
def getCrashes(dataset='car_crashes'):
try:
import seaborn as sns
return sns.load_dataset(dataset)
except ModuleNotFoundError:
return pd.read_csv(f'https://raw.githubusercontent.com/mwaskom/seaborn-data/master/{dataset}.csv')
# Cell
def is_numeric(col:str):
"""Returns True iff already numeric, or can be coerced.
Usage: df.apply(is_numeric)
Usage: is_numeric(df['colname'])
Returns Boolean series.
From:
https://stackoverflow.com/questions/54426845/how-to-check-if-a-pandas-dataframe-contains-only-numeric-column-wise
"""
return isnum(col) or | pd.to_numeric(col, errors='coerce') | pandas.to_numeric |
import pytz
import pytest
import dateutil
import warnings
import numpy as np
from datetime import timedelta
from itertools import product
import pandas as pd
import pandas._libs.tslib as tslib
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas.core.indexes.datetimes import cdate_range
from pandas import (DatetimeIndex, PeriodIndex, Series, Timestamp, Timedelta,
date_range, TimedeltaIndex, _np_version_under1p10, Index,
datetime, Float64Index, offsets, bdate_range)
from pandas.tseries.offsets import BMonthEnd, CDay, BDay
from pandas.tests.test_base import Ops
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setup_method(self, method):
super(TestDatetimeIndexOps, self).setup_method(method)
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: isinstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
pytest.raises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
pytest.raises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
assert idx1.is_monotonic
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
assert not idx2.is_monotonic
for idx in [idx1, idx2]:
assert idx.min() == Timestamp('2011-01-01', tz=tz)
assert idx.max() == Timestamp('2011-01-03', tz=tz)
assert idx.argmin() == 0
assert idx.argmax() == 2
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
assert pd.isna(getattr(obj, op)())
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
assert np.min(dr) == Timestamp('2016-01-15 00:00:00', freq='D')
assert np.max(dr) == Timestamp('2016-01-20 00:00:00', freq='D')
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, errmsg, np.min, dr, out=0)
tm.assert_raises_regex(ValueError, errmsg, np.max, dr, out=0)
assert np.argmin(dr) == 0
assert np.argmax(dr) == 5
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(
ValueError, errmsg, np.argmin, dr, out=0)
tm.assert_raises_regex(
ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
assert elt.round(freq='H') == expected_elt
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assert_raises_regex(ValueError, msg):
rng.round(freq='foo')
with tm.assert_raises_regex(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assert_raises_regex(ValueError, msg, rng.round, freq='M')
tm.assert_raises_regex(ValueError, msg, elt.round, freq='M')
# GH 14440 & 15578
index = pd.DatetimeIndex(['2016-10-17 12:00:00.0015'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.002000'], tz=tz)
tm.assert_index_equal(result, expected)
for freq in ['us', 'ns']:
tm.assert_index_equal(index, index.round(freq))
index = pd.DatetimeIndex(['2016-10-17 12:00:00.00149'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001000'], tz=tz)
tm.assert_index_equal(result, expected)
index = pd.DatetimeIndex(['2016-10-17 12:00:00.001501031'])
result = index.round('10ns')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001501030'])
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning():
ts = '2016-10-17 12:00:00.001501031'
pd.DatetimeIndex([ts]).round('1010ns')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
assert result.freq is None
assert len(result) == 5 * len(rng)
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
assert res.freq is None
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assert_raises_regex(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
        exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
                   "'2011-01-01 10:00:00+00:00', 'NaT'], "
                   "dtype='datetime64[ns, UTC]', freq=None)")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
assert result == expected
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
assert result == expected
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
assert result == expected
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
assert idx.resolution == expected
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with pytest.raises(TypeError):
dti + dti
with pytest.raises(TypeError):
dti_tz + dti_tz
with pytest.raises(TypeError):
dti_tz + dti
with pytest.raises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
dti_tz - dti
with pytest.raises(TypeError):
dti - dti_tz
with pytest.raises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with pytest.raises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
assert idx[0] in idx
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
tm.assert_index_equal(ordered, idx)
assert ordered.freq == idx.freq
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]),
check_dtype=False)
assert ordered.freq == idx.freq
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
tm.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
tm.assert_index_equal(ordered, expected)
assert ordered.freq is None
ordered = idx.sort_values(ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
assert ordered.freq is None
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
assert result == Timestamp('2011-01-01', tz=idx.tz)
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
tm.assert_index_equal(idx, result)
assert idx.freq == result.freq
idx_dup = idx.append(idx)
assert idx_dup.freq is None # freq is reset
result = idx_dup.drop_duplicates()
tm.assert_index_equal(idx, result)
assert result.freq is None
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
assert result == Timestamp('2011-01-01', tz=idx.tz)
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq is None
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq is None
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
assert result.freq == freq
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
            idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
                                    '2011-01-01 12:00'], name='xxx', tz=tz)
            tm.assert_index_equal(idx.shift(0, freq='H'), idx)
            exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
                                    '2011-01-01 15:00'], name='xxx', tz=tz)
            tm.assert_index_equal(idx.shift(3, freq='H'), exp)
            exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
                                    '2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
assert pd.DatetimeIndex._na_value is pd.NaT
assert pd.DatetimeIndex([])._na_value is pd.NaT
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
assert not idx.hasnans
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
assert idx.hasnans
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.asobject)
assert idx.asobject.equals(idx)
assert idx.asobject.equals(idx.asobject)
assert not idx.equals(list(idx))
assert not idx.equals(pd.Series(idx))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
assert not idx.equals(idx2)
assert not idx.equals(idx2.copy())
assert not idx.equals(idx2.asobject)
assert not idx.asobject.equals(idx2)
assert not idx.equals(list(idx2))
assert not idx.equals(pd.Series(idx2))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
assert not idx.equals(idx3)
assert not idx.equals(idx3.copy())
assert not idx.equals(idx3.asobject)
assert not idx.asobject.equals(idx3)
assert not idx.equals(list(idx3))
assert not idx.equals(pd.Series(idx3))
class TestDateTimeIndexToJulianDate(object):
def test_1700(self):
r1 = Float64Index([2345897.5, 2345898.5, 2345899.5, 2345900.5,
2345901.5])
r2 = date_range(start=Timestamp('1710-10-01'), periods=5,
freq='D').to_julian_date()
assert isinstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_2000(self):
r1 = Float64Index([2451601.5, 2451602.5, 2451603.5, 2451604.5,
2451605.5])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='D').to_julian_date()
assert isinstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_hour(self):
r1 = Float64Index(
[2451601.5, 2451601.5416666666666666, 2451601.5833333333333333,
2451601.625, 2451601.6666666666666666])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='H').to_julian_date()
assert isinstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_minute(self):
r1 = Float64Index(
[2451601.5, 2451601.5006944444444444, 2451601.5013888888888888,
2451601.5020833333333333, 2451601.5027777777777777])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='T').to_julian_date()
assert isinstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_second(self):
r1 = Float64Index(
[2451601.5, 2451601.500011574074074, 2451601.5000231481481481,
2451601.5000347222222222, 2451601.5000462962962962])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='S').to_julian_date()
assert isinstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
# GH 10699
@pytest.mark.parametrize('klass,assert_func', zip([Series, DatetimeIndex],
[tm.assert_series_equal,
tm.assert_index_equal]))
def test_datetime64_with_DateOffset(klass, assert_func):
s = klass(date_range('2000-01-01', '2000-01-31'), name='a')
result = s + pd.DateOffset(years=1)
result2 = pd.DateOffset(years=1) + s
exp = klass(date_range('2001-01-01', '2001-01-31'), name='a')
assert_func(result, exp)
assert_func(result2, exp)
result = s - pd.DateOffset(years=1)
exp = klass(date_range('1999-01-01', '1999-01-31'), name='a')
assert_func(result, exp)
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
pd.Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = klass([Timestamp('2000-01-16 00:15:00', tz='US/Central'),
Timestamp('2000-02-16', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
pd.Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = klass([Timestamp('2000-01-31 00:15:00', tz='US/Central'),
Timestamp('2000-02-29', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
# array of offsets - valid for Series only
if klass is Series:
with tm.assert_produces_warning(PerformanceWarning):
s = klass([Timestamp('2000-1-1'), Timestamp('2000-2-1')])
result = s + Series([pd.offsets.DateOffset(years=1),
pd.offsets.MonthEnd()])
exp = klass([Timestamp('2001-1-1'), Timestamp('2000-2-29')
])
assert_func(result, exp)
# same offset
result = s + Series([pd.offsets.DateOffset(years=1),
pd.offsets.DateOffset(years=1)])
exp = klass([Timestamp('2001-1-1'), Timestamp('2001-2-1')])
assert_func(result, exp)
s = klass([Timestamp('2000-01-05 00:15:00'),
Timestamp('2000-01-31 00:23:00'),
Timestamp('2000-01-01'),
Timestamp('2000-03-31'),
Timestamp('2000-02-29'),
Timestamp('2000-12-31'),
Timestamp('2000-05-15'),
Timestamp('2001-06-15')])
# DateOffset relativedelta fastpath
relative_kwargs = [('years', 2), ('months', 5), ('days', 3),
('hours', 5), ('minutes', 10), ('seconds', 2),
('microseconds', 5)]
for i, kwd in enumerate(relative_kwargs):
op = pd.DateOffset(**dict([kwd]))
assert_func(klass([x + op for x in s]), s + op)
assert_func(klass([x - op for x in s]), s - op)
op = pd.DateOffset(**dict(relative_kwargs[:i + 1]))
assert_func(klass([x + op for x in s]), s + op)
assert_func(klass([x - op for x in s]), s - op)
# assert these are equal on a piecewise basis
offsets = ['YearBegin', ('YearBegin', {'month': 5}),
'YearEnd', ('YearEnd', {'month': 5}),
'MonthBegin', 'MonthEnd',
'SemiMonthEnd', 'SemiMonthBegin',
'Week', ('Week', {'weekday': 3}),
'BusinessDay', 'BDay', 'QuarterEnd', 'QuarterBegin',
'CustomBusinessDay', 'CDay', 'CBMonthEnd',
'CBMonthBegin', 'BMonthBegin', 'BMonthEnd',
'BusinessHour', 'BYearBegin', 'BYearEnd',
'BQuarterBegin', ('LastWeekOfMonth', {'weekday': 2}),
('FY5253Quarter', {'qtr_with_extra_week': 1,
'startingMonth': 1,
'weekday': 2,
'variation': 'nearest'}),
('FY5253', {'weekday': 0,
'startingMonth': 2,
'variation':
'nearest'}),
('WeekOfMonth', {'weekday': 2,
'week': 2}),
'Easter', ('DateOffset', {'day': 4}),
('DateOffset', {'month': 5})]
with warnings.catch_warnings(record=True):
for normalize in (True, False):
for do in offsets:
if isinstance(do, tuple):
do, kwargs = do
else:
do = do
kwargs = {}
for n in [0, 5]:
if (do in ['WeekOfMonth', 'LastWeekOfMonth',
'FY5253Quarter', 'FY5253'] and n == 0):
continue
op = getattr(pd.offsets, do)(n,
normalize=normalize,
**kwargs)
assert_func(klass([x + op for x in s]), s + op)
assert_func(klass([x - op for x in s]), s - op)
assert_func(klass([op + x for x in s]), op + s)
@pytest.mark.parametrize('years,months', product([-1, 0, 1], [-2, 0, 2]))
def test_shift_months(years, months):
s = DatetimeIndex([Timestamp('2000-01-05 00:15:00'),
Timestamp('2000-01-31 00:23:00'),
Timestamp('2000-01-01'),
Timestamp('2000-02-29'),
Timestamp('2000-12-31')])
actual = DatetimeIndex(tslib.shift_months(s.asi8, years * 12 +
months))
expected = DatetimeIndex([x + offsets.DateOffset(
years=years, months=months) for x in s])
tm.assert_index_equal(actual, expected)
class TestBusinessDatetimeIndex(object):
def setup_method(self, method):
self.rng = bdate_range(START, END)
def test_comparison(self):
d = self.rng[10]
comp = self.rng > d
assert comp[11]
assert not comp[9]
def test_pickle_unpickle(self):
unpickled = tm.round_trip_pickle(self.rng)
assert unpickled.offset is not None
def test_copy(self):
cp = self.rng.copy()
repr(cp)
tm.assert_index_equal(cp, self.rng)
def test_repr(self):
# only really care that it works
repr(self.rng)
def test_getitem(self):
smaller = self.rng[:5]
exp = DatetimeIndex(self.rng.view(np.ndarray)[:5])
tm.assert_index_equal(smaller, exp)
assert smaller.offset == self.rng.offset
sliced = self.rng[::5]
assert sliced.offset == BDay() * 5
fancy_indexed = self.rng[[4, 3, 2, 1, 0]]
assert len(fancy_indexed) == 5
assert isinstance(fancy_indexed, DatetimeIndex)
assert fancy_indexed.freq is None
# 32-bit vs. 64-bit platforms
assert self.rng[4] == self.rng[np.int_(4)]
def test_getitem_matplotlib_hackaround(self):
values = self.rng[:, None]
expected = self.rng.values[:, None]
tm.assert_numpy_array_equal(values, expected)
def test_shift(self):
shifted = self.rng.shift(5)
assert shifted[0] == self.rng[5]
assert shifted.offset == self.rng.offset
shifted = self.rng.shift(-5)
assert shifted[5] == self.rng[0]
assert shifted.offset == self.rng.offset
shifted = self.rng.shift(0)
assert shifted[0] == self.rng[0]
assert shifted.offset == self.rng.offset
rng = date_range(START, END, freq=BMonthEnd())
shifted = rng.shift(1, freq=BDay())
assert shifted[0] == rng[0] + BDay()
def test_summary(self):
self.rng.summary()
self.rng[2:2].summary()
def test_summary_pytz(self):
bdate_range('1/1/2005', '1/1/2009', tz=pytz.utc).summary()
def test_summary_dateutil(self):
bdate_range('1/1/2005', '1/1/2009', tz=dateutil.tz.tzutc()).summary()
def test_equals(self):
assert not self.rng.equals(list(self.rng))
def test_identical(self):
t1 = self.rng.copy()
t2 = self.rng.copy()
assert t1.identical(t2)
# name
t1 = t1.rename('foo')
assert t1.equals(t2)
assert not t1.identical(t2)
t2 = t2.rename('foo')
assert t1.identical(t2)
# freq
t2v = Index(t2.values)
assert t1.equals(t2v)
assert not t1.identical(t2v)
class TestCustomDatetimeIndex(object):
def setup_method(self, method):
self.rng = | cdate_range(START, END) | pandas.core.indexes.datetimes.cdate_range |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 3 09:49:54 2020
@author: enzo
"""
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import pandas as pd
import numpy as np
from sklearn.base import ClassifierMixin
class CombClass(ClassifierMixin):
def __init__(self):
return
def _buildmodels(self):
from sklearn.neural_network import MLPClassifier
mlp = MLPClassifier(hidden_layer_sizes=(256,128,64,32),activation="relu",random_state=1)
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=50,max_features='log2')
import xgboost as XGB
xgb = XGB.XGBClassifier()
self.mlp = mlp
self.rf = rf
self.xgb = xgb
def fit(self, X, y):
self._buildmodels()
self.mlp = self.mlp.fit(X,y)
self.rf = self.rf.fit(X,y)
self.xgb = self.xgb.fit(X,y)
self.classes_ = self.xgb.classes_
return self
def predict(self, X):
y_xgb_prob=self.xgb.predict_proba(X)
y_rf_prob = self.rf.predict_proba(X)
y_mlp_prob= self.mlp.predict_proba(X)
# self.proba = pd.DataFrame((y_xgb_prob+y_rf_prob+y_mlp_prob)/3, index = X.index, columns=self.xgb.classes_)
self.proba = (y_xgb_prob+y_rf_prob+y_mlp_prob)/3
self.prediction = self.classes_[self.proba.argmax(axis=1)]
return self.prediction
def predict_proba(self, X):
y_xgb_prob=self.xgb.predict_proba(X)
y_rf_prob = self.rf.predict_proba(X)
y_mlp_prob= self.mlp.predict_proba(X)
# self.proba = pd.DataFrame((y_xgb_prob+y_rf_prob+y_mlp_prob)/3, index = X.index, columns=self.xgb.classes_)
self.proba = (y_xgb_prob+y_rf_prob+y_mlp_prob)/3
return self.proba
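# A minimal, self-contained usage sketch of the averaged ensemble above (synthetic data,
# illustrative only; the real pipeline below builds X/y from the well-plate features).
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    from sklearn.model_selection import train_test_split

    X_demo, y_demo = make_classification(n_samples=300, n_features=12, n_informative=6,
                                         n_classes=3, random_state=0)
    X_tr, X_te, y_tr, y_te = train_test_split(X_demo, y_demo, test_size=0.2, random_state=0)

    clf = CombClass().fit(X_tr, y_tr)
    print(clf.predict(X_te)[:5])            # labels chosen by argmax of the averaged probabilities
    print(clf.predict_proba(X_te).shape)    # (n_samples, n_classes)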
def train_model_reg(ml_df, non_feature_vect, target_class='class_target', target_reg = 'value_target',
verbose = 3, save_model = True):
#%%
#non_features=['well_plate_name', 'well_name', 'class_target', 'value_target', 'wp_image_prop','wp_image_version', 'dict_values']
features = list(ml_df.columns.difference(non_feature_vect))
# Separating out the features
X = ml_df[features].copy()
# Separating out the target
y_class= ml_df[target_class]
y_reg = ml_df[target_reg]
# Standardizing the features
# X = pd.DataFrame(MinMaxScaler().fit_transform(X.values), index=X.index, columns=X.columns)
    X.loc[:, X.dtypes == np.float64] = MinMaxScaler().fit_transform(X.loc[:, X.dtypes == np.float64].values)
X = pd.get_dummies(X)
targets = ml_df[target_class].unique()
# labels = np.sort(targets)
# split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train_class, y_test_class = train_test_split(X, y_class, test_size=0.20)
combined_model = CombClass().fit(X_train, y_train_class)
    # Concatenate the predicted class probabilities as additional features
X_train_prob = pd.DataFrame(combined_model.predict_proba(X_train),index=X_train.index, columns=combined_model.classes_)
X_train = pd.concat([X_train,X_train_prob], axis=1)
X_test_prob = pd.DataFrame(combined_model.predict_proba(X_test),index=X_test.index, columns=combined_model.classes_)
X_test = pd.concat([X_test,X_test_prob], axis=1)
    #Concatenate the predicted classes (kept commented out below)
# X_train = pd.concat([X_train,pd.Series(combined_model.predict(X_train),
# index = X_train.index, name='class_target')], axis=1)
# X_test = pd.concat([X_test,pd.Series(combined_model.predict(X_test),
# index = X_test.index, name='class_target')], axis=1)
    #Concatenate the true classes (kept commented out below)
# X_train = pd.concat([X_train,pd.Series(y_class[X_train.index],
# index = X_train.index, name='class_target')], axis=1)
# X_test = pd.concat([X_test,pd.Series(y_class[X_test.index],
# index = X_test.index, name='class_target')], axis=1)
X_train = pd.get_dummies(X_train); X_test = pd.get_dummies(X_test)
y_train_reg = y_reg[y_train_class.index]; y_test_reg = y_reg[y_test_class.index]
scaler_y = MinMaxScaler()
y_train_reg = pd.Series(scaler_y.fit_transform(y_train_reg.values.reshape(-1, 1)).ravel(),
index=y_train_reg.index, name=target_reg)
#Predict Random Forest
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(n_estimators=50,max_features='log2', criterion='mae')
rf.fit(X_train, y_train_reg)
y_rf_pred = scaler_y.inverse_transform(rf.predict(X_test).reshape(-1, 1)).ravel()
y_rf_pred = pd.Series(y_rf_pred, index=X_test.index, name='rf_pred')
# Predict XGB
import xgboost as XGB
xgb = XGB.XGBRegressor()
xgb.fit(X_train, y_train_reg)
y_xgb_pred = scaler_y.inverse_transform(xgb.predict(X_test).reshape(-1, 1)).ravel().astype('float64')
y_xgb_pred = pd.Series(y_xgb_pred, index=X_test.index, name='xgb_pred')
# Predict MLP
from sklearn.neural_network import MLPRegressor
mlp = MLPRegressor(hidden_layer_sizes=(256,128,64,32),
max_iter=1000,
activation="relu").fit(X_train, y_train_reg)
y_mlp_pred = scaler_y.inverse_transform(mlp.predict(X_test).reshape(-1, 1)).ravel()
y_mlp_pred = | pd.Series(y_mlp_pred, index=X_test.index, name='mlp_pred') | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 29 19:25:05 2017
@author: <NAME>
Data preprocessing steps
"""
import pandas as pd
import numpy as np
#Data processing
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import MinMaxScaler, MaxAbsScaler, RobustScaler, StandardScaler
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.base import TransformerMixin, BaseEstimator
from itertools import chain
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
# Returns the data as is - used when a pipeline step should pass the data through unchanged
class IdentityTransformer(BaseEstimator, TransformerMixin):
def fit(self, X, y=None, *args, **kwargs):
return self
def transform(self, X, y=None, *args, **kwargs):
return X
#Selects columns of the data
class ItemSelector(BaseEstimator, TransformerMixin):
def __init__(self, key):
self.key = key
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
return X[self.key]
#Returns None - used for training dummy estimators
class DummyTransformer(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
#Return a single column of zeros as the dummy needs no X
return np.zeros(shape=(X.shape[0], 1))
# Scale numeric values and impute missing values
class NumericTransformer(BaseEstimator, TransformerMixin):
def __init__(self, numeric_columns, scaler=None, imputer='mean'):
self.numeric_columns=numeric_columns
self.scaler=scaler
self.imputer=imputer
    # Impute missing values first, then scale
def fit(self, X, y=None, *args, **kwargs):
steps = [('imputer', Imputer(strategy=self.imputer))]
        if self.scaler == 'MinMax':
            scl = MinMaxScaler()
        elif self.scaler == 'MaxAbs':
            scl = MaxAbsScaler()
        elif self.scaler == 'Standard':
            scl = StandardScaler()
        elif self.scaler == 'Robust':
            scl = RobustScaler()
        elif self.scaler is not None:
            # Guard against unrecognized scaler names
            raise ValueError('Unknown scaler: {}'.format(self.scaler))
        if self.scaler is not None:
            steps += [('scaler', scl)]
self.pipe = Pipeline(steps)
self.pipe.fit(X.reindex(columns=self.numeric_columns))
return self
def transform(self, X, y=None, *args, **kwargs):
return self.pipe.transform(X.reindex(columns=self.numeric_columns))
# One hot encode categorical data
class CategoricTransformer(BaseEstimator, TransformerMixin):
#Set up the categorical columns
def __init__(self, cat_columns):
self.cat_columns=cat_columns
# Get the column names of the dummy data
# More efficient than just making dummies and reading cols
def fit(self, X, y=None, *args, **kwargs):
X2 = X.reindex(columns=self.cat_columns, copy=True)
#Make the cat columns have the cat datatype;
for col in self.cat_columns:
X2[col] = X2[col].apply(str).astype('category')
self.cat_map_ = {col: X2[col].cat.categories for col in self.cat_columns}
self.dummy_columns_ = {col: ["_".join([str(col), v])
for v in self.cat_map_[col]]
for col in self.cat_columns}
self.transformed_columns_ = pd.Index(
list(chain.from_iterable(self.dummy_columns_[k]
for k in self.cat_columns)))
return self
#Only previously seen values will be encoded
def transform(self, X, y=None, *args, **kwargs):
X2 = X.reindex(columns=self.cat_columns, copy=True)
# Include all categories that may be missing
for col in self.cat_columns:
X2[col] = X2[col].apply(str).astype('category', categories=self.cat_map_[col])
return | pd.get_dummies(X2, sparse=True) | pandas.get_dummies |
"""Tests for the sdv.constraints.tabular module."""
import uuid
from datetime import datetime
from unittest.mock import Mock
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
Between, ColumnFormula, CustomConstraint, GreaterThan, Negative, OneHotEncoding, Positive,
Rounding, Unique, UniqueCombinations)
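# The dummy helpers below mirror the three call signatures that
# ``CustomConstraint`` dispatches on, as exercised by the tests further down:
# table-wide functions that take ``table_data``, column-aware functions that
# take ``(table_data, column)``, and column-only functions that take the
# column data itself.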
def dummy_transform_table(table_data):
return table_data
def dummy_reverse_transform_table(table_data):
return table_data
def dummy_is_valid_table(table_data):
return [True] * len(table_data)
def dummy_transform_table_column(table_data, column):
return table_data
def dummy_reverse_transform_table_column(table_data, column):
return table_data
def dummy_is_valid_table_column(table_data, column):
return [True] * len(table_data[column])
def dummy_transform_column(column_data):
return column_data
def dummy_reverse_transform_column(column_data):
return column_data
def dummy_is_valid_column(column_data):
return [True] * len(column_data)
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and revert_transform + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid_table'
# Run
instance = CustomConstraint(
transform=dummy_transform_table,
reverse_transform=dummy_reverse_transform_table,
is_valid=is_valid_fqn
)
# Assert
assert instance._transform == dummy_transform_table
assert instance._reverse_transform == dummy_reverse_transform_table
assert instance._is_valid == dummy_is_valid_table
def test__run_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy transform function with ``table_data`` argument.
Side Effects:
- Run transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` argument.
Side Effects:
- Run reverse transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = reverse_transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
        The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` argument.
Side Effects:
- Run is valid function once with ``table_data`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table)
# Run
instance = CustomConstraint(is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
assert called[0][1] == 'a'
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run reverse transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
assert called[0][1] == 'a'
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
        The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` and ``column`` argument.
Side Effects:
- Run is valid function once with ``table_data`` and ``column`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
assert called[0][1] == 'a'
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy transform function with ``column_data`` argument.
Side Effects:
        - Run transform function twice: first attempting the ``(table_data, column)``
        signature, then falling back to ``column_data`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy reverse transform function with ``column_data`` argument.
Side Effects:
        - Run reverse transform function twice: first attempting the ``(table_data, column)``
        signature, then falling back to ``column_data`` as input.
Output:
- Applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
        The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy is valid function with ``column_data`` argument.
Side Effects:
        - Run is valid function twice: first attempting the ``(table_data, column)``
        signature, then falling back to ``column_data`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
np.testing.assert_array_equal(is_valid, expected_out)
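# The tests below exercise ``UniqueCombinations``, which learns the valid
# combinations of two or more columns during ``fit``, replaces them with a
# single joint column of UUIDs in ``transform`` (the joint column is named
# with the '#' separator, e.g. 'b#c'), and restores the original columns in
# ``reverse_transform``.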
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
It is expected to create a new Constraint instance and receiving the names of
the columns that need to produce unique combinations.
Side effects:
- instance._colums == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``UniqueCombinations.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == tuple(columns)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``UniqueCombinations.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns, handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test___init__with_one_column(self):
"""Test the ``UniqueCombinations.__init__`` method with only one constraint column.
Expect a ``ValueError`` because UniqueCombinations requires at least two
constraint columns.
Side effects:
- A ValueError is raised
"""
# Setup
columns = ['c']
# Run and assert
with pytest.raises(ValueError):
UniqueCombinations(columns=columns)
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_true(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_false(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [6, 7, 8],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c'].items()]
except ValueError:
assert False
def test_transform_non_string(self):
"""Test the ``UniqueCombinations.transform`` method with non strings.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns as UUIDs.
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c#d'].items()]
except ValueError:
assert False
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_non_string(self):
"""Test the ``UniqueCombinations.reverse_transform`` method with a non string column.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
pd.testing.assert_frame_equal(expected_out, out)
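# The tests below exercise ``GreaterThan``. A minimal sketch (not part of the
# original suite) of the behaviour they cover, kept as comments:
#
#     data = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 0, 6]})
#     GreaterThan(low='a', high='b', strict=True).is_valid(data)
#     # -> [True, False, True]
#     GreaterThan(low='a', high=2, scalar='high').is_valid(data)
#     # -> [True, True, False]   (column 'a' must be <= the scalar 2)
#
# ``low``/``high`` may be column names, lists of column names, or a scalar
# (flagged via ``scalar='low'``/``scalar='high'``); ``strict`` switches between
# > and >=, and ``drop`` controls which side is dropped during transform.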
class TestGreaterThan():
def test__validate_scalar(self):
"""Test the ``_validate_scalar`` method.
        This method validates the inputs and transforms them into
        the correct format.
Input:
- scalar_column = 0
- column_names = 'b'
Output:
- column_names == ['b']
"""
# Setup
scalar_column = 0
column_names = 'b'
scalar = 'high'
# Run
out = GreaterThan._validate_scalar(scalar_column, column_names, scalar)
# Assert
        assert out == ['b']
def test__validate_scalar_list(self):
"""Test the ``_validate_scalar`` method.
        This method validates the inputs and transforms them into
        the correct format.
Input:
- scalar_column = 0
- column_names = ['b']
Output:
- column_names == ['b']
"""
# Setup
scalar_column = 0
column_names = ['b']
scalar = 'low'
# Run
out = GreaterThan._validate_scalar(scalar_column, column_names, scalar)
# Assert
        assert out == ['b']
def test__validate_scalar_error(self):
"""Test the ``_validate_scalar`` method.
        This method raises an error when the scalar column is a list.
Input:
- scalar_column = 0
- column_names = 'b'
Side effect:
- Raise error since the scalar is a list
"""
# Setup
scalar_column = [0]
column_names = 'b'
scalar = 'high'
# Run / Assert
with pytest.raises(TypeError):
GreaterThan._validate_scalar(scalar_column, column_names, scalar)
def test__validate_inputs_high_is_scalar(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 3
- scalar = 'high'
Output:
- low == ['a']
- high == 3
- constraint_columns = ('a')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=3, scalar='high', drop=None)
# Assert
        assert low == ['a']
        assert high == 3
        assert constraint_columns == ('a',)
def test__validate_inputs_low_is_scalar(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 3
- high = 'b'
- scalar = 'low'
- drop = None
Output:
- low == 3
- high == ['b']
- constraint_columns = ('b')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low=3, high='b', scalar='low', drop=None)
# Assert
        assert low == 3
        assert high == ['b']
        assert constraint_columns == ('b',)
def test__validate_inputs_scalar_none(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 3 # where 3 is a column name
- scalar = None
- drop = None
Output:
- low == ['a']
- high == [3]
- constraint_columns = ('a', 3)
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=3, scalar=None, drop=None)
# Assert
        assert low == ['a']
        assert high == [3]
        assert constraint_columns == ('a', 3)
def test__validate_inputs_scalar_none_lists(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = ['a']
- high = ['b', 'c']
- scalar = None
- drop = None
Output:
- low == ['a']
- high == ['b', 'c']
- constraint_columns = ('a', 'b', 'c')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low=['a'], high=['b', 'c'], scalar=None, drop=None)
# Assert
        assert low == ['a']
        assert high == ['b', 'c']
        assert constraint_columns == ('a', 'b', 'c')
def test__validate_inputs_scalar_none_two_lists(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = ['a', 0]
- high = ['b', 'c']
- scalar = None
- drop = None
Side effect:
- Raise error because both high and low are more than one column
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low=['a', 0], high=['b', 'c'], scalar=None, drop=None)
def test__validate_inputs_scalar_unknown(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 'b'
- scalar = 'unknown'
- drop = None
Side effect:
- Raise error because scalar is unknown
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low='a', high='b', scalar='unknown', drop=None)
def test__validate_inputs_drop_error_low(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 2
- high = 'b'
- scalar = 'low'
- drop = 'low'
Side effect:
        - Raise error because ``drop`` is the same as ``scalar``
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low=2, high='b', scalar='low', drop='low')
def test__validate_inputs_drop_error_high(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 'a'
- high = 3
- scalar = 'high'
- drop = 'high'
Side effect:
        - Raise error because ``drop`` is the same as ``scalar``
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low='a', high=3, scalar='high', drop='high')
def test__validate_inputs_drop_success(self):
"""Test the ``_validate_inputs`` method.
        Make sure the method does not raise an error when ``drop`` is not the
        same as ``scalar``.
Input:
- low = 'a'
        - high = 0
- scalar = 'high'
- drop = 'low'
Output:
- low = ['a']
- high = 0
- constraint_columns == ('a')
"""
# Run / Assert
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=0, scalar='high', drop='low')
assert low == ['a']
assert high == 0
assert constraint_columns == ('a',)
def test___init___(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == ['a']
assert instance._high == ['b']
assert instance._strict is False
assert instance._scalar is None
assert instance._drop is None
assert instance.constraint_columns == ('a', 'b')
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``GreaterThan.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Run
instance = GreaterThan(low='a', high='b', handling_strategy='transform')
# Assert
assert instance.rebuild_columns == ['b']
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``GreaterThan.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Run
instance = GreaterThan(low='a', high='b', handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test___init___high_is_scalar(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes. Make sure ``scalar``
is set to ``'high'``.
Input:
- low = 'a'
- high = 0
- strict = True
- drop = 'low'
- scalar = 'high'
Side effects:
- instance._low == 'a'
- instance._high == 0
- instance._strict == True
- instance._drop = 'low'
- instance._scalar == 'high'
"""
# Run
instance = GreaterThan(low='a', high=0, strict=True, drop='low', scalar='high')
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop == 'low'
assert instance.constraint_columns == ('a',)
def test___init___low_is_scalar(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes. Make sure ``scalar``
        is set to ``'low'``.
Input:
- low = 0
- high = 'a'
- strict = True
- drop = 'high'
- scalar = 'low'
Side effects:
- instance._low == 0
- instance._high == 'a'
        - instance._strict == True
- instance._drop = 'high'
- instance._scalar == 'low'
"""
# Run
instance = GreaterThan(low=0, high='a', strict=True, drop='high', scalar='low')
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop == 'high'
assert instance.constraint_columns == ('a',)
def test___init___strict_is_false(self):
"""Test the ``GreaterThan.__init__`` method.
Ensure that ``operator`` is set to ``np.greater_equal``
when ``strict`` is set to ``False``.
Input:
- low = 'a'
- high = 'b'
- strict = False
"""
# Run
instance = GreaterThan(low='a', high='b', strict=False)
# Assert
assert instance.operator == np.greater_equal
def test___init___strict_is_true(self):
"""Test the ``GreaterThan.__init__`` method.
Ensure that ``operator`` is set to ``np.greater``
when ``strict`` is set to ``True``.
Input:
- low = 'a'
- high = 'b'
- strict = True
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True)
# Assert
assert instance.operator == np.greater
def test__init__get_columns_to_reconstruct_default(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
Side effects:
- self._columns_to_reconstruct == ['b']
"""
# Setup
instance = GreaterThan(low='a', high='b')
        assert instance._columns_to_reconstruct == ['b']
def test__init__get_columns_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
- drop = 'high'
Side effects:
- self._columns_to_reconstruct == ['b']
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
        assert instance._columns_to_reconstruct == ['b']
def test__init__get_columns_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
- drop = 'low'
Side effects:
- self._columns_to_reconstruct == ['a']
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
        assert instance._columns_to_reconstruct == ['a']
def test__init__get_columns_to_reconstruct_scalar_high(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 0
- scalar = 'high'
Side effects:
- self._columns_to_reconstruct == ['a']
"""
# Setup
instance = GreaterThan(low='a', high=0, scalar='high')
        assert instance._columns_to_reconstruct == ['a']
def test__get_value_column_list(self):
"""Test the ``GreaterThan._get_value`` method.
This method returns a scalar or a ndarray of values
depending on the type of the ``field``.
Input:
- Table with given data.
- field = 'low'
"""
# Setup
instance = GreaterThan(low='a', high='b')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance._get_value(table_data, 'low')
# Assert
expected = table_data[['a']].values
np.testing.assert_array_equal(out, expected)
def test__get_value_scalar(self):
"""Test the ``GreaterThan._get_value`` method.
This method returns a scalar or a ndarray of values
depending on the type of the ``field``.
Input:
- Table with given data.
- field = 'low'
- scalar = 'low'
"""
# Setup
instance = GreaterThan(low=3, high='b', scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance._get_value(table_data, 'low')
# Assert
expected = 3
assert out == expected
def test__get_diff_columns_name_low_is_scalar(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
        The returned names should be the given column names with the '#'
        token appended.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b#'], scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b#': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a#', 'b##']
assert out == expected
def test__get_diff_columns_name_high_is_scalar(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
        The returned names should be the given column names with the '#'
        token appended.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, scalar='high')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a#', 'b#']
assert out == expected
def test__get_diff_columns_name_scalar_is_none(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
        The returned name should be the two column names joined by the
        '#' token.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='b#', scalar=None)
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b#': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['b##a']
assert out == expected
def test__get_diff_columns_name_scalar_is_none_multi_column_low(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
        The returned names should be pairs of column names joined by the
        '#' token.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=['a#', 'c'], high='b', scalar=None)
table_data = pd.DataFrame({
'a#': [1, 2, 4],
'b': [4, 5, 6],
'c#': [7, 8, 9]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a##b', 'c#b']
assert out == expected
def test__get_diff_columns_name_scalar_is_none_multi_column_high(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
        The returned names should be pairs of column names joined by the
        '#' token.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=0, high=['b', 'c'], scalar=None)
table_data = pd.DataFrame({
0: [1, 2, 4],
'b': [4, 5, 6],
'c#': [7, 8, 9]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['b#0', 'c#0']
assert out == expected
def test__check_columns_exist_success(self):
"""Test the ``GreaterThan._check_columns_exist`` method.
This method raises an error if the specified columns in
``low`` or ``high`` do not exist.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run / Assert
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
instance._check_columns_exist(table_data, 'low')
instance._check_columns_exist(table_data, 'high')
def test__check_columns_exist_error(self):
"""Test the ``GreaterThan._check_columns_exist`` method.
This method raises an error if the specified columns in
``low`` or ``high`` do not exist.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='c')
# Run / Assert
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
instance._check_columns_exist(table_data, 'low')
with pytest.raises(KeyError):
instance._check_columns_exist(table_data, 'high')
    def test__fit_only_one_datetime_arg(self):
        """Test the ``GreaterThan._fit`` method by passing in only one arg as datetime.
If only one of the high / low args is a datetime type, expect a ValueError.
Input:
- low is an int column
- high is a datetime
Output:
- n/a
Side Effects:
- ValueError
"""
# Setup
instance = GreaterThan(low='a', high=pd.to_datetime('2021-01-01'), scalar='high')
# Run and assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(ValueError):
instance._fit(table_data)
def test__fit__low_is_not_found_and_scalar_is_none(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``low`` is set to a value not seen in ``table_data``.
Input:
- Table without ``low`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__high_is_not_found_and_scalar_is_none(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``high`` is set to a value not seen in ``table_data``.
Input:
- Table without ``high`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__low_is_not_found_scalar_is_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``low`` is set to a value not seen in ``table_data``.
Input:
- Table without ``low`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low='c', high=3, scalar='high')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
    def test__fit__high_is_not_found_scalar_is_low(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``high`` is set to a value not seen in ``table_data``.
Input:
- Table without ``high`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low=3, high='c', scalar='low')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__columns_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to ``instance._high`` if ``instance_drop`` is `high`.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__columns_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to ``instance._low`` if ``instance_drop`` is `low`.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['a']
def test__fit__columns_to_reconstruct_default(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `high` by default.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__columns_to_reconstruct_high_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `low` if ``instance._scalar`` is ``'high'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['a']
def test__fit__columns_to_reconstruct_low_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `high` if ``instance._scalar`` is ``'low'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__diff_columns_one_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_diff_columns``
to the one column in ``instance.constraint_columns`` plus a
token if there is only one column in that set.
Input:
- Table with one column.
Side Effect:
        - ``_diff_columns`` == ['a#']
"""
# Setup
instance = GreaterThan(low='a', high=3, scalar='high')
# Run
table_data = pd.DataFrame({'a': [1, 2, 3]})
instance._fit(table_data)
# Asserts
assert instance._diff_columns == ['a#']
def test__fit__diff_columns_multiple_columns(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_diff_columns``
to the two columns in ``instance.constraint_columns`` separated
        by a token if both columns are in that set.
        Input:
        - Table with two columns.
        Side Effect:
        - ``_diff_columns`` == ['b#a']
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._diff_columns == ['b#a']
def test__fit_int(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
        if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns with the high one
being made of integers.
Side Effect:
- The _dtype attribute gets `int` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'i' for dtype in instance._dtype])
def test__fit_float(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
        if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns with the high one
being made of float values.
Side Effect:
- The _dtype attribute gets `float` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_datetime(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
        if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns of datetimes.
Side Effect:
- The _dtype attribute gets `datetime` as the value.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01']),
'b': pd.to_datetime(['2020-01-02'])
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'M' for dtype in instance._dtype])
def test__fit_type__high_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``low`` column as the ``_dtype`` attribute
if ``_scalar`` is ``'high'``.
Input:
- Table that contains two constrained columns with the low one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low='a', high=3, scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_type__low_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``'low'``.
Input:
- Table that contains two constrained columns with the high one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low=3, high='b', scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
        ``dtype`` of the ``low`` columns as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with different dtype.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, scalar='high')
dtype_int = pd.Series([1]).dtype
dtype_float = np.dtype('float')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4., 5., 6.]
})
instance._fit(table_data)
# Assert
expected_diff_columns = ['a#', 'b#']
expected_dtype = pd.Series([dtype_int, dtype_float], index=table_data.columns)
assert instance._diff_columns == expected_diff_columns
pd.testing.assert_series_equal(instance._dtype, expected_dtype)
def test__fit_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
        ``dtype`` of the ``high`` columns as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with different dtype.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b'], scalar='low')
dtype_int = pd.Series([1]).dtype
dtype_float = np.dtype('float')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4., 5., 6.]
})
instance._fit(table_data)
# Assert
expected_diff_columns = ['a#', 'b#']
expected_dtype = pd.Series([dtype_int, dtype_float], index=table_data.columns)
assert instance._diff_columns == expected_diff_columns
pd.testing.assert_series_equal(instance._dtype, expected_dtype)
def test_is_valid_strict_false(self):
"""Test the ``GreaterThan.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- False should be returned for the strictly invalid row and True
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``GreaterThan.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- True should be returned for the strictly valid row and False
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_scalar_high_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is a column name, then
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=False, scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_scalar_low_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is a column name, then
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low='a', high=2, strict=False, scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is multi column, then
        the values in those columns should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=2, strict=False, scalar='high')
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is multi column, then
        the values in those columns should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=2, high=['a', 'b'], strict=False, scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
        If scalar is ``None`` and high is a list of columns, then
        the values in those columns should all be higher than
in the low column.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low='b', high=['a', 'c'], strict=False)
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_datetime(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a datetime and low is a column,
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below `high`.
Output:
- True should be returned for the rows where the low
column is below `high`.
"""
# Setup
high_dt = pd.to_datetime('8/31/2021')
instance = GreaterThan(low='a', high=high_dt, strict=False, scalar='high')
table_data = pd.DataFrame({
'a': [datetime(2020, 5, 17), datetime(2020, 2, 1), datetime(2021, 9, 1)],
'b': [4, 2, 2],
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_datetime(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a datetime and high is a column,
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below `low`.
Output:
- True should be returned for the rows where the high
column is above `low`.
"""
# Setup
low_dt = pd.to_datetime('8/31/2021')
instance = GreaterThan(low=low_dt, high='a', strict=False, scalar='low')
table_data = pd.DataFrame({
'a': [datetime(2021, 9, 17), datetime(2021, 7, 1), datetime(2021, 9, 1)],
'b': [4, 2, 2],
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_two_cols_with_nans(self):
"""Test the ``GreaterThan.is_valid`` method with nan values.
If there is a NaN row, expect that `is_valid` returns True.
Input:
- Table with a NaN row
Output:
- True should be returned for the NaN row.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, None, 3],
'b': [4, None, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_two_cols_with_one_nan(self):
"""Test the ``GreaterThan.is_valid`` method with nan values.
If there is a row in which we compare one NaN value with one
non-NaN value, expect that `is_valid` returns True.
Input:
- Table with a row that contains only one NaN value.
Output:
- True should be returned for the row with the NaN value.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, None, 3],
'b': [4, 5, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test__transform_int_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
        - Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
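        # The diff column name follows the '<low>#<high>' convention, here 'a#b'.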
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
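        # b - a == 3 in every row, so the diff column holds np.log(3 + 1) == np.log(4).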
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_int_drop_high(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the high column.
Setup:
- ``_drop`` is set to ``high``.
Input:
        - Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the high column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_int_drop_low(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the low column.
Setup:
- ``_drop`` is set to ``low``.
Input:
        - Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the low column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_float_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type float.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_datetime_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type datetime.
If the columns are of type datetime, ``_transform`` is expected
to convert the timedelta distance into numeric before applying
the +1 and logarithm.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with values at a distance of exactly 1 second.
Output:
- Same table with a diff column of the logarithms
        of the distance in nanoseconds + 1, which is np.log(1_000_000_001).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
instance._is_datetime = True
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
})
out = instance._transform(table_data)
# Assert
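        # One second equals 1_000_000_000 nanoseconds, hence np.log(1_000_000_000 + 1) in the diff column.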
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_not_all_columns_provided(self):
"""Test the ``GreaterThan.transform`` method.
If some of the columns needed for the transform are missing, it will raise
a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, fit_columns_model=False)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test__transform_high_is_scalar(self):
"""Test the ``GreaterThan._transform`` method with high as scalar.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_scalar`` is ``'high'``.
Input:
- Table with one low column and two dummy columns.
Output:
        - Same table with a diff column of the logarithms of the distances + 1,
          which are np.log(5), np.log(4) and np.log(3).
"""
# Setup
instance = GreaterThan(low='a', high=5, strict=True, scalar='high')
instance._diff_columns = ['a#b']
instance.constraint_columns = ['a']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(5), np.log(4), np.log(3)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_low_is_scalar(self):
"""Test the ``GreaterThan._transform`` method with high as scalar.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_scalar`` is ``'low'``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low=2, high='b', strict=True, scalar='low')
instance._diff_columns = ['a#b']
instance.constraint_columns = ['b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(3), np.log(4), np.log(5)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
        The ``GreaterThan._transform`` method is expected to compute the logarithm
        of the distance + 1 between the scalar ``high`` and each of the ``low`` columns.
Input:
- Table with given data.
Output:
- Same table with additional columns of the logarithms + 1.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=3, strict=True, scalar='high')
instance._diff_columns = ['a#', 'b#']
instance.constraint_columns = ['a', 'b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
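        # 'b' already exceeds the scalar high of 3, so the shifted distances (high - b + 1)
        # are non-positive and np.log yields -inf / NaN for the 'b#' column.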
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(3), np.log(2), np.log(1)],
'b#': [np.log(0), np.log(-1), np.log(-2)],
})
pd.testing.assert_frame_equal(out, expected)
def test__transform_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
        The ``GreaterThan._transform`` method is expected to compute the logarithm
        of the distance + 1 between each of the ``high`` columns and the scalar ``low``.
Input:
- Table with given data.
Output:
- Same table with additional columns of the logarithms + 1.
"""
# Setup
instance = GreaterThan(low=3, high=['a', 'b'], strict=True, scalar='low')
instance._diff_columns = ['a#', 'b#']
instance.constraint_columns = ['a', 'b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(-1), np.log(0), np.log(1)],
'b#': [np.log(2), np.log(3), np.log(4)],
})
pd.testing.assert_frame_equal(out, expected)
def test__transform_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
        The ``GreaterThan._transform`` method is expected to compute the logarithm
        of the distance + 1 between the ``high`` column and each of the ``low`` columns.
Input:
- Table with given data.
Output:
- Same table with additional columns of the logarithms + 1.
"""
# Setup
instance = GreaterThan(low=['a', 'c'], high='b', strict=True)
instance._diff_columns = ['a#', 'c#']
instance.constraint_columns = ['a', 'c']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'c#': [np.log(-2)] * 3,
})
pd.testing.assert_frame_equal(out, expected)
def test_reverse_transform_int_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
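        # np.exp(np.log(4)) - 1 == 3, so 'b' is rebuilt as a + 3 -> [4, 5, 6].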
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_float_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype float.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to float values
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as float values
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [np.dtype('float')]
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'b': [4.1, 5.2, 6.3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the high column replaced by the low one + one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
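        # np.exp(np.log(1_000_000_001)) - 1 == 1_000_000_000 ns == 1 second, so 'b' is rebuilt as a + 1 second.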
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the low column replaced by the high one - 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a': [1, 2, 3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- subtract from the high column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the low column replaced by the high one - one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
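        # Only the invalid middle row (a=2 > b=1) is rebuilt as a + 3 == 5; valid rows keep their original 'b'.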
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column when the row is invalid
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + one second
for all invalid rows, and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-01T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2]
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_low_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_scalar`` is ``'low'``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=True, scalar='low')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 6, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_high_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_scalar`` is ``'high'``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high value.
Output:
- Same table with the low column replaced by the high one - 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high=3, strict=True, scalar='high')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 0],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_scalar`` is ``'high'``.
- ``_low`` is set to multiple columns.
Input:
- Table with a diff column that contains the constant np.log(4)/np.log(5).
The table should have one invalid row where the low column is
higher than the high value.
Output:
- Same table with the low column replaced by the high one - 3/-4 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=3, strict=True, scalar='high')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [0, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'b#': [np.log(5)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 0, 0],
'b': [0, -1, -1],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_scalar`` is ``'low'``.
- ``_high`` is set to multiple columns.
Input:
- Table with a diff column that contains the constant np.log(4)/np.log(5).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value +3/+4 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=3, high=['a', 'b'], strict=True, scalar='low')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'b#': [np.log(5)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [6, 6, 4],
'b': [7, 7, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_low`` = ['a', 'c'].
- ``_high`` = ['b'].
Input:
- Table with a diff column that contains the constant np.log(4)/np.log(-2).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value +3/-4 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=['a', 'c'], high=['b'], strict=True)
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'c#']
instance._columns_to_reconstruct = ['a', 'c']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(1)] * 3,
'c#': [np.log(1)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_multi_column_positive(self):
"""Test the ``GreaterThan.reverse_transform`` method for positive constraint.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
        - add the low value when the row is invalid
- convert the output to integers
Input:
- Table with given data.
Output:
- Same table with with replaced rows and dropped columns.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b'], strict=True, scalar='low')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, -1],
'c': [7, 8, 9],
'a#': [np.log(2), np.log(3), np.log(4)],
'b#': [np.log(5), np.log(6), np.log(0)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 0],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_multi_column_negative(self):
"""Test the ``GreaterThan.reverse_transform`` method for negative constraint.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Input:
- Table with given data.
Output:
- Same table with with replaced rows and dropped columns.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, strict=True, scalar='high')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [-1, -2, 1],
'b': [-4, -5, -1],
'c': [7, 8, 9],
'a#': [np.log(2), np.log(3), np.log(0)],
'b#': [np.log(5), np.log(6), np.log(2)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [-1, -2, 0],
'b': [-4, -5, -1],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
class TestPositive():
def test__init__(self):
"""
Test the ``Positive.__init__`` method.
The method is expected to set the ``_low`` instance variable
to 0, the ``_scalar`` variable to ``'low'``. The rest of the
parameters should be passed. Check that ``_drop`` is set to
``None`` when ``drop`` is ``False``.
        Input:
        - strict = True
        - columns = 'a'
        - drop = False
        Side effects:
        - instance._low == 0
        - instance._high == ['a']
        - instance._strict == True
        - instance._scalar == 'low'
        - instance._drop = None
"""
# Run
instance = Positive(columns='a', strict=True, drop=False)
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop is None
def test__init__drop_true(self):
"""
Test the ``Positive.__init__`` method with drop is ``True``.
Check that ``_drop`` is set to 'high' when ``drop`` is ``True``.
        Input:
        - strict = True
        - columns = 'a'
        - drop = True
        Side effects:
        - instance._low == 0
        - instance._high == ['a']
        - instance._strict == True
        - instance._scalar == 'low'
        - instance._drop = 'high'
"""
# Run
instance = Positive(columns='a', strict=True, drop=True)
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop == 'high'
class TestNegative():
def test__init__(self):
"""
Test the ``Negative.__init__`` method.
The method is expected to set the ``_high`` instance variable
to 0, the ``_scalar`` variable to ``'high'``. The rest of the
parameters should be passed. Check that ``_drop`` is set to
``None`` when ``drop`` is ``False``.
        Input:
        - strict = True
        - columns = 'a'
        - drop = False
        Side effects:
        - instance._low == ['a']
        - instance._high == 0
        - instance._strict == True
        - instance._scalar == 'high'
        - instance._drop = None
"""
# Run
instance = Negative(columns='a', strict=True, drop=False)
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop is None
def test__init__drop_true(self):
"""
Test the ``Negative.__init__`` method with drop is ``True``.
Check that ``_drop`` is set to 'low' when ``drop`` is ``True``.
        Input:
        - strict = True
        - columns = 'a'
        - drop = True
        Side effects:
        - instance._low == ['a']
        - instance._high == 0
        - instance._strict == True
        - instance._scalar == 'high'
        - instance._drop = 'low'
"""
# Run
instance = Negative(columns='a', strict=True, drop=True)
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop == 'low'
def new_column(data):
"""Formula to be used for the ``TestColumnFormula`` class."""
if data['a'] is None or data['b'] is None:
return None
return data['a'] + data['b']
class TestColumnFormula():
def test___init__(self):
"""Test the ``ColumnFormula.__init__`` method.
It is expected to create a new Constraint instance,
import the formula to use for the computation, and
set the specified constraint column.
Input:
- column = 'col'
- formula = new_column
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column)
# Assert
assert instance._column == column
assert instance._formula == new_column
assert instance.constraint_columns == ('col', )
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``ColumnFormula.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == (column,)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``ColumnFormula.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column,
handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test_is_valid_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a valid data.
If the data fulfills the formula, result is a series of ``True`` values.
Input:
- Table data fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a non-valid data.
If the data does not fulfill the formula, result is a series of ``False`` values.
Input:
- Table data not fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``False`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 2, 3]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_with_nans(self):
"""Test the ``ColumnFormula.is_valid`` method for with a formula that produces nans.
If the data fulfills the formula, result is a series of ``True`` values.
Input:
- Table data fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, None],
'c': [5, 7, None]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test__transform(self):
"""Test the ``ColumnFormula._transform`` method.
It is expected to drop the indicated column from the table.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data without the indicated column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_without_dropping_column(self):
"""Test the ``ColumnFormula._transform`` method without dropping the column.
If `drop_column` is false, expect to not drop the constraint column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with the indicated column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column, drop_column=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_missing_column(self):
"""Test the ``ColumnFormula._transform`` method when the constraint column is missing.
When ``_transform`` is called with data that does not contain the constraint column,
expect to return the data as-is.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data, unchanged (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'd': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'd': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform(self):
"""Test the ``ColumnFormula.reverse_transform`` method.
It is expected to compute the indicated column by applying the given formula.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 1, 1]
})
out = instance.reverse_transform(table_data)
# Assert
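        # reverse_transform recomputes 'c' as a + b, overwriting the incorrect input values.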
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
class TestRounding():
def test___init__(self):
"""Test the ``Rounding.__init__`` method.
It is expected to create a new Constraint instance
and set the rounding args.
Input:
- columns = ['b', 'c']
- digits = 2
"""
# Setup
columns = ['b', 'c']
digits = 2
# Run
instance = Rounding(columns=columns, digits=digits)
# Assert
assert instance._columns == columns
assert instance._digits == digits
def test___init__invalid_digits(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``digits`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 20
"""
# Setup
columns = ['b', 'c']
digits = 20
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits)
def test___init__invalid_tolerance(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``tolerance`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 2
- tolerance = 0.1
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 0.1
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits, tolerance=tolerance)
def test_is_valid_positive_digits(self):
"""Test the ``Rounding.is_valid`` method for a positive digits argument.
        Input:
        - Table data where only some values have the desired decimal places (pandas.DataFrame)
        Output:
        - Series with ``True`` for the rows within tolerance and ``False`` for the rest (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 1e-3
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12, 5.51, None, 6.941, 1.129],
'c': [5.315, 7.12, 1.12, 9.131, 12.329],
'd': ['a', 'b', 'd', 'e', None],
'e': [123.31598, -1.12001, 1.12453, 8.12129, 1.32923]
})
out = instance.is_valid(table_data)
# Assert
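        # Row 1 fails because c == 5.315 is 0.005 away from two decimals (above the 1e-3 tolerance);
        # row 3 fails because 'b' is NaN.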
expected_out = pd.Series([False, True, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_negative_digits(self):
"""Test the ``Rounding.is_valid`` method for a negative digits argument.
        Input:
        - Table data where only some values have the desired decimal places (pandas.DataFrame)
        Output:
        - Series with ``True`` for the rows within tolerance and ``False`` for the rest (pandas.Series)
"""
# Setup
columns = ['b']
digits = -2
tolerance = 1
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [401, 500, 6921, 799, None],
'c': [5.3134, 7.1212, 9.1209, 101.1234, None],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_zero_digits(self):
"""Test the ``Rounding.is_valid`` method for a zero digits argument.
        Input:
        - Table data where some values do not have the desired decimal places (pandas.DataFrame)
        Output:
        - Series with ``True`` for the rows within tolerance and ``False`` for the rest (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 0
tolerance = 1e-4
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, None, 3, 4],
'b': [4, 5.5, 1.2, 6.0001, 5.99999],
'c': [5, 7.12, 1.31, 9.00001, 4.9999],
'd': ['a', 'b', None, 'd', 'e'],
'e': [2.1254, 17.12123, 124.12, 123.0112, -9.129434]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_reverse_transform_positive_digits(self):
"""Test the ``Rounding.reverse_transform`` method with positive digits.
Expect that the columns are rounded to the specified integer digit.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.12345, None, 5.100, 6.0001, 1.7999],
'c': [1.1, 1.234, 9.13459, 4.3248, 6.1312],
'd': ['a', 'b', 'd', 'e', None]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.123, None, 5.100, 6.000, 1.800],
'c': [1.100, 1.234, 9.135, 4.325, 6.131],
'd': ['a', 'b', 'd', 'e', None]
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_negative_digits(self):
"""Test the ``Rounding.reverse_transform`` method with negative digits.
Expect that the columns are rounded to the specified integer digit.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b']
digits = -3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41234.5, None, 5000, 6001, 5928],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41000.0, None, 5000.0, 6000.0, 6000.0],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_zero_digits(self):
"""Test the ``Rounding.reverse_transform`` method with zero digits.
Expect that the columns are rounded to the specified integer digit.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 0
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12345, None, 5.0, 6.01, 7.9],
'c': [1.1, 1.0, 9.13459, None, 8.89],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.0, None, 5.0, 6.0, 8.0],
'c': [1.0, 1.0, 9.0, None, 9.0],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
def transform(data, low, high):
"""Transform to be used for the TestBetween class."""
data = (data - low) / (high - low) * 0.95 + 0.025
return np.log(data / (1.0 - data))
class TestBetween():
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``Between.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
column = 'col'
# Run
instance = Between(column=column, low=10, high=20, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == (column,)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``Between.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
column = 'col'
# Run
instance = Between(column=column, low=10, high=20, handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test_fit_only_one_datetime_arg(self):
"""Test the ``Between.fit`` method by passing in only one arg as datetime.
If only one of the bound parameters is a datetime type, expect a ValueError.
Input:
        - low is a numeric scalar
- high is a datetime
Output:
- n/a
Side Effects:
- ValueError
"""
# Setup
column = 'a'
low = 0.0
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high)
# Run and assert
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [4, 5, 6],
})
with pytest.raises(ValueError):
instance.fit(table_data)
def test_transform_scalar_scalar(self):
"""Test the ``Between.transform`` method by passing ``low`` and ``high`` as scalars.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [4, 5, 6],
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
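        # The transformed column is named '<column>#<low>#<high>', here 'a#0.0#1.0'.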
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'a#0.0#1.0': transform(table_data[column], low, high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_scalar_column(self):
"""Test the ``Between._transform`` method with ``low`` as scalar and ``high`` as a column.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0.5, 1, 6],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0.5, 1, 6],
'a#0.0#b': transform(table_data[column], low, table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_scalar(self):
"""Test the ``Between._transform`` method with ``low`` as a column and ``high`` as scalar.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0, -1, 0.5],
'a#b#1.0': transform(table_data[column], table_data[low], high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_column(self):
"""Test the ``Between._transform`` method by passing ``low`` and ``high`` as columns.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
'c': [0.5, 1, 6]
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_datetime_datetime(self):
"""Test the ``Between._transform`` method by passing ``low`` and ``high`` as datetimes.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
- High and Low as datetimes
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [4, 5, 6],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'a#1900-01-01T00:00:00.000000000#2021-01-01T00:00:00.000000000': transform(
table_data[column], low, high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_datetime_column(self):
"""Test the ``Between._transform`` method with ``low`` as datetime and ``high`` as a column.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a#1900-01-01T00:00:00.000000000#b': transform(
table_data[column], low, table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_datetime(self):
"""Test the ``Between._transform`` method with ``low`` as a column and ``high`` as datetime.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'a#b#2021-01-01T00:00:00.000000000': transform(
table_data[column], table_data[low], high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_column_datetime(self):
"""Test the ``Between._transform`` method with ``low`` and ``high`` as datetime columns.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'c': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
]
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'c': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_scalar_scalar(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as scalars.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
table_data = pd.DataFrame({
'b': [4, 5, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [4, 5, 6],
'a#0.0#1.0': transform(table_data[column], low, high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_scalar_column(self):
"""Test ``Between.reverse_transform`` with ``low`` as scalar and ``high`` as a column.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
table_data = pd.DataFrame({
'b': [0.5, 1, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0.5, 1, 6],
'a#0.0#b': transform(table_data[column], low, table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_scalar(self):
"""Test ``Between.reverse_transform`` with ``low`` as a column and ``high`` as scalar.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
table_data = pd.DataFrame({
'b': [0, -1, 0.5],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0, -1, 0.5],
'a#b#1.0': transform(table_data[column], table_data[low], high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_column(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as columns.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
table_data = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_datetime_datetime(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as datetime.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
- High and low as datetimes
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
table_data = pd.DataFrame({
'b': [4, 5, 6],
'a': [
| pd.to_datetime('2020-09-03') | pandas.to_datetime |
# Copyright (c) ZenML GmbH 2021. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import pandas as pd
from sklearn import datasets
from sklearn.model_selection import train_test_split
from whylogs import DatasetProfile # type: ignore
from zenml.integrations.constants import SKLEARN, WHYLOGS
from zenml.integrations.whylogs.steps import whylogs_profiler_step
from zenml.integrations.whylogs.visualizers import WhylogsVisualizer
from zenml.integrations.whylogs.whylogs_step_decorator import enable_whylogs
from zenml.logger import get_logger
from zenml.pipelines import pipeline
from zenml.repository import Repository
from zenml.steps import Output, step
from zenml.steps.step_context import StepContext
logger = get_logger(__name__)
# Simply set these environment variables to point to a Whylabs account and all
# whylogs DatasetProfile artifacts that are produced by a pipeline run will
# also be uploaded to Whylabs:
#
# import os
# os.environ["WHYLABS_API_KEY"] = "YOUR-API-KEY"
# os.environ["WHYLABS_DEFAULT_ORG_ID"] = "YOUR-ORG-ID"
# An existing zenml step can be easily extended to include whylogs profiling
# features by adding the @enable_whylogs decorator. The only prerequisite is
# that the step already include a step context parameter.
#
# Applying the @enable_whylogs decorator gives the user access to a `whylogs`
# step sub-context field which intermediates and facilitates the creation of
# whylogs DatasetProfile artifacts.
#
# The whylogs sub-context transparently incorporates ZenML specific
# information, such as project, pipeline name and specialized tags, into all
# dataset profiles that are generated with it. It also simplifies the whylogs
# profile generation process by abstracting away some of the whylogs specific
# details, such as whylogs session and logger initialization and management.
#
# NOTE: remember that cache needs to be explicitly enabled for steps that take
# in step contexts
@enable_whylogs
@step(enable_cache=True)
def data_loader(
context: StepContext,
) -> Output(data=pd.DataFrame, profile=DatasetProfile,):
"""Load the diabetes dataset."""
X, y = datasets.load_diabetes(return_X_y=True, as_frame=True)
# merge X and y together
df = | pd.merge(X, y, left_index=True, right_index=True) | pandas.merge |
from __future__ import division
from datetime import timedelta
from functools import partial
import itertools
from nose.tools import assert_true
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
import platform
if platform.system() != 'Windows':
from zipline.pipeline.loaders.blaze.estimates import (
BlazeNextEstimatesLoader,
BlazeNextSplitAdjustedEstimatesLoader,
BlazePreviousEstimatesLoader,
BlazePreviousSplitAdjustedEstimatesLoader,
)
from zipline.pipeline.loaders.earnings_estimates import (
INVALID_NUM_QTRS_MESSAGE,
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal, assert_raises_regex
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import platform
import unittest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date,
sids,
tuples,
end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples,
columns=[SID_FIELD_NAME,
'estimate',
'knowledge_date'])
df = df.pivot_table(columns=SID_FIELD_NAME,
values='estimate',
index='knowledge_date')
df = df.reindex(
pd.date_range(start_date, end_date)
)
# Index name is lost during reindex.
df.index = df.index.rename('knowledge_date')
df['at_date'] = end_date.tz_localize('utc')
df = df.set_index(['at_date', df.index.tz_localize('utc')]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
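# Illustrative usage sketch (hypothetical sids/values, not referenced by the
# tests below):
#
#     create_expected_df_for_factor_compute(
#         start_date=pd.Timestamp('2015-01-09'),
#         sids=[0, 1],
#         tuples=[(0, 100., pd.Timestamp('2015-01-09')),
#                 (1, 200., pd.Timestamp('2015-01-10'))],
#         end_date=pd.Timestamp('2015-01-12'),
#     )
#
# pivots the tuples into one column per sid and forward-fills each estimate
# through 2015-01-12, indexed by ('at_date', 'knowledge_date').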
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp('2014-12-28')
END_DATE = pd.Timestamp('2015-02-04')
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError('make_loader')
@classmethod
def make_events(cls):
raise NotImplementedError('make_events')
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days, self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
's' + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(cls.events, {column.name: val for
column, val in
cls.columns.items()})
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
    test_load_one_day()
        Tests that the latest event and estimate values for every requested
        column are loaded correctly for a single day.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: 'event_date',
MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter',
MultipleColumnsEstimates.fiscal_year: 'fiscal_year',
MultipleColumnsEstimates.estimate1: 'estimate1',
MultipleColumnsEstimates.estimate2: 'estimate2'
}
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate1': [1., 2.],
'estimate2': [3., 4.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def make_expected_out(cls):
raise NotImplementedError('make_expected_out')
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp('2015-01-15', tz='utc'),
end_date=pd.Timestamp('2015-01-15', tz='utc'),
)
assert_frame_equal(results, self.expected_out)
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the previous quarter loader returns the expected single-day
    values for each of the estimate columns.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-10'),
'estimate1': 1.,
'estimate2': 3.,
FISCAL_QUARTER_FIELD_NAME: 1.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the next quarter loader returns the expected single-day
    values for each of the estimate columns.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-20'),
'estimate1': 2.,
'estimate2': 4.,
FISCAL_QUARTER_FIELD_NAME: 2.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
dummy_df = pd.DataFrame({SID_FIELD_NAME: 0},
columns=[SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
'estimate'],
index=[0])
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1,
bad_dataset2,
good_dataset)
for c in dataset.columns}
p = Pipeline(columns)
with self.assertRaises(ValueError) as e:
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
assert_raises_regex(e, INVALID_NUM_QTRS_MESSAGE % "-1,-2")
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with self.assertRaises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = ["split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof"]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(itertools.product(
(NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader),
))
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
with self.assertRaises(ValueError):
loader(dummy_df,
{column.name: val for column, val in
columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"))
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp('2015-01-28')
q1_knowledge_dates = [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-04'),
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-11')]
q2_knowledge_dates = [pd.Timestamp('2015-01-14'),
pd.Timestamp('2015-01-17'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-23')]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')] # One day late
q2_release_dates = [pd.Timestamp('2015-01-25'), # One day early
pd.Timestamp('2015-01-26')]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates +
cls.q2_knowledge_dates,
4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (q1e1 < q1e2 and
q2e1 < q2e2 and
# All estimates are < Q2's event, so just constrain Q1
# estimates.
q1e1 < cls.q1_release_dates[0] and
q1e2 < cls.q1_release_dates[0]):
sid_estimates.append(cls.create_estimates_df(q1e1,
q1e2,
q2e1,
q2e2,
sid))
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates +
sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
'estimate': [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid
})
@classmethod
def create_estimates_df(cls,
q1e1,
q1e2,
q2e1,
q2e2,
sid):
return pd.DataFrame({
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
'estimate': [.1, .2, .3, .4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
})
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert_true(sid_estimates.isnull().all().all())
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[self.get_expected_estimate(
q1_knowledge[q1_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
q2_knowledge[q2_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
date.tz_localize(None),
).set_index([[date]]) for date in sid_estimates.index],
axis=0)
assert_equal(all_expected[sid_estimates.columns],
sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateLoaderTestCase(NextEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q2_knowledge.iloc[-1:]
elif (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateLoaderTestCase(PreviousEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
        Runs a Pipeline that requests estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate': [1., 2.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(columns=[cls.columns[col] + '1'
for col in cls.columns] +
[cls.columns[col] + '2'
for col in cls.columns],
index=cls.trading_days)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ('1', '2')
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(
expected[expected_name]
)
else:
expected[expected_name] = expected[
expected_name
].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge([{c.name + '1': c.latest for c in dataset1.columns},
{c.name + '2': c.latest for c in dataset2.columns}])
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + '1' for col in self.columns]
q2_columns = [col.name + '2' for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(sorted(np.array(q1_columns + q2_columns)),
sorted(results.columns.values))
assert_equal(self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1))
class NextEstimateMultipleQuarters(
WithEstimateMultipleQuarters, ZiplineTestCase
):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-11'),
raw_name + '1'
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp('2015-01-11'):pd.Timestamp('2015-01-20'),
raw_name + '1'
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ['estimate', 'event_date']:
expected.loc[
pd.Timestamp('2015-01-06'):pd.Timestamp('2015-01-10'),
col_name + '2'
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp('2015-01-01'): | pd.Timestamp('2015-01-09') | pandas.Timestamp |
#!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
__author__ = ["<NAME>"]
__all__ = [
"TEST_YS",
"TEST_SPS",
"TEST_ALPHAS",
"TEST_FHS",
"TEST_STEP_LENGTHS_INT",
"TEST_STEP_LENGTHS",
"TEST_INS_FHS",
"TEST_OOS_FHS",
"TEST_WINDOW_LENGTHS_INT",
"TEST_WINDOW_LENGTHS",
"TEST_INITIAL_WINDOW_INT",
"TEST_INITIAL_WINDOW",
"VALID_INDEX_FH_COMBINATIONS",
"INDEX_TYPE_LOOKUP",
"TEST_RANDOM_SEEDS",
"TEST_N_ITERS",
]
import numpy as np
import pandas as pd
from sktime.utils._testing.series import _make_series
# We here define the parameter values for unit testing.
TEST_CUTOFFS_INT = [np.array([21, 22]), np.array([3, 7, 10])]
# The following timestamps correspond
# to the above integers for `_make_series(all_positive=True)`
TEST_CUTOFFS_TIMESTAMP = [
pd.to_datetime(["2000-01-22", "2000-01-23"]),
pd.to_datetime(["2000-01-04", "2000-01-08", "2000-01-11"]),
]
TEST_CUTOFFS = [*TEST_CUTOFFS_INT, *TEST_CUTOFFS_TIMESTAMP]
TEST_WINDOW_LENGTHS_INT = [1, 5]
TEST_WINDOW_LENGTHS_TIMEDELTA = [pd.Timedelta(1, unit="D"), pd.Timedelta(5, unit="D")]
TEST_WINDOW_LENGTHS_DATEOFFSET = [ | pd.offsets.Day(1) | pandas.offsets.Day |
from BoostInference_no_parallelization import Booster
import sys, pandas as pd, numpy as np
import glob, pickle
from sklearn.metrics import roc_auc_score, precision_recall_curve, auc
if len(sys.argv)<5:
print('python file.py df-val-PhyloPGM-input df-test-PhyloPGM-output info_tree fname_df_pgm_output')
exit(0)
fname_dtrain = sys.argv[1]
fname_dtest = sys.argv[2]
info_tree = sys.argv[3]
fname_df_pgm_output = sys.argv[4]
# given_pseudo_count = float(sys.argv[3])
print('fname_dtrain:', fname_dtrain,
'fname_dtest:', fname_dtest,
# 'given_pseudo_count:', given_pseudo_count
)
dtrain = pd.read_csv(fname_dtrain, index_col=0)
list_species = list(dtrain.columns[:-1])
print('list_species:', len(list_species))
dtrain.columns = list_species + ['label']
# print(dtrain.head()); exit(0)
dtest = pd.read_csv(fname_dtest, index_col=0)
dtest.columns = list_species + ['label']
print('dtrain:', dtrain.shape)
print('dtest:', dtest.shape)
dtest = dtest[dtest.hg38.notna()]
print('dtest with hg38 scores:', dtest.shape)
# exit(0)
given_tree = pickle.load(open(info_tree, 'rb'))
tree = {}
# for k, v in given_tree['tree'].items():
for k, v in given_tree.items():
# tree[k] = v[0]
tree[k] = v
print('tree:', len(tree))
# exit(0)
alpha = 0.1
num_pos_train = dtrain[dtrain.label==1].shape[0]
num_neg_train = dtrain[dtrain.label==0].shape[0]
num_pos_test = dtest[dtest.label==1].shape[0]
num_neg_test = dtest[dtest.label==0].shape[0]
print('num_pos_train:', num_pos_train,
'num_neg_train:', num_neg_train,
'num_pos_test:', num_pos_test,
'num_neg_test:', num_neg_test
)
base_auc = roc_auc_score(dtest.label, dtest.hg38)
def scikitlearn_calc_auPRC(y_true, y_score):
precision, recall, _ = precision_recall_curve(y_true, y_score)
return auc(recall, precision)
base_aupr = scikitlearn_calc_auPRC(dtest.label, dtest.hg38)
print('base_auc:', base_auc,
'base_aupr:', base_aupr
)
# exit(0)
def get_batch(iterable, n=1):
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx:min(ndx + n, l)]
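# e.g. list(get_batch([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]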
BATCH_SIZE = 100000
tmp_mod_df_test_final = | pd.DataFrame() | pandas.DataFrame |
import pytest
import pandas as pd
from pandas import Timestamp
from datetime import date
from pyramm.ops.top_surface import build_top_surface, append_surface_details_to_segments
@pytest.fixture
def original():
return pd.DataFrame.from_records(
[
{
"road_id": 100,
"start_m": 20,
"end_m": 200,
"surface_date": date(2020, 1, 1),
"name": "A",
},
{
"road_id": 100,
"start_m": 20,
"end_m": 60,
"surface_date": date(2020, 1, 1),
"name": "A",
},
{
"road_id": 101,
"start_m": 230,
"end_m": 500,
"surface_date": date(2021, 1, 1),
"name": "B",
},
{
"road_id": 101,
"start_m": 230,
"end_m": 260,
"surface_date": date(2021, 1, 1),
"name": "C",
},
{
"road_id": 102,
"start_m": 0,
"end_m": 9999,
"surface_date": date(2018, 6, 6),
"name": "D",
},
]
).set_index(["road_id", "start_m", "end_m"])
class TestTopSurface:
class TestBuildTopSurface:
def test_build_top_surface(self, original):
index_names = ["road_id", "start_m", "end_m"]
custom = pd.DataFrame.from_records(
[
{
"road_id": 100,
"start_m": 0,
"end_m": 30,
"surface_date": date(2020, 1, 2),
"name": "1",
},
{
"road_id": 100,
"start_m": 20,
"end_m": 30,
"surface_date": date(2019, 12, 1),
"name": "2",
},
{
"road_id": 100,
"start_m": 60,
"end_m": 100,
"surface_date": date(2020, 1, 5),
"name": "3",
},
{
"road_id": 100,
"start_m": 190,
"end_m": 220,
"surface_date": date(2020, 1, 10),
"name": "4",
},
{
"road_id": 100,
"start_m": 70,
"end_m": 90,
"surface_date": date(2020, 1, 5),
"name": "5",
},
]
)
new = build_top_surface([original, custom])
expected = (
pd.DataFrame.from_records(
[
{
"road_id": 100,
"start_m": 0,
"end_m": 30,
"surface_date": Timestamp(2020, 1, 2),
"name": "1",
},
{
"road_id": 100,
"start_m": 30,
"end_m": 60,
"surface_date": Timestamp(2020, 1, 1),
"name": "A",
},
{
"road_id": 100,
"start_m": 60,
"end_m": 70,
"surface_date": Timestamp(2020, 1, 5),
"name": "3",
},
{
"road_id": 100,
"start_m": 70,
"end_m": 90,
"surface_date": Timestamp(2020, 1, 5),
"name": "5",
},
{
"road_id": 100,
"start_m": 90,
"end_m": 100,
"surface_date": Timestamp(2020, 1, 5),
"name": "3",
},
{
"road_id": 100,
"start_m": 100,
"end_m": 190,
"surface_date": | Timestamp(2020, 1, 1) | pandas.Timestamp |
# coding: utf-8
# In[213]:
import numpy as np
import pandas as pd
import random
import csv
from sklearn.utils import shuffle
# In[214]:
same = pd.read_csv(r'C:\Users\<NAME>\Desktop\ASSIGNMENTS\ML\HumanObserved-Dataset\HumanObserved-Dataset\HumanObserved-Features-Data\same_pairs.csv',usecols=['img_id_A','img_id_B','target'])
# In[215]:
#same.head()
# In[216]:
diff = | pd.read_csv(r'C:\Users\<NAME>\Desktop\ASSIGNMENTS\ML\HumanObserved-Dataset\HumanObserved-Dataset\HumanObserved-Features-Data\diffn_pairs.csv') | pandas.read_csv |
import pandas as pd
def merge_cellphone_genes(cluster_counts: pd.DataFrame, genes_expanded: pd.DataFrame) -> pd.DataFrame:
"""
Merges cluster genes with CellPhoneDB values
"""
multidata_counts = pd.merge(cluster_counts, genes_expanded, left_index=True, right_on='ensembl')
return multidata_counts
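# Usage sketch (assumes, as the merge above implies, that cluster_counts is
# indexed by ensembl id and that genes_expanded carries an 'ensembl' column):
# counts = merge_cellphone_genes(cluster_counts, genes_expanded)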
def get_counts_proteins_of_complexes(cluster_counts: pd.DataFrame, multidatas: pd.DataFrame,
complex_composition):
cluster_clean = cluster_counts.drop(complex_composition.columns.values, errors='ignore', axis=1)
complex_components_data = pd.merge(multidatas, complex_composition, left_on='id_multidata',
right_on='complex_multidata_id')
complex_components_data = pd.merge(complex_components_data, cluster_clean, left_on='protein_multidata_id',
right_on='id_multidata', suffixes=['_complex', ''])
complex_components_data.rename(columns={'name_complex': 'complex_name'}, index=str, inplace=True)
if complex_components_data.empty:
return | pd.DataFrame() | pandas.DataFrame |
import logging
import re
import pandas as pd
from unidecode import unidecode
from comvest.utilities.io import files, read_from_db, write_result, read_result
from comvest.utilities.logging import progresslog, resultlog
pd.options.mode.chained_assignment = None # default='warn'
def validacao_curso(df, col, date):
cursos = df_cursos.loc[df_cursos['ano_vest'] == date]['cod_curso'].tolist()
    # Codes that do not appear in the course list are remapped to missing
df[col].fillna(-1, inplace=True)
df[col] = df[col].map(lambda cod: int(cod) if int(cod) in cursos else '')
df[col] = pd.to_numeric(df[col], errors='coerce').astype('Int64')
return df
# Function to concatenate day, month and year
def data_nasc(row, df):
if ('DATA_NASC' in df.columns) or ('DAT_NASC' in df.columns) or ('DTNASC' in df.columns):
if 'DATA_NASC' in df.columns:
data = row['DATA_NASC']
elif 'DAT_NASC' in df.columns:
data = row['DAT_NASC']
else:
data = row['DTNASC']
data = str(data).split('.')[0]
if data == 'nan': return ('')
if len(data) <= 6:
data = data[:-2] + '19' + data[-2:]
ano = data[-4:]
mes = data[-6:-4]
dia = data.replace(data[-6:], '')
if len(data) < 8:
dia = '0' + dia
res = dia + mes + ano
elif all(x in df.columns for x in ('DIA','MES','ANO')):
dia = str(row['DIA']).zfill(2)
mes = str(row['MES']).zfill(2)
ano = str(row['ANO'])
if len(ano) < 4:
ano = '19' + ano
res = "{0}{1}{2}".format(dia, mes, ano)
else:
        # Document has no column(s) with the date of birth
res = ''
return res
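# data_nasc returns the birth date as a 'DDMMYYYY' string; e.g. DIA=5, MES=7,
# ANO=87 yields '05071987' (two-digit years are assumed to be 19xx).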
def tratar_inscricao(df):
    # Check the application number under its different column-name variants and strip the trailing '\.0' from the string
if 'INSC' in df.columns:
df['INSC'] = df['INSC'].astype("string").replace('\.0', '', regex=True)
elif 'INSC_CAND' in df.columns:
df['INSC'] = df['INSC_CAND'].astype("string").replace('\.0', '', regex=True)
elif 'INSC_cand' in df.columns:
df['INSC'] = df['INSC_cand'].astype("string").replace('\.0', '', regex=True)
elif 'INSCRICAO' in df.columns:
df['INSC'] = df['INSCRICAO'].astype("string").replace('\.0', '', regex=True)
df['INSC'] = pd.to_numeric(df['INSC'], errors='coerce', downcast='integer').astype('Int64')
return df
def tratar_CPF(df):
    # Check whether the CPF column exists
if 'CPF' in df.columns:
df['CPF'] = df['CPF'].map(lambda cpf: str(cpf).zfill(11))
else:
df.insert(loc=1, column='CPF', value='-')
return df
def tratar_doc(df):
if any(col in df.columns for col in {'RG','DOC3'}):
df.rename({'RG':'DOC','DOC3':'DOC'}, axis=1, inplace=True)
df['DOC'] = df['DOC'].str.replace(' ','')
return df
def tratar_nome(df):
    # If the name is given by NOME_CAND or NOMEOFIC, rename the column to NOME
if 'NOME_CAND' in df.columns:
df.rename({'NOME_CAND': 'NOME'}, axis=1, inplace=True)
elif 'NOMEOFIC' in df.columns:
df.rename({'NOMEOFIC': 'NOME'}, axis=1, inplace=True)
elif 'NOME_cand' in df.columns:
df.rename({'NOME_cand': 'NOME'}, axis=1, inplace=True)
return df
def tratar_nome_pai(df):
if 'PAI' in df.columns:
df.rename({'PAI': 'NOME_PAI'}, axis=1, inplace=True)
return df
def tratar_nome_mae(df):
if 'MAE' in df.columns:
df.rename({'MAE': 'NOME_MAE'}, axis=1, inplace=True)
return df
def tratar_nacionalidade(df):
for col in df.columns:
if col in {'NACIO','NACION','NACIONALID','NACIONALIDADE'}:
df.rename({col: 'NACIONALIDADE'}, axis=1, inplace=True)
df['NACIONALIDADE'] = pd.to_numeric(df['NACIONALIDADE'], errors='coerce', downcast='integer').astype('Int64')
df['NACIONALIDADE'].replace(0, pd.NA, inplace=True)
return df
return df
def tratar_mun_nasc(df):
for col in df.columns:
if col in {'MUNICIPIO_NASC','MU_NASC','MUNIC_NASC','CIDNASC','CIDNAS'}:
df.rename({col: 'MUN_NASC'}, axis=1, inplace=True)
df['MUN_NASC'] = df['MUN_NASC'].map(lambda mun: unidecode(str(mun)).upper() if str(mun) != '-' else '')
return df
return df
def tratar_uf_nasc(df):
for col in df.columns:
if col in {'UFNASC','EST_NASC','UFNAS'}:
df.rename({col: 'UF_NASC'}, axis=1, inplace=True)
df['UF_NASC'] = df['UF_NASC'].map(lambda uf: unidecode(str(uf)).upper() if str(uf) != '-' else '')
return df
return df
def tratar_cep(df):
for col in df.columns:
if col in {'CEP','CEPEND','CEP_END','CEP3'}:
df.rename({col: 'CEP_RESID'}, axis=1, inplace=True)
fill = df['CEP_RESID'].map(lambda cep: len(re.sub('\D','',str(cep)))).max()
fill = 8 if fill > 8 else fill
df['CEP_RESID'] = df['CEP_RESID'].map(lambda cep: re.sub('\D','',str(cep)).zfill(fill))
return df
if 'CEP_RESID' not in df.columns:
df['CEP_RESID'] = ''
return df
def tratar_mun_resid(df):
for col in df.columns:
if col in {'MUEND','MUNIC_END','MUNICIPIO','CID','CIDEND'}:
df.rename({col: 'MUN_RESID'}, axis=1, inplace=True)
df['MUN_RESID'] = df['MUN_RESID'].map(lambda mun: unidecode(str(mun)).upper())
return df
return df
def tratar_uf_resid(df):
    # If the UF (state) of residence is given by UFEND, UF_END or ESTADO, rename the column to UF_RESID
if 'UFEND' in df.columns:
df.rename({'UFEND': 'UF_RESID'}, axis=1, inplace=True)
elif 'UF_END' in df.columns:
df.rename({'UF_END': 'UF_RESID'}, axis=1, inplace=True)
elif 'ESTADO' in df.columns:
df.rename({'ESTADO': 'UF_RESID'}, axis=1, inplace=True)
elif 'EST' in df.columns:
df.rename({'EST': 'UF_RESID'}, axis=1, inplace=True)
return df
def tratar_opvest(df,date,path):
    # Check the course-option columns in the vestibular (entrance exam)
for col in df.columns:
if any(opc in col for opc in {'OPCAO1','OP1','OPCAO1OR'}):
df.rename({col: 'OPCAO1'}, axis=1, inplace=True)
df = validacao_curso(df, 'OPCAO1', date)
if any(opc in col for opc in {'OPCAO2','OP2','OPCAO2OR'}):
df.rename({col: 'OPCAO2'}, axis=1, inplace=True)
df = validacao_curso(df, 'OPCAO2', date)
if any(opc in col for opc in {'OPCAO3','OP3'}):
df.rename({col: 'OPCAO3'}, axis=1, inplace=True)
df = validacao_curso(df, 'OPCAO3', date)
    # Option 1 = 22 (Music) - must be remapped to the code of the corresponding emphasis, taken from the 'perfil' sheet
if (date == 2001) or (date == 2002) or (date == 2003):
emphasis = pd.read_excel(path, sheet_name='perfil', usecols=['insc_cand','opcao1'], dtype=str)
emphasis['insc_cand'] = | pd.to_numeric(emphasis['insc_cand'], errors='coerce', downcast='integer') | pandas.to_numeric |
# ---
# jupyter:
# jupytext:
# cell_metadata_filter: all,-execution,-papermill,-trusted
# formats: ipynb,py//py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.7.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown] tags=[]
# # Description
# %% [markdown] tags=[]
# See description in notebook `10_00-spectral_clustering...`.
# %% [markdown] tags=[]
# # Environment variables
# %% tags=[]
from IPython.display import display
import conf
N_JOBS = conf.GENERAL["N_JOBS"]
display(N_JOBS)
# %% tags=[]
# %env MKL_NUM_THREADS=$N_JOBS
# %env OPEN_BLAS_NUM_THREADS=$N_JOBS
# %env NUMEXPR_NUM_THREADS=$N_JOBS
# %env OMP_NUM_THREADS=$N_JOBS
# %% [markdown] tags=[]
# # Modules loading
# %% tags=[]
# %load_ext autoreload
# %autoreload 2
# %% tags=[]
from pathlib import Path
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from utils import generate_result_set_name
# %% [markdown] tags=[]
# # Settings
# %% tags=[]
INITIAL_RANDOM_STATE = 100000
# %% tags=[]
CLUSTERING_METHOD_NAME = "DeltaSpectralClustering"
# %% tags=[]
# output dir for this notebook
CONSENSUS_CLUSTERING_DIR = Path(
conf.RESULTS["CLUSTERING_DIR"], "consensus_clustering"
).resolve()
display(CONSENSUS_CLUSTERING_DIR)
# %% [markdown] tags=[]
# # Load data
# %% tags=[]
INPUT_SUBSET = "pca"
# %% tags=[]
INPUT_STEM = "z_score_std-projection-smultixcan-efo_partial-mashr-zscores"
# %% tags=[]
DR_OPTIONS = {
"n_components": 50,
"svd_solver": "full",
"random_state": 0,
}
# %% tags=[]
input_filepath = Path(
conf.RESULTS["DATA_TRANSFORMATIONS_DIR"],
INPUT_SUBSET,
generate_result_set_name(
DR_OPTIONS, prefix=f"{INPUT_SUBSET}-{INPUT_STEM}-", suffix=".pkl"
),
).resolve()
display(input_filepath)
assert input_filepath.exists(), "Input file does not exist"
input_filepath_stem = input_filepath.stem
display(input_filepath_stem)
# %% tags=[]
data = pd.read_pickle(input_filepath)
# %% tags=[]
data.shape
# %% tags=[]
data.head()
# %% tags=[]
traits = data.index.tolist()
# %% tags=[]
len(traits)
# %% [markdown] tags=[]
# # Load coassociation matrix (ensemble)
# %% tags=[]
input_file = Path(CONSENSUS_CLUSTERING_DIR, "ensemble_coassoc_matrix.npy").resolve()
display(input_file)
# %% tags=[]
coassoc_matrix = np.load(input_file)
# %% tags=[]
coassoc_matrix = pd.DataFrame(
data=coassoc_matrix,
index=traits,
columns=traits,
)
# %% tags=[]
coassoc_matrix.shape
# %% tags=[]
coassoc_matrix.head()
# %% tags=[]
dist_matrix = coassoc_matrix
# %% [markdown] tags=[]
# # Clustering
# %% tags=[]
from sklearn.metrics import (
calinski_harabasz_score,
davies_bouldin_score,
)
# %% [markdown] tags=[]
# ## More exhaustive test
# %% [markdown] tags=[]
# Here I run some tests across several `k` and `delta` values; then I check how the results perform under different clustering quality measures.
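# As a reading guide: Calinski-Harabasz is better when higher, while Davies-Bouldin is better when lower, so the two measures are interpreted in opposite directions.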
# %% tags=[]
CLUSTERING_OPTIONS = {}
CLUSTERING_OPTIONS["K_RANGE"] = [
2,
4,
6,
8,
10,
12,
14,
16,
18,
20,
25,
30,
35,
40,
50,
60,
]
CLUSTERING_OPTIONS["N_REPS_PER_K"] = 5
CLUSTERING_OPTIONS["KMEANS_N_INIT"] = 10
CLUSTERING_OPTIONS["DELTAS"] = [
5.00,
2.00,
1.00,
0.90,
0.75,
0.50,
0.30,
0.25,
0.20,
]
display(CLUSTERING_OPTIONS)
# %% [markdown] tags=[]
# ### Generate ensemble
# %% tags=[]
import tempfile
# %% tags=[]
ensemble_folder = Path(
tempfile.gettempdir(),
"pre_cluster_analysis",
CLUSTERING_METHOD_NAME,
).resolve()
ensemble_folder.mkdir(parents=True, exist_ok=True)
# %% tags=[]
ensemble_file = Path(
ensemble_folder,
generate_result_set_name(CLUSTERING_OPTIONS, prefix="ensemble-", suffix=".pkl"),
)
display(ensemble_file)
# %% tags=[]
assert ensemble_file.exists(), "Ensemble file does not exist"
# %% tags=[]
ensemble = pd.read_pickle(ensemble_file)
# %% tags=[]
ensemble.shape
# %% tags=[]
ensemble.head()
# %% [markdown] tags=[]
# ### Add clustering quality measures
# %% tags=[]
ensemble = ensemble.assign(
# si_score=ensemble["partition"].apply(lambda x: silhouette_score(dist_matrix, x, metric="precomputed")),
ch_score=ensemble["partition"].apply(lambda x: calinski_harabasz_score(data, x)),
db_score=ensemble["partition"].apply(lambda x: davies_bouldin_score(data, x)),
)
# %% tags=[]
ensemble.shape
# %% tags=[]
ensemble.head()
# %% [markdown] tags=[]
# # Cluster quality
# %% tags=[]
with | pd.option_context("display.max_rows", None, "display.max_columns", None) | pandas.option_context |
import argparse
import textwrap
import os
import pandas as pd
from glob import glob
from reframed import Environment, ModelCache
from .designmc import design
def main():
parser = argparse.ArgumentParser(description="Design microbial communities.")
parser.add_argument('models', metavar='MODELS', nargs='+',
help=textwrap.dedent(
"""
Multiple single-species models (one or more files).
You can use wild-cards, for example: models/*.xml, and optionally protect with quotes to avoid automatic bash
expansion (this will be faster for long lists): "models/*.xml".
"""
))
parser.add_argument('-t', '--target', dest="target",required=True, help="Target compound to maximize (example: etoh, succ, trp__L).")
parser.add_argument('-s', '--species', dest="species", type=int, default=1, help="Maximum number of species (default: 1).")
parser.add_argument('-n', '--iters', dest="iters", type=int, default=100, help="Maximum number of iterations (default: 100).")
parser.add_argument('-g', '--growth', dest="growth", type=float, default=0.1, help="Target community growth rate (default: 0.1 h-1).")
parser.add_argument('-m', '--media', dest="media", required=True, help="Run for given media (comma-separated).")
parser.add_argument('-d', '--mediadb', dest="mediadb", required=True, help="Media database file")
parser.add_argument('-o', '--output', dest="output", default='out.tsv', help="Output filename.")
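    # Example invocation (hypothetical entry-point and file names):
    #   designmc "models/*.xml" -t etoh -s 2 -m M9,LB -d media_db.tsv -o designs.tsv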
args = parser.parse_args()
## load models
models = args.models
if len(models) == 1 and '*' in models[0]:
models = glob(models[0])
if len(models) == 0:
raise IOError(f"No files found: {models}.")
species = [extract_id_from_filepath(model) for model in models]
cache = ModelCache(species, models)
## load media
try:
mediadb = load_media_db(args.mediadb)
except:
raise IOError(f"Unable to load media db file: {args.mediadb}.")
media = args.media.split(",")
dfs = []
target = f"R_EX_M_{args.target}_e"
for medium in media:
if medium not in mediadb:
raise RuntimeError(f"Medium {medium} not in database.")
print(f"Optimizing {args.target} production in medium {medium}.")
env = Environment.from_compounds(mediadb[medium], fmt_func=lambda x: f"R_EX_M_{x}_e")
df = design(species, target, env, size=args.species, iters=args.iters, growth=args.growth, modelcache=cache)
df["medium"] = medium
df["target"] = args.target
dfs.append(df)
dfs = | pd.concat(dfs) | pandas.concat |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from scipy.special import boxcox1p
from scipy.stats import norm, skew
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.metrics import mean_squared_error
# import xgboost as xgb
# import lightgbm as lgb
def get_score(prediction, labels):
    """
    Prints R2 and RMSE scores
    """
    print('R2: {}'.format(r2_score(prediction, labels)))
    print('RMSE: {}'.format(np.sqrt(mean_squared_error(prediction, labels))))
def train_test(estimator, x_trn, x_tst, y_trn, y_tst):
"""
Shows scores for train and validation sets
"""
prediction_train = estimator.predict(x_trn)
print(estimator)
get_score(prediction_train, y_trn)
prediction_test = estimator.predict(x_tst)
print("Test")
get_score(prediction_test, y_tst)
def setting():
color = sns.color_palette()
sns.set_style('darkgrid')
pd.set_option('display.float_format', lambda x: '{:.2f}'.format(x))
def null_percentage(all_data):
all_data_na = (all_data.isnull().sum() / len(all_data)) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)[:30]
missing_data = | pd.DataFrame({'Missing Ratio': all_data_na}) | pandas.DataFrame |
import __main__ as main
import sys
import geopandas as gpd
import pandas as pd
import numpy as np
if not hasattr(main, '__file__'):
argv = ['code', 'data/processed/geo/tiles.shp',
'data/processed/census/oa_tile_reference.csv',
'data/raw/census_lookups/engwal_OA_lsoa.csv',
'data/raw/census_lookups/OA_to_DZ.csv',
'data/raw/census/NI_SA_Centroids.shp',
'data/raw/ethnicity_data/bulk.csv',
'data/raw/ethnicity_data/KS201SC.csv',
'data/raw/ethnicity_data/DT201NI (s).csv',
'data/raw/census/Eng_Wal_OA_Mid_Pop.csv',
'data/raw/census/simd2020_withinds.csv',
'data/raw/census/NI_Mid_Pop.csv',
'data/processed/census/quadkey_mean_perc_white.csv']
else:
argv = sys.argv
#%%
tiles = gpd.read_file(argv[1])
tiles.crs = 4326
#%%
oa_tile_lookup = pd.read_csv(argv[2])
#%%
oa_lus = {'england': pd.read_csv(argv[3]),
'scotland': pd.read_csv(argv[4]),
'ni': gpd.read_file(argv[5])}
#%%
oa_lus['ni'] = oa_lus['ni'].loc[:, ['SA2011', 'SOA2011']]
#%%
eth_data = {'england': pd.read_csv(argv[6]),
'scotland': pd.read_csv(argv[7]),
'ni': pd.read_csv(argv[8])}
#%%
scotland_imd = pd.read_csv(argv[10])
#%%
#check that the admin code is in the lookups
'''
england: lsoa level
Scotland: data zone level
NI: SOA level
'''
pop_data = {'england': pd.read_csv(argv[9]),
'scotland': pd.read_csv(argv[10]),
'ni': pd.read_csv(argv[11])}
# Handle scotland population peculiarities
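# Scotland's totals are published per data zone (DZ), so each DZ population is
# split evenly across its output areas (OAs) before joining on OA codes.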
scotland_n_oas = oa_lus['scotland'].groupby('DataZone2011Code').count().reset_index()[['DataZone2011Code', 'OutputArea2011Code']].rename(columns = {'DataZone2011Code':'DZ', 'OutputArea2011Code':'n_oas'})
scotland_pop = pd.merge(scotland_imd, scotland_n_oas)[['DZ', 'Total_population', 'n_oas']]
scotland_pop = pd.merge(oa_lus['scotland'][['OutputArea2011Code', 'DataZone2011Code']].rename(columns={'OutputArea2011Code':'OA', 'DataZone2011Code':'DZ'}), scotland_pop)
scotland_pop['Total_population'] = scotland_pop['Total_population'] / scotland_pop['n_oas']
scotland_pop = scotland_pop.drop(columns = ['n_oas', 'DZ']).rename(columns = {'Total_population':'pop'})
'''
England
'''
eth_data['england'] = pd.melt(eth_data['england'], id_vars = ['geography code'], value_vars = eth_data['england'].columns[3:])
eth_data['england']['variable'] = [x.split('.')[0] for x in eth_data['england']['variable']]
eth_data['england']['white'] = [x == 'White' for x in eth_data['england']['variable']]
eth_data['england']['value'] = [str(x).replace(',', '') for x in eth_data['england']['value']]
eth_data['england']['value'] = pd.to_numeric(eth_data['england']['value'], errors = 'coerce')
eth_data['england'] = eth_data['england'][['geography code', 'white', 'value']].groupby(['geography code', 'white']).sum().reset_index()
eth_data['england'] = eth_data['england'].pivot(index = 'geography code', columns = 'white').reset_index()
eth_data['england'].columns = eth_data['england'].columns.droplevel()
eth_data['england']['perc_white'] = eth_data['england'][True] / (eth_data['england'][True] + eth_data['england'][False])
'''Scotland'''
eth_data['scotland'] = pd.melt(eth_data['scotland'], id_vars = ['Area'], value_vars = eth_data['scotland'].columns[2:])
eth_data['scotland']['variable'] = [x.split('.')[0] for x in eth_data['scotland']['variable']]
eth_data['scotland']['white'] = [x == 'White' for x in eth_data['scotland']['variable']]
eth_data['scotland']['value'] = [str(x).replace('-', '0') for x in eth_data['scotland']['value']]
eth_data['scotland']['value'] = pd.to_numeric(eth_data['scotland']['value'], errors = 'coerce')
eth_data['scotland'] = eth_data['scotland'][['Area', 'white', 'value']].groupby(['Area', 'white']).sum().reset_index()
eth_data['scotland'] = eth_data['scotland'].pivot(index = 'Area', columns = 'white').reset_index()
eth_data['scotland'].columns = eth_data['scotland'].columns.droplevel()
eth_data['scotland']['perc_white'] = eth_data['scotland'][True] / (eth_data['scotland'][True] + eth_data['scotland'][False])
'''NI'''
eth_data['ni'] = pd.melt(eth_data['ni'], id_vars = ['Code'], value_vars = eth_data['ni'].columns[3:])
eth_data['ni']['variable'] = [x.split('.')[0] for x in eth_data['ni']['variable']]
eth_data['ni']['white'] = [x == 'Ethnic group: White' for x in eth_data['ni']['variable']]
eth_data['ni']['value'] = [str(x).replace(',', '') for x in eth_data['ni']['value']]
eth_data['ni']['value'] = pd.to_numeric(eth_data['ni']['value'], errors = 'coerce')
eth_data['ni'] = eth_data['ni'][['Code', 'white', 'value']].groupby(['Code', 'white']).sum().reset_index()
eth_data['ni'] = eth_data['ni'].pivot(index = 'Code', columns = 'white').reset_index()
eth_data['ni'].columns = eth_data['ni'].columns.droplevel()
eth_data['ni']['perc_white'] = eth_data['ni'][True] / (eth_data['ni'][True] + eth_data['ni'][False])
eth_data['england'].columns = ['Code', False, True, 'perc_white']
eth_data['scotland'].columns = ['Area', False, True, 'perc_white']
eth_data['ni'].columns = ['Code', False, True, 'perc_white']
ew_eth = pd.merge(oa_lus['england'], eth_data['england'], left_on='LSOA11CD', right_on='Code', how = 'left')
scotand_eth = pd.merge(oa_lus['scotland'], eth_data['scotland'], left_on='DataZone2011Code', right_on='Area', how = 'left')
ni_eth = | pd.merge(oa_lus['ni'], eth_data['ni'], left_on='SA2011', right_on='Code', how = 'left') | pandas.merge |
import random
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
NaT,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestDataFrameSortValues:
def test_sort_values(self):
frame = DataFrame(
[[1, 1, 2], [3, 1, 0], [4, 5, 6]], index=[1, 2, 3], columns=list("ABC")
)
# by column (axis=0)
sorted_df = frame.sort_values(by="A")
indexer = frame["A"].argsort().values
expected = frame.loc[frame.index[indexer]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by="A", ascending=False)
indexer = indexer[::-1]
expected = frame.loc[frame.index[indexer]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by="A", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
# GH4839
sorted_df = frame.sort_values(by=["A"], ascending=[False])
tm.assert_frame_equal(sorted_df, expected)
# multiple bys
sorted_df = frame.sort_values(by=["B", "C"])
expected = frame.loc[[2, 1, 3]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=["B", "C"], ascending=False)
tm.assert_frame_equal(sorted_df, expected[::-1])
sorted_df = frame.sort_values(by=["B", "A"], ascending=[True, False])
tm.assert_frame_equal(sorted_df, expected)
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
frame.sort_values(by=["A", "B"], axis=2, inplace=True)
# by row (axis=1): GH#10806
sorted_df = frame.sort_values(by=3, axis=1)
expected = frame
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=3, axis=1, ascending=False)
expected = frame.reindex(columns=["C", "B", "A"])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 2], axis="columns")
expected = frame.reindex(columns=["B", "A", "C"])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=[True, False])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False)
expected = frame.reindex(columns=["C", "B", "A"])
tm.assert_frame_equal(sorted_df, expected)
msg = r"Length of ascending \(5\) != length of by \(2\)"
with pytest.raises(ValueError, match=msg):
frame.sort_values(by=["A", "B"], axis=0, ascending=[True] * 5)
def test_sort_values_by_empty_list(self):
# https://github.com/pandas-dev/pandas/issues/40258
expected = DataFrame({"a": [1, 4, 2, 5, 3, 6]})
result = expected.sort_values(by=[])
tm.assert_frame_equal(result, expected)
assert result is not expected
def test_sort_values_inplace(self):
frame = DataFrame(
np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"]
)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", inplace=True)
assert return_value is None
expected = frame.sort_values(by="A")
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by=1, axis=1, inplace=True)
assert return_value is None
expected = frame.sort_values(by=1, axis=1)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", ascending=False, inplace=True)
assert return_value is None
expected = frame.sort_values(by="A", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by=["A", "B"], ascending=False, inplace=True
)
assert return_value is None
expected = frame.sort_values(by=["A", "B"], ascending=False)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_multicolumn(self):
A = np.arange(5).repeat(20)
B = np.tile(np.arange(5), 20)
random.shuffle(A)
random.shuffle(B)
frame = DataFrame({"A": A, "B": B, "C": np.random.randn(100)})
result = frame.sort_values(by=["A", "B"])
indexer = np.lexsort((frame["B"], frame["A"]))
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
result = frame.sort_values(by=["A", "B"], ascending=False)
indexer = np.lexsort(
(frame["B"].rank(ascending=False), frame["A"].rank(ascending=False))
)
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
result = frame.sort_values(by=["B", "A"])
indexer = np.lexsort((frame["A"], frame["B"]))
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
def test_sort_values_multicolumn_uint64(self):
# GH#9918
# uint64 multicolumn sort
df = DataFrame(
{
"a": pd.Series([18446637057563306014, 1162265347240853609]),
"b": pd.Series([1, 2]),
}
)
df["a"] = df["a"].astype(np.uint64)
result = df.sort_values(["a", "b"])
expected = DataFrame(
{
"a": pd.Series([18446637057563306014, 1162265347240853609]),
"b": pd.Series([1, 2]),
},
index=pd.Index([1, 0]),
)
tm.assert_frame_equal(result, expected)
def test_sort_values_nan(self):
# GH#3917
df = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]}
)
# sort one column only
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A"], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{"A": [np.nan, 8, 6, 4, 2, 1, 1], "B": [5, 4, 5, 5, np.nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3],
)
sorted_df = df.sort_values(["A"], na_position="first", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
expected = df.reindex(columns=["B", "A"])
sorted_df = df.sort_values(by=1, axis=1, na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='last', order
expected = DataFrame(
{"A": [1, 1, 2, 4, 6, 8, np.nan], "B": [2, 9, np.nan, 5, 5, 4, 5]},
index=[3, 0, 1, 6, 4, 5, 2],
)
sorted_df = df.sort_values(["A", "B"])
tm.assert_frame_equal(sorted_df, expected)
# na_position='first', order
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 2, 9, np.nan, 5, 5, 4]},
index=[2, 3, 0, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A", "B"], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='first', not order
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A", "B"], ascending=[1, 0], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='last', not order
expected = DataFrame(
{"A": [8, 6, 4, 2, 1, 1, np.nan], "B": [4, 5, 5, np.nan, 2, 9, 5]},
index=[5, 4, 6, 1, 3, 0, 2],
)
sorted_df = df.sort_values(["A", "B"], ascending=[0, 1], na_position="last")
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_stable_descending_sort(self):
# GH#6399
df = DataFrame(
[[2, "first"], [2, "second"], [1, "a"], [1, "b"]],
columns=["sort_col", "order"],
)
sorted_df = df.sort_values(by="sort_col", kind="mergesort", ascending=False)
tm.assert_frame_equal(df, sorted_df)
@pytest.mark.parametrize(
"expected_idx_non_na, ascending",
[
[
[3, 4, 5, 0, 1, 8, 6, 9, 7, 10, 13, 14],
[True, True],
],
[
[0, 3, 4, 5, 1, 8, 6, 7, 10, 13, 14, 9],
[True, False],
],
[
[9, 7, 10, 13, 14, 6, 8, 1, 3, 4, 5, 0],
[False, True],
],
[
[7, 10, 13, 14, 9, 6, 8, 1, 0, 3, 4, 5],
[False, False],
],
],
)
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_sort_values_stable_multicolumn_sort(
self, expected_idx_non_na, ascending, na_position
):
# GH#38426 Clarify sort_values with mult. columns / labels is stable
df = DataFrame(
{
"A": [1, 2, np.nan, 1, 1, 1, 6, 8, 4, 8, 8, np.nan, np.nan, 8, 8],
"B": [9, np.nan, 5, 2, 2, 2, 5, 4, 5, 3, 4, np.nan, np.nan, 4, 4],
}
)
# All rows with NaN in col "B" only have unique values in "A", therefore,
# only the rows with NaNs in "A" have to be treated individually:
expected_idx = (
[11, 12, 2] + expected_idx_non_na
if na_position == "first"
else expected_idx_non_na + [2, 11, 12]
)
expected = df.take(expected_idx)
sorted_df = df.sort_values(
["A", "B"], ascending=ascending, na_position=na_position
)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_stable_categorical(self):
# GH#16793
df = DataFrame({"x": Categorical(np.repeat([1, 2, 3, 4], 5), ordered=True)})
expected = df.copy()
sorted_df = df.sort_values("x", kind="mergesort")
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_datetimes(self):
# GH#3461, argsort / lexsort differences for a datetime column
df = DataFrame(
["a", "a", "a", "b", "c", "d", "e", "f", "g"],
columns=["A"],
index=date_range("20130101", periods=9),
)
dts = [
Timestamp(x)
for x in [
"2004-02-11",
"2004-01-21",
"2004-01-26",
"2005-09-20",
"2010-10-04",
"2009-05-12",
"2008-11-12",
"2010-09-28",
"2010-09-28",
]
]
df["B"] = dts[::2] + dts[1::2]
df["C"] = 2.0
df["A1"] = 3.0
df1 = df.sort_values(by="A")
df2 = df.sort_values(by=["A"])
tm.assert_frame_equal(df1, df2)
df1 = df.sort_values(by="B")
df2 = df.sort_values(by=["B"])
tm.assert_frame_equal(df1, df2)
df1 = df.sort_values(by="B")
df2 = df.sort_values(by=["C", "B"])
tm.assert_frame_equal(df1, df2)
def test_sort_values_frame_column_inplace_sort_exception(self, float_frame):
s = float_frame["A"]
with pytest.raises(ValueError, match="This Series is a view"):
s.sort_values(inplace=True)
cp = s.copy()
cp.sort_values() # it works!
def test_sort_values_nat_values_in_int_column(self):
# GH#14922: "sorting with large float and multiple columns incorrect"
# cause was that the int64 value NaT was considered as "na". Which is
# only correct for datetime64 columns.
int_values = (2, int(NaT.value))
float_values = (2.0, -1.797693e308)
df = DataFrame(
{"int": int_values, "float": float_values}, columns=["int", "float"]
)
df_reversed = DataFrame(
{"int": int_values[::-1], "float": float_values[::-1]},
columns=["int", "float"],
index=[1, 0],
)
# NaT is not a "na" for int64 columns, so na_position must not
# influence the result:
df_sorted = df.sort_values(["int", "float"], na_position="last")
tm.assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["int", "float"], na_position="first")
tm.assert_frame_equal(df_sorted, df_reversed)
# reverse sorting order
df_sorted = df.sort_values(["int", "float"], ascending=False)
tm.assert_frame_equal(df_sorted, df)
# and now check if NaT is still considered as "na" for datetime64
# columns:
df = DataFrame(
{"datetime": [Timestamp("2016-01-01"), NaT], "float": float_values},
columns=["datetime", "float"],
)
df_reversed = DataFrame(
{"datetime": [NaT, Timestamp("2016-01-01")], "float": float_values[::-1]},
columns=["datetime", "float"],
index=[1, 0],
)
df_sorted = df.sort_values(["datetime", "float"], na_position="first")
tm.assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["datetime", "float"], na_position="last")
tm.assert_frame_equal(df_sorted, df)
# Ascending should not affect the results.
df_sorted = df.sort_values(["datetime", "float"], ascending=False)
tm.assert_frame_equal(df_sorted, df)
def test_sort_nat(self):
# GH 16836
d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]]
d2 = [
Timestamp(x)
for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"]
]
df = DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3])
d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]]
d4 = [
Timestamp(x)
for x in ["2014-01-01", "2015-01-01", "2017-01-01", "2016-01-01"]
]
expected = DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2])
sorted_df = df.sort_values(by=["a", "b"])
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_na_position_with_categories(self):
# GH#22556
# Positioning missing value properly when column is Categorical.
categories = ["A", "B", "C"]
category_indices = [0, 2, 4]
list_of_nans = [np.nan, np.nan]
na_indices = [1, 3]
na_position_first = "first"
na_position_last = "last"
column_name = "c"
reversed_categories = sorted(categories, reverse=True)
reversed_category_indices = sorted(category_indices, reverse=True)
reversed_na_indices = sorted(na_indices)
df = DataFrame(
{
column_name: Categorical(
["A", np.nan, "B", np.nan, "C"], categories=categories, ordered=True
)
}
)
# sort ascending with na first
result = df.sort_values(
by=column_name, ascending=True, na_position=na_position_first
)
expected = DataFrame(
{
column_name: Categorical(
list_of_nans + categories, categories=categories, ordered=True
)
},
index=na_indices + category_indices,
)
tm.assert_frame_equal(result, expected)
# sort ascending with na last
result = df.sort_values(
by=column_name, ascending=True, na_position=na_position_last
)
expected = DataFrame(
{
column_name: Categorical(
categories + list_of_nans, categories=categories, ordered=True
)
},
index=category_indices + na_indices,
)
tm.assert_frame_equal(result, expected)
# sort descending with na first
result = df.sort_values(
by=column_name, ascending=False, na_position=na_position_first
)
expected = DataFrame(
{
column_name: Categorical(
list_of_nans + reversed_categories,
categories=categories,
ordered=True,
)
},
index=reversed_na_indices + reversed_category_indices,
)
tm.assert_frame_equal(result, expected)
# sort descending with na last
result = df.sort_values(
by=column_name, ascending=False, na_position=na_position_last
)
expected = DataFrame(
{
column_name: Categorical(
reversed_categories + list_of_nans,
categories=categories,
ordered=True,
)
},
index=reversed_category_indices + reversed_na_indices,
)
tm.assert_frame_equal(result, expected)
def test_sort_values_nat(self):
# GH#16836
d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]]
d2 = [
Timestamp(x)
for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"]
]
df = DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3])
d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]]
d4 = [
Timestamp(x)
for x in ["2014-01-01", "2015-01-01", "2017-01-01", "2016-01-01"]
]
expected = DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2])
sorted_df = df.sort_values(by=["a", "b"])
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_na_position_with_categories_raises(self):
df = DataFrame(
{
"c": Categorical(
["A", np.nan, "B", np.nan, "C"],
categories=["A", "B", "C"],
ordered=True,
)
}
)
with pytest.raises(ValueError, match="invalid na_position: bad_position"):
df.sort_values(by="c", ascending=False, na_position="bad_position")
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"original_dict, sorted_dict, ignore_index, output_index",
[
({"A": [1, 2, 3]}, {"A": [3, 2, 1]}, True, [0, 1, 2]),
({"A": [1, 2, 3]}, {"A": [3, 2, 1]}, False, [2, 1, 0]),
(
{"A": [1, 2, 3], "B": [2, 3, 4]},
{"A": [3, 2, 1], "B": [4, 3, 2]},
True,
[0, 1, 2],
),
(
{"A": [1, 2, 3], "B": [2, 3, 4]},
{"A": [3, 2, 1], "B": [4, 3, 2]},
False,
[2, 1, 0],
),
],
)
def test_sort_values_ignore_index(
self, inplace, original_dict, sorted_dict, ignore_index, output_index
):
# GH 30114
df = DataFrame(original_dict)
expected = DataFrame(sorted_dict, index=output_index)
kwargs = {"ignore_index": ignore_index, "inplace": inplace}
if inplace:
result_df = df.copy()
result_df.sort_values("A", ascending=False, **kwargs)
else:
result_df = df.sort_values("A", ascending=False, **kwargs)
tm.assert_frame_equal(result_df, expected)
tm.assert_frame_equal(df, DataFrame(original_dict))
def test_sort_values_nat_na_position_default(self):
# GH 13230
expected = DataFrame(
{
"A": [1, 2, 3, 4, 4],
"date": pd.DatetimeIndex(
[
"2010-01-01 09:00:00",
"2010-01-01 09:00:01",
"2010-01-01 09:00:02",
"2010-01-01 09:00:03",
"NaT",
]
),
}
)
result = expected.sort_values(["A", "date"])
tm.assert_frame_equal(result, expected)
def test_sort_values_item_cache(self, using_array_manager):
# previous behavior incorrectly retained an invalid _item_cache entry
df = DataFrame(np.random.randn(4, 3), columns=["A", "B", "C"])
df["D"] = df["A"] * 2
ser = df["A"]
if not using_array_manager:
assert len(df._mgr.blocks) == 2
df.sort_values(by="A")
ser.values[0] = 99
assert df.iloc[0, 0] == df["A"][0]
def test_sort_values_reshaping(self):
# GH 39426
values = list(range(21))
expected = DataFrame([values], columns=values)
df = expected.sort_values(expected.index[0], axis=1, ignore_index=True)
tm.assert_frame_equal(df, expected)
class TestDataFrameSortKey: # test key sorting (issue 27237)
def test_sort_values_inplace_key(self, sort_by_key):
frame = DataFrame(
np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"]
)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", inplace=True, key=sort_by_key)
assert return_value is None
expected = frame.sort_values(by="A", key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by=1, axis=1, inplace=True, key=sort_by_key
)
assert return_value is None
expected = frame.sort_values(by=1, axis=1, key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by="A", ascending=False, inplace=True, key=sort_by_key
)
assert return_value is None
expected = frame.sort_values(by="A", ascending=False, key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(
by=["A", "B"], ascending=False, inplace=True, key=sort_by_key
)
expected = frame.sort_values(by=["A", "B"], ascending=False, key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_key(self):
df = DataFrame(np.array([0, 5, np.nan, 3, 2, np.nan]))
result = df.sort_values(0)
expected = df.iloc[[0, 4, 3, 1, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(0, key=lambda x: x + 5)
expected = df.iloc[[0, 4, 3, 1, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(0, key=lambda x: -x, ascending=False)
expected = df.iloc[[0, 4, 3, 1, 2, 5]]
tm.assert_frame_equal(result, expected)
def test_sort_values_by_key(self):
df = DataFrame(
{
"a": np.array([0, 3, np.nan, 3, 2, np.nan]),
"b": np.array([0, 2, np.nan, 5, 2, np.nan]),
}
)
result = df.sort_values("a", key=lambda x: -x)
expected = df.iloc[[1, 3, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by=["a", "b"], key=lambda x: -x)
expected = df.iloc[[3, 1, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by=["a", "b"], key=lambda x: -x, ascending=False)
expected = df.iloc[[0, 4, 1, 3, 2, 5]]
tm.assert_frame_equal(result, expected)
def test_sort_values_by_key_by_name(self):
df = DataFrame(
{
"a": np.array([0, 3, np.nan, 3, 2, np.nan]),
"b": np.array([0, 2, np.nan, 5, 2, np.nan]),
}
)
def key(col):
if col.name == "a":
return -col
else:
return col
result = df.sort_values(by="a", key=key)
expected = df.iloc[[1, 3, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by=["a"], key=key)
expected = df.iloc[[1, 3, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by="b", key=key)
expected = df.iloc[[0, 1, 4, 3, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by=["a", "b"], key=key)
expected = df.iloc[[1, 3, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
def test_sort_values_key_string(self):
df = DataFrame(np.array([["hello", "goodbye"], ["hello", "Hello"]]))
result = df.sort_values(1)
expected = df[::-1]
tm.assert_frame_equal(result, expected)
result = df.sort_values([0, 1], key=lambda col: col.str.lower())
tm.assert_frame_equal(result, df)
result = df.sort_values(
[0, 1], key=lambda col: col.str.lower(), ascending=False
)
expected = df.sort_values(1, key=lambda col: col.str.lower(), ascending=False)
tm.assert_frame_equal(result, expected)
def test_sort_values_key_empty(self, sort_by_key):
df = DataFrame(np.array([]))
df.sort_values(0, key=sort_by_key)
df.sort_index(key=sort_by_key)
def test_changes_length_raises(self):
df = DataFrame({"A": [1, 2, 3]})
with pytest.raises(ValueError, match="change the shape"):
df.sort_values("A", key=lambda x: x[:1])
def test_sort_values_key_axes(self):
df = DataFrame({0: ["Hello", "goodbye"], 1: [0, 1]})
result = df.sort_values(0, key=lambda col: col.str.lower())
expected = df[::-1]
tm.assert_frame_equal(result, expected)
result = df.sort_values(1, key=lambda col: -col)
expected = df[::-1]
tm.assert_frame_equal(result, expected)
def test_sort_values_key_dict_axis(self):
df = DataFrame({0: ["Hello", 0], 1: ["goodbye", 1]})
result = df.sort_values(0, key=lambda col: col.str.lower(), axis=1)
expected = df.loc[:, ::-1]
tm.assert_frame_equal(result, expected)
result = df.sort_values(1, key=lambda col: -col, axis=1)
expected = df.loc[:, ::-1]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_sort_values_key_casts_to_categorical(self, ordered):
# https://github.com/pandas-dev/pandas/issues/36383
categories = ["c", "b", "a"]
df = DataFrame({"x": [1, 1, 1], "y": ["a", "b", "c"]})
def sorter(key):
if key.name == "y":
return pd.Series(
Categorical(key, categories=categories, ordered=ordered)
)
return key
result = df.sort_values(by=["x", "y"], key=sorter)
expected = DataFrame(
{"x": [1, 1, 1], "y": ["c", "b", "a"]}, index=pd.Index([2, 1, 0])
)
tm.assert_frame_equal(result, expected)
@pytest.fixture
def df_none():
return DataFrame(
{
"outer": ["a", "a", "a", "b", "b", "b"],
"inner": [1, 2, 2, 2, 1, 1],
"A": np.arange(6, 0, -1),
("B", 5): ["one", "one", "two", "two", "one", "one"],
}
)
@pytest.fixture(params=[["outer"], ["outer", "inner"]])
def df_idx(request, df_none):
levels = request.param
return df_none.set_index(levels)
@pytest.fixture(
params=[
"inner", # index level
["outer"], # list of index level
"A", # column
[("B", 5)], # list of column
["inner", "outer"], # two index levels
[("B", 5), "outer"], # index level and column
["A", ("B", 5)], # Two columns
["inner", "outer"], # two index levels and column
]
)
def sort_names(request):
return request.param
@pytest.fixture(params=[True, False])
def ascending(request):
return request.param
class TestSortValuesLevelAsStr:
def test_sort_index_level_and_column_label(
self, df_none, df_idx, sort_names, ascending
):
# GH#14353
# Get index levels from df_idx
levels = df_idx.index.names
# Compute expected by sorting on columns and then setting the index
expected = df_none.sort_values(
by=sort_names, ascending=ascending, axis=0
).set_index(levels)
# Compute result sorting on mix on columns and index levels
result = df_idx.sort_values(by=sort_names, ascending=ascending, axis=0)
tm.assert_frame_equal(result, expected)
def test_sort_column_level_and_index_label(
self, df_none, df_idx, sort_names, ascending
):
# GH#14353
# Get levels from df_idx
levels = df_idx.index.names
# Compute expected by sorting on axis=0, setting index levels, and then
# transposing. For some cases this will result in a frame with
# multiple column levels
expected = (
df_none.sort_values(by=sort_names, ascending=ascending, axis=0)
.set_index(levels)
.T
)
# Compute result by transposing and sorting on axis=1.
result = df_idx.T.sort_values(by=sort_names, ascending=ascending, axis=1)
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
import pandas as pd
from dsbox.ml.feature_engineering import TagEncoder
from dsbox.ml.feature_engineering.timeseries import RollingWindower, Shifter
from dsbox.utils import pandas_downcast_numeric
def concat_train_test(dataframe_list):
shop_data = dataframe_list[0]
shop_data_to_predict = dataframe_list[1]
shop_data['to_predict'] = False
shop_data_to_predict['to_predict'] = True
shop_data = pd.concat([shop_data, shop_data_to_predict], sort=False)
shop_data = shop_data.reset_index(drop=True)
return shop_data
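# Minimal usage sketch (toy frames with invented column names): the first element of
# the list is the observed history, the second holds the rows to forecast; the result
# is a single frame flagged by the boolean 'to_predict' column added above.
def _example_concat_train_test():
    history = pd.DataFrame({'Date': ['2017-12-30', '2017-12-31'], 'Sales': [120.0, 95.0]})
    future = pd.DataFrame({'Date': ['2018-01-01', '2018-01-02']})
    # four rows, a fresh RangeIndex, and to_predict True only for the last two
    return concat_train_test([history, future])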
def resample_fillna(dataframe):
shop_data = dataframe
shop_data['Datetime'] = | pd.to_datetime(shop_data['Date'], format='%Y-%m-%d') | pandas.to_datetime |
import numpy as np
import pandas as pd
from typing import List
from sklearn.preprocessing import StandardScaler
from cytominer_eval.transform import metric_melt
from cytominer_eval.transform.util import set_pair_ids
def assign_replicates(
similarity_melted_df: pd.DataFrame, replicate_groups: List[str],
) -> pd.DataFrame:
"""
Arguments:
similarity_melted_df - a long pandas dataframe output from transform.metric_melt
replicate_groups - a list of metadata column names in the original profile dataframe
to use as replicate columns
Output:
Adds columns to the similarity metric dataframe to indicate whether or not the
pairwise similarity metric is comparing replicates or not
"""
pair_ids = set_pair_ids()
replicate_col_names = {x: "{x}_replicate".format(x=x) for x in replicate_groups}
compare_dfs = []
for replicate_col in replicate_groups:
replicate_cols_with_suffix = [
"{col}{suf}".format(col=replicate_col, suf=pair_ids[x]["suffix"])
for x in pair_ids
]
assert all(
[x in similarity_melted_df.columns for x in replicate_cols_with_suffix]
), "replicate_group not found in melted dataframe columns"
replicate_col_name = replicate_col_names[replicate_col]
compare_df = similarity_melted_df.loc[:, replicate_cols_with_suffix]
compare_df.loc[:, replicate_col_name] = False
compare_df.loc[
np.where(compare_df.iloc[:, 0] == compare_df.iloc[:, 1])[0],
replicate_col_name,
] = True
compare_dfs.append(compare_df)
compare_df = pd.concat(compare_dfs, axis="columns").reset_index(drop=True)
compare_df = compare_df.assign(
group_replicate=compare_df.loc[:, replicate_col_names.values()].min(
axis="columns"
)
).loc[:, list(replicate_col_names.values()) + ["group_replicate"]]
similarity_melted_df = similarity_melted_df.merge(
compare_df, left_index=True, right_index=True
)
return similarity_melted_df
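# Illustrative sketch (not part of the library API; the Metadata_gene column name
# and values are invented for the example): build a tiny frame shaped like
# metric_melt() output, reusing the pair suffixes reported by set_pair_ids()
# rather than hard-coding them, then annotate it. Rows that pair identical
# Metadata_gene values come back with group_replicate == True.
def _example_assign_replicates() -> pd.DataFrame:
    suffixes = [pair["suffix"] for pair in set_pair_ids().values()]
    toy = pd.DataFrame(
        {
            "Metadata_gene" + suffixes[0]: ["geneA", "geneA", "geneB"],
            "Metadata_gene" + suffixes[1]: ["geneA", "geneB", "geneB"],
            "similarity_metric": [0.9, 0.1, 0.8],
        }
    )
    return assign_replicates(toy, replicate_groups=["Metadata_gene"])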
def calculate_precision_recall(replicate_group_df: pd.DataFrame, k: int) -> pd.Series:
"""
Usage: Designed to be called within a pandas.DataFrame().groupby().apply()
"""
assert (
"group_replicate" in replicate_group_df.columns
), "'group_replicate' not found in dataframe; remember to call assign_replicates()."
recall_denom__total_relevant_items = sum(replicate_group_df.group_replicate)
precision_denom__num_recommended_items = k
num_recommended_items_at_k = sum(replicate_group_df.iloc[:k,].group_replicate)
precision_at_k = num_recommended_items_at_k / precision_denom__num_recommended_items
recall_at_k = num_recommended_items_at_k / recall_denom__total_relevant_items
return_bundle = {"k": k, "precision": precision_at_k, "recall": recall_at_k}
return | pd.Series(return_bundle) | pandas.Series |
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
def test_constructors(self, closed, name):
left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4])
ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
expected = IntervalIndex._simple_new(
left=left, right=right, closed=closed, name=name)
result = IntervalIndex(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_arrays(
left.values, right.values, closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
lzip(left, right), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = Index(ivs, name=name)
assert isinstance(result, IntervalIndex)
tm.assert_index_equal(result, expected)
# idempotent
tm.assert_index_equal(Index(expected), expected)
tm.assert_index_equal(IntervalIndex(expected), expected)
result = IntervalIndex.from_intervals(expected)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(
expected.values, name=expected.name)
tm.assert_index_equal(result, expected)
left, right = expected.left, expected.right
result = IntervalIndex.from_arrays(
left, right, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
expected.to_tuples(), closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
breaks = expected.left.tolist() + [expected.right[-1]]
result = IntervalIndex.from_breaks(
breaks, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [[np.nan], [np.nan] * 2, [np.nan] * 50])
def test_constructors_nan(self, closed, data):
# GH 18421
expected_values = np.array(data, dtype=object)
expected_idx = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_idx.closed == closed
tm.assert_numpy_array_equal(expected_idx.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks([np.nan] + data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
@pytest.mark.parametrize('data', [
[],
np.array([], dtype='int64'),
np.array([], dtype='float64'),
np.array([], dtype=object)])
def test_constructors_empty(self, data, closed):
# GH 18421
expected_dtype = data.dtype if isinstance(data, np.ndarray) else object
expected_values = np.array([], dtype=object)
expected_index = | IntervalIndex(data, closed=closed) | pandas.IntervalIndex |
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
# the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
cat_unordered = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unordered
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both directions, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# Make sure that unequal comparisons take the categories order into
# account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
# unicode option should not affect Categorical, as it doesn't care about
# the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assignments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
# deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
# all "pointers" to '4' must be changed from 3 to 0, ...
c = c.set_categories([4, 3, 2, 1])
# positions are changed
self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3]))
# categories are now in new order
self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1]))
# output is the same
self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1]))
self.assertEqual(c.min(), 4)
self.assertEqual(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
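        # remove_unused_categories drops categories that no value refers to;
        # the values themselves (including NaN) are left unchanged.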
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
        # nan must be removed from the categories of the result (it stays in
        # the values)
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
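        # mode() returns the most frequent category (all of them on a tie,
        # and an empty Categorical when every value is unique); NaN never
        # counts towards the mode (asserted below).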
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
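        # scalar indexing returns the value itself, while slicing returns a
        # new Categorical that keeps the full set of categories.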
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0, 3, 2, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = pd.Categorical([1, 2, 3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
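        # memory_usage matches nbytes for numeric categories; with object
        # categories, deep=True also counts the string data and is therefore
        # larger than nbytes (checked below).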
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
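        # Categorical.searchsorted should agree with Series.searchsorted on
        # the same ordered values; every result below is compared against an
        # explicit expectation and against the equivalent Series call.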
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Scalar version of single item array
        # Categorical.searchsorted returns an np.array, like pd.Series does,
        # unlike np.array.searchsorted(), which returns a scalar for a scalar
        # input
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
        # eggs after donuts, after switching milk and donuts
        exp = np.array([3, 5])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
def test_deprecated_labels(self):
        # TODO: labels is deprecated and should be removed in 0.18 or 2017,
        # whichever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_deprecated_levels(self):
        # TODO: levels is deprecated and should be removed in 0.18 or 2017,
        # whichever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.categories
with tm.assert_produces_warning(FutureWarning):
res = cat.levels
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
res = pd.Categorical([1, 2, 3, np.nan], levels=[1, 2, 3])
self.assert_numpy_array_equal(res.categories, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_removed_names_produces_warning(self):
# 10482
with tm.assert_produces_warning(UserWarning):
Categorical([0, 1], name="a")
with tm.assert_produces_warning(UserWarning):
Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])
def test_comparison_with_unknown_scalars(self):
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assertRaises(TypeError, lambda: cat < 4)
self.assertRaises(TypeError, lambda: cat > 4)
self.assertRaises(TypeError, lambda: 4 < cat)
self.assertRaises(TypeError, lambda: 4 > cat)
self.assert_numpy_array_equal(cat == 4, [False, False, False])
self.assert_numpy_array_equal(cat != 4, [True, True, True])
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a', 'a', 'c',
'c', 'c'])
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False,
labels=labels)
self.cat = df
def test_dtypes(self):
# GH8143
index = ['cat', 'obj', 'num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
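        # the codes dtype is the smallest signed integer type that can hold
        # the number of categories, e.g. (as asserted below):
        #   3 categories      -> int8
        #   400 categories    -> int16
        #   40000 categories  -> int32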
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000)])
self.assertTrue(result.codes.dtype == 'int32')
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
self.assertTrue(result.codes.dtype == 'int8')
def test_basic(self):
# test basic creation / coercion of categoricals
s = Series(self.factor, name='A')
self.assertEqual(s.dtype, 'category')
self.assertEqual(len(s), len(self.factor))
str(s.values)
str(s)
# in a frame
df = DataFrame({'A': self.factor})
result = df['A']
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
df = DataFrame({'A': s})
result = df['A']
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# multiples
df = DataFrame({'A': s, 'B': s, 'C': 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
self.assertEqual(result2.name, 'B')
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# GH8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
        # doing this breaks transform
        x['person_name'] = pd.Categorical(x.person_name)
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
self.assertEqual(result, expected)
result = x.person_name[0]
self.assertEqual(result, expected)
result = x.person_name.loc[0]
self.assertEqual(result, expected)
def test_creation_astype(self):
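        # astype('category') on a Series (or a DataFrame column) should give
        # the same result as building the Categorical directly; the keyword
        # variants pass categories/ordered through to the constructor.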
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
l = [1, 2, 3, 1]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
df = pd.DataFrame({"cats": [1, 2, 3, 4, 5, 6],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = pd.DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l, ordered=True))
res = s.astype('category', ordered=True)
tm.assert_series_equal(res, exp)
exp = pd.Series(Categorical(
l, categories=list('abcdef'), ordered=True))
res = s.astype('category', categories=list('abcdef'), ordered=True)
tm.assert_series_equal(res, exp)
def test_construction_series(self):
l = [1, 2, 3, 1]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
l = ["a", "b", "c", "a"]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = pd.date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_construction_frame(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([pd.Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([pd.Categorical(list('abc')), pd.Categorical(list(
'abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([pd.Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
self.assertRaises(
ValueError,
lambda: DataFrame([pd.Categorical(list('abc')),
pd.Categorical(list('abdefg'))]))
# ndim > 1
self.assertRaises(NotImplementedError,
lambda: pd.Categorical(np.array([list('abcd')])))
def test_reshaping(self):
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(p.major_axis))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
def test_reindex(self):
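        # reindexing a categorical Series fills missing labels with NaN while
        # keeping the original categories ['a', 'b', 'c'].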
index = pd.date_range('20000101', periods=3)
# reindexing to an invalid Categorical
s = Series(['a', 'b', 'c'], dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
'c']))
expected.index = [1, 2]
result = s.reindex([1, 2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(
values=['c', np.nan], categories=['a', 'b', 'c']))
expected.index = [2, 3]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat, copy=True)
self.assertFalse(s.cat is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
exp_cat = np.array(["a", "b", "c", "a"])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat)
self.assertTrue(s.values is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_nan_handling(self):
        # NaNs are represented as -1 in the codes
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(s.values.codes, np.array([0, 1, -1, 0]))
# If categories have nan included, the label should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
s2 = Series(Categorical(
["a", "b", np.nan, "a"], categories=["a", "b", np.nan]))
self.assert_numpy_array_equal(s2.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s2.values.codes, np.array([0, 1, 2, 0]))
# Changing categories should also make the replaced category np.nan
s3 = Series(Categorical(["a", "b", "c", "a"]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s3.cat.categories = ["a", "b", np.nan]
self.assert_numpy_array_equal(s3.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s3.values.codes, np.array([0, 1, 2, 0]))
def test_cat_accessor(self):
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assertEqual(s.cat.ordered, False)
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s.cat.set_categories(["b", "a"], inplace=True)
self.assertTrue(s.values.equals(exp))
res = s.cat.set_categories(["b", "a"])
self.assertTrue(res.values.equals(exp))
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s[:] = "a"
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, np.array(["a"]))
def test_sequence_like(self):
# GH 7839
# make sure can iterate
df = DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.iteritems():
            str(col)
def test_series_delegations(self):
# invalid accessor
self.assertRaises(AttributeError, lambda: Series([1, 2, 3]).cat)
tm.assertRaisesRegexp(
AttributeError,
r"Can only use .cat accessor with a 'category' dtype",
lambda: Series([1, 2, 3]).cat)
self.assertRaises(AttributeError, lambda: Series(['a', 'b', 'c']).cat)
self.assertRaises(AttributeError, lambda: Series(np.arange(5.)).cat)
self.assertRaises(AttributeError,
lambda: Series([Timestamp('20130101')]).cat)
# Series should delegate calls to '.categories', '.codes', '.ordered'
# and the methods '.set_categories()' 'drop_unused_categories()' to the
# categorical
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
s.cat.categories = [1, 2, 3]
exp_categories = np.array([1, 2, 3])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
exp_codes = Series([0, 1, 2, 0], dtype='int8')
tm.assert_series_equal(s.cat.codes, exp_codes)
self.assertEqual(s.cat.ordered, True)
s = s.cat.as_unordered()
self.assertEqual(s.cat.ordered, False)
s.cat.as_ordered(inplace=True)
self.assertEqual(s.cat.ordered, True)
# reorder
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
s = s.cat.set_categories(["c", "b", "a"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# remove unused categories
s = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"
]))
exp_categories = np.array(["a", "b"])
exp_values = np.array(["a", "b", "b", "a"])
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
        # set_categories is easily called on the Series itself instead of via
        # the .cat accessor, so test that the wrong call raises an error:
def f():
s.set_categories([4, 3, 2, 1])
self.assertRaises(Exception, f)
# right: s.cat.set_categories([4,3,2,1])
def test_series_functions_no_warnings(self):
df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})
labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False,
labels=labels)
def test_assignment_to_dataframe(self):
# assignment
df = DataFrame({'value': np.array(
np.random.randint(0, 10000, 100), dtype='int32')})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
df['D'] = d
str(df)
result = df.dtypes
expected = Series(
[np.dtype('int32'), com.CategoricalDtype()], index=['value', 'D'])
tm.assert_series_equal(result, expected)
df['E'] = s
str(df)
result = df.dtypes
expected = Series([np.dtype('int32'), com.CategoricalDtype(),
com.CategoricalDtype()],
index=['value', 'D', 'E'])
tm.assert_series_equal(result, expected)
result1 = df['D']
result2 = df['E']
self.assertTrue(result1._data._block.values.equals(d))
# sorting
s.name = 'E'
self.assertTrue(result2.sort_index().equals(s.sort_index()))
cat = pd.Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
df = pd.DataFrame(pd.Series(cat))
def test_describe(self):
# Categoricals should not show up together with numerical columns
result = self.cat.describe()
self.assertEqual(len(result.columns), 1)
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = pd.Series(pd.Categorical(["a", "b", "c", "c"]))
df3 = pd.DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
res = df3.describe()
self.assert_numpy_array_equal(res["cat"].values, res["s"].values)
def test_repr(self):
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
self.assertEqual(exp, a.__unicode__())
a = pd.Series(pd.Categorical(["a", "b"] * 25))
exp = u("0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" +
"dtype: category\nCategories (2, object): [a, b]")
with option_context("display.max_rows", 5):
self.assertEqual(exp, repr(a))
levs = list("abcdefghijklmnopqrstuvwxyz")
a = pd.Series(pd.Categorical(
["a", "b"], categories=levs, ordered=True))
exp = u("0 a\n1 b\n" + "dtype: category\n"
"Categories (26, object): [a < b < c < d ... w < x < y < z]")
self.assertEqual(exp, a.__unicode__())
def test_categorical_repr(self):
c = pd.Categorical([1, 2, 3])
exp = """[1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1, 2, 3, 4, 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20))
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_ordered(self):
c = pd.Categorical([1, 2, 3], ordered=True)
exp = """[1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3],
ordered=True)
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20), ordered=True)
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
# TODO(wesm): exceeding 80 characters in the console is not good
# behavior
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]""")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]")
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, "
"2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, "
"2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_series_repr(self):
s = pd.Series(pd.Categorical([1, 2, 3]))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10)))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_ordered(self):
s = pd.Series(pd.Categorical([1, 2, 3], ordered=True))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10), ordered=True))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
8 days 01:00:00, 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
8 days 01:00:00 < 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_index_repr(self):
idx = pd.CategoricalIndex(pd.Categorical([1, 2, 3]))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')"""
self.assertEqual(repr(idx), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10)))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_ordered(self):
i = pd.CategoricalIndex(pd.Categorical([1, 2, 3], ordered=True))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10), ordered=True))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx), ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
'2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
'2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period(self):
        # test all lengths
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=1)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=2)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=3)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx)))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
'2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
'2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_frame(self):
# normal DataFrame
dt = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
p = pd.period_range('2011-01', freq='M', periods=5)
df = pd.DataFrame({'dt': dt, 'p': p})
exp = """ dt p
0 2011-01-01 09:00:00-05:00 2011-01
1 2011-01-01 10:00:00-05:00 2011-02
2 2011-01-01 11:00:00-05:00 2011-03
3 2011-01-01 12:00:00-05:00 2011-04
4 2011-01-01 13:00:00-05:00 2011-05"""
df = pd.DataFrame({'dt': pd.Categorical(dt), 'p': pd.Categorical(p)})
self.assertEqual(repr(df), exp)
def test_info(self):
# make sure it works
n = 2500
df = DataFrame({'int64': np.random.randint(100, size=n)})
df['category'] = Series(np.array(list('abcdefghij')).take(
np.random.randint(0, 10, size=n))).astype('category')
df.isnull()
df.info()
df2 = df[df['category'] == 'd']
df2.info()
def test_groupby_sort(self):
# http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
res = self.cat.groupby(['value_group'])['value_group'].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = pd.CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Series(Categorical(["a", "b", "c", "d"], ordered=False))
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Series(Categorical(["a", "b", "c", "d"], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Series(Categorical(["a", "b", "c", "d"], categories=[
'd', 'c', 'b', 'a'], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Series(Categorical(
[np.nan, "b", "c", np.nan], categories=['d', 'c', 'b', 'a'
], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
cat = Series(Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
def test_mode(self):
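# mode of an ordered categorical Series should keep the categorical dtype and the full category list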
s = Series(Categorical([1, 1, 2, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 1, 1, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5, 1], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True))
res = s.mode()
exp = Series(Categorical([], categories=[5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
def test_value_counts(self):
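# value_counts on a categorical Series reports unused categories ("d" here) with a count of 0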
s = pd.Series(pd.Categorical(
["a", "b", "c", "c", "c", "b"], categories=["c", "a", "b", "d"]))
res = s.value_counts(sort=False)
exp = Series([3, 1, 2, 0],
index=pd.CategoricalIndex(["c", "a", "b", "d"]))
tm.assert_series_equal(res, exp)
res = s.value_counts(sort=True)
exp = Series([3, 2, 1, 0],
index=pd.CategoricalIndex(["c", "b", "a", "d"]))
tm.assert_series_equal(res, exp)
def test_value_counts_with_nan(self):
# https://github.com/pydata/pandas/issues/9443
s = pd.Series(["a", "b", "a"], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
s = pd.Series(["a", "b", None, "a", None, None], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1], index=pd.CategoricalIndex([np.nan, "a", "b"])))
# When we aren't sorting by counts, and np.nan isn't a
# category, it should be last.
tm.assert_series_equal(
s.value_counts(dropna=False, sort=False),
pd.Series([2, 1, 3],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(
["a", "b", "a"], categories=["a", "b", np.nan]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1, 0],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(
["a", "b", None, "a", None, None], categories=["a", "b", np.nan
]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1],
index=pd.CategoricalIndex([np.nan, "a", "b"])))
def test_groupby(self):
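# grouping on a categorical column keeps unused categories in the result index (their aggregates are NaN)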
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"
], categories=["a", "b", "c", "d"], ordered=True)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
expected = DataFrame({'a': Series(
[1, 2, 4, np.nan], index=pd.CategoricalIndex(
['a', 'b', 'c', 'd'], name='b'))})
result = data.groupby("b").mean()
tm.assert_frame_equal(result, expected)
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A")
exp_idx = pd.CategoricalIndex(['a', 'b', 'z'], name='A')
expected = DataFrame({'values': Series([3, 7, np.nan], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers
gb = df.groupby(['A', 'B'])
expected = DataFrame({'values': Series(
[1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan
], index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y']], names=['A', 'B']))})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers with a non-cat
df = df.copy()
df['C'] = ['foo', 'bar'] * 2
gb = df.groupby(['A', 'B', 'C'])
expected = DataFrame({'values': Series(
np.nan, index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y'], ['foo', 'bar']
], names=['A', 'B', 'C']))}).sortlevel()
expected.iloc[[1, 2, 7, 8], 0] = [1, 2, 3, 4]
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name)
g = x.groupby(['person_id'])
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[['person_name']])
result = x.drop_duplicates('person_name')
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates('person_name').iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name='person_id')
expected['person_name'] = expected['person_name'].astype('object')
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']])
# Filter
tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a'])
tm.assert_frame_equal(df.groupby(c).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']])
# GH 9603
df = pd.DataFrame({'a': [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4])
result = df.groupby(c).apply(len)
expected = pd.Series([1, 0, 0, 0],
index=pd.CategoricalIndex(c.values.categories))
expected.index.name = 'a'
tm.assert_series_equal(result, expected)
def test_pivot_table(self):
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'])
expected = Series([1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan],
index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y']],
names=['A', 'B']),
name='values')
tm.assert_series_equal(result, expected)
def test_count(self):
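# count() only counts the non-NaN entries of a categorical Series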
s = Series(Categorical([np.nan, 1, 2, np.nan],
categories=[5, 4, 3, 2, 1], ordered=True))
result = s.count()
self.assertEqual(result, 2)
def test_sort(self):
c = Categorical(["a", "b", "b", "a"], ordered=False)
cat = Series(c)
# 9816 deprecated
with tm.assert_produces_warning(FutureWarning):
c.order()
# sort in the categories order
expected = Series(
Categorical(["a", "a", "b", "b"],
ordered=False), index=[0, 3, 1, 2])
result = cat.sort_values()
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "c", "b", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Series(Categorical(["a", "c", "b", "d"], categories=[
"a", "b", "c", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"])
self.assert_numpy_array_equal(res.__array__(), exp)
raw_cat1 = Categorical(["a", "b", "c", "d"],
categories=["a", "b", "c", "d"], ordered=False)
raw_cat2 = Categorical(["a", "b", "c", "d"],
categories=["d", "c", "b", "a"], ordered=True)
s = ["a", "b", "c", "d"]
df = DataFrame({"unsort": raw_cat1,
"sort": raw_cat2,
"string": s,
"values": [1, 2, 3, 4]})
# Cats must be sorted in a dataframe
res = df.sort_values(by=["string"], ascending=False)
exp = np.array(["d", "c", "b", "a"])
self.assert_numpy_array_equal(res["sort"].values.__array__(), exp)
self.assertEqual(res["sort"].dtype, "category")
res = df.sort_values(by=["sort"], ascending=False)
exp = df.sort_values(by=["string"], ascending=True)
self.assert_numpy_array_equal(res["values"], exp["values"])
self.assertEqual(res["sort"].dtype, "category")
self.assertEqual(res["unsort"].dtype, "category")
# unordered cat, but we allow this
df.sort_values(by=["unsort"], ascending=False)
# multi-columns sort
# GH 7848
df = DataFrame({"id": [6, 5, 4, 3, 2, 1],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = pd.Categorical(df["raw_grade"], ordered=True)
df['grade'] = df['grade'].cat.set_categories(['b', 'e', 'a'])
# sorts 'grade' according to the order of the categories
result = df.sort_values(by=['grade'])
expected = df.iloc[[1, 2, 5, 0, 3, 4]]
tm.assert_frame_equal(result, expected)
# multi
result = df.sort_values(by=['grade', 'id'])
expected = df.iloc[[2, 1, 5, 4, 3, 0]]
tm.assert_frame_equal(result, expected)
# reverse
cat = Categorical(["a", "c", "c", "b", "d"], ordered=True)
res = cat.sort_values(ascending=False)
exp_val = np.array(["d", "c", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
# some NaN positions
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
def test_slicing(self):
cat = Series(Categorical([1, 2, 3, 4]))
reversed = cat[::-1]
exp = np.array([4, 3, 2, 1])
self.assert_numpy_array_equal(reversed.__array__(), exp)
df = DataFrame({'value': (np.arange(100) + 1).astype('int64')})
df['D'] = pd.cut(df.value, bins=[0, 25, 50, 75, 100])
expected = Series([11, '(0, 25]'], index=['value', 'D'], name=10)
result = df.iloc[10]
tm.assert_series_equal(result, expected)
expected = DataFrame({'value': np.arange(11, 21).astype('int64')},
index=np.arange(10, 20).astype('int64'))
expected['D'] = pd.cut(expected.value, bins=[0, 25, 50, 75, 100])
result = df.iloc[10:20]
tm.assert_frame_equal(result, expected)
expected = Series([9, '(0, 25]'], index=['value', 'D'], name=8)
result = df.loc[8]
tm.assert_series_equal(result, expected)
def test_slicing_and_getting_ops(self):
# systematically test the slicing operations:
# for all slicing ops:
# - returning a dataframe
# - returning a column
# - returning a row
# - returning a single value
cats = pd.Categorical(
["a", "c", "b", "c", "c", "c", "c"], categories=["a", "b", "c"])
idx = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 2, 3, 4, 5, 6, 7]
df = pd.DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
cats2 = pd.Categorical(["b", "c"], categories=["a", "b", "c"])
idx2 = pd.Index(["j", "k"])
values2 = [3, 4]
# 2:4,: | "j":"k",:
exp_df = pd.DataFrame({"cats": cats2, "values": values2}, index=idx2)
# :,"cats" | :,0
exp_col = pd.Series(cats, index=idx, name='cats')
# "j",: | 2,:
exp_row = pd.Series(["b", 3], index=["cats", "values"], dtype="object",
name="j")
# "j","cats | 2,0
exp_val = "b"
# iloc
# frame
res_df = df.iloc[2:4, :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.iloc[2, :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.iloc[2, 0]
self.assertEqual(res_val, exp_val)
# loc
# frame
res_df = df.loc["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.loc["j", :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.loc[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.loc["j", "cats"]
self.assertEqual(res_val, exp_val)
# ix
# frame
# res_df = df.ix["j":"k",[0,1]] # doesn't work?
res_df = df.ix["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.ix["j", :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.ix[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.ix["j", 0]
self.assertEqual(res_val, exp_val)
# iat
res_val = df.iat[2, 0]
self.assertEqual(res_val, exp_val)
# at
res_val = df.at["j", "cats"]
self.assertEqual(res_val, exp_val)
# fancy indexing
exp_fancy = df.iloc[[2]]
res_fancy = df[df["cats"] == "b"]
tm.assert_frame_equal(res_fancy, exp_fancy)
res_fancy = df[df["values"] == 3]
tm.assert_frame_equal(res_fancy, exp_fancy)
# get_value
res_val = df.get_value("j", "cats")
self.assertEqual(res_val, exp_val)
# i : int, slice, or sequence of integers
res_row = df.iloc[2]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
res_df = df.iloc[slice(2, 4)]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_df = df.iloc[[2, 3]]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
res_df = df.iloc[:, slice(0, 2)]
tm.assert_frame_equal(res_df, df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_df = df.iloc[:, [0, 1]]
tm.assert_frame_equal(res_df, df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
def test_slicing_doc_examples(self):
# GH 7918
cats = Categorical(
["a", "b", "b", "b", "c", "c", "c"], categories=["a", "b", "c"])
idx = Index(["h", "i", "j", "k", "l", "m", "n", ])
values = [1, 2, 2, 2, 3, 4, 5]
df = DataFrame({"cats": cats, "values": values}, index=idx)
result = df.iloc[2:4, :]
expected = DataFrame(
{"cats": Categorical(
['b', 'b'], categories=['a', 'b', 'c']),
"values": [2, 2]}, index=['j', 'k'])
tm.assert_frame_equal(result, expected)
result = df.iloc[2:4, :].dtypes
expected = Series(['category', 'int64'], ['cats', 'values'])
tm.assert_series_equal(result, expected)
result = df.loc["h":"j", "cats"]
expected = Series(Categorical(['a', 'b', 'b'],
categories=['a', 'b', 'c']),
index=['h', 'i', 'j'], name='cats')
tm.assert_series_equal(result, expected)
result = df.ix["h":"j", 0:1]
expected = DataFrame({'cats': Series(
Categorical(
['a', 'b', 'b'], categories=['a', 'b', 'c']), index=['h', 'i',
'j'])})
tm.assert_frame_equal(result, expected)
def test_assigning_ops(self):
# systematically test the assigning operations:
# for all slicing ops:
# for value in categories and value not in categories:
# - assign a single value -> exp_single_cats_value
# - assign a complete row (mixed values) -> exp_single_row
# assign multiple rows (mixed values) (-> array) -> exp_multi_row
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
cats = pd.Categorical(
["a", "a", "a", "a", "a", "a", "a"], categories=["a", "b"])
idx = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 1, 1, 1, 1, 1, 1]
orig = pd.DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
# changed single row
cats1 = pd.Categorical(
["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx1 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values1 = [1, 1, 2, 1, 1, 1, 1]
exp_single_row = pd.DataFrame(
{"cats": cats1,
"values": values1}, index=idx1)
# changed multiple rows
cats2 = pd.Categorical(
["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx2 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values2 = [1, 1, 2, 2, 1, 1, 1]
exp_multi_row = pd.DataFrame(
{"cats": cats2,
"values": values2}, index=idx2)
# changed part of the cats column
cats3 = pd.Categorical(
["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx3 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values3 = [1, 1, 1, 1, 1, 1, 1]
exp_parts_cats_col = pd.DataFrame(
{"cats": cats3,
"values": values3}, index=idx3)
# changed single value in cats col
cats4 = pd.Categorical(
["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx4 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values4 = [1, 1, 1, 1, 1, 1, 1]
exp_single_cats_value = pd.DataFrame(
{"cats": cats4,
"values": values4}, index=idx4)
# iloc
# ###############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.iloc[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.iloc[df.index == "j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.iloc[2, 0] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.iloc[2, :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.iloc[2, :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.iloc[2:4, :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.iloc[2:4, :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.iloc[2:4, 0] = ["c", "c"]
# loc
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.loc["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.loc[df.index == "j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.loc["j", "cats"] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.loc["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.loc["j", :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.loc["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.loc["j":"k", :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.loc["j":"k", "cats"] = ["c", "c"]
# ix
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.ix["j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.ix[df.index == "j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.ix["j", 0] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.ix["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.ix["j", :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.ix["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.ix["j":"k", :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.ix["j":"k", 0] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.ix["j":"k", 0] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.ix["j":"k", 0] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.ix["j":"k", 0] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.ix["j":"k", 0] = ["c", "c"]
# iat
df = orig.copy()
df.iat[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.iat[2, 0] = "c"
self.assertRaises(ValueError, f)
# at
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.at["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.at["j", "cats"] = "c"
self.assertRaises(ValueError, f)
# fancy indexing
catsf = pd.Categorical(
["a", "a", "c", "c", "a", "a", "a"], categories=["a", "b", "c"])
idxf = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
valuesf = [1, 1, 3, 3, 1, 1, 1]
df = pd.DataFrame({"cats": catsf, "values": valuesf}, index=idxf)
exp_fancy = exp_multi_row.copy()
exp_fancy["cats"].cat.set_categories(["a", "b", "c"], inplace=True)
df[df["cats"] == "c"] = ["b", 2]
tm.assert_frame_equal(df, exp_multi_row)
# set_value
df = orig.copy()
df.set_value("j", "cats", "b")
tm.assert_frame_equal(df, exp_single_cats_value)
def f():
df = orig.copy()
df.set_value("j", "cats", "c")
self.assertRaises(ValueError, f)
# Assigning a Category to parts of an int/... column uses the values of
# the Categorical
df = pd.DataFrame({"a": [1, 1, 1, 1, 1],
"b": ["a", "a", "a", "a", "a"]})
exp = pd.DataFrame({"a": [1, "b", "b", 1, 1],
"b": ["a", "a", "b", "b", "a"]})
df.loc[1:2, "a"] = pd.Categorical(["b", "b"], categories=["a", "b"])
df.loc[2:3, "b"] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp)
# Series
orig = Series(pd.Categorical(["b", "b"], categories=["a", "b"]))
s = orig.copy()
s[:] = "a"
exp = Series(pd.Categorical(["a", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[1] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[s.index > 0] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[[False, True]] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s.index = ["x", "y"]
s["y"] = "a"
exp = Series(
pd.Categorical(["b", "a"],
categories=["a", "b"]), index=["x", "y"])
tm.assert_series_equal(s, exp)
# ensure that one can set something to np.nan
s = Series(Categorical([1, 2, 3]))
exp = Series(Categorical([1, np.nan, 3]))
s[1] = np.nan
tm.assert_series_equal(s, exp)
def test_comparisons(self):
tests_data = [(list("abc"), list("cba"), list("bbb")),
([1, 2, 3], [3, 2, 1], [2, 2, 2])]
for data, reverse, base in tests_data:
cat_rev = pd.Series(pd.Categorical(data, categories=reverse,
ordered=True))
cat_rev_base = pd.Series(pd.Categorical(base, categories=reverse,
ordered=True))
cat = pd.Series(pd.Categorical(data, ordered=True))
cat_base = pd.Series(pd.Categorical(
base, categories=cat.cat.categories, ordered=True))
s = Series(base)
a = np.array(base)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = Series([True, False, False])
tm.assert_series_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = Series([False, False, True])
tm.assert_series_equal(res_rev, exp_rev)
res = cat > cat_base
exp = Series([False, False, True])
tm.assert_series_equal(res, exp)
scalar = base[1]
res = cat > scalar
exp = Series([False, False, True])
exp2 = cat.values > scalar
tm.assert_series_equal(res, exp)
tm.assert_numpy_array_equal(res.values, exp2)
res_rev = cat_rev > scalar
exp_rev = Series([True, False, False])
exp_rev2 = cat_rev.values > scalar
tm.assert_series_equal(res_rev, exp_rev)
tm.assert_numpy_array_equal(res_rev.values, exp_rev2)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
# categorical cannot be compared to Series or numpy array, and also
# not the other way around
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# unequal comparison should raise for unordered cats
cat = Series(Categorical(list("abc")))
def f():
cat > "b"
self.assertRaises(TypeError, f)
cat = Series(Categorical(list("abc"), ordered=False))
def f():
cat > "b"
self.assertRaises(TypeError, f)
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = Series(Categorical(list("abc"), ordered=True))
self.assertRaises(TypeError, lambda: cat < "d")
self.assertRaises(TypeError, lambda: cat > "d")
self.assertRaises(TypeError, lambda: "d" < cat)
self.assertRaises(TypeError, lambda: "d" > cat)
self.assert_series_equal(cat == "d", Series([False, False, False]))
self.assert_series_equal(cat != "d", Series([True, True, True]))
# And test NaN handling...
cat = Series(Categorical(["a", "b", "c", np.nan]))
exp = Series([True, True, True, False])
res = (cat == cat)
tm.assert_series_equal(res, exp)
def test_cat_equality(self):
# GH 8938
# allow equality comparisons
a = Series(list('abc'), dtype="category")
b = Series(list('abc'), dtype="object")
c = Series(['a', 'b', 'cc'], dtype="object")
d = Series(list('acb'), dtype="object")
e = Categorical(list('abc'))
f = Categorical(list('acb'))
# vs scalar
self.assertFalse((a == 'a').all())
self.assertTrue(((a != 'a') == ~(a == 'a')).all())
self.assertFalse(('a' == a).all())
self.assertTrue((a == 'a')[0])
self.assertTrue(('a' == a)[0])
self.assertFalse(('a' != a)[0])
# vs list-like
self.assertTrue((a == a).all())
self.assertFalse((a != a).all())
self.assertTrue((a == list(a)).all())
self.assertTrue((a == b).all())
self.assertTrue((b == a).all())
self.assertTrue(((~(a == b)) == (a != b)).all())
self.assertTrue(((~(b == a)) == (b != a)).all())
self.assertFalse((a == c).all())
self.assertFalse((c == a).all())
self.assertFalse((a == d).all())
self.assertFalse((d == a).all())
# vs a cat-like
self.assertTrue((a == e).all())
self.assertTrue((e == a).all())
self.assertFalse((a == f).all())
self.assertFalse((f == a).all())
self.assertTrue(((~(a == e) == (a != e)).all()))
self.assertTrue(((~(e == a) == (e != a)).all()))
self.assertTrue(((~(a == f) == (a != f)).all()))
self.assertTrue(((~(f == a) == (f != a)).all()))
# non-equality is not comparable
self.assertRaises(TypeError, lambda: a < b)
self.assertRaises(TypeError, lambda: b < a)
self.assertRaises(TypeError, lambda: a > b)
self.assertRaises(TypeError, lambda: b > a)
def test_concat(self):
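# concatenating frames whose categoricals share identical categories keeps the categorical dtype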
cat = pd.Categorical(["a", "b"], categories=["a", "b"])
vals = [1, 2]
df = pd.DataFrame({"cats": cat, "vals": vals})
cat2 = pd.Categorical(["a", "b", "a", "b"], categories=["a", "b"])
vals2 = [1, 2, 1, 2]
exp = pd.DataFrame({"cats": cat2,
"vals": vals2}, index=pd.Index([0, 1, 0, 1]))
res = pd.concat([df, df])
tm.assert_frame_equal(exp, res)
# Concat should raise if the two categoricals do not have the same
# categories
cat3 = pd.Categorical(["a", "b"], categories=["a", "b", "c"])
vals3 = [1, 2]
df_wrong_categories = pd.DataFrame({"cats": cat3, "vals": vals3})
def f():
pd.concat([df, df_wrong_categories])
self.assertRaises(ValueError, f)
# GH 7864
# make sure ordering is preserved
df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = pd.Categorical(df["raw_grade"])
df['grade'].cat.set_categories(['e', 'a', 'b'])
df1 = df[0:3]
df2 = df[3:]
self.assert_numpy_array_equal(df['grade'].cat.categories,
df1['grade'].cat.categories)
self.assert_numpy_array_equal(df['grade'].cat.categories,
df2['grade'].cat.categories)
dfx = pd.concat([df1, df2])
dfx['grade'].cat.categories
self.assert_numpy_array_equal(df['grade'].cat.categories,
dfx['grade'].cat.categories)
def test_concat_preserve(self):
# GH 8641
# series concat not preserving category dtype
s = Series(list('abc'), dtype='category')
s2 = Series(list('abd'), dtype='category')
def f():
pd.concat([s, s2])
self.assertRaises(ValueError, f)
result = pd.concat([s, s], ignore_index=True)
expected = Series(list('abcabc')).astype('category')
tm.assert_series_equal(result, expected)
result = pd.concat([s, s])
expected = Series(
list('abcabc'), index=[0, 1, 2, 0, 1, 2]).astype('category')
tm.assert_series_equal(result, expected)
a = Series(np.arange(6, dtype='int64'))
b = Series(list('aabbca'))
df2 = DataFrame({'A': a,
'B': b.astype('category', categories=list('cab'))})
result = pd.concat([df2, df2])
# -*- coding: utf-8 -*-
"""
Part of slugdetection package
@author: <NAME>
github: dapolak
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from slugdetection import Data_Engineering
class Slug_Detection(Data_Engineering):
"""
Classifies intervals of pressure and temperature data as intervals leading to a slug flow or intervals
leading to a normal flow.
Parameters
----------
well : Spark Data Frame
well data frame, includes continuous pressure and temperature data
"""
def __init__(self, well):
super().__init__(well)
def jump(self):
"""
Locates gaps in the continuous data in the attribute pd_df, and groups and labels the continuous sets.
"""
assert hasattr(self, "pd_df"), "Pandas data frame pd_df attribute must exist"
assert not self.pd_df.empty, "Pandas data frame cannot be empty"
self.pd_df["time_diff"] = self.pd_df["ts"].diff() # Create time difference column
# Create 'jump' column, where the value is True if the time difference between the current and previous
# row is larger than one minute
self.pd_df["jump"] = np.where(self.pd_df["time_diff"] > pd.Timedelta('1 min'),
True,
False)
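# (cumulative sum over the boolean jump flags gives every continuous run of data its own integer id)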
self.pd_df['count_id'] = self.pd_df["jump"].cumsum() # Group continuous data together by giving it a count id
def clean_short_sub(self, min_df_size=300):
"""
Deletes entries in pandas data frame attribute pd_df that are continuous in time for less than min_df_size
Parameters
----------
min_df_size : int (optional)
Minimum size of sub data frame that will be considered forward in the analysis (default is 300)
"""
assert hasattr(self, "pd_df"), "Pandas data frame pd_df attribute must exist"
assert not self.pd_df.empty, "Pandas data frame cannot be empty"
# Get count of individual count ids. This gives information about how many continuous sets of data exist
# within the pd_df attribute
counter = self.pd_df.groupby('count_id')['count_id'].count()
for i in range(0, len(counter)):
if counter.iloc[i] < min_df_size:
# Drop sets of continuous data that last for less than min_df_size minutes (default 300)
self.pd_df = self.pd_df[self.pd_df.count_id != i]
def sub_data(self, **kwargs):
"""
Creates a dictionary of sub data frames that are continuous in time for at least min_df_size minutes
Parameters
----------
kwargs
min_df_size : int
For clean_short_sub method. Minimum size of sub data frame that will be considered forward in the analysis
"""
assert hasattr(self, "pd_df"), "Pandas data frame pd_df attribute must exist"
assert not self.pd_df.empty, "Pandas data frame cannot be empty"
self.sub_df_dict = {} # Create sub_df_dict attribute which will store sub data frames of continuous data
self.jump() # Group continuous data together
self.clean_short_sub(min_df_size=kwargs.get("min_df_size", 300)) # Drop sets of continuous data that are too short (min_df_size, default 300)
self.jump() # Get new, ordered count ids
# Fill sub_df_dict attribute with appropriate size continuous data sets
for i in range(self.pd_df["count_id"].max() + 1):
temp = pd.DataFrame()
mask = self.pd_df["count_id"] == i # Mask values that equate the current count id
temp = self.pd_df[mask].reset_index() # Reset indices
self.sub_df_dict[i] = temp # Save data frame to the dictionary
return
def slug_check(self, slug_idx, key,
dict_slug_def={"time" :[60, 240], "interval" :[25, 25], "value_diff" :[2.0, 2.0]}):
"""
From a list of indices of peaks in the data, return a list of indices of first slugs, based on
the time since the last slug peak, the time between slug peaks and the value difference between slug peaks
Parameters
----------
slug_idx : list of int
List of indices of all the peaks thought to be slugs occurring in the current data frame
in the sub_df_dict attribute
key : int
Index of current database in sub_df_dict
dict_slug_def : dict (optional)
Dictionary of values to define slugs, such as the minimum time since the last first slug, the minimum interval
between two slug peaks and the maximum WHP difference between peaks
Returns
-------
first_slug_idx : list of int
List of indices of first slug peaks for current data frame in the dictionary attribute sub_df_dict
"""
curr_df = self.sub_df_dict[key] # Get current data frame from sub_df_dict dictionary
first_slug_idx = [-300] # Create first_slug index list. Set first value to -300
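# -300 is a dummy sentinel so the "time since last first slug" check below does not block the first candidate (with the default 240 minute threshold)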
# Variables to check if slugging period has been continuous
first = False # First slug has not occurred yet
last_slug = 0 # Index of last occurring slug
# From the slug indices list, create small lists of 3 consecutive slug indices
slug_check = [slug_idx[i:i + 3] for i in range(0, len(slug_idx) - 2, 1)]
# loop through list of list of three indices
for idx in slug_check:
# if gap of 240 minutes since last first slug AND slugs haven't been continuous, move to next condition
if (abs(first_slug_idx[-1] - idx[0]) < dict_slug_def['time'][1]) | (
(abs(last_slug - idx[0]) < dict_slug_def['time'][0]) & first):
last_slug = idx[0]
continue # continue to next index list
# if the two slug peaks are within the allowed interval of each other (default 25 minutes), move to next condition
elif (abs(idx[0] - idx[1]) > dict_slug_def['interval'][0]) | (
abs(idx[1] - idx[0]) > dict_slug_def['interval'][1]):
continue # continue to next index list
# if less than 2 bar difference between slugs (we want similar flow pattern), move to next condition
elif (abs(curr_df["WH_P"].iloc[idx[0]] - curr_df["WH_P"].iloc[idx[1]]) > dict_slug_def['value_diff'][0]) | (
abs(curr_df["WH_P"].iloc[idx[1]] - curr_df["WH_P"].iloc[idx[2]]) > dict_slug_def['value_diff'][1]):
continue # continue to next index list
else:
if len(first_slug_idx) == 1:
first = True # Set first to True, when the first slug of the sub data frame occurs
first_slug_idx.append(idx[0]) # Store first_slug index value
first_slug_idx.pop(0) # Drop dummy value set in the first slug index list
return first_slug_idx
def label_slugs(self, slug_diff=3, **kwargs):
"""
Finds slug peaks in each data frame in the sub_df_dict attribute and creates the list of indices of slugs.
Uses the slug_check method to then compute the list of indices of first slug occurrences, per data frame in
the sub_df_dict attribute.
Parameters
----------
slug_diff : float (optional)
Minimum differential value above which an increase in WHP is assumed to be a slug peak (default is 3.0).
kwargs
dict_slug_def : dict
For slug_check method
Returns
-------
first_slug : list of list of int or list of list of datetime
List containing the sub list of first slug indices as computed by the method slug_check per sub data frames
in the dictionary attribute
slugs : list of list of int or list of list of datetime
List containing the sub list of slug peaks indices per sub data frames in the dictionary attribute
"""
assert hasattr(self, "sub_df_dict"), "Sub_df_dict attribute must have been created"
first_slug = [] # Create list to store first slug indices
slugs = [] # Create list to store slug peaks indices
for key in self.sub_df_dict:
curr_df = self.sub_df_dict[key].copy() # Get copy of current df out of the dictionary
# Compute WHP gradient in new column
curr_df["WH_P_diff"] = curr_df["WH_P"].diff()
# Label increasing trends as True and decreasing trends as False
curr_df["trend"] = np.where(curr_df["WH_P_diff"] > 0, True, False)
# Give the consecutive sets of WHP trend an id
curr_df["categories"] = (curr_df["trend"] != curr_df["trend"].shift()).cumsum()
# Calculate the cumulative sum of the WHP trend at each id
curr_df["WH_P_diff_total"] = curr_df.groupby("categories")["WH_P_diff"].cumsum()
# Label slug peaks as large increases (> slug_diff) immediately followed by a decrease
curr_df["point_label"] = np.where((curr_df["WH_P_diff_total"] > slug_diff) &
(curr_df["WH_P_diff_total"].shift(periods=-1) < 0),
True,
False)
# Store indices of slug peaks to list
slug_index = curr_df.index[curr_df["point_label"] == True].tolist()
# From slug_check method, compute first slug indices
first = self.slug_check(slug_index, key)
if 'dict_slug_def' in kwargs.keys():
first = self.slug_check(slug_index, key, dict_slug_def=kwargs['dict_slug_def'])
first_slug.append(first) # Store first slug indices for current df to list
slugs.append(slug_index) # Store slug peaks indices for current df to list
return first_slug, slugs
def format_data(self, first_slug, size_list=240, max_clean_count=10000):
"""
Formats data for classification algorithms. Splits the sub_df_dict attribute's data frames into size_list
sized data frames. Data frames containing first slugs, as labelled in the method label_slugs, are split right
before the occurrence of a first slug.
Parameters
----------
first_slug : list of list of int
List containing the sub list of first slug indices as computed by the method slug_check per sub data frames
in the dictionary attribute
size_list : int (optional)
Size of data frame to use for classification (default is 240)
max_clean_count : int (optional)
Maximum number of data frames to create that lead to a normal, clean, flow. This is known by whether the
first slug list is empty (default is 10000)
"""
self.label = np.array([]) # Create label array to store labels of data frames
self.df_list = [] # Create data frame list to store size_list sized data frames
cnt = 0 # Counter, for max_clean_count
for df, f in zip(self.sub_df_dict.values(), first_slug):
if not f: # If first slug list is empty (no slugs occurring in current data frame)
if len(df) >= size_list: # Check that data frame has enough data points
if cnt < max_clean_count:
# Drop last hour of data. It is not known if it would have led to a slug or normal flow
df = df[:-60]
# Compute number of splits that can be performed on the sub data frame
n_splits = int(np.ceil(len(df) / size_list))
if n_splits > 1:
# Compute the overlap value if any
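# (the overlap spreads the n_splits windows of length size_list evenly so that together they cover the whole sub frame)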
overlap = int(np.ceil(((size_list * (n_splits - 1)) - (len(df) - size_list)) / (
n_splits - 1)))
else:
overlap = 0
# Add data frames of size size_list to df_list
self.df_list.extend([df[i: i + size_list] for i in range(0, (len(df) - (size_list - overlap)), (
size_list - overlap))])
# Append corresponding labels to label array
for i in range(0, (len(df) - (size_list - overlap)), (size_list - overlap)):
self.label = np.append(self.label, [0])
cnt += 1 # Count number of clean/normal intervals added
else: # If first slugs are present in current data frame
for first in f:
if first - size_list >= 0: # Check there's a sufficient number of points before the first slug
self.df_list.append(df[first - size_list: first]) # Add data frames to df_list
self.label = np.append(self.label, [1]) # Append corresponding labels to label array
return
def feature_vector(self, split_num=5, time_predict=60, percentage_significance=0.1, standardise=True):
"""
Transform classification data into feature vectors.
Parameters
----------
split_num : int (optional)
Number of times to split the data frame (default is 5)
time_predict : int (optional)
Number of minutes to ignore at the end of the interval. This number will also be the number of minutes the
classifier is trained to recognize a slug before it occurs (default is 60)
percentage_significance : float (optional)
For the significant feature, percentage value for which to state an increase/decrease in the data was
significant compared to the original value. Must be below 1 (default is 0.10)
standardise : bool (optional)
Whether to standardise the data or not (default is True)
"""
assert percentage_significance <= 1, "percentage_significance must be a decimal/percentage"
assert time_predict < len(self.df_list[0]), "Time to prediction before must be less than data frame size"
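# Each size_list-long interval is summarised as one feature vector: per-window means and standard deviations
# of every sensor, plus the window-to-window deltas, trend flags and significance counts computed further below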
for k in range(len(self.df_list)):
clean_df = self.df_list[k].copy() # Get copy of current data frame
if standardise: # standardise the data
clean_df = self.standardise(clean_df)
clean_df = clean_df[:-time_predict] # Ignore data within prediction time, default 1 hour
clean_df = clean_df.drop("WH_choke", axis=1) # Drop choke data (always opened)
self.features = ["WH_P", "DH_P", "WH_T", "DH_T"] # Update features list attribute
interval = int(len(clean_df) / split_num) # Get window size
clean = [] # Create empty list to store current data frames feature
if k == 0:
header = [] # Create empty list to store features' names
for i in range(split_num):
low = i * interval # Lower bound of interval
high = (i + 1) * interval # Upper bound of interval
for f in self.features:
if k == 0:
header.append(("mean_" + f + "_" + str(i))) # Store mean feature header name
header.append(("std_" + f + "_" + str(i))) # Store std feature header name
clean.append(clean_df[f][low:high].mean()) # Append interval's mean feature value
clean.append(clean_df[f][low:high].std()) # Append interval's std feature value
if k == 0:
self.X = pd.DataFrame([clean], columns=[*header]) # Create new data frame X to store feature vectors
else:
dic = dict(zip(header, clean))
self.X = self.X.append(dic, ignore_index=True) # Append data from current data frame to X attribute
for i in range(split_num - 1):
for f in self.features:
# Get delta mean feature
self.X["diff_mean_" + f + "_" + str(i) + "_" + str(i + 1)] = self.X["mean_" + f + "_" + str(i + 1)] - \
self.X["mean_" + f + "_" + str(i)]
# Get delta std feature
self.X["diff_std_" + f + "_" + str(i) + "_" + str(i + 1)] = self.X["std_" + f + "_" + str(i + 1)] - \
self.X["std_" + f + "_" + str(i)]
# Get mean trend feature (1 increase, 0 decrease)
self.X["diff_mean_trend_" + f + "_" + str(i) + "_" + str(i + 1)] = np.where(
self.X["diff_mean_" + f + "_" + str(i) + "_" + str(i + 1)] > 0, 1, 0)
# Get std trend feature
self.X["diff_std_trend_" + f + "_" + str(i) + "_" + str(i + 1)] = np.where(
self.X["diff_std_" + f + "_" + str(i) + "_" + str(i + 1)] > 0, 1, 0)
# Get significance delta feature
# A significant delta is a difference of more than percentage_significance, default 10%
self.X["diff_mean_signif_" + f + "_" + str(i) + "_" + str(i + 1)] = np.where(
self.X["diff_mean_" + f + "_" + str(i) + "_" + str(i + 1)] > self.X[
("mean_" + f + "_" + str(i))] * percentage_significance, 1, 0)
self.X["diff_std_signif_" + f + "_" + str(i) + "_" + str(i + 1)] = np.where(
self.X["diff_std_" + f + "_" + str(i) + "_" + str(i + 1)] > self.X[
("std_" + f + "_" + str(i))] * percentage_significance, 1, 0)
# Get count of significant increases and count of significant decreases features
for f in self.features:
binary_mean_diff_col_names = []
binary_mean_trend_col_names = []
binary_std_diff_col_names = []
binary_std_trend_col_names = []
for i in range(split_num - 1):
binary_mean_trend_col_names.append("diff_mean_trend_" + f + "_" + str(i) + "_" + str(i + 1))
binary_mean_diff_col_names.append("diff_mean_signif_" + f + "_" + str(i) + "_" + str(i + 1))
binary_std_trend_col_names.append("diff_std_trend_" + f + "_" + str(i) + "_" + str(i + 1))
binary_std_diff_col_names.append("diff_std_signif_" + f + "_" + str(i) + "_" + str(i + 1))
# Count of significant increases
self.X["num_mean_" + f + "_sign_incr"] = self.X[binary_mean_diff_col_names].sum(axis=1).where(
self.X[binary_mean_trend_col_names] == 1, 0)
self.X["num_std_" + f + "_sign_incr"] = self.X[binary_std_diff_col_names].sum(axis=1).where(
self.X[binary_std_trend_col_names] == 1, 0)
# Count of significant decreases
self.X["num_mean_" + f + "_sign_decr"] = self.X[binary_mean_diff_col_names].sum(axis=1).where(
self.X[binary_mean_trend_col_names] == 0, 0)
self.X["num_std_" + f + "_sign_decr"] = self.X[binary_std_diff_col_names].sum(axis=1).where(
self.X[binary_std_trend_col_names] == 0, 0)
return
def split_data(self, test_size=0.3):
"""
Split the data into a training and testing set
Parameters
----------
test_size : float (optional)
Fraction of the data to include in the test set (default is 0.3)
"""
assert (test_size <= 1), "Test size must be a percentage, less than 1"
sss = StratifiedShuffleSplit(n_splits=2, test_size=test_size, random_state=0) # Instantiate Shuffle Split
sss.get_n_splits(self.X, self.label)
train_index, test_index = sss.split(self.X, self.label) # Split data
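# sss.split yields one (train, test) index pair per split; the train indices of the first pair and the
# test indices of the second pair are used below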
# Store train and test data into respective X and y attributes
self.X_train, self.X_test = self.X.iloc[train_index[0]], self.X.iloc[test_index[1]]
self.y_train, self.y_test = self.label[train_index[0]], self.label[test_index[1]]
return
def RF_train(self, n_estimators=15, max_depth=None, bootstrap=True, **kwargs):
"""
Train the Random Forest model
Parameters
----------
n_estimators : int
Number of Decision Trees in Random Forest model (optional, default is 15)
max_depth : int
Maximum depth of the Decision Trees in the Random Forest model (optional, default is None)
bootstrap : bool
Whether bootstrap samples are used when building decision trees. If False, the whole dataset is used to
build each tree in the Random Forest (optional, default is True)
kwargs
test_size : float
For split_data method, fraction of the data assigned to the test set
"""
test_size = 0.3
if "test_size" in kwargs.keys():
test_size = kwargs["test_size"]
self.split_data(test_size=test_size)
# Instantiate RF model
self.rf = RandomForestClassifier(n_estimators=n_estimators, max_depth=max_depth, bootstrap=bootstrap)
self.rf.fit(self.X_train, self.y_train) # Fit model to RF model
pred_features = self.rf.feature_importances_ # Get features scores
return pred_features
def feature_selection(self, feature_score, top_n=15):
"""
Selects the top_n most important features from the feature_score list
Parameters
----------
feature_score : list of floats
List of feature scores, in order. As produced by the RF_train method
top_n : int
Number of features to return
Returns
-------
top_feature : list of str
List containing the names of the top_n features
"""
assert (len(feature_score) == len(self.X.columns)), "Feature numbers must be the same in both lists"
from collections import namedtuple
feature_s = [] # Empty list to store feature score and feature name
Feature = namedtuple('Feature', 'name score') # Named tuple pairing each feature name with its importance score
for f, s in zip(self.X.columns, feature_score):
feature_s.append(Feature(f, s)) # Store feature score with feature names
feature_ranked = sorted(feature_s, key=lambda x: getattr(x, 'score'), reverse=True) # Sort features by score
top_feature = []
for i in range(top_n):
top_feature.append(feature_ranked[i][0]) # Get list of names of top top_n features
return top_feature
def RF_predict(self, true_label=True, **kwargs):
"""
Predict data labels from trained Random Forest model
Parameters
----------
true_label : bool (optional)
Whether the data used has true labels (default is True)
kwargs
X_test : Pandas DataFrame
For user, if additional data needs to be predicted. Data frame needs to be in feature vector format
y_test : Pandas Series or Numpy array
For user, labels of inputted X_test data
Returns
-------
pred : array
prediction labels for the test data
score : float
Accuracy score of the predictions, if true_label = True
cf : array
Confusion matrix, as created by Scikit Learn's Confusion matrix on the accuracy of the results, if
true_label = True
"""
# If new data is passed to be classified
if "X_test" in kwargs.keys():
self.X_test = kwargs["X_test"]
pred = self.rf.predict(self.X_test) # Predict labels of data
if true_label: # If the true labels are known
if "y_test" in kwargs.keys():
self.y_test = kwargs["y_test"] # if new true labels are passed
assert (len(self.y_test) == len(self.X_test)), "X and y must be the same length"
# Compute prediction score and confusion matrix
score = self.rf.score(self.X_test, self.y_test)
cf = confusion_matrix(self.y_test, pred)
return pred, score, cf
else:
return pred
def LogReg_train(self, top_features=[], C=0.01, max_iter=50, split=True, **kwargs):
"""
Train Logistic Regression model
Parameters
----------
top_features : list of str (optional)
List of the top feature names. If the list is not empty, the Logistic Regression model will only be trained using
the listed features. (default is [] meaning all features)
C : float (optional)
Regularization parameter for Logistic Regression (default is 0.01)
max_iter : int (optional)
Maximum iteration parameter for Logistic Regression (default is 50)
split : bool
True for new split, False to use same split as RF model was trained on
kwargs
test_size : float
For split_data method, fraction of the data assigned to the test set
"""
# Split data into a training and testing set if new split is required
if split:
test_size = 0.3
if "test_size" in kwargs.keys():
test_size = kwargs["test_size"]
self.split_data(test_size=test_size)
if len(top_features) != 0: # If top features selection are passed
self.logreg_features = top_features # Store top features as an attribute
self.X_train = self.X_train[[*self.logreg_features]] # Update feature vector selection
# Instantiate and fit Logistic Regression model to training data
self.log = LogisticRegression(C=C, max_iter=max_iter)
self.log.fit(self.X_train, self.y_train)
return
def LogReg_pred(self, true_label=True, **kwargs):
"""
Predict data labels from trained Logistic Regression model
Parameters
----------
true_label : bool (optional)
Whether the data used has true labels (default is True)
kwargs
X_test : Pandas DataFrame
For user, if additional data needs to be predicted. Data frame needs to be feature vector format
y_test : Pandas Series or Numpy array
For user, labels of inputted X_test data
Returns
-------
pred : array
prediction labels for the test data
proba : array
prediction probabilities for the test data
score : float
Accuracy score of the predictions, if true_label = True
cf : array
Confusion matrix, as created by Scikit Learn's Confusion matrix on the accuracy of the results, if
true_label = True
"""
# If new data is passed to be classified
if "X_test" in kwargs.keys():
self.X_test = kwargs["X_test"]
# If only top features are used
if hasattr(self, "logreg_features"):
self.X_test = self.X_test[[*self.logreg_features]]
# Predict labels and probabilities of data
log_pred = self.log.predict(self.X_test)
proba = self.log.predict_proba(self.X_test)
if true_label: # If the true labels are known
if "y_test" in kwargs.keys():
self.y_test = kwargs["y_test"] # if new true labels are passed
# Compute prediction score and confusion matrix
score = self.log.score(self.X_test, self.y_test)
cf = confusion_matrix(self.y_test, log_pred)
return log_pred, proba, score, cf
else:
return log_pred, proba
def data_prep(self, **kwargs):
"""
Quick data preparation from raw pandas data frame to feature vectors
Parameters
----------
kwargs
slug_diff : float
Argument of label_slugs method. Minimum WHP differential value to be assumed a slug peak
size_list : int
Argument of format_data method. Size of data frame to use for classification
max_clean_count : int
Argument of format_data method. Maximum number of data frames to create that lead to a normal flow.
split_num : int
Argument of feature_vector method. Number of splits of the data frame
time_predict : int
Argument of feature_vector method. Number of minutes before slug flow.
percentage_significance : float
Argument of feature_vector method. For the significant feature, percentage value for which to state
an increase/decrease in the data was significant compared to the original value.
standardise : bool
Argument of feature_vector method. Whether to standardise the data or not.
"""
assert hasattr(self, "pd_df"), "Pandas data frame pd_df attribute must exist"
assert not self.pd_df.empty, "Pandas data frame cannot be empty"
self.sub_data(**kwargs) # Split original data frame into smaller continuous data frames
slug_diff = 3
if "slug_diff" in kwargs.keys():
slug_diff = kwargs["slug_diff"]
first_idx, slug_idx = self.label_slugs(slug_diff=slug_diff) # Get first slug indices list
size_list = 300
if "size_list" in kwargs.keys():
size_list = kwargs["size_list"]
max_clean_count = 10000
if "max_clean_count" in kwargs.keys():
max_clean_count = kwargs["max_clean_count"]
# Format data into size_list (default 300) long data frames, with label list
self.format_data(first_idx, size_list=size_list, max_clean_count=max_clean_count)
# check for kwargs
split_num = 5
if "split_num" in kwargs.keys():
split_num = kwargs["split_num"]
time_predict = 60
if "time_predict" in kwargs.keys():
time_predict = kwargs["time_predict"]
percentage_significance = 0.1
if "percentage_significance" in kwargs.keys():
percentage_significance = kwargs["percentage_significance"]
standardise = True
if "standardise" in kwargs.keys():
standardise = kwargs["standardise"]
# Create data feature vectors
self.feature_vector(split_num=split_num, time_predict=time_predict,
percentage_significance=percentage_significance, standardise=standardise)
return
def plot_raw_slugs(self, variables=["WH_P", "DH_P"], scaled=True, n_examples=10, first_sample=10, **kwargs):
"""
Plotting function to plot a set of n_examples raw sub_df_dict data frames, showing the slug peaks and the first
slug value in each.
Parameters
----------
variables : list of str (optional)
Names of variables to be plotted (default is ["WH_P", "DH_P"])
scaled : bool (optional)
Whether to scale variables to WH_P (default is True)
n_examples : int (optional)
Number of examples to plot (default is 10)
first_sample : int (optional)
Index of the sub_df_dict to start plotting from (default is 10)
kwargs
slug_diff : float
Argument of label_slugs method. Minimum WHP differential value to be assumed a slug peak
Returns
-------
: Figure
raw slugs figure
"""
assert hasattr(self, "pd_df"), "Pandas data frame pd_df attribute must exist"
assert not self.pd_df.empty, "Pandas data frame cannot be empty"
for v in variables:
assert (v in self.pd_df.columns) # Assert variables name exist
x_list = [x for x in range(first_sample, first_sample + n_examples)] # Create list of data frames to plot
self.sub_data(**kwargs) # Split data frame into smaller continuous data frames
# check for kwargs
slug_diff = 3
if "slug_diff" in kwargs.keys():
slug_diff = kwargs["slug_diff"]
f, s = self.label_slugs(slug_diff=slug_diff) # Get first slug indexes
fig, ax = plt.subplots(n_examples, 1, figsize=(20, 20)) # Create plot with the n_examples
plt.tight_layout()
for i, x in enumerate(x_list):
for v in variables:
if (v != "WH_P") & scaled: # If variables are to be scaled to WHP
ax[i].plot(self.sub_df_dict[x]["ts"], self.sub_df_dict[x][v] - self.sub_df_dict[x][v][0] + 20, "-",
label=str(v))
else:
ax[i].plot(self.sub_df_dict[x]["ts"], self.sub_df_dict[x][v], "-", label=str(v))
# Plot slug peaks in red
ax[i].plot(self.sub_df_dict[x]["ts"][s[x]], self.sub_df_dict[x]["WH_P"][s[x]], "ro", label="Slug peaks")
# Plot first slug in magenta
ax[i].plot(self.sub_df_dict[x]["ts"][f[x]], self.sub_df_dict[x]["WH_P"][f[x]], "m*", markersize=20,
label="First slug")
# Plot start of interval in cyan
ax[i].plot(self.sub_df_dict[x]["ts"][f[x]] - pd.Timedelta('5 h'))
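# A minimal standalone sketch of the feature-ranking step used in feature_selection()
# above, with made-up feature names and scores (the real class pulls the scores from
# RandomForestClassifier.feature_importances_):
from collections import namedtuple

def top_n_features(names, scores, top_n=3):
    """Return the names of the top_n highest-scoring features."""
    Feature = namedtuple('Feature', 'name score')
    ranked = sorted((Feature(n, s) for n, s in zip(names, scores)),
                    key=lambda f: f.score, reverse=True)
    return [f.name for f in ranked[:top_n]]

print(top_n_features(['WH_P', 'DH_P', 'WH_T', 'choke'], [0.4, 0.1, 0.3, 0.2]))
# -> ['WH_P', 'WH_T', 'choke']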
import numpy as np
from numpy import where
from flask import Flask, request, jsonify, render_template
import pandas as pd
from sklearn.ensemble import IsolationForest
from pyod.models.knn import KNN
import json
from flask import send_from_directory
from flask import current_app
app = Flask(__name__)
class Detect:
def __init__(self, file, non_num):
self.file = file
self.non_num = non_num
def IQR(self):
# anomaly=pd.DataFrame()
data = pd.DataFrame(self.file)
non_num=pd.DataFrame(self.non_num)
data.dropna(axis=0,inplace=True)
# data=data.select_dtypes(include=['float64','int64'])
Q1 = data.quantile(0.25)
Q3 = data.quantile(0.75)
IQR = Q3 - Q1
IQR_Out = data[((data < (Q1 - 1.5 * IQR)) |(data > (Q3 + 1.5 * IQR))).any(axis=1)]
IQR_Out = non_num.join(IQR_Out, how='inner')
IQR_Out.to_csv(r'IQR_Outlier.csv')
# IQR Method
def isolation(self):
anomaly=pd.DataFrame()
data_n=pd.DataFrame(self.file)
non_num=pd.DataFrame(self.non_num)
data_n.dropna(axis=0,inplace=True)
# data_n=data_n.select_dtypes(include=['float64','int64'])
model = IsolationForest(n_estimators=50, max_samples=500, contamination=.01, bootstrap=False, n_jobs=1, random_state=1, verbose=0, warm_start=False).fit(data_n)
data_n['anomaly_score'] = model.predict(data_n)
anomaly=data_n[data_n['anomaly_score']==-1]
anomaly = non_num.join(anomaly, how='inner')
anomaly.to_csv("outlierss_isolation.csv")
# Isolation forest Method
def mcd(self):
anomaly=pd.DataFrame()
data = pd.DataFrame(self.file)
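# A self-contained sketch of the IQR rule used in Detect.IQR() above, on a toy
# numeric DataFrame (column name and values are made up):
import pandas as pd

toy = pd.DataFrame({'value': [10, 11, 9, 10, 12, 95]})
Q1, Q3 = toy.quantile(0.25), toy.quantile(0.75)
IQR = Q3 - Q1
outliers = toy[((toy < (Q1 - 1.5 * IQR)) | (toy > (Q3 + 1.5 * IQR))).any(axis=1)]
print(outliers)  # only the row with value 95 is flagged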
import pandas as pd
import tasks
from . import base
from models import TimeModel
from datetime import datetime, timedelta
from azrael import SnapchatReporter
from typing import Optional
class SnapchatReportFetcher(base.ReportFetcher[tasks.FetchSnapchatReportTask]):
api_start_date: Optional[datetime]
api_end_date: Optional[datetime]
def fetch(self):
self.task.api.ad_account_id = self.task.ad_account_id
self.task.campaigns = self.task.api.get_campaigns()
self.task.ad_squads = self.task.api.get_ad_squads()
reporter = SnapchatReporter(api=self.task.api)
self.api_start_date = reporter.clamped_date_in_account_timezone(
date=self.task.report_start_date,
now=TimeModel.shared.utc_now
)
self.api_end_date = reporter.clamped_date_in_account_timezone(
date=self.task.report_end_date + timedelta(days=1),
now=TimeModel.shared.utc_now
)
print(f'Actual start: {self.api_start_date}')
print(f'Actual end: {self.api_end_date}')
start_day_offset = (self.api_start_date - self.task.report_start_date).days
self.task.report_start_date = self.api_start_date - timedelta(days=start_day_offset, hours=self.api_start_date.hour)
self.task.report_end_date = self.api_end_date - timedelta(days=start_day_offset + 1, hours=self.api_end_date.hour)
class SnapchatCampaignsReportFetcher(SnapchatReportFetcher):
def fetch(self):
super().fetch()
if self.task.report_start_date > self.task.report_end_date:
return
reporter = SnapchatReporter(api=self.task.api)
df = pd.DataFrame()
for c in self.task.campaigns:
report = reporter.get_campaign_stats(
campaign_id=c['id'],
start_date=self.api_start_date,
end_date=self.api_end_date,
columns=self.task.fetch_columns,
swipe_up_attribution_window=self.task.swipe_up_attribution_window,
view_attribution_window=self.task.view_attribution_window
)
df = df.append(report, sort=False)
df[self.task.fetched_currency_column] = self.task.api.ad_account['currency']
self.task.report = df
class SnapchatAdSquadsReportFetcher(SnapchatReportFetcher):
def fetch(self):
super().fetch()
if self.task.report_start_date > self.task.report_end_date:
return
reporter = SnapchatReporter(api=self.task.api)
df = pd.DataFrame()
for c in self.task.campaigns:
report = reporter.get_adsquad_stats(
campaign_id=c['id'],
start_date=self.api_start_date,
end_date=self.api_end_date,
columns=self.task.fetch_columns,
swipe_up_attribution_window=self.task.swipe_up_attribution_window,
view_attribution_window=self.task.view_attribution_window
)
df = df.append(report, sort=False)
df[self.task.fetched_currency_column] = self.task.api.ad_account['currency']
self.task.report = df
class SnapchatAdsReportFetcher(SnapchatReportFetcher):
def fetch(self):
super().fetch()
if self.task.report_start_date > self.task.report_end_date:
return
self.task.ads = self.task.api.get_ads()
reporter = SnapchatReporter(api=self.task.api)
df = pd.DataFrame()
import datetime
import os
import numpy as np
import pandas as pd
import us_state_abbrev
def LoadAllJhuData(path=None):
if not path:
path = os.path.join(os.path.dirname(__file__),
'COVID-19/csse_covid_19_data/csse_covid_19_daily_reports')
all_dfs = {}
for f in sorted(os.listdir(path)):
full_path = os.path.join(path, f)
if not os.path.isfile(full_path):
continue
if not f.endswith('.csv'):
continue
df = pd.read_csv(full_path)
#encoding=utf-8
import numpy as np
import pandas as pd
from activation_functions import one_hot
from build_nn import BasePath, L_layer_model,predict
import pickle
def get_data_from_kaggle(filePath):
"""
:param filePath:
:return:
X -- input data, shape of (number of features, number of examples)
Y -- the true label of X, shape of (number of class, number of examples)
"""
my_frame = pd.read_csv(filePath)
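# The docstring above expects X as (n_features, n_examples) and Y as a one-hot
# matrix of shape (n_classes, n_examples). A small sketch of that reshaping with
# random stand-in data (shapes and label values are made up):
import numpy as np

labels = np.array([0, 2, 1, 2])        # raw class labels for 4 examples
features = np.random.rand(4, 3)        # (n_examples, n_features) as read from a CSV
X = features.T                         # -> (n_features, n_examples) = (3, 4)
Y = np.eye(3)[labels].T                # -> (n_classes, n_examples) = (3, 4)
print(X.shape, Y.shape)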
# -*- coding: utf-8 -*-
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import numpy as np
import pandas as pd
import platform
import unittest
from itertools import combinations, combinations_with_replacement, product
from numba.core.config import IS_32BITS
from numba.core.errors import TypingError
from sdc.tests.test_base import TestCase
from sdc.tests.test_utils import (skip_numba_jit,
_make_func_from_text,
gen_frand_array)
def _make_func_use_binop1(operator):
func_text = "def test_impl(A, B):\n"
func_text += " return A {} B\n".format(operator)
return _make_func_from_text(func_text)
def _make_func_use_binop2(operator):
func_text = "def test_impl(A, B):\n"
func_text += " A {} B\n".format(operator)
func_text += " return A\n"
return _make_func_from_text(func_text)
def _make_func_use_method_arg1(method):
func_text = "def test_impl(A, B):\n"
func_text += " return A.{}(B)\n".format(method)
return _make_func_from_text(func_text)
class TestSeries_ops(TestCase):
def test_series_operators_int(self):
"""Verifies using all various Series arithmetic binary operators on two integer Series with default indexes"""
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.int32),
np.ones(n + 3, dtype=np.int32),
np.random.randint(-5, 5, n + 7)]
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for data_left, data_right in combinations_with_replacement(data_to_test, 2):
# integers to negative powers are not allowed
if (operator == '**' and np.any(data_right < 0)):
data_right = np.abs(data_right)
with self.subTest(left=data_left, right=data_right, operator=operator):
S1 = pd.Series(data_left)
S2 = pd.Series(data_right)
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2), check_dtype=False)
def test_series_operators_int_scalar(self):
"""Verifies using all various Series arithmetic binary operators
on an integer Series with default index and a scalar value"""
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.int32),
np.ones(n + 3, dtype=np.int32),
np.random.randint(-5, 5, n + 7)]
scalar_values = [1, -1, 0, 3, 7, -5]
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for data, scalar, swap_operands in product(data_to_test, scalar_values, (False, True)):
S = pd.Series(data)
left, right = (S, scalar) if swap_operands else (scalar, S)
# integers to negative powers are not allowed
if (operator == '**' and np.any(right < 0)):
right = abs(right)
with self.subTest(left=left, right=right, operator=operator):
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(hpat_func(left, right), test_impl(left, right), check_dtype=False)
def test_series_operators_float(self):
"""Verifies using all various Series arithmetic binary operators on two float Series with default indexes"""
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.float32),
np.ones(n + 3, dtype=np.float32),
np.random.ranf(n + 7)]
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for data_left, data_right in combinations_with_replacement(data_to_test, 2):
with self.subTest(left=data_left, right=data_right, operator=operator):
S1 = pd.Series(data_left)
S2 = pd.Series(data_right)
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2), check_dtype=False)
def test_series_operators_float_scalar(self):
"""Verifies using all various Series arithmetic binary operators
on a float Series with default index and a scalar value"""
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.float32),
np.ones(n + 3, dtype=np.float32),
np.random.ranf(n + 7)]
scalar_values = [1., -1., 0., -0., 7., -5.]
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for data, scalar, swap_operands in product(data_to_test, scalar_values, (False, True)):
S = pd.Series(data)
left, right = (S, scalar) if swap_operands else (scalar, S)
with self.subTest(left=left, right=right, operator=operator):
pd.testing.assert_series_equal(hpat_func(S, scalar), test_impl(S, scalar), check_dtype=False)
@skip_numba_jit('Not implemented in new-pipeline yet')
def test_series_operators_inplace(self):
arithmetic_binops = ('+=', '-=', '*=', '/=', '//=', '%=', '**=')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = self.jit(test_impl)
# TODO: extend to test arithmetic operations between numeric Series of different dtypes
n = 11
A1 = pd.Series(np.arange(1, n, dtype=np.float64), name='A')
A2 = A1.copy(deep=True)
B = pd.Series(np.ones(n - 1), name='B')
hpat_func(A1, B)
test_impl(A2, B)
pd.testing.assert_series_equal(A1, A2)
@skip_numba_jit('Not implemented in new-pipeline yet')
def test_series_operators_inplace_scalar(self):
arithmetic_binops = ('+=', '-=', '*=', '/=', '//=', '%=', '**=')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = self.jit(test_impl)
# TODO: extend to test arithmetic operations between numeric Series of different dtypes
n = 11
S1 = pd.Series(np.arange(1, n, dtype=np.float64), name='A')
S2 = S1.copy(deep=True)
hpat_func(S1, 1)
test_impl(S2, 1)
pd.testing.assert_series_equal(S1, S2)
@skip_numba_jit('operator.neg for SeriesType is not implemented in yet')
def test_series_operator_neg(self):
def test_impl(A):
return -A
hpat_func = self.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_operators_comp_numeric(self):
"""Verifies using all various Series comparison binary operators on two integer Series with various indexes"""
n = 11
data_left = [1, 2, -1, 3, 4, 2, -3, 5, 6, 6, 0]
data_right = [3, 2, -2, 1, 4, 1, -5, 6, 6, 3, -1]
dtype_to_index = {'None': None,
'int': np.arange(n, dtype='int'),
'float': np.arange(n, dtype='float'),
'string': ['aa', 'aa', '', '', 'b', 'b', 'cccc', None, 'dd', 'ddd', None]}
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for dtype, index_data in dtype_to_index.items():
with self.subTest(operator=operator, index_dtype=dtype, index=index_data):
A = pd.Series(data_left, index=index_data)
B = pd.Series(data_right, index=index_data)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
def test_series_operators_comp_numeric_scalar(self):
"""Verifies using all various Series comparison binary operators on an integer Series and scalar values"""
S = pd.Series([1, 2, -1, 3, 4, 2, -3, 5, 6, 6, 0])
scalar_values = [2, 2.0, -3, np.inf, -np.inf, np.PZERO, np.NZERO]
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for scalar in scalar_values:
with self.subTest(left=S, right=scalar, operator=operator):
pd.testing.assert_series_equal(hpat_func(S, scalar), test_impl(S, scalar))
def test_series_operators_comp_str_scalar(self):
"""Verifies using all various Series comparison binary operators on an string Series and scalar values"""
S = pd.Series(['aa', 'aa', '', '', 'b', 'b', 'cccc', None, 'dd', 'ddd', None])
scalar_values = ['a', 'aa', 'ab', 'ba', '']
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for scalar in scalar_values:
with self.subTest(left=S, right=scalar, operator=operator):
pd.testing.assert_series_equal(hpat_func(S, scalar), test_impl(S, scalar))
@skip_numba_jit
def test_series_operators_inplace_array(self):
def test_impl(A, B):
A += B
return A
hpat_func = self.jit(test_impl)
n = 11
A = np.arange(n)**2.0 # TODO: use 2 for test int casting
B = pd.Series(np.ones(n))
np.testing.assert_array_equal(hpat_func(A.copy(), B), test_impl(A, B))
@skip_numba_jit('Functionally test passes, but in old-style it checked fusion of parfors.\n'
'TODO: implement the same checks in new-pipeline')
def test_series_fusion1(self):
def test_impl(A, B):
return A + B + 1
hpat_func = self.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False)
# self.assertEqual(count_parfor_REPs(), 1)
@skip_numba_jit('Functionally test passes, but in old-style it checked fusion of parfors.\n'
'TODO: implement the same checks in new-pipeline')
def test_series_fusion2(self):
def test_impl(A, B):
S = B + 2
if A.iat[0] == 0:
S = A + 1
return S + B
hpat_func = self.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False)
# self.assertEqual(count_parfor_REPs(), 3)
def test_series_operator_add_numeric_scalar(self):
"""Verifies Series.operator.add implementation for numeric series and scalar second operand"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
dtype_to_index = {'None': None,
'int': np.arange(n, dtype='int'),
'float': np.arange(n, dtype='float'),
'string': ['aa', 'aa', 'b', 'b', 'cccc', 'dd', 'ddd']}
int_scalar = 24
for dtype, index_data in dtype_to_index.items():
with self.subTest(index_dtype=dtype, index=index_data):
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n, dtype=np.int64), index=index_data)
else:
A = pd.Series(np.arange(n), index=index_data)
result = hpat_func(A, int_scalar)
result_ref = test_impl(A, int_scalar)
pd.testing.assert_series_equal(result, result_ref, check_dtype=False, check_names=False)
float_scalar = 24.0
for dtype, index_data in dtype_to_index.items():
with self.subTest(index_dtype=dtype, index=index_data):
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n, dtype=np.int64), index=index_data)
else:
A = pd.Series(np.arange(n), index=index_data)
ref_result = test_impl(A, float_scalar)
result = hpat_func(A, float_scalar)
pd.testing.assert_series_equal(result, ref_result, check_dtype=False, check_names=False)
def test_series_operator_add_numeric_same_index_default(self):
"""Verifies implementation of Series.operator.add between two numeric Series
with default indexes and same size"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
dtypes_to_test = (np.int32, np.int64, np.float32, np.float64)
for dtype_left, dtype_right in combinations(dtypes_to_test, 2):
with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right):
A = pd.Series(np.arange(n), dtype=dtype_left)
B = pd.Series(np.arange(n)**2, dtype=dtype_right)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False)
@skip_numba_jit
def test_series_operator_add_numeric_same_index_numeric(self):
"""Verifies implementation of Series.operator.add between two numeric Series
with the same numeric indexes of different dtypes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
dtypes_to_test = (np.int32, np.int64, np.float32, np.float64)
for dtype_left, dtype_right in combinations(dtypes_to_test, 2):
with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right):
A = pd.Series(np.arange(n), index=np.arange(n, dtype=dtype_left))
B = pd.Series(np.arange(n)**2, index=np.arange(n, dtype=dtype_right))
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False)
def test_series_operator_add_numeric_same_index_numeric_fixme(self):
""" Same as test_series_operator_add_same_index_numeric but with w/a for the problem.
Can be deleted when the latter is fixed """
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
index_dtypes_to_test = (np.int32, np.int64, np.float32, np.float64)
for dtype_left, dtype_right in combinations(index_dtypes_to_test, 2):
# FIXME: skip the sub-test if one of the dtypes is float and the other is integer
if not (np.issubdtype(dtype_left, np.integer) and np.issubdtype(dtype_right, np.integer)
or np.issubdtype(dtype_left, np.float) and np.issubdtype(dtype_right, np.float)):
continue
with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right):
A = pd.Series(np.arange(n), index=np.arange(n, dtype=dtype_left))
B = pd.Series(np.arange(n)**2, index=np.arange(n, dtype=dtype_right))
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False)
def test_series_operator_add_numeric_same_index_str(self):
"""Verifies implementation of Series.operator.add between two numeric Series with the same string indexes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
A = pd.Series(np.arange(n), index=['a', 'c', 'e', 'c', 'b', 'a', 'o'])
B = pd.Series(np.arange(n)**2, index=['a', 'c', 'e', 'c', 'b', 'a', 'o'])
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_numeric_align_index_int(self):
"""Verifies implementation of Series.operator.add between two numeric Series with non-equal integer indexes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 11
index_A = [0, 1, 1, 2, 3, 3, 3, 4, 6, 8, 9]
index_B = [0, 1, 1, 3, 4, 4, 5, 5, 6, 6, 9]
np.random.shuffle(index_A)
np.random.shuffle(index_B)
A = pd.Series(np.arange(n), index=index_A)
B = pd.Series(np.arange(n)**2, index=index_B)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_numeric_align_index_str(self):
"""Verifies implementation of Series.operator.add between two numeric Series with non-equal string indexes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 11
index_A = ['', '', 'aa', 'aa', 'ae', 'ae', 'b', 'ccc', 'cccc', 'oo', 's']
index_B = ['', '', 'aa', 'aa', 'cc', 'cccc', 'e', 'f', 'h', 'oo', 's']
np.random.shuffle(index_A)
np.random.shuffle(index_B)
A = pd.Series(np.arange(n), index=index_A)
B = pd.Series(np.arange(n)**2, index=index_B)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
@skip_numba_jit('TODO: fix Series.sort_values to handle both None and '' in string series')
def test_series_operator_add_numeric_align_index_str_fixme(self):
"""Same as test_series_operator_add_align_index_str but with None values in string indexes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 11
index_A = ['', '', 'aa', 'aa', 'ae', 'b', 'ccc', 'cccc', 'oo', None, None]
index_B = ['', '', 'aa', 'aa', 'cccc', 'f', 'h', 'oo', 's', None, None]
np.random.shuffle(index_A)
np.random.shuffle(index_B)
A = pd.Series(np.arange(n), index=index_A)
B = pd.Series(np.arange(n)**2, index=index_B)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_numeric_align_index_other_dtype(self):
"""Verifies implementation of Series.operator.add between two numeric Series
with non-equal integer indexes of different dtypes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
A = pd.Series(np.arange(3*n), index=np.arange(-n, 2*n, 1, dtype=np.int64))
B = pd.Series(np.arange(3*n)**2, index=np.arange(0, 3*n, 1, dtype=np.float64))
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_numeric_diff_series_sizes(self):
"""Verifies implementation of Series.operator.add between two numeric Series with different sizes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
size_A, size_B = 7, 25
A = pd.Series(np.arange(size_A))
B = pd.Series(np.arange(size_B)**2)
result = hpat_func(A, B)
result_ref = test_impl(A, B)
pd.testing.assert_series_equal(result, result_ref, check_dtype=False, check_names=False)
def test_series_operator_add_align_index_int_capacity(self):
"""Verifies implementation of Series.operator.add and alignment of numeric indexes of large size"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 20000
np.random.seed(0)
index1 = np.random.randint(-30, 30, n)
index2 = np.random.randint(-30, 30, n)
A = pd.Series(np.random.ranf(n), index=index1)
B = pd.Series(np.random.ranf(n), index=index2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_align_index_str_capacity(self):
"""Verifies implementation of Series.operator.add and alignment of string indexes of large size"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 2000
np.random.seed(0)
valid_ids = ['', 'aaa', 'a', 'b', 'ccc', 'ef', 'ff', 'fff', 'fa', 'dddd']
index1 = [valid_ids[i] for i in np.random.randint(0, len(valid_ids), n)]
index2 = [valid_ids[i] for i in np.random.randint(0, len(valid_ids), n)]
A = pd.Series(np.random.ranf(n), index=index1)
B = pd.Series(np.random.ranf(n), index=index2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_str_same_index_default(self):
"""Verifies implementation of Series.operator.add between two string Series
with default indexes and same size"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
A = pd.Series(['a', '', 'ae', 'b', 'cccc', 'oo', None])
B = pd.Series(['b', 'aa', '', 'b', 'o', None, 'oo'])
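# A self-contained sketch of what _make_func_use_binop1 does: it renders the operator
# into a source string and builds a function from it (the real helper delegates to
# _make_func_from_text; this stand-in uses exec directly):
def make_binop_func(operator):
    src = "def test_impl(A, B):\n    return A {} B\n".format(operator)
    namespace = {}
    exec(src, namespace)
    return namespace['test_impl']

add_impl = make_binop_func('+')
print(add_impl(2, 3))  # 5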
import pandas as pd # Library to read and write the data in structure format
import numpy as np # Library to deal with vector, array and matrices
import requests # Library to read APIs
import re # Library for regular expression
import json # Library to read and write JSON file
from bs4 import BeautifulSoup # Library for web scraping
####################################### APIs to be scraped to get real-time Corona data ############################
moh_link = "https://www.mohfw.gov.in/"
url_state = "https://api.covid19india.org/state_district_wise.json"
data_data = "https://api.covid19india.org/data.json"
travel_history="https://api.covid19india.org/travel_history.json"
raw_data="https://api.covid19india.org/raw_data.json"
class COVID19India(object):
def __init__(self):
self.moh_url = moh_link # Ministry of Health and Family welfare website
self.url_state = url_state # districtwise data
self.data_url = data_data # All India data ==> Statewise data, test data, timeseries data etc
self.travel_history_url=travel_history # Travel history of Patient
self.raw_data_url=raw_data
def __request(self, url):
content=requests.get(url).json()
return content
def moh_data(self):
url = self.moh_url
df = pd.read_html(url)[-1].iloc[:-1]
del df['S. No.']
cols = df.columns.values.tolist()
return df
def statewise(self):
data=self.__request(self.data_url)
# delta=pd.DataFrame(data.get('key_values'))
statewise=pd.DataFrame(data.get('statewise'))
# statewise=pd.concat([pd.DataFrame(data.get('statewise')),pd.DataFrame([i.get('delta') for i in data.get('statewise')])],axis=1)
# del statewise["delta"]
cases_time_series=pd.DataFrame(data.get('cases_time_series'))
tested=pd.DataFrame(data.get('tested'))
return(statewise,cases_time_series,tested)
def state_district_data(self):
state_data = self.__request(self.url_state)
key1 = state_data.keys()
Values = []
for k in key1:
key2 = state_data[k]['districtData'].keys()
for k2 in key2:
c = list(state_data[k]['districtData'][k2].values())
v = [k, k2, c[0]]
Values.append(v)
state_data = pd.DataFrame(Values, columns=['State_UT', 'District', 'Confirmed'])
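# A standalone sketch of the nested-JSON flattening done in state_district_data()
# above, using a tiny hand-written dict in the same
# {state: {'districtData': {district: {...}}}} shape (numbers are made up):
import pandas as pd

nested = {'Kerala': {'districtData': {'Ernakulam': {'confirmed': 5},
                                      'Kannur': {'confirmed': 3}}},
          'Delhi': {'districtData': {'New Delhi': {'confirmed': 7}}}}
rows = [(state, district, d['confirmed'])
        for state, payload in nested.items()
        for district, d in payload['districtData'].items()]
print(pd.DataFrame(rows, columns=['State_UT', 'District', 'Confirmed']))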
import os
from math import ceil
from concurrent.futures import ProcessPoolExecutor
import pandas as pd
class FileUtils:
ALLOWED_EXTENSIONS = ['csv', 'xls', 'xlsx', 'zip']
@staticmethod
def read_parallel(paths, workers=4, concat=True, **read_options):
""" Concat the dataframes using multiple proccess """
dim = ceil(len(paths) / workers)
chunks = (paths[k: k + dim] for k in range(0, len(paths), dim))
temp = []
with ProcessPoolExecutor(max_workers=workers) as executor:
futures = [
executor.submit(
FileUtils.read_chunk, chunk, **read_options) for chunk in chunks
]
for future in futures:
temp.append(future.result())
if concat:
temp = pd.concat(temp)
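# A sketch of the chunking used by read_parallel() above: the path list is cut into
# ceil(len(paths) / workers) sized slices, one per worker (file names here are dummies):
from math import ceil

paths = ['file_{}.csv'.format(i) for i in range(10)]
workers = 4
dim = ceil(len(paths) / workers)
chunks = [paths[k: k + dim] for k in range(0, len(paths), dim)]
print(chunks)  # 4 slices of at most 3 paths each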
# -*- coding: utf-8 -*-
import sys
import os
from pandas.io import pickle
# import pandas as pd
PROJECT_ID = "dots-stock" # @param {type:"string"}
REGION = "us-central1" # @param {type:"string"}
USER = "shkim01" # <---CHANGE THIS
BUCKET_NAME = "gs://pipeline-dots-stock" # @param {type:"string"}
PIPELINE_ROOT = f"{BUCKET_NAME}/pipeline_root/{USER}"
from typing import NamedTuple
from kfp import dsl
from kfp.v2 import compiler
from kfp.v2.dsl import (Artifact,
Dataset,
Input,
Model,
Output,
Metrics,
ClassificationMetrics,
component)
from kfp.v2.google.client import AIPlatformClient
@component(
# base_image='gcr.io/dots-stock/py38-pandas-cal',
base_image="gcr.io/dots-stock/python-img-v5.2"
)
def set_defaults()-> NamedTuple(
'Outputs',
[
('date_ref',str),
('n_days', int),
('period_extra', int)
]):
import pandas as pd
from trading_calendars import get_calendar
today = pd.Timestamp.now('Asia/Seoul')
import pandas as pd
import pyodbc
def query_to_df(sql_file, driver='upiqm110'):
# Read SQL
sql_path = 'sql/{}.sql'.format(sql_file)
sql_query = open(sql_path).read()
# Connection
print('Connecting to database ... ')
con = pyodbc.connect('DSN={driver}'.format(driver=driver))
return pd.read_sql_query(sql_query, con)
#!/usr/bin/env python
# coding: utf-8
# # Feature_Selection
# - **Having irrelevant features in your data can decrease the accuracy of the models and make your models learn based on irrelevant features**
# ## Definition
# **Feature Selection** :
#
# - The process of selecting the features that contribute most to the model, either to get the best accuracy or to reduce training time.
#
# - Feature selection methods are intended to reduce the number of input variables to those that are believed to be most useful to a model in order to predict the target variable.
#
#
# **Benefits of Performing Feature-Selection :**
#
# 1. Reduce Overfitting
# 2. Improve Accuracy
# 3. Reduce Traning Time
#
# In[3]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
# In[4]:
pwd
# In[5]:
path='E:\\DataScience\\MachineLearning\\Breast-cancer-detection-using-ML'
# In[6]:
import os
os.listdir(path)
# In[7]:
#reading data
df = pd.read_csv(path+"\\data.csv")
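# One concrete way to do the feature selection described above is a univariate filter
# such as scikit-learn's SelectKBest; this sketch uses random stand-in data rather than
# the breast-cancer CSV loaded above:
import numpy as np
from sklearn.feature_selection import SelectKBest, f_classif

X_demo = np.random.rand(100, 10)
y_demo = np.random.randint(0, 2, 100)
selector = SelectKBest(score_func=f_classif, k=5).fit(X_demo, y_demo)
print(selector.get_support(indices=True))  # indices of the 5 retained features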
'''
Name:HenonMapDataGen
Description: It is used to generate the data of the modified Henon Map
Email: <EMAIL>
OpenSource: https://github.com/yesunhuang
Msg: For quantum recurrent neural networks
Author: YesunHuang
Date: 2022-03-26 20:45:29
'''
#import everything
import pandas as pd
import numpy as np
import torch
import os
try:
from DataGenerator.SequenceDataLoader import SeqDataLoader
except:
from SequenceDataLoader import SeqDataLoader
class HenonMapDataGen:
'''Generate data of modified Henon Map'''
def __init__(self, seed:list=[],\
n:int=1,a:float=1.4,b:float=0.3,\
heavyMem:bool=True,bound:bool=-1.2,\
savepath:str=os.getcwd()):
'''
name: __init__
function: initialize the Henon map
param {seed}: seed for generation
param {n}: interval
param {a}: Henon value a
param {b}: Henon value b
param {heavyMem}: whether to use heavy memory
param {bound}: bound of the data
param {savepath}: path to save the data
'''
self.heavyMem=heavyMem
self.interval=n
if len(seed)==0:
self.random_seed()
else:
self.__seed=seed
if self.heavyMem:
assert len(self.__seed)==n+1,'invalid seed!'
else:
assert len(self.__seed)==2*n,'invalid seed!'
self.paramA=a
self.paramB=b
self.HenonFunc=lambda X1,X0:1-self.paramA*X1*X1+self.paramB*X0
self.savepath=savepath
self.bound=bound
self.__X=[]
self.__Y=[]
def __call__(self, size:int):
'''
name:__call__
fuction: generate the Henon data
param {size}: size of the data
return {X,Y}: tuple of arrays in the form (X, Y)
'''
self.clear_data()
self.__X=self.__X+self.__seed
self.__Y=self.__Y+[0.0]*self.interval
if self.heavyMem:
assert size>len(self.__seed), 'size not enough!'
for i in range(self.interval,size):
Y_next=self.HenonFunc(self.__X[i],self.__X[i-self.interval])
if self.interval>1:
self.__Y.append(max(Y_next,self.bound))
else:
self.__Y.append(Y_next)
self.__X.append(self.__Y[i])
self.__X.pop()
else:
assert size>len(self.__seed)+self.interval, 'size not enough'
for i in range(self.interval,size):
Y_next=self.HenonFunc(self.__X[i],self.__X[i-self.interval])
self.__Y.append(max(Y_next,self.bound))
if i+self.interval<size:
self.__X.append(self.__Y[i])
return np.array(self.__X),np.array(self.__Y)
def random_seed(self):
'''
name: random_seed
function: randomize the seed
return {seed}
'''
if self.heavyMem:
self.__seed=[np.random.rand()*0.1 for i in range(self.interval+1)]
else:
self.__seed=[np.random.rand()*0.1 for i in range(2*self.interval)]
return self.__seed
@property
def seed(self):
'''
name: seed
function: get the seed
return {seed}
'''
return self.__seed
def save_to_CSV(self,fileName:str):
'''
name: save_to_CSV
function: save the data to csv file
param {fileName}: name of the file
'''
path=os.path.join(self.savepath,fileName)
data=pd.DataFrame({'X':self.__X,'Y':self.__Y,\
'interval':self.interval,\
'paramA':self.paramA,'paramB':self.paramB,\
'bound':self.bound,'heavyMem':self.heavyMem})
data.to_csv(path,index=False)
def read_from_CSV(self,fileName:str):
'''
name: read_from_CSV
function: read the data from csv file
param {fileName}: name of the file
'''
path=os.path.join(self.savepath,fileName)
data = pd.read_csv(path)
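# A bare-bones sketch of the recurrence implemented by HenonFunc above,
# x_{t+1} = 1 - a*x_t**2 + b*x_{t-n}, shown here for interval n = 1 with the lower
# clip at `bound` applied to every step (the class itself only clips when n > 1);
# the seed values are arbitrary:
import numpy as np

def henon_series(size, a=1.4, b=0.3, x0=0.1, x1=0.1, bound=-1.2):
    xs = [x0, x1]
    for _ in range(size - 2):
        xs.append(max(1 - a * xs[-1] ** 2 + b * xs[-2], bound))
    return np.array(xs)

print(henon_series(10))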
from collections import namedtuple
from jug import TaskGenerator, bvalue
import ena
from cleanup import cleanup_metadata
from jug.hooks import exit_checks
exit_checks.exit_if_file_exists('jug.exit')
cleanup_metadata = TaskGenerator(cleanup_metadata)
get_sample_xml = TaskGenerator(ena.get_sample_xml)
get_data_xml = TaskGenerator(ena.get_data_xml)
parse_experiment_meta = TaskGenerator(ena.parse_experiment_meta)
Project = namedtuple('Project', ['accession', 'samples'])
PROJECT_DATA_URL = 'http://www.ebi.ac.uk/ena/data/warehouse/search?' + \
'query="library_source="METAGENOMIC""&result=read_study&download=xml&display=xml'
PROJECTS_DATA_FILE = 'data/project-list.xml'
SAMPLE_BY_TAXID_FILE = 'data/sample-408169-table.tsv'
@TaskGenerator
def download_project_data():
'''Download reads with library_source = METAGENOMIC'''
import requests
from contextlib import closing
from os import makedirs
makedirs('data', exist_ok=True)
with closing(requests.get(PROJECT_DATA_URL, stream=True)) as ifile, \
open(PROJECTS_DATA_FILE, 'wb') as ofile:
for chunk in ifile.iter_content():
ofile.write(chunk)
return PROJECTS_DATA_FILE
@TaskGenerator
def download_sample_408169_table():
'''Download samples by TaxID below 408169 (which is "metagenome")'''
import requests
from contextlib import closing
from os import makedirs
from ena import ENA_BASE_URL
makedirs('data', exist_ok=True)
url = ENA_BASE_URL + 'data/warehouse/search?query=%22tax_tree(408169)%22&limit=522911&length=522911&offset=1&display=report&result=sample&fields=accession,secondary_sample_accession,first_public,tax_id,scientific_name,sample_alias&download=txt'
with closing(requests.get(url, stream=True)) as ifile, \
open(SAMPLE_BY_TAXID_FILE, 'wb') as ofile:
for chunk in ifile.iter_content():
ofile.write(chunk)
return SAMPLE_BY_TAXID_FILE
@TaskGenerator
def getprojects(datafile):
import xml.etree.ElementTree as ET
tree = ET.parse(datafile)
root = tree.getroot()
projects = []
for it in root.iter('PROJECT'):
accession = it.get('accession')
for lk in it.findall('PROJECT_LINKS/PROJECT_LINK/XREF_LINK'):
d = lk.find('DB')
if d.text == 'ENA-SAMPLE':
projects.append(Project(accession, lk.find('ID').text))
return projects
@TaskGenerator
def projectreads(pr):
return ena.get_project_reads_table(pr.accession)
@TaskGenerator
def astable(data):
import numpy as np
import pandas as pd
from six import StringIO
from collections import Counter
from pdutils import pdselect
table = pd.concat([pd.read_table(StringIO(d)) for d in data])
repeats = [k for k,v in Counter(table['run_accession']).items() if v > 1]
for r in repeats:
r = pdselect(table, run_accession=r)
if not np.all( (r == r.iloc[0]) | r.isnull() ):
raise ValueError("Runs with same ID have different data")
table.drop_duplicates(subset='run_accession', inplace=True)
table.index = table.run_accession
table.drop('run_accession', inplace=True, axis=1)
return table
@TaskGenerator
def experiment_table(metamerged_ex):
import pandas as pd
from collections import Counter
hasdata = Counter()
for k,vs in metamerged_ex.items():
hasdata.update(vs.keys())
used = set(k for k,v in hasdata.items() if v >=100)
for k,vs in metamerged_ex.items():
for k in list(vs.keys()):
if k not in used:
del vs[k]
m = pd.DataFrame.from_dict(metamerged_ex, orient='index')
hasdata = m.shape[0] - m.isnull().sum()
hasdata.sort_values(inplace=True)
m = m[hasdata.index[::-1]]
return m
@TaskGenerator
def samples_with_wgs(table_ex):
library = table_ex[['sample_accession', 'LIBRARY_STRATEGY', 'LIBRARY_SOURCE', 'LIBRARY_SELECTION']]
library = library.query('LIBRARY_STRATEGY == "WGS"').query('LIBRARY_SOURCE != "GENOMIC"')
return set(library.sample_accession)
@TaskGenerator
def parsei(ptable):
import pandas as pd
from six import StringIO
return pd.read_table(StringIO(ptable)).groupby('instrument_model').count()
@TaskGenerator
def parse_sample_meta(data):
from six import StringIO
return ena.parse_sample_meta(StringIO(data))
@TaskGenerator
def mergedicts(ds):
merged = {}
for d in ds:
merged.update(d)
return merged
@TaskGenerator
def merge_metadatable(metamerged, selected_samples):
import pandas as pd
print("MISSING: ", sum(1 for k in selected_samples if k not in metamerged))
metasample = pd.DataFrame({k: metamerged[k] for k in selected_samples if k in metamerged})
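# A tiny sketch of the concat + de-duplication pattern used in astable() above,
# with two hand-written run tables (accessions and values are made up):
import pandas as pd

t1 = pd.DataFrame({'run_accession': ['ERR1', 'ERR2'], 'sample': ['S1', 'S2']})
t2 = pd.DataFrame({'run_accession': ['ERR2', 'ERR3'], 'sample': ['S2', 'S3']})
table = pd.concat([t1, t2])
table = table.drop_duplicates(subset='run_accession').set_index('run_accession')
print(table)  # ERR2 appears only once in the result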
import json
from os import path
import time
import typing
import random
import sys
import itertools
import warnings
import numpy as np
import tqdm
from lazy import lazy
import pandas as pd
from docopt import docopt
import multiprocessing as mp
from multiprocessing import Pool
from rdkit import RDLogger
RDLogger.DisableLog('rdApp.info')
from utils import rd_filters
THIS_FILE_DIR = path.dirname(__file__)
class QualityFiltersCheck:
"""
These are the Quality Filters proposed in the GuacaMol paper, which try to rule out " compounds which are
potentially unstable, reactive, laborious to synthesize, or simply unpleasant to the eye of medicinal chemists."
The filter rules are from the GuacaMol supplementary material: https://pubs.acs.org/doi/10.1021/acs.jcim.8b00839
The filter code is from: https://github.com/PatWalters/rd_filters
Parts of the code below have been taken from the script in this module. This code put in this
class came with this MIT Licence:
MIT License
Copyright (c) 2018 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
def __init__(self, training_data_smi: typing.List[str]):
alert_file_name = path.join(THIS_FILE_DIR, 'rd_filters_data/alert_collection.csv')
self.rf = rd_filters.RDFilters(alert_file_name)
rules_file_path = path.join(THIS_FILE_DIR, 'rd_filters_data/rules.json')
rule_dict = rd_filters.read_rules(rules_file_path)
rule_list = [x.replace("Rule_", "") for x in rule_dict.keys() if x.startswith("Rule") and rule_dict[x]]
rule_str = " and ".join(rule_list)
print(f"Using alerts from {rule_str}", file=sys.stderr)
self.rf.build_rule_list(rule_list)
self.rule_dict = rule_dict
self.training_data_smi = training_data_smi
@lazy
def _training_data_prop(self):
training_data_quality_filters = self.call_on_smiles_no_normalization(self.training_data_smi)
print(f"Training data filters returned {training_data_quality_filters}. Rest normalized on this.")
return training_data_quality_filters
def call_on_smiles_no_normalization(self, smiles: typing.List[str]):
num_cores = 10
print(f"using {num_cores} cores", file=sys.stderr)
start_time = time.time()
p = Pool(mp.cpu_count())
num_smiles_in = len(smiles)
input_data = [(smi, f"MOL_{i}") for i, smi in enumerate(smiles)]
res = list(p.map(self.rf.evaluate, input_data))
df = pd.DataFrame(res, columns=["SMILES", "NAME", "FILTER", "MW", "LogP", "HBD", "HBA", "TPSA", "Rot"])
df_ok = df[
(df.FILTER == "OK") &
df.MW.between(*self.rule_dict["MW"]) &
df.LogP.between(*self.rule_dict["LogP"]) &
df.HBD.between(*self.rule_dict["HBD"]) &
df.HBA.between(*self.rule_dict["HBA"]) &
df.TPSA.between(*self.rule_dict["TPSA"]) &
df.TPSA.between(*self.rule_dict["Rot"])
]
num_input_rows = df.shape[0]
num_output_rows = df_ok.shape[0]
fraction_passed = "{:.1f}".format(num_output_rows / num_input_rows * 100.0)
print(f"{num_output_rows} of {num_input_rows} passed filters {fraction_passed}%", file=sys.stderr)
elapsed_time = "{:.2f}".format(time.time() - start_time)
print(f"Elapsed time {elapsed_time} seconds", file=sys.stderr)
p.close()
return (num_output_rows / num_smiles_in)
def check_smiles_pass_quality_filters_flag(self, smiles: typing.List[str]):
num_cores = 10
print(f"using {num_cores} cores", file=sys.stderr)
start_time = time.time()
p = Pool(mp.cpu_count())
num_smiles_in = len(smiles)
input_data = [(smi, f"MOL_{i}") for i, smi in enumerate(smiles)]
res = list(p.map(self.rf.evaluate, input_data))
df = pd.DataFrame(res, columns=["SMILES", "NAME", "FILTER", "MW", "LogP", "HBD", "HBA", "TPSA", "Rot"])
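# A sketch of the range-based filtering applied to the property table above: each
# molecule (row) is kept only if every property falls inside the interval from the
# rule dictionary (property values and rule ranges here are made up):
import pandas as pd

props = pd.DataFrame({'MW': [250, 750], 'LogP': [2.1, 6.8]})
rules = {'MW': [0, 500], 'LogP': [-5, 5]}
ok = props[props.MW.between(*rules['MW']) & props.LogP.between(*rules['LogP'])]
print(ok)  # only the first molecule passes both ranges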
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import sys, os
import pandas.io.sql as psql
import psycopg2 as pg
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
from pandas.core.frame import DataFrame
import json
import math
# Connect to database
conn = pg.connect(host='192.168.61.4', database='summer', user='postgres')
#Load the relationship between problem_id(id) and chanllenge_id(challenge_id)
df_problems = pd.read_sql_query('select * from problems_multiple_choice_problem', con=conn)
#Load the relationship between challenge_id(id) and quest_id(quest_id)
df_challenges = pd.read_sql_query('select * from content_challenge', con=conn)
#Load the relationship between quest_id(id) and order(order)
df_quests = pd.read_sql_query('select * from content_quest', con=conn)
quad_list = []
for row_index, row in df_problems.iterrows():
quad = {}
if not math.isnan(row['challenge_id']):
quad['problem_id'] = row['id']
quad['challenge_id'] = int(row['challenge_id'])
quad_list.append(quad)
for item in quad_list:
for row_index, row in df_challenges.iterrows():
if (item['challenge_id'] == row['id']) & (not math.isnan(row['quest_id'])):
item['quest_id'] = int(row['quest_id'])
# In[2]:
# print(quad_list)
quad_list1 = []
for item in quad_list:
if len(item) == 3:
quad_list1.append(item)
for item in quad_list1:
for row_index, row in df_quests.iterrows():
if item['quest_id'] == row['id']:
item['order'] = int(row['order'])
# In[3]:
df_quad = pd.DataFrame(quad_list1)
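# The nested loops above resolve problem -> challenge -> quest -> order one row at a
# time; the same lookup can be written as two pandas merges. A sketch with tiny
# stand-in tables (ids and orders are made up):
import pandas as pd

problems = pd.DataFrame({'id': [1, 2], 'challenge_id': [10, 20]})
challenges = pd.DataFrame({'id': [10, 20], 'quest_id': [100, 200]})
quests = pd.DataFrame({'id': [100, 200], 'order': [1, 2]})
merged = (problems.rename(columns={'id': 'problem_id'})
          .merge(challenges, left_on='challenge_id', right_on='id')
          .merge(quests, left_on='quest_id', right_on='id', suffixes=('_c', '_q')))
print(merged[['problem_id', 'challenge_id', 'quest_id', 'order']])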
import os
import numpy as np
import pandas as pd
import deflex as dflx
from holoviews_sankey import create_and_save_sankey
path = "/home/uwe/deflex/quarree100/results_cbc/"
dump = "2018-DE02-Agora.dflx"
deflx = os.path.join(path, dump)
all_results = dflx.fetch_deflex_result_tables(deflx)
# From Commodities to ...
comm = all_results["commodity"]
FC = np.sum(comm.xs("fuel converter", level=4, axis=1)) # All fuel converters
ComoilMob = FC[0]
# From Electricity bus
elect = all_results["electricity"]
EbusMob = np.sum(np.sum(elect.xs("fuel converter", level=4, axis=1)))
# Create df (From, To, value)
d = (
["From El Bus", "Mobility Demand", EbusMob],
["Oil", "Mobility Demand", ComoilMob],
)
df = pd.DataFrame(data=d, columns=["From", "To", "Value"])
"""SQL io tests
The SQL tests are broken down in different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI
connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy
Connection object. The different tested flavors (sqlite3, MySQL,
PostgreSQL) derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback`)
"""
import csv
from datetime import date, datetime, time
from io import StringIO
import sqlite3
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
isna,
to_datetime,
to_timedelta,
)
import pandas._testing as tm
import pandas.io.sql as sql
from pandas.io.sql import read_sql_query, read_sql_table
try:
import sqlalchemy
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
"create_iris": {
"sqlite": """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
"mysql": """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
"postgresql": """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)""",
},
"insert_iris": {
"sqlite": """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
"mysql": """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
"postgresql": """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);""",
},
"create_test_types": {
"sqlite": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
"mysql": """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`IntDateOnlyCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
"postgresql": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)""",
},
"insert_test_types": {
"sqlite": {
"query": """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"mysql": {
"query": """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"postgresql": {
"query": """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"DateColWithTz",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
},
"read_parameters": {
"sqlite": "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
"mysql": 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
"postgresql": 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s',
},
"read_named_parameters": {
"sqlite": """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
"mysql": """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
"postgresql": """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
""",
},
"create_view": {
"sqlite": """
CREATE VIEW iris_view AS
SELECT * FROM iris
"""
},
}
class MixInBase:
def teardown_method(self, method):
# if setup fails, there may not be a connection to close.
if hasattr(self, "conn"):
for tbl in self._get_all_tables():
self.drop_table(tbl)
self._close_conn()
class MySQLMixIn(MixInBase):
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute(f"DROP TABLE IF EXISTS {sql._get_valid_mysql_name(table_name)}")
self.conn.commit()
def _get_all_tables(self):
cur = self.conn.cursor()
cur.execute("SHOW TABLES")
return [table[0] for table in cur.fetchall()]
def _close_conn(self):
from pymysql.err import Error
try:
self.conn.close()
except Error:
pass
class SQLiteMixIn(MixInBase):
def drop_table(self, table_name):
self.conn.execute(
f"DROP TABLE IF EXISTS {sql._get_valid_sqlite_name(table_name)}"
)
self.conn.commit()
def _get_all_tables(self):
c = self.conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
return [table[0] for table in c.fetchall()]
def _close_conn(self):
self.conn.close()
class SQLAlchemyMixIn(MixInBase):
def drop_table(self, table_name):
sql.SQLDatabase(self.conn).drop_table(table_name)
def _get_all_tables(self):
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
table_list = meta.tables.keys()
return table_list
def _close_conn(self):
pass
class PandasSQLTest:
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def _get_exec(self):
if hasattr(self.conn, "execute"):
return self.conn
else:
return self.conn.cursor()
@pytest.fixture(params=[("data", "iris.csv")])
def load_iris_data(self, datapath, request):
import io
iris_csv_file = datapath(*request.param)
if not hasattr(self, "conn"):
self.setup_connect()
self.drop_table("iris")
self._get_exec().execute(SQL_STRINGS["create_iris"][self.flavor])
with io.open(iris_csv_file, mode="r", newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS["insert_iris"][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _load_iris_view(self):
self.drop_table("iris_view")
self._get_exec().execute(SQL_STRINGS["create_view"][self.flavor])
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
assert issubclass(pytype, np.floating)
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _load_test1_data(self):
columns = ["index", "A", "B", "C", "D"]
data = [
(
"2000-01-03 00:00:00",
0.980268513777,
3.68573087906,
-0.364216805298,
-1.15973806169,
),
(
"2000-01-04 00:00:00",
1.04791624281,
-0.0412318367011,
-0.16181208307,
0.212549316967,
),
(
"2000-01-05 00:00:00",
0.498580885705,
0.731167677815,
-0.537677223318,
1.34627041952,
),
(
"2000-01-06 00:00:00",
1.12020151869,
1.56762092543,
0.00364077397681,
0.67525259227,
),
]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(
dict(
A=[4, 1, 3, 6],
B=["asd", "gsq", "ylt", "jkl"],
C=[1.1, 3.1, 6.9, 5.3],
D=[False, True, True, False],
E=["1990-11-22", "1991-10-26", "1993-11-26", "1995-12-12"],
)
)
df["E"] = to_datetime(df["E"])
self.test_frame2 = df
def _load_test3_data(self):
columns = ["index", "A", "B"]
data = [
("2000-01-03 00:00:00", 2 ** 31 - 1, -1.987670),
("2000-01-04 00:00:00", -29, -0.0412318367011),
("2000-01-05 00:00:00", 20000, 0.731167677815),
("2000-01-06 00:00:00", -290867, 1.56762092543),
]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table("types_test_data")
self._get_exec().execute(SQL_STRINGS["create_test_types"][self.flavor])
ins = SQL_STRINGS["insert_test_types"][self.flavor]
data = [
{
"TextCol": "first",
"DateCol": "2000-01-03 00:00:00",
"DateColWithTz": "2000-01-01 00:00:00-08:00",
"IntDateCol": 535852800,
"IntDateOnlyCol": 20101010,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": 1,
"BoolColWithNull": False,
},
{
"TextCol": "first",
"DateCol": "2000-01-04 00:00:00",
"DateColWithTz": "2000-06-01 00:00:00-07:00",
"IntDateCol": 1356998400,
"IntDateOnlyCol": 20101212,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": None,
"BoolColWithNull": None,
},
]
for d in data:
self._get_exec().execute(
ins["query"], [d[field] for field in ins["fields"]]
)
def _count_rows(self, table_name):
result = (
self._get_exec()
.execute(f"SELECT count(*) AS count_1 FROM {table_name}")
.fetchone()
)
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS["read_parameters"][self.flavor]
params = ["Iris-setosa", 5.1]
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS["read_named_parameters"][self.flavor]
params = {"name": "Iris-setosa", "length": 5.1}
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self, method=None):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=method)
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _to_sql_empty(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1.iloc[:0], "test_frame1")
def _to_sql_fail(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
assert self.pandasSQL.has_table("test_frame1")
msg = "Table 'test_frame1' already exists"
with pytest.raises(ValueError, match=msg):
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
self.drop_table("test_frame1")
def _to_sql_replace(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="replace")
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_append(self):
# Nuke table just in case
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="append")
assert self.pandasSQL.has_table("test_frame1")
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_method_callable(self):
check = [] # used to double check function below is really being used
def sample(pd_table, conn, keys, data_iter):
check.append(1)
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(pd_table.table.insert(), data)
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=sample)
assert self.pandasSQL.has_table("test_frame1")
assert check == [1]
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _roundtrip(self):
self.drop_table("test_frame_roundtrip")
self.pandasSQL.to_sql(self.test_frame1, "test_frame_roundtrip")
result = self.pandasSQL.read_query("SELECT * FROM test_frame_roundtrip")
result.set_index("level_0", inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _to_sql_save_index(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")], columns=["A", "B", "C"], index=["A"]
)
self.pandasSQL.to_sql(df, "test_to_sql_saves_index")
ix_cols = self._get_index_columns("test_to_sql_saves_index")
assert ix_cols == [["A"]]
def _transaction_test(self):
with self.pandasSQL.run_transaction() as trans:
trans.execute("CREATE TABLE test_trans (A INT, B TEXT)")
class DummyException(Exception):
pass
# Make sure when transaction is rolled back, no rows get inserted
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
raise DummyException("error")
except DummyException:
# ignore raised exception
pass
res = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res) == 0
# Make sure when transaction is committed, rows do get inserted
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
res2 = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res2) == 1
# -----------------------------------------------------------------------------
# -- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
From this two classes are derived to run these tests for both the
sqlalchemy mode (`TestSQLApi`) and the fallback mode
(`TestSQLiteFallbackApi`). These tests are run with sqlite3. Specific
tests for the different sql flavours are included in `_TestSQLAlchemy`.
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
we don't use drop_table because that isn't part of the public api
"""
flavor = "sqlite"
mode: str
def setup_connect(self):
self.conn = self.connect()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def load_test_data_and_sql(self):
self._load_iris_view()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_sql_view(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris_view", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, "test_frame1", self.conn)
assert sql.has_table("test_frame1", self.conn)
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
assert sql.has_table("test_frame2", self.conn)
msg = "Table 'test_frame2' already exists"
with pytest.raises(ValueError, match=msg):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="replace")
assert sql.has_table("test_frame3", self.conn)
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame3")
assert num_rows == num_entries
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="append")
assert sql.has_table("test_frame4", self.conn)
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame4")
assert num_rows == num_entries
def test_to_sql_type_mapping(self):
sql.to_sql(self.test_frame3, "test_frame5", self.conn, index=False)
result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
tm.assert_frame_equal(self.test_frame3, result)
def test_to_sql_series(self):
s = Series(np.arange(5, dtype="int64"), name="series")
sql.to_sql(s, "test_series", self.conn, index=False)
s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
tm.assert_frame_equal(s.to_frame(), s2)
def test_roundtrip(self):
sql.to_sql(self.test_frame1, "test_frame_roundtrip", con=self.conn)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
# HACK!
result.index = self.test_frame1.index
result.set_index("level_0", inplace=True)
result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_roundtrip_chunksize(self):
sql.to_sql(
self.test_frame1,
"test_frame_roundtrip",
con=self.conn,
index=False,
chunksize=2,
)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def test_date_parsing(self):
# Test date parsing in read_sql
# No Parsing
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["DateCol"]
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"IntDateOnlyCol": "%Y%m%d"},
)
assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64)
assert df.IntDateOnlyCol.tolist() == [
pd.Timestamp("2010-10-10"),
pd.Timestamp("2010-12-12"),
]
def test_date_and_index(self):
# Test case where same column appears in parse_date and index_col
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
index_col="DateCol",
parse_dates=["DateCol", "IntDateCol"],
)
assert issubclass(df.index.dtype.type, np.datetime64)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_timedelta(self):
# see #6921
df = to_timedelta(Series(["00:00:01", "00:00:03"], name="foo")).to_frame()
with tm.assert_produces_warning(UserWarning):
df.to_sql("test_timedelta", self.conn)
result = sql.read_sql_query("SELECT * FROM test_timedelta", self.conn)
tm.assert_series_equal(result["foo"], df["foo"].astype("int64"))
def test_complex_raises(self):
df = DataFrame({"a": [1 + 1j, 2j]})
msg = "Complex datatypes not supported"
with pytest.raises(ValueError, match=msg):
df.to_sql("test_complex", self.conn)
@pytest.mark.parametrize(
"index_name,index_label,expected",
[
# no index name, defaults to 'index'
(None, None, "index"),
# specifying index_label
(None, "other_label", "other_label"),
# using the index name
("index_name", None, "index_name"),
# has index name, but specifying index_label
("index_name", "other_label", "other_label"),
# index name is integer
(0, None, "0"),
# index name is None but index label is integer
(None, 0, "0"),
],
)
def test_to_sql_index_label(self, index_name, index_label, expected):
temp_frame = DataFrame({"col1": range(4)})
temp_frame.index.name = index_name
query = "SELECT * FROM test_index_label"
sql.to_sql(temp_frame, "test_index_label", self.conn, index_label=index_label)
frame = sql.read_sql_query(query, self.conn)
assert frame.columns[0] == expected
def test_to_sql_index_label_multiindex(self):
temp_frame = DataFrame(
{"col1": range(4)},
index=MultiIndex.from_product([("A0", "A1"), ("B0", "B1")]),
)
# no index name, defaults to 'level_0' and 'level_1'
sql.to_sql(temp_frame, "test_index_label", self.conn)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[0] == "level_0"
assert frame.columns[1] == "level_1"
# specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["A", "B"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# using the index name
temp_frame.index.names = ["A", "B"]
sql.to_sql(temp_frame, "test_index_label", self.conn, if_exists="replace")
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# has index name, but specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["C", "D"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["C", "D"]
msg = "Length of 'index_label' should match number of levels, which is 2"
with pytest.raises(ValueError, match=msg):
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label="C",
)
def test_multiindex_roundtrip(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")],
columns=["A", "B", "C"],
index=["A", "B"],
)
df.to_sql("test_multiindex_roundtrip", self.conn)
result = sql.read_sql_query(
"SELECT * FROM test_multiindex_roundtrip", self.conn, index_col=["A", "B"]
)
tm.assert_frame_equal(df, result, check_index_type=True)
def test_integer_col_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
sql.to_sql(df, "test_frame_integer_col_names", self.conn, if_exists="replace")
def test_get_schema(self):
create_sql = sql.get_schema(self.test_frame1, "test", con=self.conn)
assert "CREATE" in create_sql
def test_get_schema_dtypes(self):
float_frame = DataFrame({"a": [1.1, 1.2], "b": [2.1, 2.2]})
dtype = sqlalchemy.Integer if self.mode == "sqlalchemy" else "INTEGER"
create_sql = sql.get_schema(
float_frame, "test", con=self.conn, dtype={"b": dtype}
)
assert "CREATE" in create_sql
assert "INTEGER" in create_sql
def test_get_schema_keys(self):
frame = DataFrame({"Col1": [1.1, 1.2], "Col2": [2.1, 2.2]})
create_sql = sql.get_schema(frame, "test", con=self.conn, keys="Col1")
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")'
assert constraint_sentence in create_sql
# multiple columns as key (GH10385)
create_sql = sql.get_schema(
self.test_frame1, "test", con=self.conn, keys=["A", "B"]
)
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
assert constraint_sentence in create_sql
def test_chunksize_read(self):
df = DataFrame(np.random.randn(22, 5), columns=list("abcde"))
df.to_sql("test_chunksize", self.conn, index=False)
# reading the query in one time
res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
# reading the query in chunks with read_sql_query
res2 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_query(
"select * from test_chunksize", self.conn, chunksize=5
):
res2 = concat([res2, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res2)
        # reading the table in chunks with read_sql_table
if self.mode == "sqlalchemy":
res3 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_table("test_chunksize", self.conn, chunksize=5):
res3 = concat([res3, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res3)
def test_categorical(self):
# GH8624
# test that categorical gets written correctly as dense column
df = DataFrame(
{
"person_id": [1, 2, 3],
"person_name": ["<NAME>", "<NAME>", "<NAME>"],
}
)
df2 = df.copy()
df2["person_name"] = df2["person_name"].astype("category")
df2.to_sql("test_categorical", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_categorical", self.conn)
tm.assert_frame_equal(res, df)
def test_unicode_column_name(self):
# GH 11431
df = DataFrame([[1, 2], [3, 4]], columns=["\xe9", "b"])
df.to_sql("test_unicode", self.conn, index=False)
def test_escaped_table_name(self):
# GH 13206
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
df.to_sql("d1187b08-4943-4c8d-a7f6", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM `d1187b08-4943-4c8d-a7f6`", self.conn)
tm.assert_frame_equal(res, df)
@pytest.mark.single
@pytest.mark.skipif(not SQLALCHEMY_INSTALLED, reason="SQLAlchemy not installed")
class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi):
"""
Test the public API as it would be used directly
Tests for `read_sql_table` are included here, as this is specific for the
sqlalchemy mode.
"""
flavor = "sqlite"
mode = "sqlalchemy"
def connect(self):
return sqlalchemy.create_engine("sqlite:///:memory:")
def test_read_table_columns(self):
        # test columns argument in read_sql_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
cols = ["A", "B"]
result = sql.read_sql_table("test_frame", self.conn, columns=cols)
assert result.columns.tolist() == cols
def test_read_table_index_col(self):
        # test index_col argument in read_sql_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
result = sql.read_sql_table("test_frame", self.conn, index_col="index")
assert result.index.names == ["index"]
result = sql.read_sql_table("test_frame", self.conn, index_col=["A", "B"])
assert result.index.names == ["A", "B"]
result = sql.read_sql_table(
"test_frame", self.conn, index_col=["A", "B"], columns=["C", "D"]
)
assert result.index.names == ["A", "B"]
assert result.columns.tolist() == ["C", "D"]
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
iris_frame1 = sql.read_sql_table("iris", self.conn)
iris_frame2 = sql.read_sql("iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
def test_not_reflect_all_tables(self):
# create invalid table
qry = """CREATE TABLE invalid (x INTEGER, y UNKNOWN);"""
self.conn.execute(qry)
qry = """CREATE TABLE other_table (x INTEGER, y INTEGER);"""
self.conn.execute(qry)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
sql.read_sql_table("other_table", self.conn)
sql.read_sql_query("SELECT * FROM other_table", self.conn)
# Verify some things
assert len(w) == 0
def test_warning_case_insensitive_table_name(self):
# see gh-7815
#
        # We can't test that this warning is triggered, as the database
# configuration would have to be altered. But here we test that
# the warning is certainly NOT triggered in a normal case.
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# This should not trigger a Warning
self.test_frame1.to_sql("CaseSensitive", self.conn)
# Verify some things
assert len(w) == 0
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
        ixs = insp.get_indexes(tbl_name)
ixs = [i["column_names"] for i in ixs]
return ixs
def test_sqlalchemy_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame(
{"time": to_datetime(["201412120154", "201412110254"], utc=True)}
)
db = sql.SQLDatabase(self.conn)
table = sql.SQLTable("test_type", db, frame=df)
# GH 9086: TIMESTAMP is the suggested type for datetimes with timezones
assert isinstance(table.table.c["time"].type, sqltypes.TIMESTAMP)
def test_database_uri_string(self):
# Test read_sql and .to_sql method with a database URI (GH10654)
test_frame1 = self.test_frame1
# db_uri = 'sqlite:///:memory:' # raises
# sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near
# "iris": syntax error [SQL: 'iris']
with tm.ensure_clean() as name:
db_uri = "sqlite:///" + name
table = "iris"
test_frame1.to_sql(table, db_uri, if_exists="replace", index=False)
test_frame2 = sql.read_sql(table, db_uri)
test_frame3 = sql.read_sql_table(table, db_uri)
query = "SELECT * FROM iris"
test_frame4 = sql.read_sql_query(query, db_uri)
tm.assert_frame_equal(test_frame1, test_frame2)
tm.assert_frame_equal(test_frame1, test_frame3)
tm.assert_frame_equal(test_frame1, test_frame4)
# using driver that will not be installed on Travis to trigger error
# in sqlalchemy.create_engine -> test passing of this error to user
try:
# the rest of this test depends on pg8000's being absent
import pg8000 # noqa
pytest.skip("pg8000 is installed")
except ImportError:
pass
db_uri = "postgresql+pg8000://user:pass@host/dbname"
with pytest.raises(ImportError, match="pg8000"):
sql.read_sql("select * from table", db_uri)
def _make_iris_table_metadata(self):
sa = sqlalchemy
metadata = sa.MetaData()
iris = sa.Table(
"iris",
metadata,
sa.Column("SepalLength", sa.REAL),
sa.Column("SepalWidth", sa.REAL),
sa.Column("PetalLength", sa.REAL),
sa.Column("PetalWidth", sa.REAL),
sa.Column("Name", sa.TEXT),
)
return iris
def test_query_by_text_obj(self):
# WIP : GH10846
name_text = sqlalchemy.text("select * from iris where name=:name")
iris_df = sql.read_sql(name_text, self.conn, params={"name": "Iris-versicolor"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-versicolor"}
def test_query_by_select_obj(self):
# WIP : GH10846
iris = self._make_iris_table_metadata()
name_select = sqlalchemy.select([iris]).where(
iris.c.Name == sqlalchemy.bindparam("name")
)
iris_df = sql.read_sql(name_select, self.conn, params={"name": "Iris-setosa"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-setosa"}
class _EngineToConnMixin:
"""
A mixin that causes setup_connect to create a conn rather than an engine.
"""
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
super().load_test_data_and_sql()
engine = self.conn
conn = engine.connect()
self.__tx = conn.begin()
self.pandasSQL = sql.SQLDatabase(conn)
self.__engine = engine
self.conn = conn
yield
self.__tx.rollback()
self.conn.close()
self.conn = self.__engine
self.pandasSQL = sql.SQLDatabase(self.__engine)
# XXX:
# super().teardown_method(method)
@pytest.mark.single
class TestSQLApiConn(_EngineToConnMixin, TestSQLApi):
pass
@pytest.mark.single
class TestSQLiteFallbackApi(SQLiteMixIn, _TestSQLApi):
"""
Test the public sqlite connection fallback API
"""
flavor = "sqlite"
mode = "fallback"
def connect(self, database=":memory:"):
return sqlite3.connect(database)
def test_sql_open_close(self):
        # Test if the IO to the database still works if the connection is closed
        # between the writing and reading (as in many real situations).
with tm.ensure_clean() as name:
conn = self.connect(name)
sql.to_sql(self.test_frame3, "test_frame3_legacy", conn, index=False)
conn.close()
conn = self.connect(name)
result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;", conn)
conn.close()
tm.assert_frame_equal(self.test_frame3, result)
@pytest.mark.skipif(SQLALCHEMY_INSTALLED, reason="SQLAlchemy is installed")
def test_con_string_import_error(self):
conn = "mysql://root@localhost/pandas_nosetest"
msg = "Using URI string without sqlalchemy installed"
with pytest.raises(ImportError, match=msg):
sql.read_sql("SELECT * FROM iris", conn)
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
msg = "Execution failed on sql 'iris': near \"iris\": syntax error"
with pytest.raises(sql.DatabaseError, match=msg):
sql.read_sql("iris", self.conn)
def test_safe_names_warning(self):
# GH 6798
df = DataFrame([[1, 2], [3, 4]], columns=["a", "b "]) # has a space
# warns on create table with spaces in names
with tm.assert_produces_warning():
sql.to_sql(df, "test_frame3_legacy", self.conn, index=False)
def test_get_schema2(self):
        # without providing a connection object (available for backwards compatibility)
create_sql = | sql.get_schema(self.test_frame1, "test") | pandas.io.sql.get_schema |
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 15 21:56:08 2020
@author: <NAME>
"""
# STEP1----------------- # Importing the libraries------------
#-------------------------------------------------------------
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.impute import SimpleImputer
from sklearn import metrics
from scipy import interp
from itertools import cycle
from sklearn.preprocessing import StandardScaler # for preprocessing the data
from sklearn.ensemble import RandomForestClassifier # Random forest classifier
from sklearn.tree import DecisionTreeClassifier # for Decision Tree classifier
from sklearn.svm import SVC # for SVM classification
from sklearn.decomposition import PCA
from sklearn.preprocessing import OneHotEncoder, LabelEncoder # encoding categorical variables (LabelEncoder assigns a numeric code to each category)
from sklearn.compose import ColumnTransformer, make_column_transformer # apply different transformers to selected columns
from sklearn.pipeline import Pipeline
from sklearn.utils import resample
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split # to split the data
from sklearn.model_selection import KFold # for cross-validation
from sklearn.model_selection import GridSearchCV # for tuning hyperparameters; tries every combination of the given parameters
from sklearn.model_selection import RandomizedSearchCV # also for tuning hyperparameters, but samples random combinations
from sklearn.metrics import confusion_matrix,recall_score,precision_recall_curve,auc,roc_curve,roc_auc_score,classification_report
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.ensemble import AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.metrics import f1_score
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.utils.validation import check_random_state
from sklearn.model_selection import StratifiedKFold
from sklearn.ensemble import GradientBoostingClassifier
#from rgf.sklearn import RGFClassifier
from sklearn.metrics import accuracy_score
###=========================== Importing the DATASET ======================================
###=========================================================================================
# Loading data from iMotions: set the path to the csv file directory
os.chdir("\\ML4TakeOver\\Data\\RawData")
directory = os.getcwd()
#dataFrame_takeover_feature = pd.read_csv('takeover_cleaned_feature4ML.csv', index_col=[0])
dataFrame_takeover_feature = pd.read_csv('takeover4ML.csv', index_col=[0])
dataset = dataFrame_takeover_feature
chunk_users = ['015_M3', '015_m2', '015_M1', '014_M3', # select a handful of participants to save resources
'014_M2', '014_m1']
chunk_dataset = dataset[dataset['Name'].isin(chunk_users)]
dataset = chunk_dataset
dataset.shape
###### ======================================Encoding notes=======================================
# Alarm Type: TA =2, NoA =1, FA = 0 , Z = 3
# TakeOver : TK =1 , NTK= 0
# Alarm : 339.0 =339.0, 103.0= 4, 332.0=14, 259.0=11, 16.0=2, 178.0=6, 284.0=12,
# 213.0=9, 323.0=13, 185.0=7, 84.0=3, 137.0=5, 5.0=1, 191.0=8, 254.0=10
# Mode : +1 (Auto)= +1, -1(Manual)= 0
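# A minimal sketch (illustrative, not part of the pipeline) of how the encodings
# noted above can be made explicit with pandas .map; it is kept as a standalone
# helper so it does not modify `dataset`, and the column names are assumptions
# based on the notes rather than a guaranteed match to the raw iMotions export.
def encode_labels(df):
    mapping = {
        'Coming_AlarmType': {'FA': 0, 'NoA': 1, 'TA': 2, 'Z': 3},
        'Takeover': {'NTK': 0, 'TK': 1},
        'Mode': {-1: 0, 1: 1},
    }
    out = df.copy()
    for col, codes in mapping.items():
        if col in out.columns:
            out[col] = out[col].map(codes)
    return out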
## ========================= Exploring the data, mainly the Label (ReactionTime) ====================
## ===================================================================================================
# let's check the "Takeover" distributions
#sns.countplot("TOT_Class",data=dataset)
# Let's check the Percentage for "ReactionTime"
Count_FastRT = len(dataset[dataset["TOT_Class"]== 0 ]) # Faster: <4000
Count_LowRT = len(dataset[dataset["TOT_Class"]== 1 ]) # Slower: >4000
Percentage_of_FastRT = Count_FastRT/(Count_FastRT+Count_LowRT)
print("Percentage_of_FastRT, 0 = ",Percentage_of_FastRT*100)
Percentage_of_SlowRT= Count_LowRT/(Count_FastRT+Count_LowRT)
print("Percentage_of_SlowRT, 1 = ",Percentage_of_SlowRT*100)
# Pandas offers three correlation coefficients out of the box: 1) Pearson 2) Spearman rank 3) Kendall Tau
pearson = dataset.corr(method='pearson')
# assume the target attribute is the last column ("Takeover"); take its correlation with every feature
corr_with_target = pearson.iloc[-1][:]
# attributes sorted from the most predictive
predictivity = corr_with_target.sort_values(ascending=False)
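# For completeness, the other two coefficients mentioned above can be computed the
# same way (an illustrative sketch; Kendall is slow on large frames, hence the sample):
spearman_corr = dataset.corr(method='spearman')
kendall_corr = dataset.sample(n=min(len(dataset), 5000), random_state=0).corr(method='kendall')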
## ========================= Preparation for Machine Learning algorithms =========================
## ====================================================================================================
# Drop useless features for ML
dataset = dataset.drop(['Timestamp','index','ID', 'Name', 'EventSource', 'ManualGear','EventW','EventN','GazeDirectionLeftY','Alarm',
'GazeDirectionLeftX', 'GazeDirectionRightX', 'GazeDirectionRightY','CurrentBrake',
'PassBy','RangeN'], axis=1) #ManualGear has only "one" value
#EventW is pretty similar to EventN
dataset.shape
#---------------------------------------------------------
# cast the label-like columns to the object dtype
# so that they are treated as categorical rather than numeric
dataset['LeftLaneType'] = dataset.LeftLaneType.astype(object)
dataset['RightLaneType'] = dataset.RightLaneType.astype(object)
dataset['TOT_Class'] = dataset.TOT_Class.astype(object)
dataset['Coming_Alarm'] = dataset.Coming_Alarm.astype(object)
dataset['Takeover'] = dataset.Takeover.astype(object)
dataset['Coming_AlarmType'] = dataset.Coming_AlarmType.astype(object)
dataset['NDTask'] = dataset.NDTask.astype(object)
dataset['TOT_Three_Class'] = dataset.TOT_Three_Class.astype(object)
#****** Drop features that happen after the Alarm (anything recorded after the alarm would leak into takeover prediction) ****************
dataset = dataset.drop(['Mode','AlarmDuration','Coming_Alarm'], axis=1) # Coming_Alarm may be helpful for ReactionTime
# Check the reaction time values in each category of Alarm
print('FalseAlarm ReactionTime:', dataset[dataset['Coming_AlarmType']== 'FA'].ReactionTime.mean()) # 2007.2
print('TrueAlarm ReactionTime:', dataset[dataset['Coming_AlarmType']== 'TA'].ReactionTime.mean()) # 4712.5
print('NoAlarm ReactionTime:', dataset[dataset['Coming_AlarmType']== 'NoA'].ReactionTime.mean()) # 5003.5
# How many times they take over under each alarm type
len(dataset[dataset['Coming_AlarmType']== 'FA'][dataset['Takeover']=='TK'].ReactionTime.unique()) #92
len(dataset[dataset['Coming_AlarmType']== 'FA'][dataset['Takeover']=='NTK'].ReactionTime.unique())
len(dataset[dataset['Coming_AlarmType']== 'TA'][dataset['Takeover']=='TK'].ReactionTime.unique()) #355
len(dataset[dataset['Coming_AlarmType']== 'TA'][dataset['Takeover']=='NTK'].ReactionTime.unique())
len(dataset[dataset['Coming_AlarmType']== 'NoA'][dataset['Takeover']=='TK'].ReactionTime.unique()) #81
len(dataset[dataset['Coming_AlarmType']== 'NoA'][dataset['Takeover']=='NTK'].ReactionTime.unique())
dataFrame_takeover_feature[dataFrame_takeover_feature['Coming_AlarmType']== 'NoA'][
dataFrame_takeover_feature['Takeover']=='NTK'].Name.value_counts()
# Drop Reaction Time features which happen after Alarm
dataset = dataset.drop(['ReactionTime','Takeover','Coming_AlarmType','TOT_Class'], axis=1)
# ======================================= Count the number of Needed columns =========================
# List of all Categorical features
Cat_Features= ['LeftLaneType','RightLaneType','NDTask']
# Get the column index of the categorical features
categorical_features = []
for i in Cat_Features:
position = dataset.columns.get_loc(i)
categorical_features.append(position)
print(categorical_features)
# Get the column index of the continuous features
conti_features = []
Cont_Filter = dataset.dtypes!=object
Cont_Filter = dataset.columns.where(Cont_Filter).tolist()
Cont_Filter_Cleaned = [name for name in Cont_Filter if str(name) !='nan']
for i in Cont_Filter_Cleaned:
position = dataset.columns.get_loc(i)
conti_features.append(position)
print(conti_features)
# How many columns will be needed for each categorical feature
print(dataset[Cat_Features].nunique(),
'There are',"--",sum(dataset[Cat_Features].nunique().loc[:]),"--",'groups in the whole dataset')
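# Hedged sketch: these index lists are typically wired into a ColumnTransformer
# such as the one below (defined but not fitted here; the choice of encoder and
# scaler is an assumption, not something dictated by the data).
preprocess = ColumnTransformer(
    transformers=[
        ('cat', OneHotEncoder(handle_unknown='ignore'), categorical_features),
        ('num', StandardScaler(), conti_features),
    ],
    remainder='drop',
)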
# ============================== Creating a Neural Network ===============================================
from pandas import read_csv
import tensorflow as tf
import GPUtil
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
from keras.models import load_model
import h5py
import pytest
# Check the GPU availability with various approaches
from tensorflow.python.client import device_lib
tf.test.gpu_device_name() # if GPU is working
print(device_lib.list_local_devices()) # Which GPUs are connected
print("Number GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
print(tf.test.is_built_with_cuda()) #to make sure that your version of tensorflow includes GPU support.
GPUtil.showUtilization()
with tf.compat.v1.Session() as sess:
devices = sess.list_devices()
print(tf.config.list_physical_devices('GPU'))
# Assigning values to X, Y
y = dataset.TOT_Three_Class
X = dataset.drop('TOT_Three_Class', axis=1)
# setting up testing and training sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=27)
# concatenate our training data back together
X = pd.concat([X_train, y_train], axis=1)
# separate minority and majority classes
FastRT = X[X.TOT_Three_Class==0]
MidRT = X[X.TOT_Three_Class==1]
SlowRT = X[X.TOT_Three_Class==2]
# upsample the minority classes
FastRT_upsampled = resample(FastRT,
replace=True, # sample with replacement
n_samples=len(MidRT), # match number in majority class
random_state=27) # reproducible results
SlowRT_upsampled = resample(SlowRT,
replace=True, # sample with replacement
n_samples=len(MidRT), # match number in majority class
random_state=27) # reproducible results
# combine majority and upsampled minority
upsampled = | pd.concat([MidRT, SlowRT_upsampled, FastRT_upsampled]) | pandas.concat |
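# Hedged sketch of the step that typically follows the rebalancing above: split the
# upsampled frame back into labels and features before fitting a model (the column
# name follows TOT_Three_Class as used earlier in this script).
y_train_balanced = upsampled.TOT_Three_Class
X_train_balanced = upsampled.drop('TOT_Three_Class', axis=1)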
import torch
from torch.utils.data import Dataset
import os
import pandas as pd
import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit
__author__ = "<NAME>"
__copyright__ = "Copyright 2018 The Aramis Lab Team"
__credits__ = ["<NAME>"]
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
#################################
# CNN train / test
#################################
def train(model, data_loader, use_cuda, loss_func, optimizer, writer, epoch, model_mode="train",
selection_threshold=None):
"""
    This is the function to train or validate the model, depending on the model_mode parameter.
    :param model: the network being trained
    :param data_loader: DataLoader yielding batches with 'image', 'label' and identifier fields
    :param use_cuda: if True, move tensors to the GPU
    :param loss_func: criterion used to compute the batch loss
    :param optimizer: optimizer stepped in "train" mode
    :param writer: tensorboard writer used to log accuracy and loss
    :param epoch: current epoch index, used to compute the global step
    :param model_mode: "train" or "valid"
    :param selection_threshold: threshold forwarded to soft_voting in "valid" mode
    :return: (results_df, accuracy_batch_mean, loss_batch_mean, global_step)
"""
global_step = None
softmax = torch.nn.Softmax(dim=1)
if model_mode == "train":
columns = ['participant_id', 'session_id', 'slice_id', 'true_label', 'predicted_label', 'proba0', 'proba1']
results_df = pd.DataFrame(columns=columns)
total_loss = 0.0
model.train() # set the model to training mode
print('The number of batches in this sampler based on the batch size: %s' % str(len(data_loader)))
for i, data in enumerate(data_loader):
# update the global step
global_step = i + epoch * len(data_loader)
if use_cuda:
imgs, labels = data['image'].cuda(), data['label'].cuda()
else:
imgs, labels = data['image'], data['label']
            ground_truth_list = labels.data.cpu().numpy().tolist()
output = model(imgs)
normalized_output = softmax(output)
_, predicted = torch.max(output.data, 1)
predict_list = predicted.data.cpu().numpy().tolist()
batch_loss = loss_func(output, labels)
total_loss += batch_loss.item()
# calculate the batch balanced accuracy and loss
            batch_metrics = evaluate_prediction(ground_truth_list, predict_list)
batch_accuracy = batch_metrics['balanced_accuracy']
writer.add_scalar('classification accuracy', batch_accuracy, global_step)
writer.add_scalar('loss', batch_loss.item(), global_step)
optimizer.zero_grad()
batch_loss.backward()
optimizer.step()
# Generate detailed DataFrame
for idx, sub in enumerate(data['participant_id']):
row = [sub, data['session_id'][idx], data['slice_id'][idx],
labels[idx].item(), predicted[idx].item(),
                       normalized_output[idx, 0].item(), normalized_output[idx, 1].item()]
row_df = pd.DataFrame(np.array(row).reshape(1, -1), columns=columns)
results_df = pd.concat([results_df, row_df])
# delete the temporary variables taking the GPU memory
del imgs, labels, output, predicted, batch_loss, batch_accuracy
torch.cuda.empty_cache()
epoch_metrics = evaluate_prediction(results_df.true_label.values.astype(int),
results_df.predicted_label.values.astype(int))
accuracy_batch_mean = epoch_metrics['balanced_accuracy']
loss_batch_mean = total_loss / len(data_loader)
torch.cuda.empty_cache()
elif model_mode == "valid":
results_df, metrics_batch = test(model, data_loader, use_cuda, loss_func)
# calculate the balanced accuracy
_, metrics_subject = soft_voting(results_df, results_df, selection_threshold=selection_threshold)
accuracy_batch_mean = metrics_subject['balanced_accuracy']
total_loss = metrics_batch['total_loss']
loss_batch_mean = total_loss / len(data_loader)
writer.add_scalar('classification accuracy', accuracy_batch_mean, epoch)
writer.add_scalar('loss', loss_batch_mean, epoch)
torch.cuda.empty_cache()
else:
raise ValueError('This mode %s was not implemented. Please choose between train and valid' % model_mode)
return results_df, accuracy_batch_mean, loss_batch_mean, global_step
def test(model, data_loader, use_cuda, loss_func):
"""
    Evaluate the trained classifier on the testing data.
    :param model: the trained network
    :param data_loader: DataLoader yielding batches with 'image', 'label' and identifier fields
    :param use_cuda: if True, move the model and tensors to the GPU
    :param loss_func: criterion used to accumulate the total loss
    :return: (results_df, results) where results holds the computed metrics and total loss
"""
softmax = torch.nn.Softmax(dim=1)
columns = ['participant_id', 'session_id', 'slice_id', 'true_label', 'predicted_label', 'proba0', 'proba1']
results_df = pd.DataFrame(columns=columns)
total_loss = 0
if use_cuda:
model.cuda()
model.eval() # set the model to evaluation mode
torch.cuda.empty_cache()
with torch.no_grad():
for i, data in enumerate(data_loader):
if use_cuda:
imgs, labels = data['image'].cuda(), data['label'].cuda()
else:
imgs, labels = data['image'], data['label']
output = model(imgs)
normalized_output = softmax(output)
loss = loss_func(output, labels)
total_loss += loss.item()
_, predicted = torch.max(output.data, 1)
# Generate detailed DataFrame
for idx, sub in enumerate(data['participant_id']):
row = [sub, data['session_id'][idx], data['slice_id'][idx].item(),
labels[idx].item(), predicted[idx].item(),
normalized_output[idx, 0].item(), normalized_output[idx, 1].item()]
row_df = pd.DataFrame(np.array(row).reshape(1, -1), columns=columns)
results_df = pd.concat([results_df, row_df])
del imgs, labels, output
torch.cuda.empty_cache()
# calculate the balanced accuracy
results = evaluate_prediction(results_df.true_label.values.astype(int),
results_df.predicted_label.values.astype(int))
results_df.reset_index(inplace=True, drop=True)
results['total_loss'] = total_loss
torch.cuda.empty_cache()
return results_df, results
def evaluate_prediction(y, y_hat):
"""
    Calculate classification metrics from the lists of true and predicted labels.
    :param y: list of true labels (0/1)
    :param y_hat: list of predicted labels (0/1)
    :return: dict with accuracy, balanced accuracy, sensitivity, specificity, ppv, npv and the confusion matrix
"""
true_positive = 0.0
true_negative = 0.0
false_positive = 0.0
false_negative = 0.0
tp = []
tn = []
fp = []
fn = []
for i in range(len(y)):
if y[i] == 1:
if y_hat[i] == 1:
true_positive += 1
tp.append(i)
else:
false_negative += 1
fn.append(i)
        else:  # y[i] == 0
if y_hat[i] == 0:
true_negative += 1
tn.append(i)
else:
false_positive += 1
fp.append(i)
accuracy = (true_positive + true_negative) / (true_positive + true_negative + false_positive + false_negative)
if (true_positive + false_negative) != 0:
sensitivity = true_positive / (true_positive + false_negative)
else:
sensitivity = 0.0
if (false_positive + true_negative) != 0:
specificity = true_negative / (false_positive + true_negative)
else:
specificity = 0.0
if (true_positive + false_positive) != 0:
ppv = true_positive / (true_positive + false_positive)
else:
ppv = 0.0
if (true_negative + false_negative) != 0:
npv = true_negative / (true_negative + false_negative)
else:
npv = 0.0
balanced_accuracy = (sensitivity + specificity) / 2
results = {'accuracy': accuracy,
'balanced_accuracy': balanced_accuracy,
'sensitivity': sensitivity,
'specificity': specificity,
'ppv': ppv,
'npv': npv,
'confusion_matrix': {'tp': len(tp), 'tn': len(tn), 'fp': len(fp), 'fn': len(fn)}
}
return results
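# Minimal usage sketch for evaluate_prediction (illustrative values only, not used
# by the training loop): one TP, FP, FN and TN each gives a balanced accuracy of 0.5.
#
#   >>> m = evaluate_prediction([1, 0, 1, 0], [1, 1, 0, 0])
#   >>> m['confusion_matrix']
#   {'tp': 1, 'tn': 1, 'fp': 1, 'fn': 1}
#   >>> m['balanced_accuracy']
#   0.5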
#################################
# Datasets
#################################
def mix_slices(df_training, df_validation, mri_plane=0, val_size=0.15):
"""
    Gather the training and validation tsv together, then redo the (intentionally leaky) data split at the slice level.
    :param df_training: DataFrame with participant_id, session_id and diagnosis columns
    :param df_validation: DataFrame with the same columns
    :param mri_plane: 0 (sagittal), 1 (coronal) or 2 (axial)
    :param val_size: fraction of slices assigned to the validation split
    :return: (df_sub_train, df_sub_valid) slice-level DataFrames
"""
df_all = pd.concat([df_training, df_validation])
df_all = df_all.reset_index(drop=True)
if mri_plane == 0:
slices_per_patient = 169 - 40
slice_index = list(np.arange(20, 169 - 20))
elif mri_plane == 1:
slices_per_patient = 208 - 40
slice_index = list(np.arange(20, 208 - 20))
else:
slices_per_patient = 179 - 40
slice_index = list(np.arange(20, 179 - 20))
participant_list = list(df_all['participant_id'])
session_list = list(df_all['session_id'])
label_list = list(df_all['diagnosis'])
slice_participant_list = [ele for ele in participant_list for _ in range(slices_per_patient)]
slice_session_list = [ele for ele in session_list for _ in range(slices_per_patient)]
slice_label_list = [ele for ele in label_list for _ in range(slices_per_patient)]
slice_index_list = slice_index * len(label_list)
df_final = pd.DataFrame(columns=['participant_id', 'session_id', 'slice_id', 'diagnosis'])
df_final['participant_id'] = np.array(slice_participant_list)
df_final['session_id'] = np.array(slice_session_list)
df_final['slice_id'] = np.array(slice_index_list)
df_final['diagnosis'] = np.array(slice_label_list)
y = np.array(slice_label_list)
# split the train data into training and validation set
skf_2 = StratifiedShuffleSplit(n_splits=1, test_size=val_size, random_state=10000)
indices = next(skf_2.split(np.zeros(len(y)), y))
train_ind, valid_ind = indices
df_sub_train = df_final.iloc[train_ind]
df_sub_valid = df_final.iloc[valid_ind]
df_sub_train.reset_index(inplace=True, drop=True)
df_sub_valid.reset_index(inplace=True, drop=True)
return df_sub_train, df_sub_valid
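# Hedged illustration of what mix_slices produces (toy frames; the subject-level
# leakage is the point of this split - slices from the same scan can end up in
# both returned DataFrames):
#
#   df_tr = pd.DataFrame({'participant_id': ['sub-01'], 'session_id': ['ses-M00'], 'diagnosis': ['AD']})
#   df_va = pd.DataFrame({'participant_id': ['sub-02'], 'session_id': ['ses-M00'], 'diagnosis': ['CN']})
#   tr_slices, va_slices = mix_slices(df_tr, df_va, mri_plane=0)
#   # tr_slices / va_slices now index individual slices (one row per slice_id).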
class MRIDataset_slice(Dataset):
"""
    This class reads the CAPS output of the DL image-processing pipeline.
    Note that it converts the MRI slices to RGB for transfer learning.
    Returns: a PyTorch Dataset object
"""
def __init__(self, caps_directory, data_file, transformations=None, mri_plane=0, prepare_dl=False):
"""
Args:
caps_directory (string): the output folder of image processing pipeline.
            transformations (callable, optional): transformations to apply to each sample, such as resizing the image.
            Note, for each view:
                Axial_view    = "[:, :, slice_i]"
                Coronal_view  = "[:, slice_i, :]"
                Sagittal_view = "[slice_i, :, :]"
"""
self.caps_directory = caps_directory
self.transformations = transformations
self.diagnosis_code = {'CN': 0, 'AD': 1, 'sMCI': 0, 'pMCI': 1, 'MCI': 1}
self.mri_plane = mri_plane
self.prepare_dl = prepare_dl
# Check the format of the tsv file here
if isinstance(data_file, str):
self.df = pd.read_csv(data_file, sep='\t')
elif isinstance(data_file, pd.DataFrame):
self.df = data_file
else:
raise Exception('The argument datafile is not of correct type.')
# This dimension is for the output of image processing pipeline of Raw: 169 * 208 * 179
if mri_plane == 0:
self.slices_per_patient = 169 - 40
self.slice_direction = 'sag'
elif mri_plane == 1:
self.slices_per_patient = 208 - 40
self.slice_direction = 'cor'
elif mri_plane == 2:
self.slices_per_patient = 179 - 40
self.slice_direction = 'axi'
def __len__(self):
return len(self.df) * self.slices_per_patient
def __getitem__(self, idx):
sub_idx = idx // self.slices_per_patient
img_name = self.df.loc[sub_idx, 'participant_id']
sess_name = self.df.loc[sub_idx, 'session_id']
img_label = self.df.loc[sub_idx, 'diagnosis']
label = self.diagnosis_code[img_label]
slice_idx = idx % self.slices_per_patient
if self.prepare_dl:
# read the slices directly
slice_path = os.path.join(self.caps_directory, 'subjects', img_name, sess_name, 't1',
'preprocessing_dl',
img_name + '_' + sess_name + '_space-MNI_res-1x1x1_axis-' +
self.slice_direction + '_rgbslice-' + str(slice_idx + 20) + '.pt')
extracted_slice = torch.load(slice_path)
else:
image_path = os.path.join(self.caps_directory, 'subjects', img_name, sess_name, 't1',
'preprocessing_dl',
img_name + '_' + sess_name + '_space-MNI_res-1x1x1.pt')
image = torch.load(image_path)
extracted_slice = extract_slice_from_mri(image, slice_idx + 20, self.mri_plane)
# check if the slice has NaN value
if torch.isnan(extracted_slice).any():
print("Slice %s has NaN values." % str(img_name + '_' + sess_name + '_' + str(slice_idx + 20)))
extracted_slice[torch.isnan(extracted_slice)] = 0
if self.transformations:
extracted_slice = self.transformations(extracted_slice)
sample = {'image_id': img_name + '_' + sess_name + '_slice' + str(slice_idx + 20), 'image': extracted_slice, 'label': label,
'participant_id': img_name, 'session_id': sess_name, 'slice_id': slice_idx + 20}
return sample
class MRIDataset_slice_mixed(Dataset):
"""
    This class reads the CAPS output of the DL image-processing pipeline, but is used for the slice-level ("bad") data split strategy.
    Note that it converts the MRI slices to RGB for transfer learning.
    Returns: a PyTorch Dataset object
"""
def __init__(self, caps_directory, data_file, transformations=None, mri_plane=0, prepare_dl=False):
"""
Args:
caps_directory (string): the output folder of image processing pipeline.
            transformations (callable, optional): transformations to apply to each sample, such as resizing the image.
            Note, for each view:
                Axial_view    = "[:, :, slice_i]"
                Coronal_view  = "[:, slice_i, :]"
                Sagittal_view = "[slice_i, :, :]"
"""
self.caps_directory = caps_directory
self.transformations = transformations
self.diagnosis_code = {'CN': 0, 'AD': 1, 'sMCI': 0, 'pMCI': 1, 'MCI': 1}
self.mri_plane = mri_plane
self.prepare_dl = prepare_dl
# Check the format of the tsv file here
if isinstance(data_file, str):
self.df = | pd.read_csv(data_file, sep='\t') | pandas.read_csv |
# required libraries
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from processing import train_lemma, test_lemma
# datasets
train = pd.read_csv('dataset/train.csv')
test = pd.read_csv('dataset/test.csv')
# preprocessed and final dataset
train_df = pd.concat([train, | pd.DataFrame(train_lemma, columns=['resumes']) | pandas.DataFrame |
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series, date_range, offsets
import pandas._testing as tm
class TestDataFrameShift:
def test_shift(self, datetime_frame, int_frame):
# naive shift
shiftedFrame = datetime_frame.shift(5)
tm.assert_index_equal(shiftedFrame.index, datetime_frame.index)
shiftedSeries = datetime_frame["A"].shift(5)
tm.assert_series_equal(shiftedFrame["A"], shiftedSeries)
shiftedFrame = datetime_frame.shift(-5)
tm.assert_index_equal(shiftedFrame.index, datetime_frame.index)
shiftedSeries = datetime_frame["A"].shift(-5)
tm.assert_series_equal(shiftedFrame["A"], shiftedSeries)
# shift by 0
unshifted = datetime_frame.shift(0)
tm.assert_frame_equal(unshifted, datetime_frame)
# shift by DateOffset
shiftedFrame = datetime_frame.shift(5, freq=offsets.BDay())
assert len(shiftedFrame) == len(datetime_frame)
shiftedFrame2 = datetime_frame.shift(5, freq="B")
tm.assert_frame_equal(shiftedFrame, shiftedFrame2)
d = datetime_frame.index[0]
shifted_d = d + offsets.BDay(5)
tm.assert_series_equal(
datetime_frame.xs(d), shiftedFrame.xs(shifted_d), check_names=False
)
# shift int frame
int_shifted = int_frame.shift(1) # noqa
# Shifting with PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
tm.assert_index_equal(unshifted.index, ps.index)
tm.assert_numpy_array_equal(
unshifted.iloc[:, 0].dropna().values, ps.iloc[:-1, 0].values
)
shifted2 = ps.shift(1, "B")
shifted3 = ps.shift(1, offsets.BDay())
tm.assert_frame_equal(shifted2, shifted3)
tm.assert_frame_equal(ps, shifted2.shift(-1, "B"))
msg = "does not match PeriodIndex freq"
with pytest.raises(ValueError, match=msg):
ps.shift(freq="D")
# shift other axis
# GH#6371
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis=1)
tm.assert_frame_equal(result, expected)
# shift named axis
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis="columns")
tm.assert_frame_equal(result, expected)
def test_shift_bool(self):
df = DataFrame({"high": [True, False], "low": [False, False]})
rs = df.shift(1)
xp = DataFrame(
np.array([[np.nan, np.nan], [True, False]], dtype=object),
columns=["high", "low"],
)
tm.assert_frame_equal(rs, xp)
def test_shift_categorical(self):
# GH#9416
s1 = Series(["a", "b", "c"], dtype="category")
s2 = Series(["A", "B", "C"], dtype="category")
df = DataFrame({"one": s1, "two": s2})
rs = df.shift(1)
xp = DataFrame({"one": s1.shift(1), "two": s2.shift(1)})
tm.assert_frame_equal(rs, xp)
def test_shift_fill_value(self):
# GH#24128
df = DataFrame(
[1, 2, 3, 4, 5], index=date_range("1/1/2000", periods=5, freq="H")
)
exp = DataFrame(
[0, 1, 2, 3, 4], index=date_range("1/1/2000", periods=5, freq="H")
)
result = df.shift(1, fill_value=0)
tm.assert_frame_equal(result, exp)
exp = DataFrame(
[0, 0, 1, 2, 3], index=date_range("1/1/2000", periods=5, freq="H")
)
result = df.shift(2, fill_value=0)
tm.assert_frame_equal(result, exp)
def test_shift_empty(self):
# Regression test for GH#8019
df = DataFrame({"foo": []})
rs = df.shift(-1)
tm.assert_frame_equal(df, rs)
def test_shift_duplicate_columns(self):
# GH#9092; verify that position-based shifting works
# in the presence of duplicate columns
column_lists = [list(range(5)), [1] * 5, [1, 1, 2, 2, 1]]
data = np.random.randn(20, 5)
shifted = []
for columns in column_lists:
df = pd.DataFrame(data.copy(), columns=columns)
for s in range(5):
df.iloc[:, s] = df.iloc[:, s].shift(s + 1)
df.columns = range(5)
shifted.append(df)
# sanity check the base case
nulls = shifted[0].isna().sum()
tm.assert_series_equal(nulls, Series(range(1, 6), dtype="int64"))
# check all answers are the same
tm.assert_frame_equal(shifted[0], shifted[1])
tm.assert_frame_equal(shifted[0], shifted[2])
def test_shift_axis1_multiple_blocks(self):
# GH#35488
df1 = pd.DataFrame(np.random.randint(1000, size=(5, 3)))
df2 = pd.DataFrame(np.random.randint(1000, size=(5, 2)))
df3 = pd.concat([df1, df2], axis=1)
assert len(df3._mgr.blocks) == 2
result = df3.shift(2, axis=1)
expected = df3.take([-1, -1, 0, 1, 2], axis=1)
expected.iloc[:, :2] = np.nan
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
# Case with periods < 0
# rebuild df3 because `take` call above consolidated
df3 = pd.concat([df1, df2], axis=1)
assert len(df3._mgr.blocks) == 2
result = df3.shift(-2, axis=1)
expected = df3.take([2, 3, 4, -1, -1], axis=1)
expected.iloc[:, -2:] = np.nan
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:tshift is deprecated:FutureWarning")
def test_tshift(self, datetime_frame):
# TODO: remove this test when tshift deprecation is enforced
# PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
tm.assert_frame_equal(unshifted, ps)
shifted2 = ps.tshift(freq="B")
tm.assert_frame_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=offsets.BDay())
tm.assert_frame_equal(shifted, shifted3)
msg = "Given freq M does not match PeriodIndex freq B"
with pytest.raises(ValueError, match=msg):
ps.tshift(freq="M")
# DatetimeIndex
shifted = datetime_frame.tshift(1)
unshifted = shifted.tshift(-1)
tm.assert_frame_equal(datetime_frame, unshifted)
shifted2 = datetime_frame.tshift(freq=datetime_frame.index.freq)
tm.assert_frame_equal(shifted, shifted2)
inferred_ts = DataFrame(
datetime_frame.values,
Index(np.asarray(datetime_frame.index)),
columns=datetime_frame.columns,
)
shifted = inferred_ts.tshift(1)
expected = datetime_frame.tshift(1)
expected.index = expected.index._with_freq(None)
tm.assert_frame_equal(shifted, expected)
unshifted = shifted.tshift(-1)
tm.assert_frame_equal(unshifted, inferred_ts)
no_freq = datetime_frame.iloc[[0, 5, 7], :]
msg = "Freq was not set in the index hence cannot be inferred"
with pytest.raises(ValueError, match=msg):
no_freq.tshift()
def test_tshift_deprecated(self, datetime_frame):
# GH#11631
with tm.assert_produces_warning(FutureWarning):
datetime_frame.tshift()
def test_period_index_frame_shift_with_freq(self):
ps = tm.makePeriodFrame()
shifted = ps.shift(1, freq="infer")
unshifted = shifted.shift(-1, freq="infer")
| tm.assert_frame_equal(unshifted, ps) | pandas._testing.assert_frame_equal |
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import matplotlib.pyplot as plt
import pandas as pd
def get_analysis(news_list):
vader = SentimentIntensityAnalyzer()
columns = ['ticker','date', 'time', 'headline']
news_df = pd.DataFrame(news_list, columns=columns)
##pd.set_option('display.max_colwidth', 1000)
scores = news_df['headline'].apply(vader.polarity_scores).tolist()
scores_df = pd.DataFrame(scores)
news_df = news_df.join(scores_df, rsuffix='_right')
news_df['date'] = | pd.to_datetime(news_df.date) | pandas.to_datetime |
import pandas as pd
import numpy as np
from copy import deepcopy
import json
from pathlib import Path
from kipoi.data import Dataset
# try:
# import torch
# from bpnet.data import Dataset
# torch.multiprocessing.set_sharing_strategy('file_system')
# except:
# print("PyTorch not installed. Using Dataset from kipoi.data")
# from kipoi.data import Dataset
from kipoi.metadata import GenomicRanges
from bpnet.utils import to_list
from bpnet.dataspecs import DataSpec
from bpnet.preproc import bin_counts, keep_interval, moving_average, IntervalAugmentor
from bpnet.extractors import _chrom_sizes, _chrom_names
from concise.utils.helper import get_from_module
from tqdm import tqdm
from concise.preprocessing import encodeDNA
from random import Random
import joblib
from bpnet.preproc import resize_interval
from genomelake.extractors import FastaExtractor, BigwigExtractor, ArrayExtractor
from kipoi_utils.data_utils import get_dataset_item
from kipoiseq.dataloaders.sequence import BedDataset
import gin
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class TsvReader:
def __init__(self, tsv_file,
num_chr=False,
label_dtype=None,
mask_ambigous=None,
# task_prefix='task/',
incl_chromosomes=None,
excl_chromosomes=None,
chromosome_lens=None,
resize_width=None
):
"""Reads a tsv/BED file in the following format:
chr start stop [task1 task2 ... ]
Args:
tsv_file: a tsv file with or without the header (i.e. BED file)
num_chr: if True, remove the 'chr' prefix from chromosome names when present
label_dtype: data type of the labels
mask_ambigous: if specified, rows where `<task>==mask_ambigous` will be omitted
incl_chromosomes (list of str): list of chromosomes to keep.
Intervals from other chromosomes are dropped.
excl_chromosomes (list of str): list of chromosomes to exclude.
Intervals from these chromosomes are dropped.
chromosome_lens (dict of int): dictionary with chromosome lengths
resize_width (int): desired interval width. The resize fixes the center
of the interval.
"""
self.tsv_file = tsv_file
self.num_chr = num_chr
self.label_dtype = label_dtype
self.incl_chromosomes = incl_chromosomes
self.excl_chromosomes = excl_chromosomes
self.chromosome_lens = chromosome_lens
self.resize_width = resize_width
columns = list( | pd.read_csv(self.tsv_file, nrows=0, sep='\t') | pandas.read_csv |
#%%
import numpy as np
import pandas as pd
import altair as alt
import anthro.io
# Generate a plot for phosphate rock production
data = pd.read_csv('../processed/IFA_phosphate_rock_public_2008_2019_processed.csv')
data['year'] = pd.to_datetime(data['Year'].astype(str), format='%Y', errors='coerce')
agg_data = | pd.DataFrame() | pandas.DataFrame |
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2019 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
try:
import talib
except:
print('PLEASE install TALIB to call these methods')
import pandas as pd
def CMO(Series, timeperiod=14):
res = talib.CMO(Series.values, timeperiod)
return pd.Series(res, index=Series.index)
def BBANDS(Series, timeperiod=5, nbdevup=2, nbdevdn=2, matype=0):
up, middle, low = talib.BBANDS(
Series.values, timeperiod, nbdevup, nbdevdn, matype)
return pd.Series(up, index=Series.index), pd.Series(middle, index=Series.index), pd.Series(low, index=Series.index)
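# Hedged usage sketch (added for illustration, not part of the original module):
# the wrappers above take a pandas Series and hand back Series aligned on the
# same index, so they can be applied directly to a close-price column.
def _wrapper_usage_demo(close):
    """`close` is assumed to be a pandas Series of closing prices."""
    momentum = CMO(close, timeperiod=14)
    upper, middle, lower = BBANDS(close, timeperiod=20, nbdevup=2, nbdevdn=2)
    # Collect the outputs into one frame keyed by indicator name
    return pd.concat(
        {'cmo': momentum, 'boll_up': upper, 'boll_mid': middle, 'boll_low': lower},
        axis=1)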
def BETA(SeriesA, SeriesB, timeperiod=5):
res = talib.BETA(SeriesA.values, SeriesB.values, timeperiod)
return pd.Series(res, index=SeriesA.index)
def CORREL(SeriesA, SeriesB, timeperiod=5):
res = talib.CORREL(SeriesA.values, SeriesB.values, timeperiod)
return pd.Series(res, index=SeriesA.index)
def DEMA(Series, timeperiod=30):
res = talib.DEMA(Series.values, timeperiod)
return pd.Series(res, index=Series.index)
def EMA(Series, timeperiod=30):
res = talib.EMA(Series.values, timeperiod)
return pd.Series(res, index=Series.index)
def HT_DCPERIOD(Series):
res = talib.HT_DCPERIOD(Series.values)
return pd.Series(res, index=Series.index)
def HT_DCPHASE(Series):
res = talib.HT_DCPHASE(Series.values)
return pd.Series(res, index=Series.index)
def HT_PHASOR(Series):
inphase, quadrature = talib.HT_PHASOR(Series.values)
return pd.Series(inphase, index=Series.index), pd.Series(quadrature, index=Series.index)
def HT_SINE(Series):
sine, leadsine = talib.HT_SINE(Series.values)
return pd.Series(sine, index=Series.index), pd.Series(leadsine, index=Series.index)
def HT_TRENDLINE(Series):
res = talib.HT_TRENDLINE(Series.values)
return pd.Series(res, index=Series.index)
def HT_TRENDMODE(Series):
res = talib.HT_TRENDMODE(Series.values)
return pd.Series(res, index=Series.index)
def KAMA(Series, timeperiod=30):
res = talib.KAMA(Series.values, timeperiod)
return pd.Series(res, index=Series.index)
def LINEARREG(Series, timeperiod=14):
res = talib.LINEARREG(Series.values, timeperiod)
return pd.Series(res, index=Series.index)
def LINEARREG_ANGLE(Series, timeperiod=14):
res = talib.LINEARREG_ANGLE(Series.values, timeperiod)
return pd.Series(res, index=Series.index)
def LINEARREG_INTERCEPT(Series, timeperiod=14):
res = talib.LINEARREG_INTERCEPT(Series.values, timeperiod)
return pd.Series(res, index=Series.index)
def LINEARREG_SLOPE(Series, timeperiod=14):
res = talib.LINEARREG_SLOPE(Series.values, timeperiod)
return pd.Series(res, index=Series.index)
# def MA(Series,):
# 废弃* 因为和QA的MA函数冲突
# def MACD(Series):
# 废弃* 因为和QA的MACD函数冲突
def MACDEXT(Series, fastperiod=12, fastmatype=0, slowperiod=26, slowmatype=0, signalperiod=9, signalmatype=0):
macd, macdsignal, macdhist = talib.MACDEXT(
Series.values, fastperiod, fastmatype, slowperiod, slowmatype, signalperiod, signalmatype)
return pd.Series(macd, index=Series.index), pd.Series(macdsignal, index=Series.index), pd.Series(macdhist, index=Series.index)
def MACDFIX(Series, timeperiod=9):
macd, macdsignal, macdhist = talib.MACDFIX(Series.values, timeperiod)
return | pd.Series(macd, index=Series.index) | pandas.Series |
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
import datetime as dt
import re
import cupy as cp
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from pandas.util.testing import (
assert_frame_equal,
assert_index_equal,
assert_series_equal,
)
import cudf
from cudf.core import DataFrame, Series
from cudf.core.index import DatetimeIndex
from cudf.tests.utils import NUMERIC_TYPES, assert_eq
def data1():
return pd.date_range("20010101", "20020215", freq="400h", name="times")
def data2():
return pd.date_range("20010101", "20020215", freq="400h", name="times")
def timeseries_us_data():
return pd.date_range(
"2019-07-16 00:00:00",
"2019-07-16 00:00:01",
freq="5555us",
name="times",
)
def timestamp_ms_data():
return pd.Series(
[
"2019-07-16 00:00:00.333",
"2019-07-16 00:00:00.666",
"2019-07-16 00:00:00.888",
]
)
def timestamp_us_data():
return pd.Series(
[
"2019-07-16 00:00:00.333333",
"2019-07-16 00:00:00.666666",
"2019-07-16 00:00:00.888888",
]
)
def timestamp_ns_data():
return pd.Series(
[
"2019-07-16 00:00:00.333333333",
"2019-07-16 00:00:00.666666666",
"2019-07-16 00:00:00.888888888",
]
)
def numerical_data():
return np.arange(1, 10)
fields = ["year", "month", "day", "hour", "minute", "second", "weekday"]
@pytest.mark.parametrize("data", [data1(), data2()])
def test_series(data):
pd_data = pd.Series(data.copy())
gdf_data = Series(pd_data)
assert_eq(pd_data, gdf_data)
@pytest.mark.parametrize(
"lhs_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
@pytest.mark.parametrize(
"rhs_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_datetime_series_binops_pandas(lhs_dtype, rhs_dtype):
pd_data_1 = pd.Series(
pd.date_range("20010101", "20020215", freq="400h", name="times")
)
pd_data_2 = pd.Series(
pd.date_range("20010101", "20020215", freq="401h", name="times")
)
gdf_data_1 = Series(pd_data_1).astype(lhs_dtype)
gdf_data_2 = Series(pd_data_2).astype(rhs_dtype)
assert_eq(pd_data_1, gdf_data_1.astype("datetime64[ns]"))
assert_eq(pd_data_2, gdf_data_2.astype("datetime64[ns]"))
assert_eq(pd_data_1 < pd_data_2, gdf_data_1 < gdf_data_2)
assert_eq(pd_data_1 > pd_data_2, gdf_data_1 > gdf_data_2)
assert_eq(pd_data_1 == pd_data_2, gdf_data_1 == gdf_data_2)
assert_eq(pd_data_1 <= pd_data_2, gdf_data_1 <= gdf_data_2)
assert_eq(pd_data_1 >= pd_data_2, gdf_data_1 >= gdf_data_2)
@pytest.mark.parametrize(
"lhs_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
@pytest.mark.parametrize(
"rhs_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_datetime_series_binops_numpy(lhs_dtype, rhs_dtype):
pd_data_1 = pd.Series(
pd.date_range("20010101", "20020215", freq="400h", name="times")
)
pd_data_2 = pd.Series(
pd.date_range("20010101", "20020215", freq="401h", name="times")
)
gdf_data_1 = Series(pd_data_1).astype(lhs_dtype)
gdf_data_2 = Series(pd_data_2).astype(rhs_dtype)
np_data_1 = np.array(pd_data_1).astype(lhs_dtype)
np_data_2 = np.array(pd_data_2).astype(rhs_dtype)
np.testing.assert_equal(np_data_1, gdf_data_1.to_array())
np.testing.assert_equal(np_data_2, gdf_data_2.to_array())
np.testing.assert_equal(
np.less(np_data_1, np_data_2), (gdf_data_1 < gdf_data_2).to_array()
)
np.testing.assert_equal(
np.greater(np_data_1, np_data_2), (gdf_data_1 > gdf_data_2).to_array()
)
np.testing.assert_equal(
np.equal(np_data_1, np_data_2), (gdf_data_1 == gdf_data_2).to_array()
)
np.testing.assert_equal(
np.less_equal(np_data_1, np_data_2),
(gdf_data_1 <= gdf_data_2).to_array(),
)
np.testing.assert_equal(
np.greater_equal(np_data_1, np_data_2),
(gdf_data_1 >= gdf_data_2).to_array(),
)
@pytest.mark.parametrize("data", [data1(), data2()])
def test_dt_ops(data):
pd_data = pd.Series(data.copy())
gdf_data = Series(data.copy())
assert_eq(pd_data == pd_data, gdf_data == gdf_data)
assert_eq(pd_data < pd_data, gdf_data < gdf_data)
assert_eq(pd_data > pd_data, gdf_data > gdf_data)
# libgdf doesn't respect timezones
@pytest.mark.parametrize("data", [data1()])
@pytest.mark.parametrize("field", fields)
def test_dt_series(data, field):
pd_data = pd.Series(data.copy())
gdf_data = Series(pd_data)
base = getattr(pd_data.dt, field)
test = getattr(gdf_data.dt, field).to_pandas().astype("int64")
assert_series_equal(base, test)
@pytest.mark.parametrize("data", [data1()])
@pytest.mark.parametrize("field", fields)
def test_dt_index(data, field):
pd_data = data.copy()
gdf_data = DatetimeIndex(pd_data)
assert_index_equal(
getattr(gdf_data, field).to_pandas(), getattr(pd_data, field)
)
def test_setitem_datetime():
df = DataFrame()
df["date"] = pd.date_range("20010101", "20010105").values
assert np.issubdtype(df.date.dtype, np.datetime64)
def test_sort_datetime():
df = pd.DataFrame()
df["date"] = np.array(
[
np.datetime64("2016-11-20"),
np.datetime64("2020-11-20"),
np.datetime64("2019-11-20"),
np.datetime64("1918-11-20"),
np.datetime64("2118-11-20"),
]
)
df["vals"] = np.random.sample(len(df["date"]))
gdf = cudf.from_pandas(df)
s_df = df.sort_values(by="date")
s_gdf = gdf.sort_values(by="date")
assert_eq(s_df, s_gdf)
def test_issue_165():
df_pandas = pd.DataFrame()
start_date = dt.datetime.strptime("2000-10-21", "%Y-%m-%d")
data = [(start_date + dt.timedelta(days=x)) for x in range(6)]
df_pandas["dates"] = data
df_pandas["num"] = [1, 2, 3, 4, 5, 6]
df_cudf = DataFrame.from_pandas(df_pandas)
base = df_pandas.query("dates==@start_date")
test = df_cudf.query("dates==@start_date")
assert_frame_equal(base, test.to_pandas())
assert len(test) > 0
mask = df_cudf.dates == start_date
base_mask = df_pandas.dates == start_date
assert_series_equal(mask.to_pandas(), base_mask, check_names=False)
assert mask.to_pandas().sum() > 0
start_date_ts = pd.Timestamp(start_date)
test = df_cudf.query("dates==@start_date_ts")
base = df_pandas.query("dates==@start_date_ts")
assert_frame_equal(base, test.to_pandas())
assert len(test) > 0
mask = df_cudf.dates == start_date_ts
base_mask = df_pandas.dates == start_date_ts
assert_series_equal(mask.to_pandas(), base_mask, check_names=False)
assert mask.to_pandas().sum() > 0
start_date_np = np.datetime64(start_date_ts, "ns")
test = df_cudf.query("dates==@start_date_np")
base = df_pandas.query("dates==@start_date_np")
assert_frame_equal(base, test.to_pandas())
assert len(test) > 0
mask = df_cudf.dates == start_date_np
base_mask = df_pandas.dates == start_date_np
assert_series_equal(mask.to_pandas(), base_mask, check_names=False)
assert mask.to_pandas().sum() > 0
@pytest.mark.parametrize("data", [data1(), data2()])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
def test_typecast_from_datetime(data, dtype):
pd_data = pd.Series(data.copy())
np_data = np.array(pd_data)
gdf_data = Series(pd_data)
np_casted = np_data.astype(dtype)
gdf_casted = gdf_data.astype(dtype)
np.testing.assert_equal(np_casted, gdf_casted.to_array())
@pytest.mark.parametrize("data", [data1(), data2()])
@pytest.mark.parametrize(
"dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_typecast_from_datetime_to_int64_to_datetime(data, dtype):
pd_data = pd.Series(data.copy())
np_data = np.array(pd_data)
gdf_data = Series(pd_data)
np_casted = np_data.astype(np.int64).astype(dtype)
gdf_casted = gdf_data.astype(np.int64).astype(dtype)
np.testing.assert_equal(np_casted, gdf_casted.to_array())
@pytest.mark.parametrize("data", [timeseries_us_data()])
@pytest.mark.parametrize(
"dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_typecast_to_different_datetime_resolutions(data, dtype):
pd_data = pd.Series(data.copy())
np_data = np.array(pd_data).astype(dtype)
gdf_series = Series(pd_data).astype(dtype)
np.testing.assert_equal(np_data, gdf_series.to_array())
@pytest.mark.parametrize(
"data", [timestamp_ms_data(), timestamp_us_data(), timestamp_ns_data()]
)
@pytest.mark.parametrize(
"dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_string_timestamp_typecast_to_different_datetime_resolutions(
data, dtype
):
pd_sr = data
gdf_sr = cudf.Series.from_pandas(pd_sr)
expect = pd_sr.values.astype(dtype)
got = gdf_sr.astype(dtype).values_host
np.testing.assert_equal(expect, got)
@pytest.mark.parametrize("data", [numerical_data()])
@pytest.mark.parametrize("from_dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"to_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_typecast_to_datetime(data, from_dtype, to_dtype):
np_data = data.astype(from_dtype)
gdf_data = Series(np_data)
np_casted = np_data.astype(to_dtype)
gdf_casted = gdf_data.astype(to_dtype)
np.testing.assert_equal(np_casted, gdf_casted.to_array())
@pytest.mark.parametrize("data", [numerical_data()])
@pytest.mark.parametrize("from_dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"to_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_typecast_to_from_datetime(data, from_dtype, to_dtype):
np_data = data.astype(from_dtype)
gdf_data = Series(np_data)
np_casted = np_data.astype(to_dtype).astype(from_dtype)
gdf_casted = gdf_data.astype(to_dtype).astype(from_dtype)
np.testing.assert_equal(np_casted, gdf_casted.to_array())
@pytest.mark.parametrize("data", [numerical_data()])
@pytest.mark.parametrize(
"from_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
@pytest.mark.parametrize(
"to_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_typecast_from_datetime_to_datetime(data, from_dtype, to_dtype):
np_data = data.astype(from_dtype)
gdf_col = Series(np_data)._column
np_casted = np_data.astype(to_dtype)
gdf_casted = gdf_col.astype(to_dtype)
np.testing.assert_equal(np_casted, gdf_casted.to_array())
@pytest.mark.parametrize("data", [numerical_data()])
@pytest.mark.parametrize("nulls", ["some", "all"])
def test_to_from_pandas_nulls(data, nulls):
pd_data = pd.Series(data.copy().astype("datetime64[ns]"))
if nulls == "some":
# Fill half the values with NaT
pd_data[list(range(0, len(pd_data), 2))] = np.datetime64("nat", "ns")
elif nulls == "all":
# Fill all the values with NaT
pd_data[:] = np.datetime64("nat", "ns")
gdf_data = Series.from_pandas(pd_data)
expect = pd_data
got = gdf_data.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize(
"dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_datetime_to_arrow(dtype):
timestamp = (
cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={}
)
.reset_index()["timestamp"]
.reset_index(drop=True)
)
gdf = DataFrame({"timestamp": timestamp.astype(dtype)})
assert_eq(gdf, DataFrame.from_arrow(gdf.to_arrow(preserve_index=False)))
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(pd.date_range("2010-01-01", "2010-02-01")),
pd.Series([None, None], dtype="datetime64[ns]"),
],
)
@pytest.mark.parametrize(
"nulls", ["none", pytest.param("some", marks=pytest.mark.xfail)]
)
def test_datetime_unique(data, nulls):
psr = pd.Series(data)
print(data)
print(nulls)
if len(data) > 0:
if nulls == "some":
p = np.random.randint(0, len(data), 2)
psr[p] = None
gsr = cudf.from_pandas(psr)
expected = psr.unique()
got = gsr.unique()
assert_eq(pd.Series(expected), got.to_pandas())
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(pd.date_range("2010-01-01", "2010-02-01")),
pd.Series([None, None], dtype="datetime64[ns]"),
],
)
@pytest.mark.parametrize("nulls", ["none", "some"])
def test_datetime_nunique(data, nulls):
psr = pd.Series(data)
if len(data) > 0:
if nulls == "some":
p = np.random.randint(0, len(data), 2)
psr[p] = None
gsr = cudf.from_pandas(psr)
expected = psr.nunique()
got = gsr.nunique()
assert_eq(got, expected)
testdata = [
(
Series(
["2018-01-01", None, "2019-01-31", None, "2018-01-01"],
dtype="datetime64[ms]",
),
True,
),
(
Series(
[
"2018-01-01",
"2018-01-02",
"2019-01-31",
"2018-03-01",
"2018-01-01",
],
dtype="datetime64[ms]",
),
False,
),
(
Series(
np.array(
["2018-01-01", None, "2019-12-30"], dtype="datetime64[ms]"
)
),
True,
),
]
@pytest.mark.parametrize("data, expected", testdata)
def test_datetime_has_null_test(data, expected):
pd_data = data.to_pandas()
count = pd_data.notna().value_counts()
expected_count = 0
if False in count.keys():
expected_count = count[False]
assert_eq(expected, data.has_nulls)
assert_eq(expected_count, data.null_count)
def test_datetime_has_null_test_pyarrow():
data = Series(
pa.array(
[0, np.iinfo("int64").min, np.iinfo("int64").max, None],
type=pa.timestamp("ns"),
)
)
expected = True
expected_count = 1
assert_eq(expected, data.has_nulls)
assert_eq(expected_count, data.null_count)
def test_datetime_dataframe():
data = {
"timearray": np.array(
[0, 1, None, 2, 20, None, 897], dtype="datetime64[ms]"
)
}
gdf = cudf.DataFrame(data)
pdf = pd.DataFrame(data)
assert_eq(pdf, gdf)
assert_eq(pdf.dropna(), gdf.dropna())
assert_eq(pdf.isnull(), gdf.isnull())
data = np.array([0, 1, None, 2, 20, None, 897], dtype="datetime64[ms]")
gs = cudf.Series(data)
ps = pd.Series(data)
assert_eq(ps, gs)
assert_eq(ps.dropna(), gs.dropna())
assert_eq(ps.isnull(), gs.isnull())
@pytest.mark.parametrize(
"data",
[
None,
[],
pd.Series([]),
pd.Index([]),
pd.Series([1, 2, 3]),
pd.Series([0, 1, -1]),
pd.Series([0, 1, -1, 100.3, 200, 47637289]),
pd.Series(["2012-10-11", "2010-01-01", "2016-07-07", "2014-02-02"]),
[1, 2, 3, 100, -123, -1, 0, 1000000000000679367],
pd.DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}),
pd.DataFrame(
{"year": ["2015", "2016"], "month": ["2", "3"], "day": [4, 5]}
),
pd.DataFrame(
{
"year": [2015, 2016],
"month": [2, 3],
"day": [4, 5],
"minute": [1, 100],
"second": [90, 10],
"hour": [1, 0.5],
},
index=["a", "b"],
),
pd.DataFrame(
{
"year": [],
"month": [],
"day": [],
"minute": [],
"second": [],
"hour": [],
},
),
["2012-10-11", "2010-01-01", "2016-07-07", "2014-02-02"],
pd.Index([1, 2, 3, 4]),
pd.DatetimeIndex(
["1970-01-01 00:00:00.000000001", "1970-01-01 00:00:00.000000002"],
dtype="datetime64[ns]",
freq=None,
),
pd.DatetimeIndex([], dtype="datetime64[ns]", freq=None,),
pd.Series([1, 2, 3]).astype("datetime64[ns]"),
pd.Series([1, 2, 3]).astype("datetime64[us]"),
pd.Series([1, 2, 3]).astype("datetime64[ms]"),
pd.Series([1, 2, 3]).astype("datetime64[s]"),
pd.Series([1, 2, 3]).astype("datetime64[D]"),
1,
100,
17,
53.638435454,
np.array([1, 10, 15, 478925, 2327623467]),
np.array([0.3474673, -10, 15, 478925.34345, 2327623467]),
],
)
@pytest.mark.parametrize("dayfirst", [True, False])
@pytest.mark.parametrize("infer_datetime_format", [True, False])
def test_cudf_to_datetime(data, dayfirst, infer_datetime_format):
pd_data = data
if isinstance(pd_data, (pd.Series, pd.DataFrame, pd.Index)):
gd_data = cudf.from_pandas(pd_data)
else:
if type(pd_data).__module__ == np.__name__:
gd_data = cp.array(pd_data)
else:
gd_data = pd_data
expected = pd.to_datetime(
pd_data, dayfirst=dayfirst, infer_datetime_format=infer_datetime_format
)
actual = cudf.to_datetime(
gd_data, dayfirst=dayfirst, infer_datetime_format=infer_datetime_format
)
assert_eq(actual, expected)
@pytest.mark.parametrize(
"data",
[
"2",
["1", "2", "3"],
["1/1/1", "2/2/2", "1"],
pd.DataFrame(
{
"year": [2015, 2016],
"month": [2, 3],
"day": [4, 5],
"minute": [1, 100],
"second": [90, 10],
"hour": [1, 0],
"blablacol": [1, 1],
}
),
pd.DataFrame(
{
"month": [2, 3],
"day": [4, 5],
"minute": [1, 100],
"second": [90, 10],
"hour": [1, 0],
}
),
],
)
def test_to_datetime_errors(data):
pd_data = data
if isinstance(pd_data, (pd.Series, pd.DataFrame, pd.Index)):
gd_data = cudf.from_pandas(pd_data)
else:
gd_data = pd_data
try:
pd.to_datetime(pd_data)
except Exception as e:
with pytest.raises(type(e), match=re.escape(str(e))):
cudf.to_datetime(gd_data)
else:
raise AssertionError("Was expecting `pd.to_datetime` to fail")
def test_to_datetime_not_implemented():
with pytest.raises(NotImplementedError):
cudf.to_datetime([], exact=False)
with pytest.raises(NotImplementedError):
cudf.to_datetime([], origin="julian")
with pytest.raises(NotImplementedError):
cudf.to_datetime([], yearfirst=True)
@pytest.mark.parametrize(
"data",
[
1,
[],
pd.Series([]),
pd.Index([]),
pd.Series([1, 2, 3]),
pd.Series([1, 2.4, 3]),
pd.Series([0, 1, -1]),
| pd.Series([0, 1, -1, 100, 200, 47637]) | pandas.Series |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2019, <NAME> <<EMAIL>>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import division
from deap import algorithms
from deap import tools
from deap.tools import emo
from deap.tools.emo import sortNondominated
from sklearn.cluster import KMeans
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neighbors.nearest_centroid import NearestCentroid
from sklearn.model_selection import train_test_split
from mpl_toolkits import mplot3d
from matplotlib.pyplot import figure
from matplotlib.ticker import PercentFormatter
import matplotlib.ticker as mtick
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import time
import sys
import os
import random
import random
def _emo_sortNondominated_idx(pop, first_front_only=False):
fronts = emo.sortNondominated(
pop, len(pop), first_front_only=first_front_only)
return [[pop.index(i) for i in f] for f in fronts]
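# Added note (not in the original source): sortNondominated groups `pop` into
# Pareto fronts of individuals; the helper above maps each individual back to its
# position in `pop`, so a front can later be used to index DataFrames built in pop order.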
def random_pop(model, N):
pop = list()
for _ in range(N):
pop.append(
model.Individual([random.random() for _ in range(model.decsNum)]))
return pop
def action_expr(model):
startat = time.time()
samples = random_pop(model, 100)
for p in samples:
model.eval(p, normalized=False)
print("100 init pop evaluated.")
for round_ in range(10):
samples.extend(random_pop(model, 20))
for p in samples[-20:]:
model.eval(p, normalized=False)
D = pd.DataFrame(data=samples, columns=model.decs)
O = pd.DataFrame(data=list(map(lambda i: i.fitness.values, samples)))
front_idx = _emo_sortNondominated_idx(
samples, first_front_only=True)[0]
next_pop = list()
for fi in front_idx:
dist_order = (D - D.loc[fi]).abs().pow(2).sum(
axis=1).sort_values().index[1:int(len(samples) * 0.1) +
1] # fetch the top 10% of samples
dD, dO = list(), list()
for i in dist_order:
for j in dist_order:
if i == j: continue
dD.append(D.iloc[i] - D.iloc[j])
dO.append(O.iloc[i] - O.iloc[j])
dD = pd.DataFrame(dD, index=range(len(dD)))
dO = pd.DataFrame(dO, index=range(len(dO)))
assert not (dO.std() < 0).any()
regr = list()
for oi, obj in enumerate(dO.columns):
regr_tmp = KNeighborsRegressor(n_neighbors=4).fit(dD, dO[obj])
regr.append(regr_tmp)
mut_dD = list()
for _ in range(D.shape[1] * 2):
mut_dD.append(D.loc[fi] * np.random.normal(0, 0.5, D.shape[1]))
mut_dD = pd.DataFrame(mut_dD, index=range(len(mut_dD)))
mut_dO = pd.DataFrame(columns=dO.columns)
for oi, obj in enumerate(mut_dO.columns):
mut_dO[obj] = regr[oi].predict(mut_dD)
filtered = (mut_dO < -1 * mut_dO.std()).any(axis=1)
new_decs = D.loc[fi] + mut_dD[filtered]
print('new eval = ', str(new_decs.shape[0]))
for nd in new_decs.index:
candidate = model.Individual(new_decs.loc[nd])
model.eval(candidate, normalized=False)
next_pop.append(candidate)
samples.extend(emo.sortNondominated(next_pop, len(next_pop), True)[0])
print(f'Round {round_} done. Sample size = {len(samples)}')
return emo.sortNondominated(
samples, len(samples), first_front_only=True)[0]
def action_expr2(model):
startat = time.time()
samples = random_pop(model, 100)
for p in samples:
model.eval(p, normalized=False)
print("100 init pop evaluated.")
for round_ in range(10):
samples.extend(random_pop(model, 20))
for p in samples[-20:]:
model.eval(p, normalized=False)
D = | pd.DataFrame(data=samples, columns=model.decs) | pandas.DataFrame |
# coding: UTF-8
import numpy as np
from numpy import nan as npNaN
import pandas as pd
from pandas import Series
import talib
from src import verify_series
def first(l=[]):
return l[0]
def last(l=[]):
return l[-1]
def highest(source, period):
return pd.Series(source).rolling(period).max().values
def lowest(source, period):
return pd.Series(source).rolling(period).min().values
def med_price(high, low):
"""
also found in tradingview as hl2 source
"""
return talib.MEDPRICE(high, low)
def avg_price(open, high, low, close):
"""
also found in tradingview as ohlc4 source
"""
return talib.AVGPRICE(open, high, low, close)
def typ_price(high,low,close):
"""
typical price, also found in tradingview as hlc3 source
"""
return talib.TYPPRICE(high, low, close)
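# Reference note (added, not in the original module): the three price sources above
# reduce to simple arithmetic on the OHLC columns -
#   med_price(high, low)              == (high + low) / 2                  # "hl2"
#   typ_price(high, low, close)       == (high + low + close) / 3          # "hlc3"
#   avg_price(open, high, low, close) == (open + high + low + close) / 4   # "ohlc4"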
def MAX(close, period):
return talib.MAX(close, period)
def highestbars(source, length):
"""
Highest value offset for a given number of bars back.
Returns offset to the highest bar.
"""
source = source[-length:]
offset = abs(length - 1 - np.argmax(source))
return offset
def lowestbars(source, length):
"""
Lowest value offset for a given number of bars back.
Returns offset to the lowest bar.
"""
source = source[-length:]
offset = abs(length - 1 - np.argmin(source))
return offset
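# Hedged worked example (added for illustration, not in the original module):
def _bar_offset_demo():
    window = [3, 9, 4, 7]
    assert highestbars(window, 4) == 2  # the high (9) printed two bars back
    assert lowestbars(window, 4) == 3   # the low (3) is the oldest bar in the window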
def tr(high, low, close):
"""
true range
"""
return talib.TRANGE(high, low, close)
def atr(high, low, close, period):
"""
average true range
"""
return talib.ATR(high, low, close, period)
def stdev(source, period):
return pd.Series(source).rolling(period).std().values
def stddev(source, period, nbdev=1):
"""
talib stdev
"""
return talib.STDDEV(source, timeperiod=period, nbdev=nbdev)
def sma(source, period):
return pd.Series(source).rolling(period).mean().values
def ema(source, period):
return talib.EMA(np.array(source), period)
def double_ema(src, length):
ema_val = ema(src, length)
return 2 * ema_val - ema(ema_val, length)
def triple_ema(src, length):
ema_val = ema(src, length)
return 3 * (ema_val - ema(ema_val, length)) + ema(ema(ema_val, length), length)
def wma(src, length):
return talib.WMA(src, length)
def ssma(src, length):
return pd.Series(src).ewm(alpha=1.0 / length).mean().values.flatten()
def hull(src, length):
return wma(2 * wma(src, length / 2) - wma(src, length), round(np.sqrt(length)))
def bbands(source, timeperiod=5, nbdevup=2, nbdevdn=2, matype=0):
return talib.BBANDS(source, timeperiod, nbdevup, nbdevdn, matype)
def macd(close, fastperiod=12, slowperiod=26, signalperiod=9):
return talib.MACD(close, fastperiod, slowperiod, signalperiod)
def adx(high, low, close, period=14):
return talib.ADX(high, low, close, period)
def di_plus(high, low, close, period=14):
return talib.PLUS_DI(high, low, close, period)
def di_minus(high, low, close, period=14):
return talib.MINUS_DI(high, low, close, period)
def rsi(close, period=14):
return talib.RSI(close, period)
def rsx(source, length=None, drift=None, offset=None):
"""
Indicator: Relative Strength Xtra (inspired by <NAME>)
"""
# Validate arguments
length = int(length) if length and length > 0 else 14
source = pd.Series(source)
source = verify_series(source, length)
#drift = get_drift(drift)
#offset = get_offset(offset)
if source is None: return
# variables
vC, v1C = 0, 0
v4, v8, v10, v14, v18, v20 = 0, 0, 0, 0, 0, 0
f0, f8, f10, f18, f20, f28, f30, f38 = 0, 0, 0, 0, 0, 0, 0, 0
f40, f48, f50, f58, f60, f68, f70, f78 = 0, 0, 0, 0, 0, 0, 0, 0
f80, f88, f90 = 0, 0, 0
# Calculate Result
m = source.size
result = [npNaN for _ in range(0, length - 1)] + [0]
for i in range(length, m):
if f90 == 0:
f90 = 1.0
f0 = 0.0
if length - 1.0 >= 5:
f88 = length - 1.0
else:
f88 = 5.0
f8 = 100.0 * source.iloc[i]
f18 = 3.0 / (length + 2.0)
f20 = 1.0 - f18
else:
if f88 <= f90:
f90 = f88 + 1
else:
f90 = f90 + 1
f10 = f8
f8 = 100 * source.iloc[i]
v8 = f8 - f10
f28 = f20 * f28 + f18 * v8
f30 = f18 * f28 + f20 * f30
vC = 1.5 * f28 - 0.5 * f30
f38 = f20 * f38 + f18 * vC
f40 = f18 * f38 + f20 * f40
v10 = 1.5 * f38 - 0.5 * f40
f48 = f20 * f48 + f18 * v10
f50 = f18 * f48 + f20 * f50
v14 = 1.5 * f48 - 0.5 * f50
f58 = f20 * f58 + f18 * abs(v8)
f60 = f18 * f58 + f20 * f60
v18 = 1.5 * f58 - 0.5 * f60
f68 = f20 * f68 + f18 * v18
f70 = f18 * f68 + f20 * f70
v1C = 1.5 * f68 - 0.5 * f70
f78 = f20 * f78 + f18 * v1C
f80 = f18 * f78 + f20 * f80
v20 = 1.5 * f78 - 0.5 * f80
if f88 >= f90 and f8 != f10:
f0 = 1.0
if f88 == f90 and f0 == 0.0:
f90 = 0.0
if f88 < f90 and v20 > 0.0000000001:
v4 = (v14 / v20 + 1.0) * 50.0
if v4 > 100.0:
v4 = 100.0
if v4 < 0.0:
v4 = 0.0
else:
v4 = 50.0
result.append(v4)
rsx = Series(result, index=source.index)
# Offset
if offset != 0 and offset != None:
rsx = rsx.shift(offset)
return rsx
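# Hedged usage sketch (illustration only; the 80/20 thresholds are an assumption,
# not from the source): rsx is read on the same 0-100 scale as a conventional RSI.
def _rsx_usage_demo(close):
    smoothed = rsx(close, length=14)
    overbought = smoothed > 80
    oversold = smoothed < 20
    return smoothed, overbought, oversold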
def cci(high, low, close, period):
return talib.CCI(high,low, close, period)
def sar(high, low, acceleration=0, maximum=0):
return talib.SAR(high, low, acceleration, maximum)
def crossover(a, b):
return a[-2] < b[-2] and a[-1] > b[-1]
def crossunder(a, b):
return a[-2] > b[-2] and a[-1] < b[-1]
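# Hedged worked example (added for illustration): crossover fires when `a` closes
# above `b` after sitting below it on the previous bar; crossunder is the mirror case.
def _cross_demo():
    fast = [1.0, 2.0, 4.0]
    slow = [3.0, 3.0, 3.0]
    assert crossover(fast, slow)       # 2 < 3 on the prior bar, 4 > 3 now
    assert not crossunder(fast, slow)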
def ord(seq, sort_seq, idx, itv):
p = seq[idx]
for i in range(0, itv):
if p >= sort_seq[i]:
return i + 1
def d(src, itv):
sort_src = np.sort(src)[::-1]
sum = 0.0
for i in range(0, itv):
sum += pow((i + 1) - ord(src, sort_src, i, itv), 2)
return sum
def rci(src, itv):
reversed_src = src[::-1]
ret = [(1.0 - 6.0 * d(reversed_src[i:i + itv], itv) / (itv * (itv * itv - 1.0))) * 100.0
for i in range(2)]
return ret[::-1]
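# Added note (not in the original module): d() is the sum of squared differences
# between time order and price rank inside the window, so each rci value is just
# Spearman's rank correlation scaled to [-100, 100]:
#   rci = (1 - 6 * d / (itv * (itv**2 - 1))) * 100
# +100 marks a window where price rose every bar, -100 one where it fell every bar.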
def vix(close, low, pd=23, bbl=23, mult=1.9, lb=88, ph=0.85, pl=1.01):
hst = highest(close, pd)
wvf = (hst - low) / hst * 100
s_dev = mult * stdev(wvf, bbl)
mid_line = sma(wvf, bbl)
lower_band = mid_line - s_dev
upper_band = mid_line + s_dev
range_high = (highest(wvf, lb)) * ph
range_low = (lowest(wvf, lb)) * pl
# walk the last 8 bars; range(8) would pull in wvf[-0] == wvf[0], the very first bar
green_hist = [wvf[-i] >= upper_band[-i] or wvf[-i] >= range_high[-i] for i in range(1, 9)][::-1]
red_hist = [wvf[-i] <= lower_band[-i] or wvf[-i] <= range_low[-i] for i in range(1, 9)][::-1]
return green_hist, red_hist
def supertrend(high, low, close, length=None, multiplier=None, offset=None):
"""
Indicator: Supertrend
"""
# Validate Arguments
length = int(length) if length and length > 0 else 7
multiplier = float(multiplier) if multiplier and multiplier > 0 else 3.0
high = pd.Series(high)
low = pd.Series(low)
close = pd.Series(close)
high = verify_series(high, length)
low = verify_series(low, length)
close = verify_series(close, length)
#offset = get_offset(offset)
if high is None or low is None or close is None: return
# Calculate Results
m = close.size
dir_, trend = [1] * m, [0] * m
long, short = [npNaN] * m, [npNaN] * m
hl2_ = med_price(high, low)
matr = multiplier * atr(high, low, close, length)
upperband = hl2_ + matr
lowerband = hl2_ - matr
for i in range(1, m):
if close.iloc[i] > upperband.iloc[i - 1]:
dir_[i] = 1
elif close.iloc[i] < lowerband.iloc[i - 1]:
dir_[i] = -1
else:
dir_[i] = dir_[i - 1]
if dir_[i] > 0 and lowerband.iloc[i] < lowerband.iloc[i - 1]:
lowerband.iloc[i] = lowerband.iloc[i - 1]
if dir_[i] < 0 and upperband.iloc[i] > upperband.iloc[i - 1]:
upperband.iloc[i] = upperband.iloc[i - 1]
if dir_[i] > 0:
trend[i] = long[i] = lowerband.iloc[i]
else:
trend[i] = short[i] = upperband.iloc[i]
# Prepare DataFrame to return
_props = f"_{length}_{multiplier}"
df = pd.DataFrame({
f"SUPERT": trend,
f"SUPERTd": dir_,
f"SUPERTl": long,
f"SUPERTs": short,
}, index=close.index)
df.name = f"SUPERT{_props}"
df.category = "overlap"
# Apply offset if needed
if offset != 0 and offset != None:
df = df.shift(offset)
return df
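# Hedged usage sketch (illustration only; column names come from the frame built above,
# the parameter values are arbitrary): high/low/close are assumed to be aligned Series.
def _supertrend_usage_demo(high, low, close):
    st = supertrend(high, low, close, length=10, multiplier=3.0)
    in_uptrend = st["SUPERTd"] == 1   # +1 while price holds above the lower band
    trailing_stop = st["SUPERT"]      # the active band, usable as a trailing stop
    return in_uptrend, trailing_stop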
def tv_supertrend(high, low, close, length=14, multiplier=3):
high = pd.Series(high)
low = pd.Series(low)
close = pd.Series(close)
# calculate ATR
price_diffs = [high - low,
high - close.shift(),
low - close.shift()]
true_range = pd.concat(price_diffs, axis=1)
true_range = true_range.abs().max(axis=1)
true_range[0] = (high[0] + low[0])/2
# default ATR calculation in supertrend indicator
atr = true_range.ewm(alpha=1/length,min_periods=length,ignore_na=True,adjust=False).mean()
# atr = sma(true_range, length)
atr.fillna(0, inplace=True)
# HL2 is simply the average of high and low prices
hl2 = (high + low) / 2
# upperband and lowerband calculation
# notice that final bands are set to be equal to the respective bands
upperband = hl2 + (multiplier * atr)
lowerband = hl2 - (multiplier * atr)
# initialize Supertrend column to 1
dir = [np.NaN] * close.size
trend = [np.NaN] * close.size
for i in range(1, len(close)):
curr, prev = i, i-1
#lowerBand := lowerBand > prevLowerBand or close[1] < prevLowerBand ? lowerBand : prevLowerBand
lowerband[curr] = lowerband[curr] if \
lowerband[curr] > lowerband[prev] or close[prev] < lowerband[prev] \
else lowerband[prev]
#upperBand := upperBand < prevUpperBand or close[1] > prevUpperBand ? upperBand : prevUpperBand
upperband[curr] = upperband[curr] if \
upperband[curr] < upperband[prev] or close[prev] > upperband[prev] \
else upperband[prev]
if np.isnan(atr[prev]):
dir[curr] = -1
elif trend[prev] == upperband[prev]:
dir[curr] = 1 if close[curr] > upperband[curr] else -1
else:
dir[curr] = -1 if close[curr] < lowerband[curr] else 1
trend[curr] = lowerband[curr] if dir[curr] == 1 else upperband[curr]
return pd.DataFrame({
f"SUPERT": trend,
f"SUPERTd": dir,
f"SUPERTl": lowerband,
f"SUPERTs": upperband,
}, index=close.index)
def donchian(high, low, lower_length=None, upper_length=None, offset=None, **kwargs):
"""
Indicator: Donchian Channels (DC)
"""
# Validate arguments
high = | pd.Series(high) | pandas.Series |
import json
from itertools import product
from unittest.mock import ANY, MagicMock, patch
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from evalml.exceptions import PipelineScoreError
from evalml.model_understanding.prediction_explanations.explainers import (
ExplainPredictionsStage,
abs_error,
cross_entropy,
explain_predictions,
explain_predictions_best_worst,
)
from evalml.pipelines import (
BinaryClassificationPipeline,
MulticlassClassificationPipeline,
RegressionPipeline,
TimeSeriesBinaryClassificationPipeline,
TimeSeriesRegressionPipeline,
)
from evalml.pipelines.components.utils import _all_estimators
from evalml.problem_types import ProblemTypes, is_binary, is_multiclass
def compare_two_tables(table_1, table_2):
assert len(table_1) == len(table_2)
for row, row_answer in zip(table_1, table_2):
assert row.strip().split() == row_answer.strip().split()
def test_error_metrics():
np.testing.assert_array_equal(
abs_error(pd.Series([1, 2, 3]), pd.Series([4, 1, 0])), np.array([3, 1, 3])
)
np.testing.assert_allclose(
cross_entropy(
pd.Series([1, 0]), pd.DataFrame({"a": [0.1, 0.2], "b": [0.9, 0.8]})
),
np.array([-np.log(0.9), -np.log(0.2)]),
)
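# Added note (not from the original test module): both metrics are per-row errors -
# abs_error is |y_true - y_pred| element-wise, and cross_entropy is -log of the
# probability assigned to the true class (-log(0.9) for row 0, -log(0.2) for row 1).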
input_features_and_y_true = [
(
[[1]],
pd.Series([1]),
"^Input features must be a dataframe with more than 10 rows!",
),
(
pd.DataFrame({"a": [1]}),
pd.Series([1]),
"^Input features must be a dataframe with more than 10 rows!",
),
(
pd.DataFrame({"a": range(15)}),
pd.Series(range(12)),
"^Parameters y_true and input_features must have the same number of data points.",
),
]
@pytest.mark.parametrize(
"input_features,y_true,error_message", input_features_and_y_true
)
def test_explain_predictions_best_worst_value_errors(
input_features, y_true, error_message
):
with pytest.raises(ValueError, match=error_message):
explain_predictions_best_worst(None, input_features, y_true)
def test_explain_predictions_raises_pipeline_score_error():
with pytest.raises(PipelineScoreError, match="Division by zero!"):
def raise_zero_division(input_features):
raise ZeroDivisionError("Division by zero!")
pipeline = MagicMock()
pipeline.problem_type = ProblemTypes.BINARY
pipeline.predict_proba.side_effect = raise_zero_division
explain_predictions_best_worst(
pipeline, pd.DataFrame({"a": range(15)}), pd.Series(range(15))
)
def test_explain_predictions_value_errors():
with pytest.raises(
ValueError, match="Parameter input_features must be a non-empty dataframe."
):
explain_predictions(MagicMock(), pd.DataFrame(), y=None, indices_to_explain=[0])
with pytest.raises(ValueError, match="Explained indices should be between"):
explain_predictions(
MagicMock(),
pd.DataFrame({"a": [0, 1, 2, 3, 4]}),
y=None,
indices_to_explain=[5],
)
with pytest.raises(ValueError, match="Explained indices should be between"):
explain_predictions(
MagicMock(),
pd.DataFrame({"a": [0, 1, 2, 3, 4]}),
y=None,
indices_to_explain=[1, 5],
)
with pytest.raises(ValueError, match="Explained indices should be between"):
explain_predictions(
MagicMock(),
pd.DataFrame({"a": [0, 1, 2, 3, 4]}),
y=None,
indices_to_explain=[-1],
)
@pytest.mark.parametrize("training_target", [None, pd.Series([1, 2, 3])])
@pytest.mark.parametrize("training_data", [None, pd.DataFrame({"a": [1, 2, 3]})])
@pytest.mark.parametrize(
"problem_type",
[
ProblemTypes.TIME_SERIES_BINARY,
ProblemTypes.TIME_SERIES_REGRESSION,
ProblemTypes.TIME_SERIES_MULTICLASS,
],
)
def test_time_series_training_target_and_training_data_are_not_None(
training_target, training_data, problem_type
):
mock_ts_pipeline = MagicMock(problem_type=problem_type)
if training_data is not None and training_target is not None:
pytest.xfail("No exception raised in this case")
with pytest.raises(
ValueError, match="training_target and training_data are not None"
):
explain_predictions(
mock_ts_pipeline,
pd.DataFrame({"a": [0, 1, 2, 3, 4]}),
y=pd.Series([1, 2, 3, 4, 5]),
indices_to_explain=[2],
training_data=training_data,
training_target=training_target,
)
with pytest.raises(
ValueError, match="training_target and training_data are not None"
):
explain_predictions_best_worst(
mock_ts_pipeline,
pd.DataFrame({"a": [0, 1, 2, 3, 4]}),
y_true=pd.Series([1, 2, 3, 4, 5]),
num_to_explain=1,
training_data=training_data,
training_target=training_target,
)
def test_output_format_checked():
input_features, y_true = pd.DataFrame(data=[range(15)]), pd.Series(range(15))
with pytest.raises(
ValueError,
match="Parameter output_format must be either text, dict, or dataframe. Received bar",
):
explain_predictions(
pipeline=MagicMock(),
input_features=input_features,
y=None,
indices_to_explain=0,
output_format="bar",
)
input_features, y_true = pd.DataFrame(data=range(15)), pd.Series(range(15))
with pytest.raises(
ValueError,
match="Parameter output_format must be either text, dict, or dataframe. Received foo",
):
explain_predictions_best_worst(
pipeline=MagicMock(),
input_features=input_features,
y_true=y_true,
output_format="foo",
)
regression_best_worst_answer = """Test Pipeline Name
Parameters go here
Best 1 of 1
Predicted Value: 1
Target Value: 2
Absolute Difference: 1.0
Index ID: {index_0}
table goes here
Worst 1 of 1
Predicted Value: 2
Target Value: 3
Absolute Difference: 4.0
Index ID: {index_1}
table goes here
"""
regression_best_worst_answer_dict = {
"explanations": [
{
"rank": {"prefix": "best", "index": 1},
"predicted_values": {
"probabilities": None,
"predicted_value": 1,
"target_value": 2,
"error_name": "Absolute Difference",
"error_value": 1.0,
},
"explanations": ["explanation_dictionary_goes_here"],
},
{
"rank": {"prefix": "worst", "index": 1},
"predicted_values": {
"probabilities": None,
"predicted_value": 2,
"target_value": 3,
"error_name": "Absolute Difference",
"error_value": 4.0,
},
"explanations": ["explanation_dictionary_goes_here"],
},
]
}
regression_best_worst_answer_df = pd.DataFrame(
{
"feature_names": [0, 0],
"feature_values": [0, 0],
"qualitative_explanation": [0, 0],
"quantitative_explanation": [0, 0],
"rank": [1, 1],
"predicted_value": [1, 2],
"target_value": [2, 3],
"error_name": ["Absolute Difference"] * 2,
"error_value": [1.0, 4.0],
"prefix": ["best", "worst"],
}
)
no_best_worst_answer = """Test Pipeline Name
Parameters go here
1 of 2
table goes here
2 of 2
table goes here
"""
no_best_worst_answer_dict = {
"explanations": [
{"explanations": ["explanation_dictionary_goes_here"]},
{"explanations": ["explanation_dictionary_goes_here"]},
]
}
no_best_worst_answer_df = pd.DataFrame(
{
"feature_names": [0, 0],
"feature_values": [0, 0],
"qualitative_explanation": [0, 0],
"quantitative_explanation": [0, 0],
"prediction_number": [0, 1],
}
)
binary_best_worst_answer = """Test Pipeline Name
Parameters go here
Best 1 of 1
Predicted Probabilities: [benign: 0.05, malignant: 0.95]
Predicted Value: malignant
Target Value: malignant
Cross Entropy: 0.2
Index ID: {index_0}
table goes here
Worst 1 of 1
Predicted Probabilities: [benign: 0.1, malignant: 0.9]
Predicted Value: malignant
Target Value: benign
Cross Entropy: 0.78
Index ID: {index_1}
table goes here
"""
binary_best_worst_answer_dict = {
"explanations": [
{
"rank": {"prefix": "best", "index": 1},
"predicted_values": {
"probabilities": {"benign": 0.05, "malignant": 0.95},
"predicted_value": "malignant",
"target_value": "malignant",
"error_name": "Cross Entropy",
"error_value": 0.2,
},
"explanations": ["explanation_dictionary_goes_here"],
},
{
"rank": {"prefix": "worst", "index": 1},
"predicted_values": {
"probabilities": {"benign": 0.1, "malignant": 0.9},
"predicted_value": "malignant",
"target_value": "benign",
"error_name": "Cross Entropy",
"error_value": 0.78,
},
"explanations": ["explanation_dictionary_goes_here"],
},
]
}
binary_best_worst_answer_df = pd.DataFrame(
{
"feature_names": [0, 0],
"feature_values": [0, 0],
"qualitative_explanation": [0, 0],
"quantitative_explanation": [0, 0],
"rank": [1, 1],
"prefix": ["best", "worst"],
"label_benign_probability": [0.05, 0.1],
"label_malignant_probability": [0.95, 0.9],
"predicted_value": ["malignant", "malignant"],
"target_value": ["malignant", "benign"],
"error_name": ["Cross Entropy"] * 2,
"error_value": [0.2, 0.78],
}
)
multiclass_table = """Class: setosa
table goes here
Class: versicolor
table goes here
Class: virginica
table goes here"""
multiclass_best_worst_answer = """Test Pipeline Name
Parameters go here
Best 1 of 1
Predicted Probabilities: [setosa: 0.8, versicolor: 0.1, virginica: 0.1]
Predicted Value: setosa
Target Value: setosa
Cross Entropy: 0.15
Index ID: {{index_0}}
{multiclass_table}
Worst 1 of 1
Predicted Probabilities: [setosa: 0.2, versicolor: 0.75, virginica: 0.05]
Predicted Value: versicolor
Target Value: versicolor
Cross Entropy: 0.34
Index ID: {{index_1}}
{multiclass_table}
""".format(
multiclass_table=multiclass_table
)
multiclass_best_worst_answer_dict = {
"explanations": [
{
"rank": {"prefix": "best", "index": 1},
"predicted_values": {
"probabilities": {"setosa": 0.8, "versicolor": 0.1, "virginica": 0.1},
"predicted_value": "setosa",
"target_value": "setosa",
"error_name": "Cross Entropy",
"error_value": 0.15,
},
"explanations": ["explanation_dictionary_goes_here"],
},
{
"rank": {"prefix": "worst", "index": 1},
"predicted_values": {
"probabilities": {"setosa": 0.2, "versicolor": 0.75, "virginica": 0.05},
"predicted_value": "versicolor",
"target_value": "versicolor",
"error_name": "Cross Entropy",
"error_value": 0.34,
},
"explanations": ["explanation_dictionary_goes_here"],
},
]
}
multiclass_best_worst_answer_df = pd.DataFrame(
{
"feature_names": [0, 0],
"feature_values": [0, 0],
"qualitative_explanation": [0, 0],
"quantitative_explanation": [0, 0],
"rank": [1, 1],
"prefix": ["best", "worst"],
"label_setosa_probability": [0.8, 0.2],
"label_versicolor_probability": [0.1, 0.75],
"label_virginica_probability": [0.1, 0.05],
"predicted_value": ["setosa", "versicolor"],
"target_value": ["setosa", "versicolor"],
"error_name": ["Cross Entropy"] * 2,
"error_value": [0.15, 0.34],
}
)
multiclass_no_best_worst_answer = """Test Pipeline Name
Parameters go here
1 of 2
{multiclass_table}
2 of 2
{multiclass_table}
""".format(
multiclass_table=multiclass_table
)
def _add_custom_index(answer, index_best, index_worst, output_format):
if output_format == "text":
answer = answer.format(index_0=index_best, index_1=index_worst)
elif output_format == "dataframe":
col_name = "prefix" if "prefix" in answer.columns else "rank"
n_repeats = answer[col_name].value_counts().tolist()[0]
answer["index_id"] = [index_best] * n_repeats + [index_worst] * n_repeats
else:
answer["explanations"][0]["predicted_values"]["index_id"] = index_best
answer["explanations"][1]["predicted_values"]["index_id"] = index_worst
return answer
def _prep_pipeline_mock(problem_type, input_features):
pipeline = MagicMock()
pipeline.parameters = "Parameters go here"
pipeline.problem_type = problem_type
pipeline.name = "Test Pipeline Name"
pipeline.compute_estimator_features.return_value = input_features
return pipeline
def _compare_reports(report, predicted_report, output_format):
if output_format == "text":
compare_two_tables(report.splitlines(), predicted_report.splitlines())
elif output_format == "dataframe":
assert sorted(report.columns.tolist()) == sorted(
predicted_report.columns.tolist()
)
pd.testing.assert_frame_equal(report, predicted_report[report.columns])
else:
assert report == predicted_report
output_formats = ["text", "dict", "dataframe"]
algorithms = ["shap", "lime"]
regression_problem_types = [
ProblemTypes.REGRESSION,
ProblemTypes.TIME_SERIES_REGRESSION,
]
regression_custom_indices = [[0, 1], [4, 10], ["foo", "bar"]]
@pytest.mark.parametrize(
"problem_type,output_format,custom_index,algorithm",
product(
regression_problem_types, output_formats, regression_custom_indices, algorithms
),
)
@patch("evalml.model_understanding.prediction_explanations.explainers.DEFAULT_METRICS")
@patch(
"evalml.model_understanding.prediction_explanations._user_interface._make_single_prediction_explanation_table"
)
def test_explain_predictions_best_worst_and_explain_predictions_regression(
mock_make_table,
mock_default_metrics,
problem_type,
output_format,
custom_index,
algorithm,
):
if output_format == "text":
mock_make_table.return_value = "table goes here"
answer = regression_best_worst_answer
explain_predictions_answer = no_best_worst_answer
elif output_format == "dataframe":
explanation_table = pd.DataFrame(
{
"feature_names": [0],
"feature_values": [0],
"qualitative_explanation": [0],
"quantitative_explanation": [0],
}
)
# Use side effect so that we always get a new copy of the dataframe
mock_make_table.side_effect = lambda *args, **kwargs: explanation_table.copy()
answer = regression_best_worst_answer_df
explain_predictions_answer = no_best_worst_answer_df
else:
mock_make_table.return_value = {
"explanations": ["explanation_dictionary_goes_here"]
}
answer = regression_best_worst_answer_dict
explain_predictions_answer = no_best_worst_answer_dict
input_features = pd.DataFrame({"a": [3, 4]}, index=custom_index)
input_features.ww.init()
pipeline = _prep_pipeline_mock(problem_type, input_features)
pipeline.predict.return_value = ww.init_series(pd.Series([2, 1]))
pipeline.predict_in_sample.return_value = ww.init_series(pd.Series([2, 1]))
pipeline.transform_all_but_final.return_value = input_features
abs_error_mock = MagicMock(__name__="abs_error")
abs_error_mock.return_value = pd.Series([4.0, 1.0], dtype="float64")
mock_default_metrics.__getitem__.return_value = abs_error_mock
y_true = pd.Series([3, 2], index=custom_index)
answer = _add_custom_index(
answer,
index_best=custom_index[1],
index_worst=custom_index[0],
output_format=output_format,
)
report = explain_predictions(
pipeline,
input_features,
y=y_true,
indices_to_explain=[0, 1],
output_format=output_format,
training_data=input_features,
training_target=y_true,
algorithm=algorithm,
)
_compare_reports(report, explain_predictions_answer, output_format)
mock_make_table.assert_called_with(
ANY,
ANY,
ANY,
index_to_explain=ANY,
top_k=ANY,
include_explainer_values=ANY,
output_format=output_format,
algorithm=algorithm,
)
best_worst_report = explain_predictions_best_worst(
pipeline,
input_features,
y_true=y_true,
num_to_explain=1,
output_format=output_format,
training_data=input_features,
training_target=y_true,
algorithm=algorithm,
)
_compare_reports(best_worst_report, answer, output_format)
mock_make_table.assert_called_with(
ANY,
ANY,
ANY,
index_to_explain=ANY,
top_k=ANY,
include_explainer_values=ANY,
output_format=output_format,
algorithm=algorithm,
)
binary_problem_types = [ProblemTypes.BINARY, ProblemTypes.TIME_SERIES_BINARY]
binary_custom_indices = [[0, 1], [7, 11], ["first", "second"]]
@pytest.mark.parametrize(
"problem_type,output_format,custom_index,algorithm",
product(binary_problem_types, output_formats, binary_custom_indices, algorithms),
)
@patch("evalml.model_understanding.prediction_explanations.explainers.DEFAULT_METRICS")
@patch(
"evalml.model_understanding.prediction_explanations._user_interface._make_single_prediction_explanation_table"
)
def test_explain_predictions_best_worst_and_explain_predictions_binary(
mock_make_table,
mock_default_metrics,
problem_type,
output_format,
custom_index,
algorithm,
):
if output_format == "text":
mock_make_table.return_value = "table goes here"
answer = binary_best_worst_answer
explain_predictions_answer = no_best_worst_answer
elif output_format == "dataframe":
explanation_table = pd.DataFrame(
{
"feature_names": [0],
"feature_values": [0],
"qualitative_explanation": [0],
"quantitative_explanation": [0],
}
)
# Use side effect so that we always get a new copy of the dataframe
mock_make_table.side_effect = lambda *args, **kwargs: explanation_table.copy()
answer = binary_best_worst_answer_df
explain_predictions_answer = no_best_worst_answer_df
else:
mock_make_table.return_value = {
"explanations": ["explanation_dictionary_goes_here"]
}
answer = binary_best_worst_answer_dict
explain_predictions_answer = no_best_worst_answer_dict
input_features = pd.DataFrame({"a": [3, 4]}, index=custom_index)
input_features.ww.init()
pipeline = _prep_pipeline_mock(problem_type, input_features)
pipeline.classes_.return_value = ["benign", "malignant"]
cross_entropy_mock = MagicMock(__name__="cross_entropy")
mock_default_metrics.__getitem__.return_value = cross_entropy_mock
cross_entropy_mock.return_value = pd.Series([0.2, 0.78])
proba = pd.DataFrame({"benign": [0.05, 0.1], "malignant": [0.95, 0.9]})
proba.ww.init()
pipeline.predict_proba.return_value = proba
pipeline.predict_proba_in_sample.return_value = proba
pipeline.predict.return_value = ww.init_series(pd.Series(["malignant"] * 2))
pipeline.predict_in_sample.return_value = ww.init_series(
pd.Series(["malignant"] * 2)
)
pipeline.transform_all_but_final.return_value = input_features
y_true = pd.Series(["malignant", "benign"], index=custom_index)
answer = _add_custom_index(
answer,
index_best=custom_index[0],
index_worst=custom_index[1],
output_format=output_format,
)
report = explain_predictions(
pipeline,
input_features,
y=y_true,
indices_to_explain=[0, 1],
output_format=output_format,
training_data=input_features,
training_target=y_true,
algorithm=algorithm,
)
_compare_reports(report, explain_predictions_answer, output_format)
mock_make_table.assert_called_with(
ANY,
ANY,
ANY,
index_to_explain=ANY,
top_k=ANY,
include_explainer_values=ANY,
output_format=output_format,
algorithm=algorithm,
)
best_worst_report = explain_predictions_best_worst(
pipeline,
input_features,
y_true=y_true,
num_to_explain=1,
output_format=output_format,
training_data=input_features,
training_target=y_true,
algorithm=algorithm,
)
_compare_reports(best_worst_report, answer, output_format)
mock_make_table.assert_called_with(
ANY,
ANY,
ANY,
index_to_explain=ANY,
top_k=ANY,
include_explainer_values=ANY,
output_format=output_format,
algorithm=algorithm,
)
multiclass_problem_types = [
ProblemTypes.MULTICLASS,
ProblemTypes.TIME_SERIES_MULTICLASS,
]
multiclass_custom_indices = [[0, 1], [17, 235], ["2020-15", "2020-15"]]
@pytest.mark.parametrize(
"problem_type,output_format,custom_index,algorithm",
product(
multiclass_problem_types, output_formats, multiclass_custom_indices, algorithms
),
)
@patch("evalml.model_understanding.prediction_explanations.explainers.DEFAULT_METRICS")
@patch(
"evalml.model_understanding.prediction_explanations._user_interface._make_single_prediction_explanation_table"
)
def test_explain_predictions_best_worst_and_explain_predictions_multiclass(
mock_make_table,
mock_default_metrics,
problem_type,
output_format,
custom_index,
algorithm,
):
if output_format == "text":
mock_make_table.return_value = "table goes here"
answer = multiclass_best_worst_answer
explain_predictions_answer = multiclass_no_best_worst_answer
elif output_format == "dataframe":
explanation_table = pd.DataFrame(
{
"feature_names": [0],
"feature_values": [0],
"qualitative_explanation": [0],
"quantitative_explanation": [0],
}
)
# Use side effect so that we always get a new copy of the dataframe
mock_make_table.side_effect = lambda *args, **kwargs: explanation_table.copy()
answer = multiclass_best_worst_answer_df
explain_predictions_answer = no_best_worst_answer_df
else:
mock_make_table.return_value = {
"explanations": ["explanation_dictionary_goes_here"]
}
answer = multiclass_best_worst_answer_dict
explain_predictions_answer = no_best_worst_answer_dict
input_features = pd.DataFrame({"a": [3, 4]}, index=custom_index)
input_features.ww.init()
pipeline = _prep_pipeline_mock(problem_type, input_features)
# Multiclass text output is formatted slightly different so need to account for that
if output_format == "text":
mock_make_table.return_value = multiclass_table
pipeline.classes_.return_value = ["setosa", "versicolor", "virginica"]
cross_entropy_mock = MagicMock(__name__="cross_entropy")
mock_default_metrics.__getitem__.return_value = cross_entropy_mock
cross_entropy_mock.return_value = pd.Series([0.15, 0.34])
proba = pd.DataFrame(
{"setosa": [0.8, 0.2], "versicolor": [0.1, 0.75], "virginica": [0.1, 0.05]}
)
proba.ww.init()
pipeline.predict_proba.return_value = proba
pipeline.predict_proba_in_sample.return_value = proba
pipeline.predict.return_value = ww.init_series(pd.Series(["setosa", "versicolor"]))
pipeline.predict_in_sample.return_value = ww.init_series(
pd.Series(["setosa", "versicolor"])
)
pipeline.transform_all_but_final.return_value = input_features
y_true = pd.Series(["setosa", "versicolor"], index=custom_index)
answer = _add_custom_index(
answer,
index_best=custom_index[0],
index_worst=custom_index[1],
output_format=output_format,
)
report = explain_predictions(
pipeline,
input_features,
y=y_true,
indices_to_explain=[0, 1],
output_format=output_format,
training_data=input_features,
training_target=y_true,
algorithm=algorithm,
)
_compare_reports(report, explain_predictions_answer, output_format)
mock_make_table.assert_called_with(
ANY,
ANY,
ANY,
index_to_explain=ANY,
top_k=ANY,
include_explainer_values=ANY,
output_format=output_format,
algorithm=algorithm,
)
best_worst_report = explain_predictions_best_worst(
pipeline,
input_features,
y_true=y_true,
num_to_explain=1,
output_format=output_format,
training_data=input_features,
training_target=y_true,
algorithm=algorithm,
)
_compare_reports(best_worst_report, answer, output_format)
mock_make_table.assert_called_with(
ANY,
ANY,
ANY,
index_to_explain=ANY,
top_k=ANY,
include_explainer_values=ANY,
output_format=output_format,
algorithm=algorithm,
)
regression_custom_metric_answer = """Test Pipeline Name
Parameters go here
Best 1 of 1
Predicted Value: 1
Target Value: 2
sum: 3
Index ID: 1
table goes here
Worst 1 of 1
Predicted Value: 2
Target Value: 3
sum: 5
Index ID: 0
table goes here
"""
regression_custom_metric_answer_dict = {
"explanations": [
{
"rank": {"prefix": "best", "index": 1},
"predicted_values": {
"probabilities": None,
"predicted_value": 1,
"target_value": 2,
"error_name": "sum",
"error_value": 3,
"index_id": 1,
},
"explanations": ["explanation_dictionary_goes_here"],
},
{
"rank": {"prefix": "worst", "index": 1},
"predicted_values": {
"probabilities": None,
"predicted_value": 2,
"target_value": 3,
"error_name": "sum",
"error_value": 5,
"index_id": 0,
},
"explanations": ["explanation_dictionary_goes_here"],
},
]
}
@pytest.mark.parametrize(
"output_format,answer",
[
("text", regression_custom_metric_answer),
("dict", regression_custom_metric_answer_dict),
],
)
@patch(
"evalml.model_understanding.prediction_explanations._user_interface._make_single_prediction_explanation_table"
)
def test_explain_predictions_best_worst_custom_metric(
mock_make_table, output_format, answer
):
mock_make_table.return_value = (
"table goes here"
if output_format == "text"
else {"explanations": ["explanation_dictionary_goes_here"]}
)
pipeline = MagicMock()
pipeline.parameters = "Parameters go here"
input_features = pd.DataFrame({"a": [5, 6]})
pipeline.problem_type = ProblemTypes.REGRESSION
pipeline.name = "Test Pipeline Name"
input_features.ww.init()
pipeline.transform_all_but_final.return_value = input_features
pipeline.predict.return_value = ww.init_series(pd.Series([2, 1]))
y_true = pd.Series([3, 2])
def sum(y_true, y_pred):
return y_pred + y_true
best_worst_report = explain_predictions_best_worst(
pipeline,
input_features,
y_true=y_true,
num_to_explain=1,
metric=sum,
output_format=output_format,
)
if output_format == "text":
compare_two_tables(
best_worst_report.splitlines(), regression_custom_metric_answer.splitlines()
)
else:
assert best_worst_report == answer
def test_explain_predictions_time_series(ts_data):
X, y = ts_data
ts_pipeline = TimeSeriesRegressionPipeline(
component_graph=[
"Time Series Featurizer",
"DateTime Featurization Component",
"Random Forest Regressor",
],
parameters={
"pipeline": {
"date_index": "date",
"gap": 0,
"max_delay": 2,
"forecast_horizon": 1,
},
"Time Series Featurizer": {
"date_index": "date",
"gap": 0,
"max_delay": 2,
"forecast_horizon": 1,
},
"Random Forest Regressor": {"n_jobs": 1},
},
)
X_train, y_train = X[:15], y[:15]
X_validation, y_validation = X[15:], y[15:]
ts_pipeline.fit(X_train, y_train)
exp = explain_predictions(
pipeline=ts_pipeline,
input_features=X_validation,
y=y_validation,
indices_to_explain=[5, 11],
output_format="dict",
training_data=X_train,
training_target=y_train,
)
# Check that the computed features to be explained aren't NaN.
for exp_idx in range(len(exp["explanations"])):
assert not np.isnan(
np.array(exp["explanations"][exp_idx]["explanations"][0]["feature_values"])
).any()
@pytest.mark.parametrize("output_format", ["text", "dict", "dataframe"])
@pytest.mark.parametrize(
"pipeline_class, estimator",
[
(TimeSeriesRegressionPipeline, "Random Forest Regressor"),
(TimeSeriesBinaryClassificationPipeline, "Logistic Regression Classifier"),
],
)
def test_explain_predictions_best_worst_time_series(
output_format, pipeline_class, estimator, ts_data, ts_data_binary
):
X, y = ts_data
if is_binary(pipeline_class.problem_type):
X, y = ts_data_binary
ts_pipeline = pipeline_class(
component_graph=[
"Time Series Featurizer",
"DateTime Featurization Component",
estimator,
],
parameters={
"pipeline": {
"date_index": "date",
"gap": 0,
"max_delay": 2,
"forecast_horizon": 1,
},
"Time Series Featurizer": {
"gap": 0,
"max_delay": 2,
"forecast_horizon": 1,
"date_index": "date",
},
},
)
X_train, y_train = X[:15], y[:15]
X_validation, y_validation = X[15:], y[15:]
ts_pipeline.fit(X_train, y_train)
exp = explain_predictions_best_worst(
pipeline=ts_pipeline,
input_features=X_validation,
y_true=y_validation,
output_format=output_format,
training_data=X_train,
training_target=y_train,
)
if output_format == "dict":
# Check that the computed features to be explained aren't NaN.
for exp_idx in range(len(exp["explanations"])):
assert not np.isnan(
np.array(
exp["explanations"][exp_idx]["explanations"][0]["feature_values"]
)
).any()
@pytest.mark.parametrize(
"problem_type",
[ProblemTypes.REGRESSION, ProblemTypes.BINARY, ProblemTypes.MULTICLASS],
)
def test_json_serialization(
problem_type,
X_y_regression,
linear_regression_pipeline_class,
X_y_binary,
logistic_regression_binary_pipeline_class,
X_y_multi,
logistic_regression_multiclass_pipeline_class,
):
if problem_type == problem_type.REGRESSION:
X, y = X_y_regression
y = pd.Series(y)
pipeline = linear_regression_pipeline_class(
parameters={"Linear Regressor": {"n_jobs": 1}}
)
elif problem_type == problem_type.BINARY:
X, y = X_y_binary
y = pd.Series(y).astype("str")
pipeline = logistic_regression_binary_pipeline_class(
parameters={"Logistic Regression Classifier": {"n_jobs": 1}}
)
else:
X, y = X_y_multi
y = pd.Series(y).astype("str")
pipeline = logistic_regression_multiclass_pipeline_class(
parameters={"Logistic Regression Classifier": {"n_jobs": 1}}
)
pipeline.fit(X, y)
best_worst = explain_predictions_best_worst(
pipeline, pd.DataFrame(X), y, num_to_explain=1, output_format="dict"
)
assert json.loads(json.dumps(best_worst)) == best_worst
report = explain_predictions(
pipeline, pd.DataFrame(X), y=y, output_format="dict", indices_to_explain=[0]
)
assert json.loads(json.dumps(report)) == report
def transform_y_for_problem_type(problem_type, y):
if problem_type == ProblemTypes.REGRESSION:
y = y.astype("int")
elif problem_type == ProblemTypes.MULTICLASS:
y = pd.Series(y).astype("str")
y[:20] = "2"
return y
EXPECTED_DATETIME_FEATURES = {
"datetime_hour",
"datetime_year",
"datetime_month",
"datetime_day_of_week",
}
EXPECTED_DATETIME_FEATURES_OHE = {
"datetime_hour",
"datetime_year",
"datetime_month_3",
"datetime_day_of_week_0",
"datetime_day_of_week_1",
"datetime_day_of_week_2",
"datetime_day_of_week_3",
"datetime_day_of_week_4",
"datetime_day_of_week_5",
"datetime_day_of_week_6",
"datetime_month_0",
"datetime_month_1",
"datetime_month_2",
"datetime_month_4",
"datetime_month_5",
"datetime_month_6",
"datetime_month_7",
}
EXPECTED_CURRENCY_FEATURES = {
"currency_XDR",
"currency_HTG",
"currency_PAB",
"currency_CNY",
"currency_TZS",
"currency_LAK",
"currency_NAD",
"currency_IMP",
"currency_QAR",
"currency_EGP",
}
EXPECTED_PROVIDER_FEATURES_OHE = {
"provider_JCB 16 digit",
"provider_Discover",
"provider_American Express",
"provider_JCB 15 digit",
"provider_Maestro",
"provider_VISA 19 digit",
"provider_VISA 13 digit",
"provider_Mastercard",
"provider_VISA 16 digit",
"provider_Diners Club / Carte Blanche",
}
EXPECTED_PROVIDER_FEATURES_TEXT = {
"DIVERSITY_SCORE(provider)",
"LSA(provider)[0]",
"LSA(provider)[1]",
"MEAN_CHARACTERS_PER_WORD(provider)",
"NUM_CHARACTERS(provider)",
"NUM_WORDS(provider)",
"POLARITY_SCORE(provider)",
}
pipeline_test_cases = [
(BinaryClassificationPipeline, "Random Forest Classifier"),
(RegressionPipeline, "Random Forest Regressor"),
(MulticlassClassificationPipeline, "Random Forest Classifier"),
]
@pytest.mark.parametrize(
"pipeline_class_and_estimator,algorithm",
product(pipeline_test_cases, algorithms),
)
def test_categories_aggregated_linear_pipeline(
pipeline_class_and_estimator, algorithm, fraud_100
):
if algorithm == "lime":
pytest.importorskip(
"lime.lime_tabular",
reason="Skipping lime value errors test because lime not installed",
)
X, y = fraud_100
pipeline_class, estimator = pipeline_class_and_estimator
pipeline = pipeline_class(
component_graph=[
"Select Columns Transformer",
"One Hot Encoder",
"DateTime Featurization Component",
estimator,
],
parameters={
"Select Columns Transformer": {
"columns": ["amount", "provider", "currency"]
},
estimator: {"n_jobs": 1},
},
)
y = transform_y_for_problem_type(pipeline.problem_type, y)
pipeline.fit(X, y)
report = explain_predictions(
pipeline,
X,
y,
indices_to_explain=[0],
output_format="dict",
algorithm=algorithm,
)
for explanation in report["explanations"][0]["explanations"]:
assert set(explanation["feature_names"]) == {"amount", "provider", "currency"}
assert set(explanation["feature_values"]) == {"CUC", "Mastercard", 24900}
assert explanation["drill_down"].keys() == {"currency", "provider"}
assert (
set(explanation["drill_down"]["currency"]["feature_names"])
== EXPECTED_CURRENCY_FEATURES
)
assert (
set(explanation["drill_down"]["provider"]["feature_names"])
== EXPECTED_PROVIDER_FEATURES_OHE
)
@pytest.mark.parametrize(
"pipeline_class_and_estimator,algorithm",
product(pipeline_test_cases, algorithms),
)
def test_categories_aggregated_text(pipeline_class_and_estimator, algorithm, fraud_100):
if algorithm == "lime":
pytest.importorskip(
"lime.lime_tabular",
reason="Skipping lime value errors test because lime not installed",
)
X, y = fraud_100
pipeline_class, estimator = pipeline_class_and_estimator
X.ww.set_types(
logical_types={
"provider": "NaturalLanguage",
}
)
component_graph = [
"Select Columns Transformer",
"One Hot Encoder",
"Natural Language Featurization Component",
"DateTime Featurization Component",
estimator,
]
pipeline = pipeline_class(
component_graph,
parameters={
"Select Columns Transformer": {
"columns": ["amount", "provider", "currency", "datetime"]
},
estimator: {"n_jobs": 1},
},
)
y = transform_y_for_problem_type(pipeline.problem_type, y)
pipeline.fit(X, y)
report = explain_predictions(
pipeline,
X,
y,
indices_to_explain=[0],
top_k_features=4,
output_format="dict",
algorithm=algorithm,
)
for explanation in report["explanations"][0]["explanations"]:
assert set(explanation["feature_names"]) == {
"amount",
"provider",
"currency",
"datetime",
}
assert set(explanation["feature_values"]) == {
"CUC",
"Mastercard",
24900,
pd.Timestamp("2019-01-01 00:12:26"),
}
assert explanation["drill_down"].keys() == {"currency", "provider", "datetime"}
assert (
set(explanation["drill_down"]["currency"]["feature_names"])
== EXPECTED_CURRENCY_FEATURES
)
assert (
set(explanation["drill_down"]["provider"]["feature_names"])
== EXPECTED_PROVIDER_FEATURES_TEXT
)
assert (
set(explanation["drill_down"]["datetime"]["feature_names"])
== EXPECTED_DATETIME_FEATURES
)
@pytest.mark.parametrize(
"pipeline_class_and_estimator,algorithm",
product(pipeline_test_cases, algorithms),
)
def test_categories_aggregated_date_ohe(
pipeline_class_and_estimator, algorithm, fraud_100
):
if algorithm == "lime":
pytest.importorskip(
"lime.lime_tabular",
reason="Skipping lime value errors test because lime not installed",
)
X, y = fraud_100
pipeline_class, estimator = pipeline_class_and_estimator
pipeline = pipeline_class(
component_graph=[
"Select Columns Transformer",
"DateTime Featurization Component",
"One Hot Encoder",
estimator,
],
parameters={
"Select Columns Transformer": {
"columns": ["datetime", "amount", "provider", "currency"]
},
"DateTime Featurization Component": {"encode_as_categories": True},
estimator: {"n_jobs": 1},
},
)
y = transform_y_for_problem_type(pipeline.problem_type, y)
pipeline.fit(X, y)
report = explain_predictions(
pipeline,
X,
y,
indices_to_explain=[0],
output_format="dict",
top_k_features=7,
algorithm=algorithm,
)
for explanation in report["explanations"][0]["explanations"]:
assert set(explanation["feature_names"]) == {
"amount",
"provider",
"currency",
"datetime",
}
assert set(explanation["feature_values"]) == {
pd.Timestamp("2019-01-01 00:12:26"),
"Mastercard",
"CUC",
24900,
}
assert explanation["drill_down"].keys() == {"currency", "provider", "datetime"}
assert (
set(explanation["drill_down"]["datetime"]["feature_names"])
== EXPECTED_DATETIME_FEATURES_OHE
)
assert (
set(explanation["drill_down"]["currency"]["feature_names"])
== EXPECTED_CURRENCY_FEATURES
)
assert (
set(explanation["drill_down"]["provider"]["feature_names"])
== EXPECTED_PROVIDER_FEATURES_OHE
)
@pytest.mark.parametrize(
"pipeline_class_and_estimator,algorithm",
product(pipeline_test_cases, algorithms),
)
def test_categories_aggregated_pca_dag(
pipeline_class_and_estimator, algorithm, fraud_100
):
if algorithm == "lime":
pytest.importorskip(
"lime.lime_tabular",
reason="Skipping lime value errors test because lime not installed",
)
X, y = fraud_100
pipeline_class, estimator = pipeline_class_and_estimator
component_graph = {
"SelectNumeric": ["Select Columns Transformer", "X", "y"],
"SelectCategorical": ["Select Columns Transformer", "X", "y"],
"SelectDate": ["Select Columns Transformer", "X", "y"],
"OHE": ["One Hot Encoder", "SelectCategorical.x", "y"],
"DT": ["DateTime Featurization Component", "SelectDate.x", "y"],
"PCA": ["PCA Transformer", "SelectNumeric.x", "y"],
"Estimator": [estimator, "PCA.x", "DT.x", "OHE.x", "y"],
}
parameters = {
"SelectNumeric": {"columns": ["card_id", "store_id", "amount", "lat", "lng"]},
"SelectCategorical": {"columns": ["currency", "provider"]},
"SelectDate": {"columns": ["datetime"]},
"PCA": {"n_components": 2},
"Estimator": {"n_jobs": 1},
}
pipeline = pipeline_class(component_graph=component_graph, parameters=parameters)
y = transform_y_for_problem_type(pipeline.problem_type, y)
pipeline.fit(X, y)
report = explain_predictions(
pipeline,
X,
y,
indices_to_explain=[0],
output_format="dict",
top_k_features=7,
algorithm=algorithm,
)
for explanation in report["explanations"][0]["explanations"]:
assert set(explanation["feature_names"]) == {
"component_0",
"component_1",
"provider",
"currency",
"datetime",
}
assert all(
[
f in explanation["feature_values"]
for f in [pd.Timestamp("2019-01-01 00:12:26"), "Mastercard", "CUC"]
]
)
assert explanation["drill_down"].keys() == {"currency", "provider", "datetime"}
assert (
set(explanation["drill_down"]["currency"]["feature_names"])
== EXPECTED_CURRENCY_FEATURES
)
assert (
set(explanation["drill_down"]["provider"]["feature_names"])
== EXPECTED_PROVIDER_FEATURES_OHE
)
assert (
set(explanation["drill_down"]["datetime"]["feature_names"])
== EXPECTED_DATETIME_FEATURES
)
@pytest.mark.parametrize(
"pipeline_class_and_estimator,algorithm",
product(pipeline_test_cases, algorithms),
)
def test_categories_aggregated_but_not_those_that_are_dropped(
pipeline_class_and_estimator, algorithm, fraud_100
):
if algorithm == "lime":
pytest.importorskip(
"lime.lime_tabular",
reason="Skipping lime value errors test because lime not installed",
)
X, y = fraud_100
pipeline_class, estimator = pipeline_class_and_estimator
component_graph = [
"Select Columns Transformer",
"One Hot Encoder",
"DateTime Featurization Component",
"Drop Columns Transformer",
estimator,
]
parameters = {
"Select Columns Transformer": {
"columns": ["amount", "provider", "currency", "datetime"]
},
"Drop Columns Transformer": {"columns": list(EXPECTED_DATETIME_FEATURES)},
estimator: {"n_jobs": 1},
}
pipeline = pipeline_class(component_graph=component_graph, parameters=parameters)
y = transform_y_for_problem_type(pipeline.problem_type, y)
pipeline.fit(X, y)
report = explain_predictions(
pipeline,
X,
y,
indices_to_explain=[0],
output_format="dict",
algorithm=algorithm,
)
for explanation in report["explanations"][0]["explanations"]:
assert set(explanation["feature_names"]) == {"amount", "provider", "currency"}
assert set(explanation["feature_values"]) == {"CUC", "Mastercard", 24900}
assert explanation["drill_down"].keys() == {"currency", "provider"}
assert (
set(explanation["drill_down"]["currency"]["feature_names"])
== EXPECTED_CURRENCY_FEATURES
)
assert (
set(explanation["drill_down"]["provider"]["feature_names"])
== EXPECTED_PROVIDER_FEATURES_OHE
)
@pytest.mark.parametrize(
"pipeline_class_and_estimator,algorithm",
product(pipeline_test_cases, algorithms),
)
def test_categories_aggregated_when_some_are_dropped(
pipeline_class_and_estimator, algorithm, fraud_100
):
if algorithm == "lime":
pytest.importorskip(
"lime.lime_tabular",
reason="Skipping lime value errors test because lime not installed",
)
X, y = fraud_100
pipeline_class, estimator = pipeline_class_and_estimator
component_graph = [
"Select Columns Transformer",
"One Hot Encoder",
"DateTime Featurization Component",
"Drop Columns Transformer",
estimator,
]
parameters = {
"Select Columns Transformer": {
"columns": ["amount", "provider", "currency", "datetime"]
},
"Drop Columns Transformer": {"columns": ["datetime_month", "datetime_hour"]},
estimator: {"n_jobs": 1},
}
pipeline = pipeline_class(component_graph=component_graph, parameters=parameters)
y = transform_y_for_problem_type(pipeline.problem_type, y)
pipeline.fit(X, y)
report = explain_predictions(
pipeline,
X,
y,
indices_to_explain=[0],
output_format="dict",
top_k_features=4,
algorithm=algorithm,
)
for explanation in report["explanations"][0]["explanations"]:
assert set(explanation["feature_names"]) == {
"amount",
"provider",
"currency",
"datetime",
}
assert set(explanation["feature_values"]) == {
"CUC",
"Mastercard",
24900,
pd.Timestamp("2019-01-01 00:12:26"),
}
assert explanation["drill_down"].keys() == {"currency", "provider", "datetime"}
assert (
set(explanation["drill_down"]["currency"]["feature_names"])
== EXPECTED_CURRENCY_FEATURES
)
assert (
set(explanation["drill_down"]["provider"]["feature_names"])
== EXPECTED_PROVIDER_FEATURES_OHE
)
assert set(explanation["drill_down"]["datetime"]["feature_names"]) == {
"datetime_year",
"datetime_day_of_week",
}
@pytest.mark.parametrize(
"algorithm",
["shap", "lime"],
)
@pytest.mark.parametrize(
"problem_type",
[ProblemTypes.BINARY, ProblemTypes.MULTICLASS, ProblemTypes.REGRESSION],
)
def test_explain_predictions_stacked_ensemble(
algorithm,
problem_type,
fraud_100,
X_y_multi,
X_y_regression,
):
if algorithm == "lime":
pytest.importorskip(
"lime.lime_tabular",
reason="Skipping lime value errors test because lime not installed",
)
if is_binary(problem_type):
X, y = fraud_100
pipeline = BinaryClassificationPipeline(
{
"DT": ["DateTime Featurization Component", "X", "y"],
"Imputer": ["Imputer", "DT.x", "y"],
"One Hot Encoder": ["One Hot Encoder", "Imputer.x", "y"],
"Drop Columns Transformer": [
"Drop Columns Transformer",
"One Hot Encoder.x",
"y",
],
"Regression": [
"Logistic Regression Classifier",
"Drop Columns Transformer.x",
"y",
],
"RF": ["Random Forest Classifier", "One Hot Encoder.x", "y"],
"Stacked Ensembler": [
"Stacked Ensemble Classifier",
"Regression.x",
"RF.x",
"y",
],
}
)
exp_feature_names = {"Col 1 RF.x", "Col 1 Regression.x"}
elif is_multiclass(problem_type):
X, y = X_y_multi
pipeline = MulticlassClassificationPipeline(
{
"Imputer": ["Imputer", "X", "y"],
"Regression": ["Logistic Regression Classifier", "Imputer.x", "y"],
"RF": ["Random Forest Classifier", "X", "y"],
"Stacked Ensembler": [
"Stacked Ensemble Classifier",
"Regression.x",
"RF.x",
"y",
],
}
)
exp_feature_names = {
"Col 0 RF.x",
"Col 1 RF.x",
"Col 2 RF.x",
"Col 0 Regression.x",
"Col 1 Regression.x",
"Col 2 Regression.x",
}
else:
X, y = X_y_regression
pipeline = RegressionPipeline(
{
"Imputer": ["Imputer", "X", "y"],
"Regression": [
"Linear Regressor",
"Imputer.x",
"y",
],
"RF": ["Random Forest Regressor", "X", "y"],
"Stacked Ensembler": [
"Stacked Ensemble Regressor",
"Regression.x",
"RF.x",
"y",
],
}
)
exp_feature_names = {"RF.x", "Regression.x"}
pipeline.fit(X, y)
report = explain_predictions(
pipeline,
X,
y,
indices_to_explain=[0],
output_format="dict",
top_k_features=10,
algorithm=algorithm,
)
explanations_data = report["explanations"][0]["explanations"][0]
assert set(explanations_data["feature_names"]) == exp_feature_names
assert (
explanations_data["quantitative_explanation"]
== [None, None, None, None, None, None]
if problem_type == ProblemTypes.MULTICLASS
else [None, None]
)
report = explain_predictions_best_worst(
pipeline, X, y, top_k_features=10, output_format="dict", algorithm=algorithm
)
explanations_data = report["explanations"]
for entry in explanations_data:
assert set(entry["explanations"][0]["feature_names"]) == exp_feature_names
@pytest.mark.parametrize(
"estimator,algorithm",
product(
[
e
for e in _all_estimators()
if (
"Classifier" in e.name
and not any(
s in e.name
for s in ["Baseline", "Cat", "Elastic", "KN", "Ensemble", "Vowpal"]
)
)
],
algorithms,
),
)
def test_explain_predictions_oversampler(estimator, algorithm, fraud_100):
pytest.importorskip(
"imblearn.over_sampling",
reason="Skipping test because imbalanced-learn not installed",
)
if algorithm == "lime":
pytest.importorskip(
"lime.lime_tabular",
reason="Skipping lime value errors test because lime not installed",
)
X, y = fraud_100
pipeline = BinaryClassificationPipeline(
component_graph={
"Imputer": ["Imputer", "X", "y"],
"One Hot Encoder": ["One Hot Encoder", "Imputer.x", "y"],
"DateTime Featurization Component": [
"DateTime Featurization Component",
"One Hot Encoder.x",
"y",
],
"Oversampler": [
"Oversampler",
"DateTime Featurization Component.x",
"y",
],
estimator: [estimator, "Oversampler.x", "Oversampler.y"],
}
)
pipeline.fit(X, y)
report = explain_predictions(
pipeline,
X,
y,
indices_to_explain=[0],
output_format="dataframe",
top_k_features=4,
algorithm=algorithm,
)
assert report["feature_names"].isnull().sum() == 0
assert report["feature_values"].isnull().sum() == 0
@patch(
"evalml.model_understanding.prediction_explanations._user_interface._make_single_prediction_explanation_table"
)
def test_explain_predictions_best_worst_callback(mock_make_table):
pipeline = MagicMock()
pipeline.parameters = "Mock parameters"
input_features = pd.DataFrame({"a": [5, 6]})
pipeline.problem_type = ProblemTypes.REGRESSION
pipeline.name = "Test Pipeline Name"
input_features.ww.init()
pipeline.transform_all_but_final.return_value = input_features
pipeline.predict.return_value = ww.init_series(pd.Series([2, 1]))
y_true = pd.Series([3, 2])
class MockCallback:
def __init__(self):
self.progress_stages = []
self.total_elapsed_time = 0
def __call__(self, progress_stage, time_elapsed):
self.progress_stages.append(progress_stage)
self.total_elapsed_time = time_elapsed
mock_callback = MockCallback()
explain_predictions_best_worst(
pipeline, input_features, y_true, num_to_explain=1, callback=mock_callback
)
assert mock_callback.progress_stages == [e for e in ExplainPredictionsStage]
assert mock_callback.total_elapsed_time > 0
@pytest.mark.parametrize("indices", [0, 1])
def test_explain_predictions_unknown(indices, X_y_binary):
X, y = X_y_binary
    X = pd.DataFrame(X)
import pandas as pd
import requests
import numpy as np
import json
import csv
import time
import datetime
import urllib3
import sys
import os
import warnings
import pandas as pd
import os
import numpy as np
from sqlalchemy import create_engine
import psycopg2
import warnings
from datetime import datetime
from pandas.core.common import SettingWithCopyWarning
warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)
engine = create_engine('postgres://gajpivqijkldsv:e71d7868249438e0b78e6cc37825dad10f322ef598118335a624165f9311720f@ec2-54-211-210-149.compute-1.amazonaws.com:5432/dc5355dnsr456p')
conn = psycopg2.connect(dbname='dc5355dnsr456p', user='gajpivqijkldsv', password='<PASSWORD>',
host='ec2-54-211-210-149.compute-1.amazonaws.com', port='5432', sslmode='require')
cursor = conn.cursor()
most_recent = pd.read_csv("most_recent_tweets.csv")
## need to get all twitter handles
## might be easier to look for the twitter handles based on
import tweepy
import csv
import pandas as pd
import numpy as np
import time
import datetime
ACCESS_TOKEN = "<KEY>"
ACCESS_TOKEN_SECRET = "<KEY>"
CONSUMER_KEY = "<KEY>"
CONSUMER_SECRET = "<KEY>"
# OAuth process, using the keys and tokens
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
# Creation of the actual interface, using authentication
api = tweepy.API(auth, wait_on_rate_limit=True)
handle = most_recent['handle'].to_list()
since_id = most_recent['status_id'].to_list()
user_name_list = []
fulltest_list = []
status_id_list = []
created_list = []
source_list = []
favorite_count = []
retweet_count = []
coordinates_list = []
geo_list = []
handle_list = []
follower_count_list = []
city_list = []
city_abr_list = []
league_list = []
division_list = []
for i in range(0,len(handle)):
followercount = api.get_user('{}'.format(handle[i])).followers_count
for status in tweepy.Cursor(api.user_timeline, screen_name= handle[i], tweet_mode="extended",since_id=since_id[i]).items():
# for status in tweepy.Cursor(api.user_timeline, screen_name= mlbHandles[i], tweet_mode="extended").items():
user_name_list.append(status.user.name)
fulltest_list.append(status.full_text)
status_id_list.append(np.array(status.id))
created_list.append(status.created_at)
source_list.append(status.source)
favorite_count.append(status.favorite_count)
retweet_count.append(status.retweet_count)
coordinates_list.append(status.coordinates)
geo_list.append(status.geo)
handle_list.append(handle[i])
# city_list.append(city_mlb[i])
# city_abr_list.append(city_abr_mlb[i])
# league_list.append(league_mlb[i])
# division_list.append(division_mlb[i])
follower_count_list.append(followercount)
time.sleep(5)
newest_tweets = pd.DataFrame({
'username': user_name_list,
'full_text': fulltest_list,
'status_id': status_id_list,
'create_at': created_list,
'source': source_list,
'favorite_count': favorite_count,
'retweet_count': retweet_count,
'coordinates': coordinates_list,
'geo': geo_list,
'handle': handle_list,
'followercount': follower_count_list,
# 'city': city_list,
# 'city_abr':city_abr_list,
# 'league':league_list,
# 'disivion':division_list
})
print('finished pulling new tweets')
# save off latest tweet id
most_recent = newest_tweets[['handle', 'create_at', 'status_id']]
most_recent = most_recent[most_recent['create_at'] == most_recent.groupby('handle')['create_at'].transform('max')]
most_recent = most_recent.reset_index(drop=True)
most_recent.to_csv("most_recent_tweets.csv")
# code to create top ten tweets
now = pd.to_datetime('now')
from datetime import datetime, timedelta
d = now - timedelta(days=1)
#get ratio and volume
top_5_volume = newest_tweets[newest_tweets['create_at'] > d]
top_5_volume = top_5_volume.sort_values(by=['favorite_count'], ascending=False)
top_5_volume['favorite_ratio'] = top_5_volume['favorite_count']/top_5_volume['followercount']
top_5_ratio = top_5_volume.sort_values(by=['favorite_ratio'], ascending=False)
top_5_ratio = top_5_ratio.drop_duplicates(subset=['username'])
top_5_volume = top_5_volume.drop_duplicates(subset=['username'])
top_5_volume = top_5_volume.head(5)
top_5_list = top_5_volume['status_id'].to_list()
# make sure ratio tweets don't have tweets from top 5
top_5_ratio = top_5_ratio[~top_5_ratio['status_id'].isin(top_5_list)]
top_5_ratio = top_5_ratio.drop_duplicates(subset = ['username'])
top_5_ratio = top_5_ratio.head(5)
# create one dataframe
tweets_top_ten = pd.concat([top_5_volume, top_5_ratio])
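# Persisting the results: the SQLAlchemy engine and psycopg2 connection opened at
# the top of this script are not used elsewhere in this excerpt, so the table
# names below are illustrative assumptions, not part of the original job.
newest_tweets.to_sql("tweets", engine, if_exists="append", index=False)
tweets_top_ten.to_sql("top_ten_tweets", engine, if_exists="replace", index=False)
cursor.close()
conn.close()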
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
class OutlierRemover(TransformerMixin, BaseEstimator):
def __init__(self,
dependent_col=None,
estimator= None,
regression=False
):
self.estimator = estimator
self.dependent = dependent_col
self.reg = regression
if estimator is None:
raise ValueError(
""" Input an estimator """
""" The estimator can be KNeigbourRegressor or RandomForestRegressor"""
)
def fit(self, X, Y):
if self.reg is False:
chk_fea = [
feature for feature in X.columns
if X[feature].dtype != object and X[feature].nunique() > 7
]
for feature in X[chk_fea].columns:
Q1 = X[feature].quantile(0.25) ###Lowe quantile
Q3 = X[feature].quantile(0.75) ###Upper quantile
IQR = Q3 - Q1 ###Interquantile range
lower_boundary = Q1 - (1.5 * IQR)
upper_boundary = Q3 + (1.5 * IQR)
###Replacing the outlier with a nan value
X.loc[X[feature] > upper_boundary, feature] = np.nan
X.loc[X[feature] < lower_boundary, feature] = np.nan
return self
if self.reg is True:
            new_data = pd.concat([X, Y], axis="columns")
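    # A transform step is not shown above. Given the IterativeImputer import, a
    # plausible sketch (an assumption, not the original implementation) is to
    # re-estimate the NaN-marked outliers with the supplied estimator:
    def transform(self, X, y=None):
        numeric_cols = X.select_dtypes(exclude=object).columns
        imputer = IterativeImputer(estimator=self.estimator, random_state=0)
        # fit_transform fills the NaNs introduced in fit() using the other columns
        X[numeric_cols] = imputer.fit_transform(X[numeric_cols])
        return X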
#!/usr/bin/env python
# coding: utf-8
# # Extract Covid-19 data from website grainmart.in using BeautifulSoup
# In[1]:
# importing the libraries
from bs4 import BeautifulSoup
import requests
import csv
import pandas as pd
# In[2]:
covid_source_filename = "/home/sanjay/campaign/dev_codes/jupyter_lab/covid_19_hackathon/dataset/covid_data_unformatted.csv"
covid_target_filename = "/home/sanjay/campaign/dev_codes/jupyter_lab/covid_19_hackathon/dataset/covid_data.csv"
gsdp_source_filename = "/home/sanjay/campaign/dev_codes/jupyter_lab/covid_19_hackathon/dataset/gsdp_state_wise_unformatted.csv"
gsdp_target_filename = "/home/sanjay/campaign/dev_codes/jupyter_lab/covid_19_hackathon/dataset/gsdp_state_wise.csv"
unemployment_source_filename = "/home/sanjay/campaign/dev_codes/jupyter_lab/covid_19_hackathon/dataset/unemployment_unformatted.csv"
unemployment_target_filename = '/home/sanjay/campaign/dev_codes/jupyter_lab/covid_19_hackathon/dataset/unemployment.csv'
population_source_filename = "/home/sanjay/campaign/dev_codes/jupyter_lab/covid_19_hackathon/dataset/population_unformatted.csv"
population_target_filename = "/home/sanjay/campaign/dev_codes/jupyter_lab/covid_19_hackathon/dataset/population.csv"
covid_pop_uemp_gsdp_output_file_name = "/home/sanjay/campaign/dev_codes/jupyter_lab/covid_19_hackathon/dataset/final_data_test_1.csv"
# In[3]:
url="https://www.grainmart.in/news/covid-19-coronavirus-india-state-and-district-wise-tally"
# Make a GET request to fetch the raw HTML content
html_content = requests.get(url).text
# Parse the html content
#soup = BeautifulSoup(html_content, "lxml")
soup = BeautifulSoup(html_content,"html.parser")
# print(soup.prettify()) # print the parsed data of html
# In[4]:
section_data = soup.find("section", {"id": "covid-19-table"})
main_list = []
# In[5]:
states_rows = section_data.find_all("div", {"class": "skgm-states"})
# In[6]:
def clean_up_soup(pre_content):
    # Strip every attribute from every tag so the district cells can be read
    # positionally via .children / .contents further below.
    pre_content.select("div.td-ddd.td-ddr.td-ddc")
    for tags in pre_content.findAll(True):
        tags.attrs = {}
    return pre_content
# In[7]:
final_state_list = []
fianle_district_list = []
headers_list = [
'state_name',
'cases',
'cured',
'active',
'deaths',
'district_name',
'district_cases',
'district_cured',
'district_active',
'district_deaths'
]
for dat in states_rows:
state_list = []
state_name = dat.find_all("span", {"class": "show-district"})[0].contents[-1]
cases = int(dat.find_all("div", {"class": "td-sc"})[0].contents[0])
cured = int(dat.find_all("div", {"class": "td-sr"})[0].contents[0])
active = int(dat.find_all("div", {"class": "td-sa"})[0].contents[0])
deaths = int(dat.find_all("div", {"class": "td-sd"})[0].contents[0])
state_list.append(state_name)
state_list.append(cases)
state_list.append(cured)
state_list.append(active)
state_list.append(deaths)
final_state_list.append(state_list)
inner_main = dat.find_all("div", {"class": "skgm-tr"})
index = 0
for inner_list in inner_main:
if index == 0:
index = index + 1
continue
inner_list = clean_up_soup(inner_list)
childs = list(inner_list.children)
district_names = childs[0].contents[0]
district_cases = childs[1].contents[0].contents[0]
district_cured = childs[2].contents[0].contents[0]
district_active = childs[3].contents[0].contents[0]
district_deaths = childs[4].contents[0].contents[0]
each_district_list = [
state_name,
cases,
cured,
active,
deaths,
district_names,
district_cases,
district_cured,
district_active,
district_deaths
]
fianle_district_list.append(each_district_list)
# In[8]:
# writing to csv file
with open(covid_source_filename, 'w') as csvfile:
# creating a csv writer object
csvwriter = csv.writer(csvfile)
# writing the fields
csvwriter.writerow(headers_list)
# writing the data rows
csvwriter.writerows(fianle_district_list)
# # Transform GSDP dataset as per requirements using Pandas
# In[33]:
# reading csv file
gsdp_df = pd.read_csv(gsdp_source_filename)
gsdp_df.rename(
columns={
'year':'gsdp_year'
},
inplace=True
)
# In[34]:
gsdp_dataset = gsdp_df.melt(id_vars=["gsdp_year"], var_name="gsdp_region", value_name="gsdp_in_crores")
gsdp_dataset['gsdp_year'] = pd.to_datetime(gsdp_dataset['gsdp_year'].astype(str), format='%Y')
gsdp_dataset['gsdp_year'] = pd.to_datetime(gsdp_dataset['gsdp_year']).dt.to_period('Y')
# In[35]:
gsdp_dataset= gsdp_dataset.applymap(lambda s:s.lower() if type(s) == str else s)
# In[36]:
gsdp_dataset.to_csv(gsdp_target_filename, sep=',', encoding='utf-8', index = False)
# # Unemployment dataset
# ### source - unemploymentinindia.cmie.com
# In[37]:
# reading csv file
unemployment_df = pd.read_csv(unemployment_source_filename)
unemployment_df= unemployment_df.applymap(lambda s:s.lower() if type(s) == str else s)
# In[38]:
unemployment_df.rename(
columns={
'Estimated Unemployment Rate (%)':'unemployment_rate',
'Region':'unemployment_region',
'Date': 'date',
'Estimated Employed': 'employed',
'Estimated Labour Participation Rate (%)': 'lbour_participation_rate'
},
inplace=True
)
# In[39]:
unemployment_df['unemployment_year'] = pd.to_datetime(unemployment_df['date']).dt.to_period('Y')
unemployment_grouped_df = unemployment_df.groupby(['unemployment_region', 'unemployment_year']).agg(
{'unemployment_rate': ['mean', 'min', 'max'],
'employed': ['mean', 'min', 'max'],
'lbour_participation_rate': ['mean', 'min', 'max']})
# In[40]:
unemployment_grouped_df.columns = ["_".join(x) for x in unemployment_grouped_df.columns.ravel()]
# In[41]:
unemployment_grouped_df['unemployment_rate_mean'] = unemployment_grouped_df['unemployment_rate_mean'].round(2)
unemployment_grouped_df['employed_mean'] = unemployment_grouped_df['employed_mean'].astype(int)
unemployment_grouped_df['lbour_participation_rate_mean'] = unemployment_grouped_df['lbour_participation_rate_mean'].round(2)
# In[42]:
unemployment_grouped_df.reset_index(inplace=True)
unemployment_grouped_df.to_csv(unemployment_target_filename, sep=',', encoding='utf-8', index = False)
# # Population Dataset
# ### Source - PDF - https://nhm.gov.in/New_Updates_2018/Report_Population_Projection_2019.pdf
# In[85]:
# reading csv file
population_df = pd.read_csv(population_source_filename)
population_df.rename(
columns={
'Year':'population_year'
},
inplace=True
)
# In[86]:
population_dataset = population_df.melt(
id_vars=["population_year"], var_name="population_region", value_name="population"
)
population_dataset['population_year'] = pd.to_datetime(population_dataset['population_year'].astype(str), format='%Y')
population_dataset['population_year'] = pd.to_datetime(population_dataset['population_year']).dt.to_period('Y')
population_dataset= population_dataset.applymap(lambda s:s.lower() if type(s) == str else s)
# In[87]:
def modify_data(row, return_type='state_province_country'):
if return_type == 'population_type':
value = row['population_region']
if '.1' in value:
return 'male'
elif '.2' in value:
return 'female'
else:
return 'total'
if return_type == 'state_province_country':
value = row['population_region']
if '.1' in value:
return value.replace('.1', '')
elif '.2' in value:
return value.replace('.2', '')
else:
return value
# In[88]:
population_dataset['population_type'] = population_dataset.apply(
lambda row: modify_data(row, return_type="population_type"), axis=1
)
# In[89]:
population_dataset['population_region'] = population_dataset.apply(
lambda row: modify_data(row, return_type="state_province_country"), axis=1
)
# In[90]:
population_dataset.to_csv(population_target_filename, sep=',', encoding='utf-8', index = False)
# # Covid data
# In[25]:
covid_df = pd.read_csv(covid_source_filename)
covid_df= covid_df.applymap(lambda s:s.lower() if type(s) == str else s)
# In[26]:
def calculate_new_rows(row, _type="IFR", region='state'):
if _type == "IFR":
if region == 'state':
try:
return round(row['deaths']/row['active'] * 100, 2)
except ZeroDivisionError:
return 0
else:
try:
return round(row['district_deaths']/row['district_active'] * 100, 2)
except ZeroDivisionError:
return 0
elif _type == "CFR":
if region == 'state':
try:
return round(row['deaths']/row['cases'] * 100, 2)
except ZeroDivisionError:
return 0
else:
try:
return round(row['district_deaths']/row['district_cases'] * 100, 2)
except ZeroDivisionError:
return 0
elif _type == "CFR_CURRENT":
if region == 'state':
try:
return round(row['deaths']/(row['deaths'] + row['cured']) * 100, 2)
except ZeroDivisionError:
return 0
else:
try:
return round(row['district_deaths']/(row['district_deaths'] + row['district_cured']) * 100, 2)
except ZeroDivisionError:
return 0
# # IFR per state and district
# ### Infection fatality ratio (IFR, in %) = Number of deaths from disease/Number of infected individuals x 100
#
# In[27]:
covid_df['state_IFR'] = covid_df.apply(
lambda row: calculate_new_rows(row, _type="IFR"), axis=1
)
covid_df['district_IFR'] = covid_df.apply(
lambda row: calculate_new_rows(row, _type="IFR", region='district'), axis=1
)
# # CFR per state and district
# ## Case fatality ratio (IFR, in %) = Number of deaths from disease/Number of confirmed cases x 100
# In[28]:
covid_df['state_CFR'] = covid_df.apply(
lambda row: calculate_new_rows(row, _type="CFR"), axis=1
)
covid_df['district_CFR'] = covid_df.apply(
lambda row: calculate_new_rows(row, _type="CFR", region='district'), axis=1
)
# ## CFR per state and district during an ongoing epidemic
# ## Infection fatality ratio (IFR, in %) = Number of deaths from disease/(Number of deaths from disease + Number of recovered cases) x 100
# In[29]:
covid_df['state_CFR_CURRENT'] = covid_df.apply(
lambda row: calculate_new_rows(row, _type="CFR_CURRENT"), axis=1
)
covid_df['district_CFR_CURRENT'] = covid_df.apply(
lambda row: calculate_new_rows(row, _type="CFR_CURRENT", region='district'), axis=1
)
# In[30]:
covid_df.to_csv(covid_target_filename, sep=',', encoding='utf-8', index = False)
# # Transformation based on all source datasets
# In[124]:
covid_df = pd.read_csv(covid_target_filename)
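# In[ ]:
# A sketch of a final combination step (an assumption, not from the original
# notebook): join the covid figures with the population, unemployment and GSDP
# tables on the region name and write the combined file defined at the top.
# Year alignment/aggregation between the sources is omitted for brevity.
population_final = pd.read_csv(population_target_filename)
unemployment_final = pd.read_csv(unemployment_target_filename)
gsdp_final = pd.read_csv(gsdp_target_filename)
final_df = (
    covid_df
    .merge(population_final, left_on="state_name", right_on="population_region", how="left")
    .merge(unemployment_final, left_on="state_name", right_on="unemployment_region", how="left")
    .merge(gsdp_final, left_on="state_name", right_on="gsdp_region", how="left")
)
final_df.to_csv(covid_pop_uemp_gsdp_output_file_name, sep=",", encoding="utf-8", index=False)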