prompt (string, 19–1.03M chars) | completion (string, 4–2.12k chars) | api (string, 8–90 chars)
---|---|---|
# -*- coding: utf-8 -*-
try:
import json
except ImportError:
import simplejson as json
import math
import pytz
import locale
import pytest
import time
import datetime
import calendar
import re
import decimal
import dateutil
from functools import partial
from pandas.compat import range, StringIO, u
from pandas._libs.tslib import Timestamp
import pandas._libs.json as ujson
import pandas.compat as compat
import numpy as np
from pandas import DataFrame, Series, Index, NaT, DatetimeIndex, date_range
import pandas.util.testing as tm
json_unicode = (json.dumps if compat.PY3
else partial(json.dumps, encoding="utf-8"))
def _clean_dict(d):
"""
Sanitize dictionary for JSON by converting all keys to strings.
Parameters
----------
d : dict
The dictionary to convert.
Returns
-------
cleaned_dict : dict
"""
return {str(k): v for k, v in compat.iteritems(d)}
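# Example (illustrative, not part of the original module): non-string keys are
# stringified so the dict can round-trip through JSON, e.g.
#   _clean_dict({1: "a", (2, 3): "b"})  ->  {"1": "a", "(2, 3)": "b"}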
@pytest.fixture(params=[
None, # Column indexed by default.
"split",
"records",
"values",
"index"])
def orient(request):
return request.param
@pytest.fixture(params=[None, True])
def numpy(request):
return request.param
class TestUltraJSONTests(object):
@pytest.mark.skipif(compat.is_platform_32bit(),
reason="not compliant on 32-bit, xref #15865")
def test_encode_decimal(self):
sut = decimal.Decimal("1337.1337")
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert decoded == 1337.1337
sut = decimal.Decimal("0.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.94")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "0.9"
decoded = ujson.decode(encoded)
assert decoded == 0.9
sut = decimal.Decimal("1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "2.0"
decoded = ujson.decode(encoded)
assert decoded == 2.0
sut = decimal.Decimal("-1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "-2.0"
decoded = ujson.decode(encoded)
assert decoded == -2.0
sut = decimal.Decimal("0.995")
encoded = ujson.encode(sut, double_precision=2)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.9995")
encoded = ujson.encode(sut, double_precision=3)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.99999999999999944")
encoded = ujson.encode(sut, double_precision=15)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
@pytest.mark.parametrize("ensure_ascii", [True, False])
def test_encode_string_conversion(self, ensure_ascii):
string_input = "A string \\ / \b \f \n \r \t </script> &"
not_html_encoded = ('"A string \\\\ \\/ \\b \\f \\n '
'\\r \\t <\\/script> &"')
html_encoded = ('"A string \\\\ \\/ \\b \\f \\n \\r \\t '
'\\u003c\\/script\\u003e \\u0026"')
def helper(expected_output, **encode_kwargs):
output = ujson.encode(string_input,
ensure_ascii=ensure_ascii,
**encode_kwargs)
assert output == expected_output
assert string_input == json.loads(output)
assert string_input == ujson.decode(output)
# Default behavior assumes encode_html_chars=False.
helper(not_html_encoded)
# Make sure explicit encode_html_chars=False works.
helper(not_html_encoded, encode_html_chars=False)
# Make sure explicit encode_html_chars=True does the encoding.
helper(html_encoded, encode_html_chars=True)
@pytest.mark.parametrize("long_number", [
-4342969734183514, -12345678901234.56789012, -528656961.4399388
])
def test_double_long_numbers(self, long_number):
sut = {u("a"): long_number}
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert sut == decoded
def test_encode_non_c_locale(self):
lc_category = locale.LC_NUMERIC
# We just need one of these locales to work.
for new_locale in ("it_IT.UTF-8", "Italian_Italy"):
if tm.can_set_locale(new_locale, lc_category):
with tm.set_locale(new_locale, lc_category):
assert ujson.loads(ujson.dumps(4.78e60)) == 4.78e60
assert ujson.loads("4.78", precise_float=True) == 4.78
break
def test_decimal_decode_test_precise(self):
sut = {u("a"): 4.56}
encoded = ujson.encode(sut)
decoded = ujson.decode(encoded, precise_float=True)
assert sut == decoded
@pytest.mark.skipif(compat.is_platform_windows() and not compat.PY3,
reason="buggy on win-64 for py2")
def test_encode_double_tiny_exponential(self):
num = 1e-40
assert num == ujson.decode(ujson.encode(num))
num = 1e-100
assert num == ujson.decode(ujson.encode(num))
num = -1e-45
assert num == ujson.decode(ujson.encode(num))
num = -1e-145
assert np.allclose(num, ujson.decode(ujson.encode(num)))
@pytest.mark.parametrize("unicode_key", [
u("key1"), u("بن")
])
def test_encode_dict_with_unicode_keys(self, unicode_key):
unicode_dict = {unicode_key: u("value1")}
assert unicode_dict == ujson.decode(ujson.encode(unicode_dict))
@pytest.mark.parametrize("double_input", [
math.pi,
-math.pi # Should work with negatives too.
])
def test_encode_double_conversion(self, double_input):
output = ujson.encode(double_input)
assert round(double_input, 5) == round(json.loads(output), 5)
assert round(double_input, 5) == round(ujson.decode(output), 5)
def test_encode_with_decimal(self):
decimal_input = 1.0
output = ujson.encode(decimal_input)
assert output == "1.0"
def test_encode_array_of_nested_arrays(self):
nested_input = [[[[]]]] * 20
output = ujson.encode(nested_input)
assert nested_input == json.loads(output)
assert nested_input == ujson.decode(output)
nested_input = np.array(nested_input)
tm.assert_numpy_array_equal(nested_input, ujson.decode(
output, numpy=True, dtype=nested_input.dtype))
def test_encode_array_of_doubles(self):
doubles_input = [31337.31337, 31337.31337,
31337.31337, 31337.31337] * 10
output = ujson.encode(doubles_input)
assert doubles_input == json.loads(output)
assert doubles_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(doubles_input),
ujson.decode(output, numpy=True))
def test_double_precision(self):
double_input = 30.012345678901234
output = ujson.encode(double_input, double_precision=15)
assert double_input == json.loads(output)
assert double_input == ujson.decode(output)
for double_precision in (3, 9):
output = ujson.encode(double_input,
double_precision=double_precision)
rounded_input = round(double_input, double_precision)
assert rounded_input == json.loads(output)
assert rounded_input == ujson.decode(output)
@pytest.mark.parametrize("invalid_val", [
20, -1, "9", None
])
def test_invalid_double_precision(self, invalid_val):
double_input = 30.12345678901234567890
expected_exception = (ValueError if isinstance(invalid_val, int)
else TypeError)
with pytest.raises(expected_exception):
ujson.encode(double_input, double_precision=invalid_val)
def test_encode_string_conversion2(self):
string_input = "A string \\ / \b \f \n \r \t"
output = ujson.encode(string_input)
assert string_input == json.loads(output)
assert string_input == ujson.decode(output)
assert output == '"A string \\\\ \\/ \\b \\f \\n \\r \\t"'
@pytest.mark.parametrize("unicode_input", [
"Räksmörgås اسامة بن محمد بن عوض بن لادن",
"\xe6\x97\xa5\xd1\x88"
])
def test_encode_unicode_conversion(self, unicode_input):
enc = ujson.encode(unicode_input)
dec = ujson.decode(enc)
assert enc == json_unicode(unicode_input)
assert dec == json.loads(enc)
def test_encode_control_escaping(self):
escaped_input = "\x19"
enc = ujson.encode(escaped_input)
dec = ujson.decode(enc)
assert escaped_input == dec
assert enc == json_unicode(escaped_input)
def test_encode_unicode_surrogate_pair(self):
surrogate_input = "\xf0\x90\x8d\x86"
enc = ujson.encode(surrogate_input)
dec = ujson.decode(enc)
assert enc == json_unicode(surrogate_input)
assert dec == json.loads(enc)
def test_encode_unicode_4bytes_utf8(self):
four_bytes_input = "\xf0\x91\x80\xb0TRAILINGNORMAL"
enc = ujson.encode(four_bytes_input)
dec = ujson.decode(enc)
assert enc == json_unicode(four_bytes_input)
assert dec == json.loads(enc)
def test_encode_unicode_4bytes_utf8highest(self):
four_bytes_input = "\xf3\xbf\xbf\xbfTRAILINGNORMAL"
enc = ujson.encode(four_bytes_input)
dec = ujson.decode(enc)
assert enc == json_unicode(four_bytes_input)
assert dec == json.loads(enc)
def test_encode_array_in_array(self):
arr_in_arr_input = [[[[]]]]
output = ujson.encode(arr_in_arr_input)
assert arr_in_arr_input == json.loads(output)
assert output == json.dumps(arr_in_arr_input)
assert arr_in_arr_input ==
completion: ujson.decode(output) | api: pandas._libs.json.decode
import pandas as pd
import numpy as np
from mstrio.connection import Connection
from mstrio.application_objects.datasets import SuperCube
from functools import reduce
def vdo_invoice_calc(df):
"""Calculates only bill values."""
df['REVENUE'] = df['REVENUE'].astype(float)
df['PRICE_NO_DISCOUNT'] = df['PRICE_NO_DISCOUNT'].astype(float)
df['PRICE_TAG'] = df['PRICE_TAG'].astype(int)
df['ENTER_DATE'] = pd.DatetimeIndex(df['ENTER_DATE'])
df['ENTER_DATE'] = df['ENTER_DATE'].dt.date
df['LEAVE_DATE'] = pd.DatetimeIndex(df['LEAVE_DATE'])
df['LEAVE_DATE'] = df['LEAVE_DATE'].dt.date
df['BIRTH_DATE'] = pd.DatetimeIndex(df['BIRTH_DATE'])
df['MAIN_CATEGORY_CODE'] = df['MAIN_CATEGORY_CODE'].astype(str)
df.loc[df['MAIN_CATEGORY_CODE'].isin(['115', '129', '147', '161']), 'BUNDLE'] = 'YES'
df.loc[(df['MAIN_CATEGORY_CODE'] == '44') & (df['PRICE_TAG'] >= 25), 'BUNDLE'] = 'YES'
df['SESSION_NO'] = df['CUSTOMERNO'].astype(str) + df['DATE_TAG'].astype(str)
df['AGE'] = pd.DatetimeIndex(df['ENTER_DATE']).year - pd.DatetimeIndex(df['BIRTH_DATE']).year
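# Note: subtracting calendar years only approximates age; customers whose
# birthday falls after ENTER_DATE within the same year come out one year older.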
df['STORE_NAME'].replace(to_replace=['Aadvark'], value='ADVRK', inplace=True)
df_bndl_revenue = df[df['BUNDLE'] == 'YES'].groupby('SESSION_NO').agg({'REVENUE': 'sum'})
df_bndl_revenue = pd.DataFrame(df_bndl_revenue).reset_index()
df_bndl_revenue.columns = ['SESSION_NO', 'BUNDLE_REVENUE']
df_bndl_individual_rvn = df[df['PAYMENT_TAG'] == 'P'].groupby('SESSION_NO').agg({'REVENUE': 'sum'})
df_bndl_individual_rvn = pd.DataFrame(df_bndl_individual_rvn).reset_index()
df_bndl_individual_rvn.columns = ['SESSION_NO', 'BUNDLE_IND_SUM']
merged = df_bndl_revenue.merge(df_bndl_individual_rvn, on='SESSION_NO', how='outer',
left_index=False, right_index=False)
merged['RATIO'] = merged['BUNDLE_REVENUE'] / merged['BUNDLE_IND_SUM']
df_last = df.merge(merged[['SESSION_NO', 'RATIO']], on='SESSION_NO', how='outer', left_index=False, right_index=False)
df_last.loc[df_last['PAYMENT_TAG'] == 'P', 'UNBUNDLED_DISCOUNTED'] = df_last['REVENUE'] * df_last['RATIO']
df_last.loc[df_last['PAYMENT_TAG'] != 'P', 'UNBUNDLED_DISCOUNTED'] = df_last['REVENUE']
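# Interpretation (hedged): RATIO spreads each session's bundle revenue over its
# individually priced ('P') lines in proportion to their own revenue, so the
# scaled 'P' lines sum back to the bundle total while other lines keep their
# original revenue.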
df_last.loc[df_last['MAIN_CATEGORY_CODE'].isin(['102', '108', '109', '110', '111', '112', '114',
'115', '116', '117', '120', '121', '123', '124', '125', '129',
'132', '133', '134', '136', '141', '149',
'150', '157', '166', '175', '179', '182']), 'MATCHING_CAT'] = 'SUPPLEMENT'
df_last.loc[df_last['MAIN_CATEGORY_CODE'].isin(['113', '122', '128', '145', '163']), 'MATCHING_CAT'] = 'CONSUMABLES'
df_last.loc[df_last['MAIN_CATEGORY_CODE'].isin(['103', '118', '135']), 'MATCHING_CAT'] = 'HOMEBUILD'
df_last.loc[df_last['MAIN_CATEGORY_CODE'].isin(['106', '107', '131', '154', '181']), 'MATCHING_CAT'] = 'PROFESSIONAL'
df_last.loc[(df_last['MAIN_CATEGORY_CODE'] == '04') & (df_last['PRICE_TAG'].between(10, 60)), 'MATCHING_CAT'] = 'PROFESSIONAL'
df_last.loc[(df_last['MAIN_CATEGORY_CODE'] == '04') & (df_last['PRICE_TAG'] == 100), 'MATCHING_CAT'] = 'HOMEBUILD'
df_last.loc[(df_last['MAIN_CATEGORY_CODE'] == '04') & (df_last['PRICE_TAG'] == 70), 'MATCHING_CAT'] = 'HOMEBUILD'
df_last.loc[df_last['MATCHING_CAT'].isna(), 'MATCHING_CAT'] = 'Other' # to detect uncategorized goods
seller_fee_includes = df_last[(df_last['MAIN_CATEGORY_CODE'] == '04') & (df_last['PRICE_TAG'] == 1)]['CUSTOMERNO'].unique()
depot_fee_includes = df_last[df_last['MAIN_CATEGORY_CODE'] == '45']['CUSTOMERNO'].unique()
pivot = pd.pivot_table(df_last[(df_last['BUNDLE'] != 'YES') & (~df_last['PAYMENT_TAG'].isin(['M', 'S', 'X']))],
index='CUSTOMERNO', columns='MATCHING_CAT', values='UNBUNDLED_DISCOUNTED', aggfunc=np.sum)
pivot = pd.DataFrame(pivot).reset_index()
pivot_info = df_last.groupby('CUSTOMERNO').agg({'SESSION_NO': 'first',
'STATE': 'first',
'GENDER': 'first',
'AGE': 'first',
'STORE_CODE': 'first',
'STORE_NAME': 'first',
'OPERATION': 'first',
'NAME_SURNAME': 'first',
'SELLER_NAME': 'first',
'ENTER_DATE': 'first',
'LEAVE_DATE': 'first'})
pivot_info =
completion: pd.DataFrame(pivot_info) | api: pandas.DataFrame
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 23 11:11:57 2018
@author: kazuki.onodera
-d- -> /
-x- -> *
-p- -> +
-m- -> -
nohup python -u 000.py 0 > LOG/log_000.py_0.txt &
nohup python -u 000.py 1 > LOG/log_000.py_1.txt &
nohup python -u 000.py 2 > LOG/log_000.py_2.txt &
nohup python -u 000.py 3 > LOG/log_000.py_3.txt &
nohup python -u 000.py 4 > LOG/log_000.py_4.txt &
nohup python -u 000.py 5 > LOG/log_000.py_5.txt &
nohup python -u 000.py 6 > LOG/log_000.py_6.txt &
"""
import numpy as np
import pandas as pd
from multiprocessing import Pool, cpu_count
NTHREAD = cpu_count()
from itertools import combinations
from tqdm import tqdm
import sys
argv = sys.argv
import os, utils, gc
utils.start(__file__)
#==============================================================================
folders = [
# '../data',
'../feature', '../feature_unused',
# '../feature_var0', '../feature_corr1'
]
for fol in folders:
os.system(f'rm -rf {fol}')
os.system(f'mkdir {fol}')
col_app_money = ['app_AMT_INCOME_TOTAL', 'app_AMT_CREDIT', 'app_AMT_ANNUITY', 'app_AMT_GOODS_PRICE']
col_app_day = ['app_DAYS_BIRTH', 'app_DAYS_EMPLOYED', 'app_DAYS_REGISTRATION', 'app_DAYS_ID_PUBLISH', 'app_DAYS_LAST_PHONE_CHANGE']
def get_trte():
usecols = ['SK_ID_CURR', 'AMT_INCOME_TOTAL', 'AMT_CREDIT', 'AMT_ANNUITY', 'AMT_GOODS_PRICE']
usecols += ['DAYS_BIRTH', 'DAYS_EMPLOYED', 'DAYS_REGISTRATION', 'DAYS_ID_PUBLISH', 'DAYS_LAST_PHONE_CHANGE']
rename_di = {
'AMT_INCOME_TOTAL': 'app_AMT_INCOME_TOTAL',
'AMT_CREDIT': 'app_AMT_CREDIT',
'AMT_ANNUITY': 'app_AMT_ANNUITY',
'AMT_GOODS_PRICE': 'app_AMT_GOODS_PRICE',
'DAYS_BIRTH': 'app_DAYS_BIRTH',
'DAYS_EMPLOYED': 'app_DAYS_EMPLOYED',
'DAYS_REGISTRATION': 'app_DAYS_REGISTRATION',
'DAYS_ID_PUBLISH': 'app_DAYS_ID_PUBLISH',
'DAYS_LAST_PHONE_CHANGE': 'app_DAYS_LAST_PHONE_CHANGE',
}
trte = pd.concat([pd.read_csv('../input/application_train.csv.zip', usecols=usecols).rename(columns=rename_di),
pd.read_csv('../input/application_test.csv.zip', usecols=usecols).rename(columns=rename_di)],
ignore_index=True)
return trte
def prep_prev(df):
df['AMT_APPLICATION'].replace(0, np.nan, inplace=True)
df['AMT_CREDIT'].replace(0, np.nan, inplace=True)
df['CNT_PAYMENT'].replace(0, np.nan, inplace=True)
df['AMT_DOWN_PAYMENT'].replace(np.nan, 0, inplace=True)
df.loc[df['NAME_CONTRACT_STATUS']!='Approved', 'AMT_DOWN_PAYMENT'] = np.nan
df['RATE_DOWN_PAYMENT'].replace(np.nan, 0, inplace=True)
df.loc[df['NAME_CONTRACT_STATUS']!='Approved', 'RATE_DOWN_PAYMENT'] = np.nan
# df['xxx'].replace(0, np.nan, inplace=True)
# df['xxx'].replace(0, np.nan, inplace=True)
return
p = int(argv[1])
if True:
#def multi(p):
if p==0:
# =============================================================================
# application
# =============================================================================
def f1(df):
df['CODE_GENDER'] = 1 - (df['CODE_GENDER']=='F')*1 # 4 'XNA' are converted to 'M'
df['FLAG_OWN_CAR'] = (df['FLAG_OWN_CAR']=='Y')*1
df['FLAG_OWN_REALTY'] = (df['FLAG_OWN_REALTY']=='Y')*1
df['EMERGENCYSTATE_MODE'] = (df['EMERGENCYSTATE_MODE']=='Yes')*1
df['AMT_CREDIT-d-AMT_INCOME_TOTAL'] = df['AMT_CREDIT'] / df['AMT_INCOME_TOTAL']
df['AMT_ANNUITY-d-AMT_INCOME_TOTAL'] = df['AMT_ANNUITY'] / df['AMT_INCOME_TOTAL']
df['AMT_GOODS_PRICE-d-AMT_INCOME_TOTAL'] = df['AMT_GOODS_PRICE'] / df['AMT_INCOME_TOTAL']
df['AMT_CREDIT-d-AMT_ANNUITY'] = df['AMT_CREDIT'] / df['AMT_ANNUITY']  # repayment horizon in months
df['AMT_GOODS_PRICE-d-AMT_ANNUITY'] = df['AMT_GOODS_PRICE'] / df['AMT_ANNUITY']  # repayment horizon in months
df['AMT_GOODS_PRICE-d-AMT_CREDIT'] = df['AMT_GOODS_PRICE'] / df['AMT_CREDIT']
df['AMT_GOODS_PRICE-m-AMT_CREDIT'] = df['AMT_GOODS_PRICE'] - df['AMT_CREDIT']
df['AMT_GOODS_PRICE-m-AMT_CREDIT-d-AMT_INCOME_TOTAL'] = df['AMT_GOODS_PRICE-m-AMT_CREDIT'] / df['AMT_INCOME_TOTAL']
df['age_finish_payment'] = df['DAYS_BIRTH'].abs() + (df['AMT_CREDIT-d-AMT_ANNUITY']*30)
# df['age_finish_payment'] = (df['DAYS_BIRTH']/-365) + df['credit-d-annuity']
df.loc[df['DAYS_EMPLOYED']==365243, 'DAYS_EMPLOYED'] = np.nan
df['DAYS_EMPLOYED-m-DAYS_BIRTH'] = df['DAYS_EMPLOYED'] - df['DAYS_BIRTH']
df['DAYS_REGISTRATION-m-DAYS_BIRTH'] = df['DAYS_REGISTRATION'] - df['DAYS_BIRTH']
df['DAYS_ID_PUBLISH-m-DAYS_BIRTH'] = df['DAYS_ID_PUBLISH'] - df['DAYS_BIRTH']
df['DAYS_LAST_PHONE_CHANGE-m-DAYS_BIRTH'] = df['DAYS_LAST_PHONE_CHANGE'] - df['DAYS_BIRTH']
df['DAYS_REGISTRATION-m-DAYS_EMPLOYED'] = df['DAYS_REGISTRATION'] - df['DAYS_EMPLOYED']
df['DAYS_ID_PUBLISH-m-DAYS_EMPLOYED'] = df['DAYS_ID_PUBLISH'] - df['DAYS_EMPLOYED']
df['DAYS_LAST_PHONE_CHANGE-m-DAYS_EMPLOYED'] = df['DAYS_LAST_PHONE_CHANGE'] - df['DAYS_EMPLOYED']
df['DAYS_ID_PUBLISH-m-DAYS_REGISTRATION'] = df['DAYS_ID_PUBLISH'] - df['DAYS_REGISTRATION']
df['DAYS_LAST_PHONE_CHANGE-m-DAYS_REGISTRATION'] = df['DAYS_LAST_PHONE_CHANGE'] - df['DAYS_REGISTRATION']
df['DAYS_LAST_PHONE_CHANGE-m-DAYS_ID_PUBLISH'] = df['DAYS_LAST_PHONE_CHANGE'] - df['DAYS_ID_PUBLISH']
col = ['DAYS_EMPLOYED-m-DAYS_BIRTH',
'DAYS_REGISTRATION-m-DAYS_BIRTH',
'DAYS_ID_PUBLISH-m-DAYS_BIRTH',
'DAYS_LAST_PHONE_CHANGE-m-DAYS_BIRTH',
'DAYS_REGISTRATION-m-DAYS_EMPLOYED',
'DAYS_ID_PUBLISH-m-DAYS_EMPLOYED',
'DAYS_LAST_PHONE_CHANGE-m-DAYS_EMPLOYED',
'DAYS_ID_PUBLISH-m-DAYS_REGISTRATION',
'DAYS_LAST_PHONE_CHANGE-m-DAYS_REGISTRATION',
'DAYS_LAST_PHONE_CHANGE-m-DAYS_ID_PUBLISH'
]
col_comb = list(combinations(col, 2))
for i,j in col_comb:
df[f'{i}-d-{j}'] = df[i] / df[j]
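# The pairwise loop above emits one ratio column per unordered pair from `col`;
# combinations(['a', 'b', 'c'], 2) yields ('a','b'), ('a','c'), ('b','c'), so the
# first generated name is 'DAYS_EMPLOYED-m-DAYS_BIRTH-d-DAYS_REGISTRATION-m-DAYS_BIRTH'.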
df['DAYS_EMPLOYED-d-DAYS_BIRTH'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH']
df['DAYS_REGISTRATION-d-DAYS_BIRTH'] = df['DAYS_REGISTRATION'] / df['DAYS_BIRTH']
df['DAYS_ID_PUBLISH-d-DAYS_BIRTH'] = df['DAYS_ID_PUBLISH'] / df['DAYS_BIRTH']
df['DAYS_LAST_PHONE_CHANGE-d-DAYS_BIRTH'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_BIRTH']
df['DAYS_REGISTRATION-d-DAYS_EMPLOYED'] = df['DAYS_REGISTRATION'] / df['DAYS_EMPLOYED']
df['DAYS_ID_PUBLISH-d-DAYS_EMPLOYED'] = df['DAYS_ID_PUBLISH'] / df['DAYS_EMPLOYED']
df['DAYS_LAST_PHONE_CHANGE-d-DAYS_EMPLOYED'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_EMPLOYED']
df['DAYS_ID_PUBLISH-d-DAYS_REGISTRATION'] = df['DAYS_ID_PUBLISH'] / df['DAYS_REGISTRATION']
df['DAYS_LAST_PHONE_CHANGE-d-DAYS_REGISTRATION'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_REGISTRATION']
df['DAYS_LAST_PHONE_CHANGE-d-DAYS_ID_PUBLISH'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_ID_PUBLISH']
df['OWN_CAR_AGE-d-DAYS_BIRTH'] = (df['OWN_CAR_AGE']*(-365)) / df['DAYS_BIRTH']
df['OWN_CAR_AGE-m-DAYS_BIRTH'] = df['DAYS_BIRTH'] + (df['OWN_CAR_AGE']*365)
df['OWN_CAR_AGE-d-DAYS_EMPLOYED'] = df['OWN_CAR_AGE'] / df['DAYS_EMPLOYED']
df['OWN_CAR_AGE-m-DAYS_EMPLOYED'] = df['DAYS_EMPLOYED'] + (df['OWN_CAR_AGE']*365)
df['cnt_adults'] = df['CNT_FAM_MEMBERS'] - df['CNT_CHILDREN']
df['CNT_CHILDREN-d-CNT_FAM_MEMBERS'] = df['CNT_CHILDREN'] / df['CNT_FAM_MEMBERS']
df['income_per_adult'] = df['AMT_INCOME_TOTAL'] / df['cnt_adults']
# df.loc[df['CNT_CHILDREN']==0, 'CNT_CHILDREN'] = np.nan
df['AMT_INCOME_TOTAL-d-CNT_CHILDREN'] = df['AMT_INCOME_TOTAL'] / (df['CNT_CHILDREN']+0.000001)
df['AMT_CREDIT-d-CNT_CHILDREN'] = df['AMT_CREDIT'] / (df['CNT_CHILDREN']+0.000001)
df['AMT_ANNUITY-d-CNT_CHILDREN'] = df['AMT_ANNUITY'] / (df['CNT_CHILDREN']+0.000001)
df['AMT_GOODS_PRICE-d-CNT_CHILDREN'] = df['AMT_GOODS_PRICE'] / (df['CNT_CHILDREN']+0.000001)
df['AMT_INCOME_TOTAL-d-cnt_adults'] = df['AMT_INCOME_TOTAL'] / df['cnt_adults']
df['AMT_CREDIT-d-cnt_adults'] = df['AMT_CREDIT'] / df['cnt_adults']
df['AMT_ANNUITY-d-cnt_adults'] = df['AMT_ANNUITY'] / df['cnt_adults']
df['AMT_GOODS_PRICE-d-cnt_adults'] = df['AMT_GOODS_PRICE'] / df['cnt_adults']
df['AMT_INCOME_TOTAL-d-CNT_FAM_MEMBERS'] = df['AMT_INCOME_TOTAL'] / df['CNT_FAM_MEMBERS']
df['AMT_CREDIT-d-CNT_FAM_MEMBERS'] = df['AMT_CREDIT'] / df['CNT_FAM_MEMBERS']
df['AMT_ANNUITY-d-CNT_FAM_MEMBERS'] = df['AMT_ANNUITY'] / df['CNT_FAM_MEMBERS']
df['AMT_GOODS_PRICE-d-CNT_FAM_MEMBERS'] = df['AMT_GOODS_PRICE'] / df['CNT_FAM_MEMBERS']
# EXT_SOURCE_x
df['EXT_SOURCES_prod'] = df['EXT_SOURCE_1'] * df['EXT_SOURCE_2'] * df['EXT_SOURCE_3']
df['EXT_SOURCES_sum'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].sum(axis=1)
df['EXT_SOURCES_sum'] = df['EXT_SOURCES_sum'].fillna(df['EXT_SOURCES_sum'].mean())
df['EXT_SOURCES_mean'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis=1)
df['EXT_SOURCES_mean'] = df['EXT_SOURCES_mean'].fillna(df['EXT_SOURCES_mean'].mean())
df['EXT_SOURCES_std'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis=1)
df['EXT_SOURCES_std'] = df['EXT_SOURCES_std'].fillna(df['EXT_SOURCES_std'].mean())
df['EXT_SOURCES_1-2-3'] = df['EXT_SOURCE_1'] - df['EXT_SOURCE_2'] - df['EXT_SOURCE_3']
df['EXT_SOURCES_2-1-3'] = df['EXT_SOURCE_2'] - df['EXT_SOURCE_1'] - df['EXT_SOURCE_3']
df['EXT_SOURCES_1-2'] = df['EXT_SOURCE_1'] - df['EXT_SOURCE_2']
df['EXT_SOURCES_2-3'] = df['EXT_SOURCE_2'] - df['EXT_SOURCE_3']
df['EXT_SOURCES_1-3'] = df['EXT_SOURCE_1'] - df['EXT_SOURCE_3']
# =========
# https://www.kaggle.com/jsaguiar/updated-0-792-lb-lightgbm-with-simple-features/code
# =========
df['DAYS_EMPLOYED_PERC'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH']
df['INCOME_PER_PERSON'] = df['AMT_INCOME_TOTAL'] / df['CNT_FAM_MEMBERS']
df['PAYMENT_RATE'] = df['AMT_ANNUITY'] / df['AMT_CREDIT']
# =========
# https://www.kaggle.com/poohtls/fork-of-fork-lightgbm-with-simple-features/code
# =========
docs = [_f for _f in df.columns if 'FLAG_DOC' in _f]
live = [_f for _f in df.columns if ('FLAG_' in _f) & ('FLAG_DOC' not in _f) & ('_FLAG_' not in _f)]
inc_by_org = df[['AMT_INCOME_TOTAL', 'ORGANIZATION_TYPE']].groupby('ORGANIZATION_TYPE').median()['AMT_INCOME_TOTAL']
df['alldocs_kurt'] = df[docs].kurtosis(axis=1)
df['alldocs_skew'] = df[docs].skew(axis=1)
df['alldocs_mean'] = df[docs].mean(axis=1)
df['alldocs_sum'] = df[docs].sum(axis=1)
df['alldocs_std'] = df[docs].std(axis=1)
df['NEW_LIVE_IND_SUM'] = df[live].sum(axis=1)
df['NEW_INC_PER_CHLD'] = df['AMT_INCOME_TOTAL'] / (1 + df['CNT_CHILDREN'])
df['NEW_INC_BY_ORG'] = df['ORGANIZATION_TYPE'].map(inc_by_org)
df['NEW_ANNUITY_TO_INCOME_RATIO'] = df['AMT_ANNUITY'] / (1 + df['AMT_INCOME_TOTAL'])
df['NEW_CAR_TO_BIRTH_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_BIRTH']
df['NEW_CAR_TO_EMPLOY_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_EMPLOYED']
df['NEW_PHONE_TO_BIRTH_RATIO'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_BIRTH']
df['NEW_PHONE_TO_EMPLOYED_RATIO'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_EMPLOYED']
# =============================================================================
# Maxwell features
# =============================================================================
bdg_avg = df.filter(regex='_AVG$').columns
bdg_mode = df.filter(regex='_MODE$').columns
bdg_medi = df.filter(regex='_MEDI$').columns[:len(bdg_avg)] # ignore FONDKAPREMONT_MODE...
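# Editor's note: the [:len(bdg_avg)] slice looks like it was intended for bdg_mode
# (to drop categorical *_MODE columns such as FONDKAPREMONT_MODE); the *_MEDI
# columns are already all numeric, so slicing them is likely a no-op.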
df['building_score_avg_mean'] = df[bdg_avg].mean(1)
df['building_score_avg_std'] = df[bdg_avg].std(1)
df['building_score_avg_sum'] = df[bdg_avg].sum(1)
df['building_score_mode_mean'] = df[bdg_mode].mean(1)
df['building_score_mode_std'] = df[bdg_mode].std(1)
df['building_score_mode_sum'] = df[bdg_mode].sum(1)
df['building_score_medi_mean'] = df[bdg_medi].mean(1)
df['building_score_medi_std'] = df[bdg_medi].std(1)
df['building_score_medi_sum'] = df[bdg_medi].sum(1)
df['maxwell_feature_1'] = (df['EXT_SOURCE_1'] * df['EXT_SOURCE_3']) ** (1 / 2)
df.replace(np.inf, np.nan, inplace=True) # TODO: any other plan?
df.replace(-np.inf, np.nan, inplace=True)
return
df = pd.read_csv('../input/application_train.csv.zip')
f1(df)
utils.to_pickles(df, '../data/train', utils.SPLIT_SIZE)
utils.to_pickles(df[['TARGET']], '../data/label', utils.SPLIT_SIZE)
df = pd.read_csv('../input/application_test.csv.zip')
f1(df)
utils.to_pickles(df, '../data/test', utils.SPLIT_SIZE)
df[['SK_ID_CURR']].to_pickle('../data/sub.p')
elif p==1:
# =============================================================================
# prev
# =============================================================================
"""
df = utils.read_pickles('../data/previous_application')
"""
df = pd.merge(pd.read_csv('../data/prev_new_v4.csv.gz'),
get_trte(), on='SK_ID_CURR', how='left')
# df = pd.merge(pd.read_csv('../input/previous_application.csv.zip'),
# get_trte(), on='SK_ID_CURR', how='left')
prep_prev(df)
df['FLAG_LAST_APPL_PER_CONTRACT'] = (df['FLAG_LAST_APPL_PER_CONTRACT']=='Y')*1
# day
for c in ['DAYS_FIRST_DRAWING', 'DAYS_FIRST_DUE', 'DAYS_LAST_DUE_1ST_VERSION',
'DAYS_LAST_DUE', 'DAYS_TERMINATION']:
df.loc[df[c]==365243, c] = np.nan
df['days_fdue-m-fdrw'] = df['DAYS_FIRST_DUE'] - df['DAYS_FIRST_DRAWING']
df['days_ldue1-m-fdrw'] = df['DAYS_LAST_DUE_1ST_VERSION'] - df['DAYS_FIRST_DRAWING']
df['days_ldue-m-fdrw'] = df['DAYS_LAST_DUE'] - df['DAYS_FIRST_DRAWING'] # total span
df['days_trm-m-fdrw'] = df['DAYS_TERMINATION'] - df['DAYS_FIRST_DRAWING']
df['days_ldue1-m-fdue'] = df['DAYS_LAST_DUE_1ST_VERSION'] - df['DAYS_FIRST_DUE']
df['days_ldue-m-fdue'] = df['DAYS_LAST_DUE'] - df['DAYS_FIRST_DUE']
df['days_trm-m-fdue'] = df['DAYS_TERMINATION'] - df['DAYS_FIRST_DUE']
df['days_ldue-m-ldue1'] = df['DAYS_LAST_DUE'] - df['DAYS_LAST_DUE_1ST_VERSION']
df['days_trm-m-ldue1'] = df['DAYS_TERMINATION'] - df['DAYS_LAST_DUE_1ST_VERSION']
df['days_trm-m-ldue'] = df['DAYS_TERMINATION'] - df['DAYS_LAST_DUE']
# money
df['total_debt'] = df['AMT_ANNUITY'] * df['CNT_PAYMENT']
df['AMT_CREDIT-d-total_debt'] = df['AMT_CREDIT'] / df['total_debt']
df['AMT_GOODS_PRICE-d-total_debt'] = df['AMT_GOODS_PRICE'] / df['total_debt']
df['AMT_GOODS_PRICE-d-AMT_CREDIT'] = df['AMT_GOODS_PRICE'] / df['AMT_CREDIT']
# app & money
df['AMT_ANNUITY-d-app_AMT_INCOME_TOTAL'] = df['AMT_ANNUITY'] / df['app_AMT_INCOME_TOTAL']
df['AMT_APPLICATION-d-app_AMT_INCOME_TOTAL'] = df['AMT_APPLICATION'] / df['app_AMT_INCOME_TOTAL']
df['AMT_CREDIT-d-app_AMT_INCOME_TOTAL'] = df['AMT_CREDIT'] / df['app_AMT_INCOME_TOTAL']
df['AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL'] = df['AMT_GOODS_PRICE'] / df['app_AMT_INCOME_TOTAL']
df['AMT_ANNUITY-m-app_AMT_INCOME_TOTAL'] = df['AMT_ANNUITY'] - df['app_AMT_INCOME_TOTAL']
df['AMT_APPLICATION-m-app_AMT_INCOME_TOTAL'] = df['AMT_APPLICATION'] - df['app_AMT_INCOME_TOTAL']
df['AMT_CREDIT-m-app_AMT_INCOME_TOTAL'] = df['AMT_CREDIT'] - df['app_AMT_INCOME_TOTAL']
df['AMT_GOODS_PRICE-m-app_AMT_INCOME_TOTAL'] = df['AMT_GOODS_PRICE'] - df['app_AMT_INCOME_TOTAL']
df['AMT_ANNUITY-d-app_AMT_CREDIT'] = df['AMT_ANNUITY'] / df['app_AMT_CREDIT']
df['AMT_APPLICATION-d-app_AMT_CREDIT'] = df['AMT_APPLICATION'] / df['app_AMT_CREDIT']
df['AMT_CREDIT-d-app_AMT_CREDIT'] = df['AMT_CREDIT'] / df['app_AMT_CREDIT']
df['AMT_GOODS_PRICE-d-app_AMT_CREDIT'] = df['AMT_GOODS_PRICE'] / df['app_AMT_CREDIT']
df['AMT_ANNUITY-m-app_AMT_CREDIT'] = df['AMT_ANNUITY'] - df['app_AMT_CREDIT']
df['AMT_APPLICATION-m-app_AMT_CREDIT'] = df['AMT_APPLICATION'] - df['app_AMT_CREDIT']
df['AMT_CREDIT-m-app_AMT_CREDIT'] = df['AMT_CREDIT'] - df['app_AMT_CREDIT']
df['AMT_GOODS_PRICE-m-app_AMT_CREDIT'] = df['AMT_GOODS_PRICE'] - df['app_AMT_CREDIT']
df['AMT_ANNUITY-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL'] = (df['AMT_ANNUITY'] - df['app_AMT_CREDIT']) / df['app_AMT_INCOME_TOTAL']
df['AMT_APPLICATION-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL'] = (df['AMT_APPLICATION'] - df['app_AMT_CREDIT']) / df['app_AMT_INCOME_TOTAL']
df['AMT_CREDIT-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL'] = (df['AMT_CREDIT'] - df['app_AMT_CREDIT']) / df['app_AMT_INCOME_TOTAL']
df['AMT_GOODS_PRICE-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL'] = (df['AMT_GOODS_PRICE'] - df['app_AMT_CREDIT']) / df['app_AMT_INCOME_TOTAL']
df['AMT_ANNUITY-d-app_AMT_ANNUITY'] = df['AMT_ANNUITY'] / df['app_AMT_ANNUITY']
df['AMT_APPLICATION-d-app_AMT_ANNUITY'] = df['AMT_APPLICATION'] / df['app_AMT_ANNUITY']
df['AMT_CREDIT-d-app_AMT_ANNUITY'] = df['AMT_CREDIT'] / df['app_AMT_ANNUITY']
df['AMT_GOODS_PRICE-d-app_AMT_ANNUITY'] = df['AMT_GOODS_PRICE'] / df['app_AMT_ANNUITY']
df['AMT_ANNUITY-m-app_AMT_ANNUITY'] = df['AMT_ANNUITY'] - df['app_AMT_ANNUITY']
df['AMT_APPLICATION-m-app_AMT_ANNUITY'] = df['AMT_APPLICATION'] - df['app_AMT_ANNUITY']
df['AMT_CREDIT-m-app_AMT_ANNUITY'] = df['AMT_CREDIT'] - df['app_AMT_ANNUITY']
df['AMT_GOODS_PRICE-m-app_AMT_ANNUITY'] = df['AMT_GOODS_PRICE'] - df['app_AMT_ANNUITY']
df['AMT_ANNUITY-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL'] = (df['AMT_ANNUITY'] - df['app_AMT_ANNUITY']) / df['app_AMT_INCOME_TOTAL']
df['AMT_APPLICATION-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL'] = (df['AMT_APPLICATION'] - df['app_AMT_ANNUITY']) / df['app_AMT_INCOME_TOTAL']
df['AMT_CREDIT-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL'] = (df['AMT_CREDIT'] - df['app_AMT_ANNUITY']) / df['app_AMT_INCOME_TOTAL']
df['AMT_GOODS_PRICE-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL'] = (df['AMT_GOODS_PRICE'] - df['app_AMT_ANNUITY']) / df['app_AMT_INCOME_TOTAL']
df['AMT_ANNUITY-d-app_AMT_GOODS_PRICE'] = df['AMT_ANNUITY'] / df['app_AMT_GOODS_PRICE']
df['AMT_APPLICATION-d-app_AMT_GOODS_PRICE'] = df['AMT_APPLICATION'] / df['app_AMT_GOODS_PRICE']
df['AMT_CREDIT-d-app_AMT_GOODS_PRICE'] = df['AMT_CREDIT'] / df['app_AMT_GOODS_PRICE']
df['AMT_GOODS_PRICE-d-app_AMT_GOODS_PRICE'] = df['AMT_GOODS_PRICE'] / df['app_AMT_GOODS_PRICE']
df['AMT_ANNUITY-m-app_AMT_GOODS_PRICE'] = df['AMT_ANNUITY'] - df['app_AMT_GOODS_PRICE']
df['AMT_APPLICATION-m-app_AMT_GOODS_PRICE'] = df['AMT_APPLICATION'] - df['app_AMT_GOODS_PRICE']
df['AMT_CREDIT-m-app_AMT_GOODS_PRICE'] = df['AMT_CREDIT'] - df['app_AMT_GOODS_PRICE']
df['AMT_GOODS_PRICE-m-app_AMT_GOODS_PRICE'] = df['AMT_GOODS_PRICE'] - df['app_AMT_GOODS_PRICE']
df['AMT_ANNUITY-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL'] = (df['AMT_ANNUITY'] - df['app_AMT_GOODS_PRICE']) / df['app_AMT_INCOME_TOTAL']
df['AMT_APPLICATION-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL'] = (df['AMT_APPLICATION'] - df['app_AMT_GOODS_PRICE']) / df['app_AMT_INCOME_TOTAL']
df['AMT_CREDIT-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL'] = (df['AMT_CREDIT'] - df['app_AMT_GOODS_PRICE']) / df['app_AMT_INCOME_TOTAL']
df['AMT_GOODS_PRICE-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL'] = (df['AMT_GOODS_PRICE'] - df['app_AMT_GOODS_PRICE']) / df['app_AMT_INCOME_TOTAL']
# nejumi
f_name='nejumi'; init_rate=0.9; n_iter=500
df['AMT_ANNUITY_d_AMT_CREDIT_temp'] = df.AMT_ANNUITY / df.AMT_CREDIT
df[f_name] = df['AMT_ANNUITY_d_AMT_CREDIT_temp']*((1 + init_rate)**df.CNT_PAYMENT - 1)/((1 + init_rate)**df.CNT_PAYMENT)
for i in range(n_iter):
df[f_name] = df['AMT_ANNUITY_d_AMT_CREDIT_temp']*((1 + df[f_name])**df.CNT_PAYMENT - 1)/((1 + df[f_name])**df.CNT_PAYMENT)
df.drop(['AMT_ANNUITY_d_AMT_CREDIT_temp'], axis=1, inplace=True)
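# The loop above appears to solve the standard annuity equation for the implied
# per-instalment interest rate r:
#   AMT_ANNUITY / AMT_CREDIT = r * (1+r)^n / ((1+r)^n - 1),  n = CNT_PAYMENT,
# by fixed-point iteration starting from init_rate; 'nejumi' is that implied rate.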
df.sort_values(['SK_ID_CURR', 'DAYS_DECISION'], inplace=True)
df.reset_index(drop=True, inplace=True)
col = [
'total_debt',
'AMT_CREDIT-d-total_debt',
'AMT_GOODS_PRICE-d-total_debt',
'AMT_GOODS_PRICE-d-AMT_CREDIT',
'AMT_ANNUITY-d-app_AMT_INCOME_TOTAL',
'AMT_APPLICATION-d-app_AMT_INCOME_TOTAL',
'AMT_CREDIT-d-app_AMT_INCOME_TOTAL',
'AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL',
'AMT_ANNUITY-d-app_AMT_CREDIT',
'AMT_APPLICATION-d-app_AMT_CREDIT',
'AMT_CREDIT-d-app_AMT_CREDIT',
'AMT_GOODS_PRICE-d-app_AMT_CREDIT',
'AMT_ANNUITY-d-app_AMT_ANNUITY',
'AMT_APPLICATION-d-app_AMT_ANNUITY',
'AMT_CREDIT-d-app_AMT_ANNUITY',
'AMT_GOODS_PRICE-d-app_AMT_ANNUITY',
'AMT_ANNUITY-d-app_AMT_GOODS_PRICE',
'AMT_APPLICATION-d-app_AMT_GOODS_PRICE',
'AMT_CREDIT-d-app_AMT_GOODS_PRICE',
'AMT_GOODS_PRICE-d-app_AMT_GOODS_PRICE',
'AMT_ANNUITY-m-app_AMT_INCOME_TOTAL',
'AMT_APPLICATION-m-app_AMT_INCOME_TOTAL',
'AMT_CREDIT-m-app_AMT_INCOME_TOTAL',
'AMT_GOODS_PRICE-m-app_AMT_INCOME_TOTAL',
'AMT_ANNUITY-m-app_AMT_CREDIT',
'AMT_APPLICATION-m-app_AMT_CREDIT',
'AMT_CREDIT-m-app_AMT_CREDIT',
'AMT_GOODS_PRICE-m-app_AMT_CREDIT',
'AMT_ANNUITY-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL',
'AMT_APPLICATION-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL',
'AMT_CREDIT-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL',
'AMT_GOODS_PRICE-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL',
'AMT_ANNUITY-m-app_AMT_ANNUITY',
'AMT_APPLICATION-m-app_AMT_ANNUITY',
'AMT_CREDIT-m-app_AMT_ANNUITY',
'AMT_GOODS_PRICE-m-app_AMT_ANNUITY',
'AMT_ANNUITY-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL',
'AMT_APPLICATION-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL',
'AMT_CREDIT-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL',
'AMT_GOODS_PRICE-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL',
'AMT_ANNUITY-m-app_AMT_GOODS_PRICE',
'AMT_APPLICATION-m-app_AMT_GOODS_PRICE',
'AMT_CREDIT-m-app_AMT_GOODS_PRICE',
'AMT_GOODS_PRICE-m-app_AMT_GOODS_PRICE',
'AMT_ANNUITY-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL',
'AMT_APPLICATION-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL',
'AMT_CREDIT-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL',
'AMT_GOODS_PRICE-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL',
'nejumi'
]
def multi_prev(c):
ret_diff = []
ret_pctchng = []
key_bk = x_bk = None
for key, x in df[['SK_ID_CURR', c]].values:
# for key, x in tqdm(df[['SK_ID_CURR', c]].values, mininterval=30):
if key_bk is None:
ret_diff.append(None)
ret_pctchng.append(None)
else:
if key_bk == key:
ret_diff.append(x - x_bk)
ret_pctchng.append( (x_bk-x) / x_bk)
else:
ret_diff.append(None)
ret_pctchng.append(None)
key_bk = key
x_bk = x
ret_diff = pd.Series(ret_diff, name=f'{c}_diff')
ret_pctchng = pd.Series(ret_pctchng, name=f'{c}_pctchange')
ret = pd.concat([ret_diff, ret_pctchng], axis=1)
return ret
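# Rough pandas equivalent (sketch, assuming df stays sorted by
# ['SK_ID_CURR', 'DAYS_DECISION'] as above). Note the hand-rolled pct-change is
# (previous - current) / previous, i.e. the negative of Series.pct_change():
#   diff_ = df.groupby('SK_ID_CURR')[c].diff()
#   pct_  = -df.groupby('SK_ID_CURR')[c].pct_change()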
pool = Pool(len(col))
callback = pd.concat(pool.map(multi_prev, col), axis=1)
print('===== PREV ====')
print(callback.columns.tolist())
pool.close()
df = pd.concat([df, callback], axis=1)
# app & day
col_prev = ['DAYS_FIRST_DRAWING', 'DAYS_FIRST_DUE', 'DAYS_LAST_DUE_1ST_VERSION',
'DAYS_LAST_DUE', 'DAYS_TERMINATION']
for c1 in col_prev:
for c2 in col_app_day:
# print(f"'{c1}-m-{c2}',")
df[f'{c1}-m-{c2}'] = df[c1] - df[c2]
df[f'{c1}-d-{c2}'] = df[c1] / df[c2]
df['cnt_paid'] = df.apply(lambda x: min( np.ceil(x['DAYS_FIRST_DUE']/-30), x['CNT_PAYMENT'] ), axis=1)
df['cnt_paid_ratio'] = df['cnt_paid'] / df['CNT_PAYMENT']
df['cnt_unpaid'] = df['CNT_PAYMENT'] - df['cnt_paid']
df['amt_paid'] = df['AMT_ANNUITY'] * df['cnt_paid']
# df['amt_paid_ratio'] = df['amt_paid'] / df['total_debt'] # same as cnt_paid_ratio
df['amt_unpaid'] = df['total_debt'] - df['amt_paid']
df['active'] = (df['cnt_unpaid']>0)*1
df['completed'] = (df['cnt_unpaid']==0)*1
# future payment
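# For each month i ahead (1..max cnt_unpaid), future_payment_{i}m is AMT_ANNUITY
# while at least i instalments remain unpaid and NaN afterwards, spreading the
# outstanding debt over a per-month schedule.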
df_tmp = pd.DataFrame()
print('future payment')
rem_max = df['cnt_unpaid'].max() # 80
# rem_max = 1
df['cnt_unpaid_tmp'] = df['cnt_unpaid']
for i in range(int( rem_max )):
c = f'future_payment_{i+1}m'
df_tmp[c] = df['cnt_unpaid_tmp'].map(lambda x: min(x, 1)) * df['AMT_ANNUITY']
df_tmp.loc[df_tmp[c]==0, c] = np.nan
df['cnt_unpaid_tmp'] -= 1
df['cnt_unpaid_tmp'] = df['cnt_unpaid_tmp'].map(lambda x: max(x, 0))
# df['prev_future_payment_max'] = df.filter(regex='^prev_future_payment_').max(1)
del df['cnt_unpaid_tmp']
df = pd.concat([df, df_tmp], axis=1)
# past payment
df_tmp = pd.DataFrame()
print('past payment')
rem_max = df['cnt_paid'].max() # 72
df['cnt_paid_tmp'] = df['cnt_paid']
for i in range(int( rem_max )):
c = f'past_payment_{i+1}m'
df_tmp[c] = df['cnt_paid_tmp'].map(lambda x: min(x, 1)) * df['AMT_ANNUITY']
df_tmp.loc[df_tmp[c]==0, c] = np.nan
df['cnt_paid_tmp'] -= 1
df['cnt_paid_tmp'] = df['cnt_paid_tmp'].map(lambda x: max(x, 0))
# df['prev_past_payment_max'] = df.filter(regex='^prev_past_payment_').max(1)
del df['cnt_paid_tmp']
df = pd.concat([df, df_tmp], axis=1)
df['APP_CREDIT_PERC'] = df['AMT_APPLICATION'] / df['AMT_CREDIT']
#df.filter(regex='^amt_future_payment_')
df.replace(np.inf, np.nan, inplace=True) # TODO: any other plan?
df.replace(-np.inf, np.nan, inplace=True)
utils.to_pickles(df, '../data/previous_application', utils.SPLIT_SIZE)
elif p==2:
# =============================================================================
# POS
# =============================================================================
"""
df = utils.read_pickles('../data/POS_CASH_balance')
"""
df = pd.read_csv('../input/POS_CASH_balance.csv.zip')
# data cleansing!!!
## drop signed. sample SK_ID_PREV==1769939
df = df[df.NAME_CONTRACT_STATUS!='Signed']
## Zombie NAME_CONTRACT_STATUS=='Completed' and CNT_INSTALMENT_FUTURE!=0. 1134377
df.loc[(df.NAME_CONTRACT_STATUS=='Completed') & (df.CNT_INSTALMENT_FUTURE!=0), 'NAME_CONTRACT_STATUS'] = 'Active'
## CNT_INSTALMENT_FUTURE=0 and Active. sample SK_ID_PREV==1998905, 2174168
df.loc[(df.CNT_INSTALMENT_FUTURE==0) & (df.NAME_CONTRACT_STATUS=='Active'), 'NAME_CONTRACT_STATUS'] = 'Completed'
## remove duplicated CNT_INSTALMENT_FUTURE=0. sample SK_ID_PREV==2601827
df_0 = df[df['CNT_INSTALMENT_FUTURE']==0]
df_1 = df[df['CNT_INSTALMENT_FUTURE']>0]
df_0['NAME_CONTRACT_STATUS'] = 'Completed'
df_0.sort_values(['SK_ID_PREV', 'MONTHS_BALANCE'], ascending=[True, False], inplace=True)
df_0.drop_duplicates('SK_ID_PREV', keep='last', inplace=True)
df = pd.concat([df_0, df_1], ignore_index=True)
del df_0, df_1; gc.collect()
# TODO: end in active. 1002879
# df['CNT_INSTALMENT_FUTURE_min'] = df.groupby('SK_ID_PREV').CNT_INSTALMENT_FUTURE.transform('min')
# df['MONTHS_BALANCE_max'] = df.groupby('SK_ID_PREV').MONTHS_BALANCE.transform('max')
# df.loc[(df.CNT_INSTALMENT_FUTURE_min!=0) & (df.MONTHS_BALANCE_max!=-1)]
df['CNT_INSTALMENT-m-CNT_INSTALMENT_FUTURE'] = df['CNT_INSTALMENT'] - df['CNT_INSTALMENT_FUTURE']
df['CNT_INSTALMENT_FUTURE-d-CNT_INSTALMENT'] = df['CNT_INSTALMENT_FUTURE'] / df['CNT_INSTALMENT']
df.sort_values(['SK_ID_PREV', 'MONTHS_BALANCE'], inplace=True)
df.reset_index(drop=True, inplace=True)
col = ['CNT_INSTALMENT_FUTURE', 'SK_DPD', 'SK_DPD_DEF']
def multi_pos(c):
ret_diff = []
ret_pctchng = []
key_bk = x_bk = None
for key, x in df[['SK_ID_PREV', c]].values:
# for key, x in tqdm(df[['SK_ID_CURR', c]].values, mininterval=30):
if key_bk is None:
ret_diff.append(None)
ret_pctchng.append(None)
else:
if key_bk == key:
ret_diff.append(x - x_bk)
ret_pctchng.append( (x_bk-x) / x_bk)
else:
ret_diff.append(None)
ret_pctchng.append(None)
key_bk = key
x_bk = x
ret_diff = pd.Series(ret_diff, name=f'{c}_diff')
ret_pctchng = pd.Series(ret_pctchng, name=f'{c}_pctchange')
ret = pd.concat([ret_diff, ret_pctchng], axis=1)
return ret
pool = Pool(len(col))
callback = pd.concat(pool.map(multi_pos, col), axis=1)
print('===== POS ====')
print(callback.columns.tolist())
pool.close()
df = pd.concat([df, callback], axis=1)
df['SK_DPD-m-SK_DPD_DEF'] = df['SK_DPD'] - df['SK_DPD_DEF']
# df['SK_DPD_diff_over0'] = (df['SK_DPD_diff']>0)*1
# df['SK_DPD_diff_over5'] = (df['SK_DPD_diff']>5)*1
# df['SK_DPD_diff_over10'] = (df['SK_DPD_diff']>10)*1
# df['SK_DPD_diff_over15'] = (df['SK_DPD_diff']>15)*1
# df['SK_DPD_diff_over20'] = (df['SK_DPD_diff']>20)*1
# df['SK_DPD_diff_over25'] = (df['SK_DPD_diff']>25)*1
df.replace(np.inf, np.nan, inplace=True) # TODO: any other plan?
df.replace(-np.inf, np.nan, inplace=True)
utils.to_pickles(df, '../data/POS_CASH_balance', utils.SPLIT_SIZE)
elif p==3:
# =============================================================================
# ins
# =============================================================================
"""
df = utils.read_pickles('../data/installments_payments')
"""
df = pd.read_csv('../input/installments_payments.csv.zip')
trte = get_trte()
df = pd.merge(df, trte, on='SK_ID_CURR', how='left')
prev = pd.read_csv('../input/previous_application.csv.zip',
usecols=['SK_ID_PREV', 'CNT_PAYMENT', 'AMT_ANNUITY'])
prev['CNT_PAYMENT'].replace(0, np.nan, inplace=True)
# prep_prev(prev)
df = pd.merge(df, prev, on='SK_ID_PREV', how='left')
del trte, prev; gc.collect()
df['month'] = (df['DAYS_ENTRY_PAYMENT']/30).map(np.floor)
# app
df['DAYS_ENTRY_PAYMENT-m-app_DAYS_BIRTH'] = df['DAYS_ENTRY_PAYMENT'] - df['app_DAYS_BIRTH']
df['DAYS_ENTRY_PAYMENT-m-app_DAYS_EMPLOYED'] = df['DAYS_ENTRY_PAYMENT'] - df['app_DAYS_EMPLOYED']
df['DAYS_ENTRY_PAYMENT-m-app_DAYS_REGISTRATION'] = df['DAYS_ENTRY_PAYMENT'] - df['app_DAYS_REGISTRATION']
df['DAYS_ENTRY_PAYMENT-m-app_DAYS_ID_PUBLISH'] = df['DAYS_ENTRY_PAYMENT'] - df['app_DAYS_ID_PUBLISH']
df['DAYS_ENTRY_PAYMENT-m-app_DAYS_LAST_PHONE_CHANGE'] = df['DAYS_ENTRY_PAYMENT'] - df['app_DAYS_LAST_PHONE_CHANGE']
df['AMT_PAYMENT-d-app_AMT_INCOME_TOTAL'] = df['AMT_PAYMENT'] / df['app_AMT_INCOME_TOTAL']
df['AMT_PAYMENT-d-app_AMT_CREDIT'] = df['AMT_PAYMENT'] / df['app_AMT_CREDIT']
df['AMT_PAYMENT-d-app_AMT_ANNUITY'] = df['AMT_PAYMENT'] / df['app_AMT_ANNUITY']
df['AMT_PAYMENT-d-app_AMT_GOODS_PRICE'] = df['AMT_PAYMENT'] / df['app_AMT_GOODS_PRICE']
# prev
df['NUM_INSTALMENT_ratio'] = df['NUM_INSTALMENT_NUMBER'] / df['CNT_PAYMENT']
df['AMT_PAYMENT-d-AMT_ANNUITY'] = df['AMT_PAYMENT'] / df['AMT_ANNUITY']
df['days_delayed_payment'] = df['DAYS_ENTRY_PAYMENT'] - df['DAYS_INSTALMENT']
df['amt_ratio'] = df['AMT_PAYMENT'] / df['AMT_INSTALMENT']
df['amt_delta'] = df['AMT_INSTALMENT'] - df['AMT_PAYMENT']
df['days_weighted_delay'] = df['amt_ratio'] * df['days_delayed_payment']
# Days past due and days before due (no negative values)
df['DPD'] = df['DAYS_ENTRY_PAYMENT'] - df['DAYS_INSTALMENT']
df['DBD'] = df['DAYS_INSTALMENT'] - df['DAYS_ENTRY_PAYMENT']
df['DPD'] = df['DPD'].apply(lambda x: x if x > 0 else 0)
df['DBD'] = df['DBD'].apply(lambda x: x if x > 0 else 0)
decay = 0.0003 # decay rate per day
feature = f'days_weighted_delay_tsw3' # Time Series Weight
df[feature] = df['days_weighted_delay'] * (1 + (df['DAYS_ENTRY_PAYMENT']*decay) )
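# DAYS_ENTRY_PAYMENT is negative (days before application), so the weight
# 1 + DAYS_ENTRY_PAYMENT*decay shrinks older payments: e.g. a payment made
# 1000 days before application is weighted 1 - 0.3 = 0.7.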
# df_tmp = pd.DataFrame()
# for i in range(0, 50, 5):
# c1 = f'delayed_day_over{i}'
# df_tmp[c1] = (df['days_delayed_payment']>i)*1
#
# c2 = f'delayed_money_{i}'
# df_tmp[c2] = df_tmp[c1] * df.AMT_PAYMENT
#
# c3 = f'delayed_money_ratio_{i}'
# df_tmp[c3] = df_tmp[c1] * df.amt_ratio
#
# c1 = f'not-delayed_day_{i}'
# df_tmp[c1] = (df['days_delayed_payment']<=i)*1
#
# c2 = f'not-delayed_money_{i}'
# df_tmp[c2] = df_tmp[c1] * df.AMT_PAYMENT
#
# c3 = f'not-delayed_money_ratio_{i}'
# df_tmp[c3] = df_tmp[c1] * df.amt_ratio
#
# df = pd.concat([df, df_tmp], axis=1)
df.replace(np.inf, np.nan, inplace=True) # TODO: any other plan?
df.replace(-np.inf, np.nan, inplace=True)
utils.to_pickles(df, '../data/installments_payments', utils.SPLIT_SIZE)
utils.to_pickles(df[df['days_delayed_payment']>0].reset_index(drop=True),
'../data/installments_payments_delay', utils.SPLIT_SIZE)
utils.to_pickles(df[df['days_delayed_payment']<=0].reset_index(drop=True),
'../data/installments_payments_notdelay', utils.SPLIT_SIZE)
elif p==4:
# =============================================================================
# credit card
# =============================================================================
"""
df = utils.read_pickles('../data/credit_card_balance')
"""
df = pd.read_csv('../input/credit_card_balance.csv.zip')
df = pd.merge(df, get_trte(), on='SK_ID_CURR', how='left')
df[col_app_day] = df[col_app_day]/30
# app
df['AMT_BALANCE-d-app_AMT_INCOME_TOTAL'] = df['AMT_BALANCE'] / df['app_AMT_INCOME_TOTAL']
df['AMT_BALANCE-d-app_AMT_CREDIT'] = df['AMT_BALANCE'] / df['app_AMT_CREDIT']
df['AMT_BALANCE-d-app_AMT_ANNUITY'] = df['AMT_BALANCE'] / df['app_AMT_ANNUITY']
df['AMT_BALANCE-d-app_AMT_GOODS_PRICE'] = df['AMT_BALANCE'] / df['app_AMT_GOODS_PRICE']
df['AMT_DRAWINGS_CURRENT-d-app_AMT_INCOME_TOTAL'] = df['AMT_DRAWINGS_CURRENT'] / df['app_AMT_INCOME_TOTAL']
df['AMT_DRAWINGS_CURRENT-d-app_AMT_CREDIT'] = df['AMT_DRAWINGS_CURRENT'] / df['app_AMT_CREDIT']
df['AMT_DRAWINGS_CURRENT-d-app_AMT_ANNUITY'] = df['AMT_DRAWINGS_CURRENT'] / df['app_AMT_ANNUITY']
df['AMT_DRAWINGS_CURRENT-d-app_AMT_GOODS_PRICE'] = df['AMT_DRAWINGS_CURRENT'] / df['app_AMT_GOODS_PRICE']
for c in col_app_day:
print(f'MONTHS_BALANCE-m-{c}')
df[f'MONTHS_BALANCE-m-{c}'] = df['MONTHS_BALANCE'] - df[c]
df['AMT_BALANCE-d-AMT_CREDIT_LIMIT_ACTUAL'] = df['AMT_BALANCE'] / df['AMT_CREDIT_LIMIT_ACTUAL']
df['AMT_BALANCE-d-AMT_DRAWINGS_CURRENT'] = df['AMT_BALANCE'] / df['AMT_DRAWINGS_CURRENT']
df['AMT_DRAWINGS_CURRENT-d-AMT_CREDIT_LIMIT_ACTUAL'] = df['AMT_DRAWINGS_CURRENT'] / df['AMT_CREDIT_LIMIT_ACTUAL']
df['AMT_TOTAL_RECEIVABLE-m-AMT_RECEIVABLE_PRINCIPAL'] = df['AMT_TOTAL_RECEIVABLE'] - df['AMT_RECEIVABLE_PRINCIPAL']
df['AMT_RECEIVABLE_PRINCIPAL-d-AMT_TOTAL_RECEIVABLE'] = df['AMT_RECEIVABLE_PRINCIPAL'] / df['AMT_TOTAL_RECEIVABLE']
df['SK_DPD-m-SK_DPD_DEF'] = df['SK_DPD'] - df['SK_DPD_DEF']
df['SK_DPD-m-SK_DPD_DEF_over0'] = (df['SK_DPD-m-SK_DPD_DEF']>0)*1
df['SK_DPD-m-SK_DPD_DEF_over5'] = (df['SK_DPD-m-SK_DPD_DEF']>5)*1
df['SK_DPD-m-SK_DPD_DEF_over10'] = (df['SK_DPD-m-SK_DPD_DEF']>10)*1
df['SK_DPD-m-SK_DPD_DEF_over15'] = (df['SK_DPD-m-SK_DPD_DEF']>15)*1
df['SK_DPD-m-SK_DPD_DEF_over20'] = (df['SK_DPD-m-SK_DPD_DEF']>20)*1
df['SK_DPD-m-SK_DPD_DEF_over25'] = (df['SK_DPD-m-SK_DPD_DEF']>25)*1
col = ['AMT_BALANCE', 'AMT_CREDIT_LIMIT_ACTUAL', 'AMT_DRAWINGS_ATM_CURRENT',
'AMT_DRAWINGS_CURRENT', 'AMT_DRAWINGS_OTHER_CURRENT',
'AMT_DRAWINGS_POS_CURRENT', 'AMT_INST_MIN_REGULARITY',
'AMT_PAYMENT_CURRENT', 'AMT_PAYMENT_TOTAL_CURRENT',
'AMT_RECEIVABLE_PRINCIPAL', 'AMT_RECIVABLE', 'AMT_TOTAL_RECEIVABLE',
'CNT_DRAWINGS_ATM_CURRENT', 'CNT_DRAWINGS_CURRENT',
'CNT_DRAWINGS_OTHER_CURRENT', 'CNT_DRAWINGS_POS_CURRENT',
'CNT_INSTALMENT_MATURE_CUM', 'SK_DPD',
'SK_DPD_DEF', 'AMT_BALANCE-d-app_AMT_INCOME_TOTAL',
'AMT_BALANCE-d-app_AMT_CREDIT', 'AMT_BALANCE-d-app_AMT_ANNUITY',
'AMT_BALANCE-d-app_AMT_GOODS_PRICE', 'AMT_DRAWINGS_CURRENT-d-app_AMT_INCOME_TOTAL',
'AMT_DRAWINGS_CURRENT-d-app_AMT_CREDIT', 'AMT_DRAWINGS_CURRENT-d-app_AMT_ANNUITY',
'AMT_DRAWINGS_CURRENT-d-app_AMT_GOODS_PRICE', 'AMT_BALANCE-d-AMT_CREDIT_LIMIT_ACTUAL',
'AMT_BALANCE-d-AMT_DRAWINGS_CURRENT', 'AMT_DRAWINGS_CURRENT-d-AMT_CREDIT_LIMIT_ACTUAL',
'AMT_TOTAL_RECEIVABLE-m-AMT_RECEIVABLE_PRINCIPAL',
'AMT_RECEIVABLE_PRINCIPAL-d-AMT_TOTAL_RECEIVABLE'
]
df.sort_values(['SK_ID_PREV', 'MONTHS_BALANCE'], inplace=True)
df.reset_index(drop=True, inplace=True)
def multi_cre(c):
ret_diff = []
ret_pctchng = []
key_bk = x_bk = None
for key, x in df[['SK_ID_PREV', c]].values:
if key_bk is None:
ret_diff.append(None)
ret_pctchng.append(None)
else:
if key_bk == key:
ret_diff.append(x - x_bk)
ret_pctchng.append( (x_bk-x) / x_bk)
else:
ret_diff.append(None)
ret_pctchng.append(None)
key_bk = key
x_bk = x
ret_diff = pd.Series(ret_diff, name=f'{c}_diff')
ret_pctchng = pd.Series(ret_pctchng, name=f'{c}_pctchange')
ret = pd.concat([ret_diff, ret_pctchng], axis=1)
return ret
pool = Pool(len(col))
callback1 = pd.concat(pool.map(multi_cre, col), axis=1)
print('===== CRE ====')
col = callback1.columns.tolist()
print(col)
pool.close()
# callback1['SK_ID_PREV'] = df['SK_ID_PREV']
df = pd.concat([df, callback1], axis=1)
del callback1; gc.collect()
pool = Pool(10)
callback2 = pd.concat(pool.map(multi_cre, col), axis=1)
print('===== CRE ====')
col = callback2.columns.tolist()
print(col)
pool.close()
df = pd.concat([df, callback2], axis=1)
del callback2; gc.collect()
df.replace(np.inf, np.nan, inplace=True) # TODO: any other plan?
df.replace(-np.inf, np.nan, inplace=True)
utils.to_pickles(df, '../data/credit_card_balance', utils.SPLIT_SIZE)
elif p==5:
# =============================================================================
# bureau
# =============================================================================
df = pd.read_csv('../input/bureau.csv.zip')
df = pd.merge(df, get_trte(), on='SK_ID_CURR', how='left')
col_bure_money = ['AMT_CREDIT_SUM', 'AMT_CREDIT_SUM_DEBT',
'AMT_CREDIT_SUM_LIMIT', 'AMT_CREDIT_SUM_OVERDUE']
col_bure_day = ['DAYS_CREDIT', 'DAYS_CREDIT_ENDDATE', 'DAYS_ENDDATE_FACT']
# app
for c1 in col_bure_money:
for c2 in col_app_money:
# print(f"'{c1}-d-{c2}',")
df[f'{c1}-d-{c2}'] = df[c1] / df[c2]
for c1 in col_bure_day:
for c2 in col_app_day:
# print(f"'{c1}-m-{c2}',")
df[f'{c1}-m-{c2}'] = df[c1] - df[c2]
df[f'{c1}-d-{c2}'] = df[c1] / df[c2]
df['DAYS_CREDIT_ENDDATE-m-DAYS_CREDIT'] = df['DAYS_CREDIT_ENDDATE'] - df['DAYS_CREDIT']
df['DAYS_ENDDATE_FACT-m-DAYS_CREDIT'] = df['DAYS_ENDDATE_FACT'] - df['DAYS_CREDIT']
df['DAYS_ENDDATE_FACT-m-DAYS_CREDIT_ENDDATE'] = df['DAYS_ENDDATE_FACT'] - df['DAYS_CREDIT_ENDDATE']
df['DAYS_CREDIT_UPDATE-m-DAYS_CREDIT'] = df['DAYS_CREDIT_UPDATE'] - df['DAYS_CREDIT']
df['DAYS_CREDIT_UPDATE-m-DAYS_CREDIT_ENDDATE'] = df['DAYS_CREDIT_UPDATE'] - df['DAYS_CREDIT_ENDDATE']
df['DAYS_CREDIT_UPDATE-m-DAYS_ENDDATE_FACT'] = df['DAYS_CREDIT_UPDATE'] - df['DAYS_ENDDATE_FACT']
df['AMT_CREDIT_SUM-m-AMT_CREDIT_SUM_DEBT'] = df['AMT_CREDIT_SUM'] - df['AMT_CREDIT_SUM_DEBT']
df['AMT_CREDIT_SUM_DEBT-d-AMT_CREDIT_SUM'] = df['AMT_CREDIT_SUM_DEBT'] / df['AMT_CREDIT_SUM']
df['AMT_CREDIT_SUM-m-AMT_CREDIT_SUM_DEBT-d-AMT_CREDIT_SUM_LIMIT'] = df['AMT_CREDIT_SUM-m-AMT_CREDIT_SUM_DEBT'] / df['AMT_CREDIT_SUM_LIMIT']
df['AMT_CREDIT_SUM_DEBT-d-AMT_CREDIT_SUM_LIMIT'] = df['AMT_CREDIT_SUM_DEBT'] / df['AMT_CREDIT_SUM_LIMIT']
df['AMT_CREDIT_SUM_DEBT-p-AMT_CREDIT_SUM_LIMIT'] = df['AMT_CREDIT_SUM_DEBT'] + df['AMT_CREDIT_SUM_LIMIT']
df['AMT_CREDIT_SUM-d-debt-p-AMT_CREDIT_SUM_DEBT-p-AMT_CREDIT_SUM_LIMIT'] = df['AMT_CREDIT_SUM'] / df['AMT_CREDIT_SUM_DEBT-p-AMT_CREDIT_SUM_LIMIT']
col = ['AMT_CREDIT_MAX_OVERDUE', 'CNT_CREDIT_PROLONG',
'AMT_CREDIT_SUM', 'AMT_CREDIT_SUM_DEBT', 'AMT_CREDIT_SUM_LIMIT',
'AMT_CREDIT_SUM_OVERDUE', 'DAYS_CREDIT_UPDATE',
'AMT_ANNUITY', 'AMT_CREDIT_SUM-d-app_AMT_INCOME_TOTAL',
'AMT_CREDIT_SUM-d-app_AMT_CREDIT', 'AMT_CREDIT_SUM-d-app_AMT_ANNUITY',
'AMT_CREDIT_SUM-d-app_AMT_GOODS_PRICE',
'AMT_CREDIT_SUM_DEBT-d-app_AMT_INCOME_TOTAL',
'AMT_CREDIT_SUM_DEBT-d-app_AMT_CREDIT',
'AMT_CREDIT_SUM_DEBT-d-app_AMT_ANNUITY',
'AMT_CREDIT_SUM_DEBT-d-app_AMT_GOODS_PRICE',
'AMT_CREDIT_SUM_LIMIT-d-app_AMT_INCOME_TOTAL',
'AMT_CREDIT_SUM_LIMIT-d-app_AMT_CREDIT',
'AMT_CREDIT_SUM_LIMIT-d-app_AMT_ANNUITY',
'AMT_CREDIT_SUM_LIMIT-d-app_AMT_GOODS_PRICE',
'AMT_CREDIT_SUM_OVERDUE-d-app_AMT_INCOME_TOTAL',
'AMT_CREDIT_SUM_OVERDUE-d-app_AMT_CREDIT',
'AMT_CREDIT_SUM_OVERDUE-d-app_AMT_ANNUITY',
'AMT_CREDIT_SUM_OVERDUE-d-app_AMT_GOODS_PRICE',
'AMT_CREDIT_SUM-m-AMT_CREDIT_SUM_DEBT',
'AMT_CREDIT_SUM_DEBT-d-AMT_CREDIT_SUM',
'AMT_CREDIT_SUM-m-AMT_CREDIT_SUM_DEBT-d-AMT_CREDIT_SUM_LIMIT',
'AMT_CREDIT_SUM_DEBT-d-AMT_CREDIT_SUM_LIMIT',
'AMT_CREDIT_SUM_DEBT-p-AMT_CREDIT_SUM_LIMIT',
'AMT_CREDIT_SUM-d-debt-p-AMT_CREDIT_SUM_DEBT-p-AMT_CREDIT_SUM_LIMIT'
]
df.sort_values(['SK_ID_CURR', 'DAYS_CREDIT'], inplace=True)
df.reset_index(drop=True, inplace=True)
def multi_b(c):
ret_diff = []
ret_pctchng = []
key_bk = x_bk = None
for key, x in df[['SK_ID_CURR', c]].values:
# for key, x in tqdm(df[['SK_ID_CURR', c]].values, mininterval=30):
if key_bk is None:
ret_diff.append(None)
ret_pctchng.append(None)
else:
if key_bk == key:
ret_diff.append(x - x_bk)
ret_pctchng.append( (x_bk-x) / x_bk)
else:
ret_diff.append(None)
ret_pctchng.append(None)
key_bk = key
x_bk = x
ret_diff = pd.Series(ret_diff, name=f'{c}_diff')
ret_pctchng = pd.Series(ret_pctchng, name=f'{c}_pctchange')
ret =
completion: pd.concat([ret_diff, ret_pctchng], axis=1) | api: pandas.concat
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import pandas as pd
from analytics.fp_acceptance_percentage import FpAcceptance
from classifiers.svm_classifier import SvmClassifier
from dataset.biometric_dataset import BioDataSet
from dataset.min_max_scaling_operation import MinMaxScaling
from matplotlib.colors import ListedColormap
from mpl_toolkits.mplot3d import Axes3D
from metrics.confusion_matrix import ConfusionMatrix
from metrics.fcs import FCS
from metrics.roc_curve import RocCurve
from synth_data_gen.gauss_blob_generator import GaussBlob
from matplotlib.lines import Line2D
from analytics.dataoverlap_interval import OverLapInt
import os
from pathlib import Path
from sklearn.utils import shuffle
from dataset.outlier_removal import OutLierRemoval
import seaborn as sns
root_path = Path(__file__).parent.parent.parent.parent
hmog_in = os.path.join(root_path, 'raw_data\\hmog_dataset\\public_dataset')
hmog_out = os.path.join(root_path, 'processed_data\\hmog_touch\\df_example.csv')
scaled_data_path = os.path.join(root_path,
'experiment_results\\overlap_test\\df_ovr.csv')
data_metric_save_path = os.path.join(root_path,
'experiment_results\\overlap_test\\')
rand_state = 42
features = 2
samples = 575
users = 10
train_data_size = 0.6
cv = 5
sns.set_theme(context="poster")
sns.set_style("whitegrid")
# Creating dictionaries for gathering figures
roc_fcs_fig_dict = dict()
roc_fcs_fig_dict_at = dict()
fcs_fig_dict_fc = dict()
fcs_fig_dict_at = dict()
db_fig_dict = dict()
# Creating dictionaries for gathering predictions
full_feat_test_set_pred_dict = dict()
full_feat_overlap_set_pred_dict = dict()
full_feat_test_set_pred_dict_prob = dict()
full_feat_overlap_set_pred_dict_prob = dict()
cm_val_full_feat_test_set_svm_dict = dict()
cm_val_full_feat_overlap_set_svm_dict = dict()
test_roc_dict = dict()
overlap_roc_dict = dict()
# Creating dataframes for results gathering
fp_accept_pers_svm_df = \
completion: pd.DataFrame(columns=["user", "cv_iter", "c_val", "rand_state", "fp_accept_pers", "fp_accept_pers_at"]) | api: pandas.DataFrame
"""
Modified from pvlib python pvlib/tests/test_bsrn.py.
See LICENSES/PVLIB-PYTHON_LICENSE
"""
import inspect
import gzip
import os
from pathlib import Path
import re
import pandas as pd
import pytest
from solarforecastarbiter.io.fetch import bsrn
from pandas.testing import assert_index_equal, assert_frame_equal
DATA_DIR = Path(os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe())))) / 'data'
@pytest.mark.parametrize('testfile,open_func,mode,expected_index', [
('bsrn-pay0616.dat.gz', gzip.open, 'rt',
pd.date_range(start='20160601', periods=43200, freq='1min', tz='UTC')),
('bsrn-lr0100-pay0616.dat', open, 'r',
pd.date_range(start='20160601', periods=43200, freq='1min', tz='UTC')),
])
def test_read_bsrn(testfile, open_func, mode, expected_index):
with open_func(DATA_DIR / testfile, mode) as buf:
data = bsrn.parse_bsrn(buf)
assert_index_equal(expected_index, data.index)
assert 'ghi' in data.columns
assert 'dni_std' in data.columns
assert 'dhi_min' in data.columns
assert 'lwd_max' in data.columns
assert 'relative_humidity' in data.columns
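# 43200 rows in the expected index correspond to 30 days x 1440 one-minute
# intervals for June 2016.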
@pytest.mark.parametrize('year', [2020, '2020'])
@pytest.mark.parametrize('month', [1, '01', '1'])
def test_read_bsrn_month_from_nasa_larc(year, month, requests_mock):
# all 2020-01 int/str variants should produce this url
expected_url = 'https://cove.larc.nasa.gov/BSRN/LRC49/2020/lrc0120.dat'
with open(DATA_DIR / 'bsrn-lr0100-pay0616.dat') as f:
content = f.read()
matcher = re.compile('https://cove.larc.nasa.gov/BSRN/LRC49/.*')
r = requests_mock.register_uri('GET', matcher, content=content.encode())
out = bsrn.read_bsrn_month_from_nasa_larc(year, month)
assert isinstance(out, pd.DataFrame)
assert r.last_request.url == expected_url
@pytest.mark.parametrize('start,end,ncalls', [
('20200101', '20210101', 13),
('20200101', '20200103', 1)
])
def test_read_bsrn_from_nasa_larc(start, end, ncalls, mocker):
start, end = pd.Timestamp(start, tz='UTC'), pd.Timestamp(end, tz='UTC')
m = mocker.patch(
'solarforecastarbiter.io.fetch.bsrn.read_bsrn_month_from_nasa_larc')
m.return_value = pd.DataFrame()
out = bsrn.read_bsrn_from_nasa_larc(start, end)
assert m.call_count == ncalls
assert isinstance(out, pd.DataFrame)
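# ncalls=13 presumably reflects one fetch per month from 2020-01 through
# 2021-01 inclusive, while the two-day range collapses to a single monthly fetch.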
def test_read_bsrn_from_nasa_larc_now_limiter(mocker):
mocked_now = mocker.patch('pandas.Timestamp.now')
mocked_now.return_value =
completion: pd.Timestamp('2021-02-16 15:15:15', tz='UTC') | api: pandas.Timestamp
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Period, PeriodIndex, Series, period_range
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
# Comparison tests for PeriodDtype vectors fully parametrized over
# DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
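# Comparisons on a bare Index return a plain ndarray, hence the xbox switch above.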
pi = pd.period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
pi = tm.box_expected(pi, box_with_array)
result = pi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
class TestPeriodIndexComparisons:
# TODO: parameterize over boxes
@pytest.mark.parametrize("other", ["2017", 2017])
def test_eq(self, other):
idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
expected = np.array([True, True, False])
result = idx == other
tm.assert_numpy_array_equal(result, expected)
def test_pi_cmp_period(self):
idx = period_range("2007-01", periods=20, freq="M")
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000-01-01", periods=10, freq="D")
val = Period("2000-01-04", freq="D")
expected = [x > val for x in pi]
ser = tm.box_expected(pi, box_with_array)
expected = tm.box_expected(expected, xbox)
result = ser > val
tm.assert_equal(result, expected)
val = pi[5]
result = ser > val
expected = [x > val for x in pi]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_period_scalar(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
per = Period("2011-02", freq=freq)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == per, exp)
tm.assert_equal(per == base, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != per, exp)
tm.assert_equal(per != base, exp)
exp = np.array([False, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > per, exp)
tm.assert_equal(per < base, exp)
exp = np.array([True, False, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < per, exp)
tm.assert_equal(per > base, exp)
exp = np.array([False, True, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= per, exp)
tm.assert_equal(per <= base, exp)
exp = np.array([True, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= per, exp)
tm.assert_equal(per >= base, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
# TODO: could also box idx?
idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq)
exp = np.array([False, False, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == idx, exp)
exp = np.array([True, True, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != idx, exp)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > idx, exp)
exp = np.array([True, False, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < idx, exp)
exp = np.array([False, True, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= idx, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= idx, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array):
# GH#13200
# different base freq
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
msg = "Input has different freq=A-DEC from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="A")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="A") >= base
# TODO: Could parametrize over boxes for idx?
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A")
rev_msg = (
r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=A-DEC\)"
)
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
# Different frequency
msg = "Input has different freq=4M from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="4M")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="4M") >= base
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M")
rev_msg = r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=4M\)"
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
result = idx1 > Period("2011-02", freq=freq)
exp = np.array([False, False, False, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("2011-02", freq=freq) < idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 == Period("NaT", freq=freq)
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) == idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 != Period("NaT", freq=freq)
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) != idx1
tm.assert_numpy_array_equal(result, exp)
idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq)
result = idx1 < idx2
exp = np.array([True, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx2
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx2
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx1
exp = np.array([True, True, False, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx1
exp = np.array([False, False, True, False])
tm.assert_numpy_array_equal(result, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat_mismatched_freq_raises(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M")
msg = "Input has different freq=4M from Period(Array|Index)"
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 > diff
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 == diff
# TODO: De-duplicate with test_pi_cmp_nat
@pytest.mark.parametrize("dtype", [object, None])
def test_comp_nat(self, dtype):
left = pd.PeriodIndex(
[pd.Period("2011-01-01"), pd.NaT, pd.Period("2011-01-03")]
)
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period("2011-01-03")])
if dtype is not None:
left = left.astype(dtype)
right = right.astype(dtype)
result = left == right
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = left != right
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == right, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(left != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != left, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > left, expected)
class TestPeriodSeriesComparisons:
def test_cmp_series_period_series_mixed_freq(self):
# GH#13200
base = Series(
[
Period("2011", freq="A"),
Period("2011-02", freq="M"),
Period("2013", freq="A"),
Period("2011-04", freq="M"),
]
)
ser = Series(
[
Period("2012", freq="A"),
Period("2011-01", freq="M"),
Period("2013", freq="A"),
Period("2011-05", freq="M"),
]
)
exp = Series([False, False, True, False])
tm.assert_series_equal(base == ser, exp)
exp = Series([True, True, False, True])
tm.assert_series_equal(base != ser, exp)
exp = Series([False, True, False, False])
tm.assert_series_equal(base > ser, exp)
exp = Series([True, False, False, True])
tm.assert_series_equal(base < ser, exp)
exp = Series([False, True, True, False])
tm.assert_series_equal(base >= ser, exp)
exp = Series([True, False, True, True])
tm.assert_series_equal(base <= ser, exp)
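# Note: the mixed-freq comparison above works because the Series are object
# dtype and each aligned pair of Periods shares a frequency (A with A, M
# with M). A minimal sketch of the per-element comparisons involved:
assert Period("2011", freq="A") < Period("2012", freq="A")
assert Period("2011-02", freq="M") > Period("2011-01", freq="M")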
class TestPeriodIndexSeriesComparisonConsistency:
""" Test PeriodIndex and Period Series Ops consistency """
# TODO: needs parametrization+de-duplication
def _check(self, values, func, expected):
# Test PeriodIndex and Period Series Ops consistency
idx = pd.PeriodIndex(values)
result = func(idx)
# check that we don't pass an unwanted type to tm.assert_equal
assert isinstance(expected, (pd.Index, np.ndarray))
tm.assert_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_comp_period(self):
idx = PeriodIndex(
["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.Period("2011-03", freq="M")
exp = np.array([False, False, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
def test_pi_comp_period_nat(self):
idx = PeriodIndex(
["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x == pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: x != pd.NaT
exp = np.array([True, True, True, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x < pd.Period("2011-03", freq="M")
exp = np.array([True, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT >= x
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
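# Minimal sketch of the NaT comparison semantics exercised above: equality
# and ordering comparisons against pd.NaT are all False, while != is all
# True, regardless of freq.
_pi_nat = PeriodIndex(["2011-01", "NaT", "2011-03"], freq="M")
assert not (_pi_nat == pd.NaT).any()
assert (_pi_nat != pd.NaT).all()
assert not (_pi_nat > pd.NaT).any()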
# ------------------------------------------------------------------
# Arithmetic
class TestPeriodFrameArithmetic:
def test_ops_frame_period(self):
# GH#13043
df = pd.DataFrame(
{
"A": [pd.Period("2015-01", freq="M"), pd.Period("2015-02", freq="M")],
"B": [pd.Period("2014-01", freq="M"), pd.Period("2014-02", freq="M")],
}
)
assert df["A"].dtype == "Period[M]"
assert df["B"].dtype == "Period[M]"
p = pd.Period("2015-03", freq="M")
off = p.freq
# dtype will be object because of original dtype
exp = pd.DataFrame(
{
"A": np.array([2 * off, 1 * off], dtype=object),
"B": np.array([14 * off, 13 * off], dtype=object),
}
)
tm.assert_frame_equal(p - df, exp)
tm.assert_frame_equal(df - p, -1 * exp)
df2 = pd.DataFrame(
{
"A": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
"B": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
}
)
assert df2["A"].dtype == "Period[M]"
assert df2["B"].dtype == "Period[M]"
exp = pd.DataFrame(
{
"A": np.array([4 * off, 4 * off], dtype=object),
"B": np.array([16 * off, 16 * off], dtype=object),
}
)
tm.assert_frame_equal(df2 - df, exp)
tm.assert_frame_equal(df - df2, -1 * exp)
class TestPeriodIndexArithmetic:
# ---------------------------------------------------------------
# __add__/__sub__ with PeriodIndex
# PeriodIndex + other is defined for integers and timedelta-like others
# PeriodIndex - other is defined for integers, timedelta-like others,
# and PeriodIndex (with matching freq)
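# Minimal sketch of these rules (it mirrors the tests below rather than
# specifying new behavior): an integer shifts by whole periods of the
# index's freq, and PeriodIndex minus PeriodIndex yields an Index of
# DateOffset multiples.
_pi_demo = pd.period_range("1/1/2000", freq="D", periods=3)
tm.assert_index_equal(_pi_demo + 1, pd.period_range("1/2/2000", freq="D", periods=3))
tm.assert_index_equal(_pi_demo - _pi_demo, pd.Index([0 * _pi_demo.freq] * 3))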
def test_parr_add_iadd_parr_raises(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
# An earlier implementation of PeriodIndex addition performed
# a set operation (union). This has since been changed to
# raise a TypeError. See GH#14164 and GH#13077 for historical
# reference.
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
rng += other
def test_pi_sub_isub_pi(self):
# GH#20049
# For historical reference see GH#14164, GH#13077.
# PeriodIndex subtraction originally performed set difference,
# then changed to raise TypeError before being implemented in GH#20049
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
off = rng.freq
expected = pd.Index([-5 * off] * 5)
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_pi_sub_pi_with_nat(self):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = rng[1:].insert(0, pd.NaT)
assert other[1:].equals(rng[1:])
result = rng - other
off = rng.freq
expected = pd.Index([pd.NaT, 0 * off, 0 * off, 0 * off, 0 * off])
tm.assert_index_equal(result, expected)
def test_parr_sub_pi_mismatched_freq(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="H", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(IncompatibleFrequency):
rng - other
@pytest.mark.parametrize("n", [1, 2, 3, 4])
def test_sub_n_gt_1_ticks(self, tick_classes, n):
# GH 23878
p1_d = "19910905"
p2_d = "19920406"
p1 = pd.PeriodIndex([p1_d], freq=tick_classes(n))
p2 = pd.PeriodIndex([p2_d], freq=tick_classes(n))
expected = pd.PeriodIndex([p2_d], freq=p2.freq.base) - pd.PeriodIndex(
[p1_d], freq=p1.freq.base
)
tm.assert_index_equal((p2 - p1), expected)
@pytest.mark.parametrize("n", [1, 2, 3, 4])
@pytest.mark.parametrize(
"offset, kwd_name",
[
(pd.offsets.YearEnd, "month"),
(pd.offsets.QuarterEnd, "startingMonth"),
(pd.offsets.MonthEnd, None),
(pd.offsets.Week, "weekday"),
],
)
def test_sub_n_gt_1_offsets(self, offset, kwd_name, n):
# GH 23878
kwds = {kwd_name: 3} if kwd_name is not None else {}
p1_d = "19910905"
p2_d = "19920406"
freq = offset(n, normalize=False, **kwds)
p1 = pd.PeriodIndex([p1_d], freq=freq)
p2 = pd.PeriodIndex([p2_d], freq=freq)
result = p2 - p1
expected = pd.PeriodIndex([p2_d], freq=freq.base) - pd.PeriodIndex(
[p1_d], freq=freq.base
)
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize("other", [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub])
def test_parr_add_sub_float_raises(self, op, other, box_with_array):
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D")
pi = dti.to_period("D")
pi = tm.box_expected(pi, box_with_array)
with pytest.raises(TypeError):
op(pi, other)
@pytest.mark.parametrize(
"other",
[
# datetime scalars
pd.Timestamp.now(),
pd.Timestamp.now().to_pydatetime(),
pd.Timestamp.now().to_datetime64(),
# datetime-like arrays
pd.date_range("2016-01-01", periods=3, freq="H"),
pd.date_range("2016-01-01", periods=3, tz="Europe/Brussels"),
pd.date_range("2016-01-01", periods=3, freq="S")._data,
pd.date_range("2016-01-01", periods=3, tz="Asia/Tokyo")._data,
# Miscellaneous invalid types
],
)
def test_parr_add_sub_invalid(self, other, box_with_array):
# GH#23215
rng = pd.period_range("1/1/2000", freq="D", periods=3)
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
other + rng
with pytest.raises(TypeError):
rng - other
with pytest.raises(TypeError):
other - rng
# -----------------------------------------------------------------
# __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64]
def test_pi_add_sub_td64_array_non_tick_raises(self):
rng = pd.period_range("1/1/2000", freq="Q", periods=3)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
with pytest.raises(IncompatibleFrequency):
rng + tdarr
with pytest.raises(IncompatibleFrequency):
tdarr + rng
with pytest.raises(IncompatibleFrequency):
rng - tdarr
with pytest.raises(TypeError):
tdarr - rng
def test_pi_add_sub_td64_array_tick(self):
# PeriodIndex + Timedelta-like is allowed only with
# tick-like frequencies
rng = pd.period_range("1/1/2000", freq="90D", periods=3)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = pd.period_range("12/31/1999", freq="90D", periods=3)
result = rng + tdi
tm.assert_index_equal(result, expected)
result = rng + tdarr
tm.assert_index_equal(result, expected)
result = tdi + rng
tm.assert_index_equal(result, expected)
result = tdarr + rng
tm.assert_index_equal(result, expected)
expected = pd.period_range("1/2/2000", freq="90D", periods=3)
result = rng - tdi
tm.assert_index_equal(result, expected)
result = rng - tdarr
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
tdarr - rng
with pytest.raises(TypeError):
tdi - rng
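# Illustrative note: "tick" frequencies are the fixed-span ones (D, H, T, S
# and multiples such as "90D"), so a timedelta converts cleanly to a whole
# number of base units; calendar-based freqs like Q, M or A have no fixed
# span, hence the IncompatibleFrequency above. A scalar-Timedelta sketch of
# the allowed daily case:
_rng_daily = pd.period_range("2014-05-01", "2014-05-15", freq="D")
tm.assert_index_equal(_rng_daily + pd.Timedelta("3 Days"), pd.period_range("2014-05-04", "2014-05-18", freq="D"))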
# -----------------------------------------------------------------
# operations with array/Index of DateOffset objects
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_add_offset_array(self, box):
# GH#18849
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
offs = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = pd.PeriodIndex([pd.Period("2015Q2"), pd.Period("2015Q4")])
with tm.assert_produces_warning(PerformanceWarning):
res = pi + offs
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = offs + pi
tm.assert_index_equal(res2, expected)
unanchored = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
# addition/subtraction ops with incompatible offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
pi + unanchored
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
unanchored + pi
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_sub_offset_array(self, box):
# GH#18824
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
other = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = PeriodIndex([pi[n] - other[n] for n in range(len(pi))])
with tm.assert_produces_warning(PerformanceWarning):
res = pi - other
tm.assert_index_equal(res, expected)
anchored = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
pi - anchored
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
anchored - pi
def test_pi_add_iadd_int(self, one):
# Variants of `one` for #19012
rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng + one
expected = pd.period_range("2000-01-01 10:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng += one
tm.assert_index_equal(rng, expected)
def test_pi_sub_isub_int(self, one):
"""
PeriodIndex.__sub__ and __isub__ with several representations of
the integer 1, e.g. int, np.int64, np.uint8, ...
"""
rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng - one
expected = pd.period_range("2000-01-01 08:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng -= one
tm.assert_index_equal(rng, expected)
@pytest.mark.parametrize("five", [5, np.array(5, dtype=np.int64)])
def test_pi_sub_intlike(self, five):
rng = period_range("2007-01", periods=50)
result = rng - five
exp = rng + (-five)
tm.assert_index_equal(result, exp)
def test_pi_sub_isub_offset(self):
# offset
# DateOffset
rng = pd.period_range("2014", "2024", freq="A")
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range("2009", "2019", freq="A")
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
rng = pd.period_range("2014-01", "2016-12", freq="M")
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range("2013-08", "2016-07", freq="M")
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
def test_pi_add_offset_n_gt1(self, box_transpose_fail):
# GH#23215
# add offset to PeriodIndex with freq.n > 1
box, transpose = box_transpose_fail
per = pd.Period("2016-01", freq="2M")
pi = pd.PeriodIndex([per])
expected = pd.PeriodIndex(["2016-03"], freq="2M")
pi = tm.box_expected(pi, box, transpose=transpose)
expected = tm.box_expected(expected, box, transpose=transpose)
result = pi + per.freq
tm.assert_equal(result, expected)
result = per.freq + pi
tm.assert_equal(result, expected)
def test_pi_add_offset_n_gt1_not_divisible(self, box_with_array):
# GH#23215
# PeriodIndex with freq.n > 1 add offset with offset.n % freq.n != 0
pi = pd.PeriodIndex(["2016-01"], freq="2M")
expected = pd.PeriodIndex(["2016-04"], freq="2M")
# FIXME: with transposing these tests fail
pi = tm.box_expected(pi, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = pi + to_offset("3M")
tm.assert_equal(result, expected)
result = to_offset("3M") + pi
tm.assert_equal(result, expected)
# ---------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_pi_add_intarray(self, int_holder, op):
# GH#19959
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("NaT")])
other = int_holder([4, -1])
result = op(pi, other)
expected = pd.PeriodIndex([pd.Period("2016Q1"), pd.Period("NaT")])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_pi_sub_intarray(self, int_holder):
# GH#19959
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("NaT")])
other = int_holder([4, -1])
result = pi - other
expected = pd.PeriodIndex([pd.Period("2014Q1"), pd.Period("NaT")])
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
other - pi
# ---------------------------------------------------------------
# Timedelta-like (timedelta, timedelta64, Timedelta, Tick)
# TODO: Some of these are misnomers because of non-Tick DateOffsets
def test_pi_add_timedeltalike_minute_gt1(self, three_days):
# GH#23031 adding a time-delta-like offset to a PeriodArray whose
# frequency has n != 1 (here "2D"). A more general case is tested below
# in test_pi_add_timedeltalike_tick_gt1, but here we write out the
# expected result more explicitly.
other = three_days
rng = pd.period_range("2014-05-01", periods=3, freq="2D")
expected = pd.PeriodIndex(["2014-05-04", "2014-05-06", "2014-05-08"], freq="2D")
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
# subtraction
expected = pd.PeriodIndex(["2014-04-28", "2014-04-30", "2014-05-02"], freq="2D")
result = rng - other
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
other - rng
@pytest.mark.parametrize("freqstr", ["5ns", "5us", "5ms", "5s", "5T", "5h", "5d"])
def test_pi_add_timedeltalike_tick_gt1(self, three_days, freqstr):
# GH#23031 adding a time-delta-like offset to a PeriodArray that has
# tick-like frequency with n != 1
other = three_days
rng = pd.period_range("2014-05-01", periods=6, freq=freqstr)
expected = pd.period_range(rng[0] + other, periods=6, freq=freqstr)
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
# subtraction
expected = pd.period_range(rng[0] - other, periods=6, freq=freqstr)
result = rng - other
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
other - rng
def test_pi_add_iadd_timedeltalike_daily(self, three_days):
# Tick
other = three_days
rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
expected = pd.period_range("2014-05-04", "2014-05-18", freq="D")
result = rng + other
tm.assert_index_equal(result, expected)
rng += other
tm.assert_index_equal(rng, expected)
def test_pi_sub_isub_timedeltalike_daily(self, three_days):
# Tick-like 3 Days
other = three_days
rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
expected = pd.period_range("2014-04-28", "2014-05-12", freq="D")
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_pi_add_sub_timedeltalike_freq_mismatch_daily(self, not_daily):
other = not_daily
rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=D\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
with pytest.raises(IncompatibleFrequency, match=msg):
rng - other
with pytest.raises(IncompatibleFrequency, match=msg):
rng -= other
def test_pi_add_iadd_timedeltalike_hourly(self, two_hours):
other = two_hours
rng = pd.period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
expected = pd.period_range("2014-01-01 12:00", "2014-01-05 12:00", freq="H")
result = rng + other
tm.assert_index_equal(result, expected)
rng += other
tm.assert_index_equal(rng, expected)
def test_pi_add_timedeltalike_mismatched_freq_hourly(self, not_hourly):
other = not_hourly
rng = pd.period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=H\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
def test_pi_sub_isub_timedeltalike_hourly(self, two_hours):
other = two_hours
rng = pd.period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
expected = pd.period_range("2014-01-01 08:00", "2014-01-05 08:00", freq="H")
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_add_iadd_timedeltalike_annual(self):
# offset
# DateOffset
rng = pd.period_range("2014", "2024", freq="A")
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range("2019", "2029", freq="A")
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
def test_pi_add_sub_timedeltalike_freq_mismatch_annual(self, mismatched_freq):
other = mismatched_freq
rng = pd.period_range("2014", "2024", freq="A")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=A-DEC\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
with pytest.raises(IncompatibleFrequency, match=msg):
rng - other
with pytest.raises(IncompatibleFrequency, match=msg):
rng -= other
def test_pi_add_iadd_timedeltalike_M(self):
rng = pd.period_range("2014-01", "2016-12", freq="M")
expected = pd.period_range("2014-06", "2017-05", freq="M")
result = rng + pd.offsets.MonthEnd(5)
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
def test_pi_add_sub_timedeltalike_freq_mismatch_monthly(self, mismatched_freq):
other = mismatched_freq
rng = pd.period_range("2014-01", "2016-12", freq="M")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=M\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
with pytest.raises(IncompatibleFrequency, match=msg):
rng - other
with pytest.raises(IncompatibleFrequency, match=msg):
rng -= other
def test_parr_add_sub_td64_nat(self, box_transpose_fail):
# GH#23320 special handling for timedelta64("NaT")
box, transpose = box_transpose_fail
pi = pd.period_range("1994-04-01", periods=9, freq="19D")
other = np.timedelta64("NaT")
expected = pd.PeriodIndex(["NaT"] * 9, freq="19D")
obj = tm.box_expected(pi, box, transpose=transpose)
expected = tm.box_expected(expected, box, transpose=transpose)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
other - obj
@pytest.mark.parametrize(
"other",
[
np.array(["NaT"] * 9, dtype="m8[ns]"),
TimedeltaArray._from_sequence(["NaT"] * 9),
],
)
def test_parr_add_sub_tdt64_nat_array(self, box_df_fail, other):
# FIXME: DataFrame fails because when operating column-wise
# timedelta64 entries become NaT and are treated like datetimes
box = box_df_fail
pi = pd.period_range("1994-04-01", periods=9, freq="19D")
expected = pd.PeriodIndex(["NaT"] * 9, freq="19D")
obj = tm.box_expected(pi, box)
expected = tm.box_expected(expected, box)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
other - obj
# ---------------------------------------------------------------
# Unsorted
def test_parr_add_sub_index(self):
# Check that PeriodArray defers to Index on arithmetic ops
pi = pd.period_range("2000-12-31", periods=3)
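# A minimal sketch of the deferral check (assuming the PeriodArray is
# reachable via the .array accessor; this is an illustration, not the
# original test body):
parr = pi.array
result = parr - pi
expected = pi - pi
tm.assert_index_equal(result, expected)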
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
# the next one is from the old docs, but unfortunately it doesn't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
cat_unordered = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unordered
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both direction, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# Make sure that unequal comparison take the categories order in
# account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
# unicode option should not affect to Categorical, as it doesn't care
# the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assignments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
# deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
c = c.set_categories(
[4, 3, 2, 1
]) # all "pointers" to '4' must be changed from 3 to 0,...
self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3])
) # positions are changed
self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1])
) # categories are now in new order
self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1])
) # output is the same
self.assertEqual(c.min(), 4)
self.assertEqual(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0, 3, 2, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = pd.Categorical([1, 2, 3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
        # Scalar version of the single-item array.
        # Categorical.searchsorted returns an np.array (like pd.Series does),
        # unlike np.ndarray.searchsorted(), which returns a scalar here.
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
        # eggs after donuts, after switching milk and donuts
        exp = np.array([3, 5])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
def test_deprecated_labels(self):
# TODO: labels is deprecated and should be removed in 0.18 or 2017,
# whatever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_deprecated_levels(self):
# TODO: levels is deprecated and should be removed in 0.18 or 2017,
# whatever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.categories
with tm.assert_produces_warning(FutureWarning):
res = cat.levels
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
res = pd.Categorical([1, 2, 3, np.nan], levels=[1, 2, 3])
self.assert_numpy_array_equal(res.categories, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_removed_names_produces_warning(self):
# 10482
with tm.assert_produces_warning(UserWarning):
Categorical([0, 1], name="a")
with tm.assert_produces_warning(UserWarning):
Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])
def test_comparison_with_unknown_scalars(self):
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assertRaises(TypeError, lambda: cat < 4)
self.assertRaises(TypeError, lambda: cat > 4)
self.assertRaises(TypeError, lambda: 4 < cat)
self.assertRaises(TypeError, lambda: 4 > cat)
self.assert_numpy_array_equal(cat == 4, [False, False, False])
self.assert_numpy_array_equal(cat != 4, [True, True, True])
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a', 'a', 'c',
'c', 'c'])
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False,
labels=labels)
self.cat = df
def test_dtypes(self):
# GH8143
index = ['cat', 'obj', 'num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000)])
self.assertTrue(result.codes.dtype == 'int32')
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
self.assertTrue(result.codes.dtype == 'int8')
def test_basic(self):
# test basic creation / coercion of categoricals
s = Series(self.factor, name='A')
self.assertEqual(s.dtype, 'category')
self.assertEqual(len(s), len(self.factor))
str(s.values)
str(s)
# in a frame
df = DataFrame({'A': self.factor})
result = df['A']
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
df = DataFrame({'A': s})
result = df['A']
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# multiples
df = DataFrame({'A': s, 'B': s, 'C': 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
self.assertEqual(result2.name, 'B')
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# GH8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
        # doing this breaks transform
        x['person_name'] = pd.Categorical(x.person_name)
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
self.assertEqual(result, expected)
result = x.person_name[0]
self.assertEqual(result, expected)
result = x.person_name.loc[0]
self.assertEqual(result, expected)
def test_creation_astype(self):
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
l = [1, 2, 3, 1]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
df = pd.DataFrame({"cats": [1, 2, 3, 4, 5, 6],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = pd.DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l, ordered=True))
res = s.astype('category', ordered=True)
tm.assert_series_equal(res, exp)
exp = pd.Series(Categorical(
l, categories=list('abcdef'), ordered=True))
res = s.astype('category', categories=list('abcdef'), ordered=True)
tm.assert_series_equal(res, exp)
def test_construction_series(self):
l = [1, 2, 3, 1]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
l = ["a", "b", "c", "a"]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = pd.date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_construction_frame(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([pd.Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([pd.Categorical(list('abc')), pd.Categorical(list(
'abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([pd.Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
self.assertRaises(
ValueError,
lambda: DataFrame([pd.Categorical(list('abc')),
pd.Categorical(list('abdefg'))]))
# ndim > 1
self.assertRaises(NotImplementedError,
lambda: pd.Categorical(np.array([list('abcd')])))
def test_reshaping(self):
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(p.major_axis))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
def test_reindex(self):
index = pd.date_range('20000101', periods=3)
# reindexing to an invalid Categorical
s = Series(['a', 'b', 'c'], dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
'c']))
expected.index = [1, 2]
result = s.reindex([1, 2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(
values=['c', np.nan], categories=['a', 'b', 'c']))
expected.index = [2, 3]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat, copy=True)
self.assertFalse(s.cat is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
exp_cat = np.array(["a", "b", "c", "a"])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat)
self.assertTrue(s.values is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_nan_handling(self):
# Nans are represented as -1 in labels
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(s.values.codes, np.array([0, 1, -1, 0]))
# If categories have nan included, the label should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
s2 = Series(Categorical(
["a", "b", np.nan, "a"], categories=["a", "b", np.nan]))
self.assert_numpy_array_equal(s2.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s2.values.codes, np.array([0, 1, 2, 0]))
# Changing categories should also make the replaced category np.nan
s3 = Series(Categorical(["a", "b", "c", "a"]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s3.cat.categories = ["a", "b", np.nan]
self.assert_numpy_array_equal(s3.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s3.values.codes, np.array([0, 1, 2, 0]))
def test_cat_accessor(self):
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assertEqual(s.cat.ordered, False)
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s.cat.set_categories(["b", "a"], inplace=True)
self.assertTrue(s.values.equals(exp))
res = s.cat.set_categories(["b", "a"])
self.assertTrue(res.values.equals(exp))
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s[:] = "a"
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, np.array(["a"]))
def test_sequence_like(self):
# GH 7839
# make sure can iterate
df = DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
        for c, col in df.iteritems():
            str(col)
def test_series_delegations(self):
# invalid accessor
self.assertRaises(AttributeError, lambda: Series([1, 2, 3]).cat)
tm.assertRaisesRegexp(
AttributeError,
r"Can only use .cat accessor with a 'category' dtype",
lambda: Series([1, 2, 3]).cat)
self.assertRaises(AttributeError, lambda: Series(['a', 'b', 'c']).cat)
self.assertRaises(AttributeError, lambda: Series(np.arange(5.)).cat)
self.assertRaises(AttributeError,
lambda: Series([Timestamp('20130101')]).cat)
# Series should delegate calls to '.categories', '.codes', '.ordered'
# and the methods '.set_categories()' 'drop_unused_categories()' to the
# categorical
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
s.cat.categories = [1, 2, 3]
exp_categories = np.array([1, 2, 3])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
exp_codes = Series([0, 1, 2, 0], dtype='int8')
tm.assert_series_equal(s.cat.codes, exp_codes)
self.assertEqual(s.cat.ordered, True)
s = s.cat.as_unordered()
self.assertEqual(s.cat.ordered, False)
s.cat.as_ordered(inplace=True)
self.assertEqual(s.cat.ordered, True)
# reorder
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
s = s.cat.set_categories(["c", "b", "a"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# remove unused categories
s = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"
]))
exp_categories = np.array(["a", "b"])
exp_values = np.array(["a", "b", "b", "a"])
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# This method is likely to be confused, so test that it raises an error
# on wrong inputs:
def f():
s.set_categories([4, 3, 2, 1])
self.assertRaises(Exception, f)
# right: s.cat.set_categories([4,3,2,1])
def test_series_functions_no_warnings(self):
df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})
labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False,
labels=labels)
def test_assignment_to_dataframe(self):
# assignment
df = DataFrame({'value': np.array(
np.random.randint(0, 10000, 100), dtype='int32')})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
df['D'] = d
str(df)
result = df.dtypes
expected = Series(
[np.dtype('int32'), com.CategoricalDtype()], index=['value', 'D'])
tm.assert_series_equal(result, expected)
df['E'] = s
str(df)
result = df.dtypes
expected = Series([np.dtype('int32'), com.CategoricalDtype(),
com.CategoricalDtype()],
index=['value', 'D', 'E'])
tm.assert_series_equal(result, expected)
result1 = df['D']
result2 = df['E']
self.assertTrue(result1._data._block.values.equals(d))
# sorting
s.name = 'E'
self.assertTrue(result2.sort_index().equals(s.sort_index()))
cat = pd.Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
df = pd.DataFrame(pd.Series(cat))
def test_describe(self):
# Categoricals should not show up together with numerical columns
result = self.cat.describe()
self.assertEqual(len(result.columns), 1)
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = pd.Series(pd.Categorical(["a", "b", "c", "c"]))
df3 = pd.DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
res = df3.describe()
self.assert_numpy_array_equal(res["cat"].values, res["s"].values)
def test_repr(self):
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
self.assertEqual(exp, a.__unicode__())
a = pd.Series(pd.Categorical(["a", "b"] * 25))
exp = u("0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" +
"dtype: category\nCategories (2, object): [a, b]")
with option_context("display.max_rows", 5):
self.assertEqual(exp, repr(a))
levs = list("abcdefghijklmnopqrstuvwxyz")
a = pd.Series(pd.Categorical(
["a", "b"], categories=levs, ordered=True))
exp = u("0 a\n1 b\n" + "dtype: category\n"
"Categories (26, object): [a < b < c < d ... w < x < y < z]")
self.assertEqual(exp, a.__unicode__())
def test_categorical_repr(self):
c = pd.Categorical([1, 2, 3])
exp = """[1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1, 2, 3, 4, 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20))
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_ordered(self):
c = pd.Categorical([1, 2, 3], ordered=True)
exp = """[1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3],
ordered=True)
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20), ordered=True)
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
# TODO(wesm): exceeding 80 characters in the console is not good
# behavior
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]""")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]")
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, "
"2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, "
"2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
        idx = pd.period_range('2011-01', freq='M', periods=5)
import argparse
import matplotlib.pyplot as plt
import os
import pandas as pd
import pathlib
import psutil
import scipy.interpolate
import subprocess
import threading
import time
from .utils import run_command, _init_modes, _init_precs
from .timing import _aggregate_along_rows, _LINESTYLES, _COLORS
DCGM_FIELD_IDS = {
'DCGM_FI_PROF_GR_ENGINE_ACTIVE': 1001,
'DCGM_FI_PROF_DRAM_ACTIVE': 1005,
'DCGM_FI_DEV_GPU_UTIL': 203,
'DCGM_FI_PROF_PIPE_TENSOR_ACTIVE': 1004,
'DCGM_FI_PROF_PIPE_FP16_ACTIVE': 1008,
'DCGM_FI_PROF_PIPE_FP32_ACTIVE': 1007,
'DCGM_FI_PROF_PIPE_FP64_ACTIVE': 1006,
'DCGM_FI_PROF_SM_OCCUPANCY': 1003,
'DCGM_FI_PROF_SM_ACTIVE': 1002,
'DCGM_FI_DEV_FB_TOTAL': 250,
'DCGM_FI_DEV_FB_FREE': 251,
'DCGM_FI_DEV_FB_USED': 252,
'DCGM_FI_PROF_PCIE_TX_BYTES': 1009,
'DCGM_FI_PROF_PCIE_RX_BYTES': 1010,
'DCGM_FI_DEV_MEM_COPY_UTIL': 204,
}
def _get_dcgm_fields_enabled_for(device_model):
if device_model in {'v100', 'a100'}:
return [
'DCGM_FI_DEV_GPU_UTIL',
'DCGM_FI_PROF_PIPE_TENSOR_ACTIVE',
'DCGM_FI_PROF_PIPE_FP16_ACTIVE',
'DCGM_FI_PROF_PIPE_FP32_ACTIVE',
'DCGM_FI_PROF_PIPE_FP64_ACTIVE',
'DCGM_FI_PROF_SM_OCCUPANCY',
'DCGM_FI_PROF_SM_ACTIVE',
'DCGM_FI_DEV_FB_USED',
'DCGM_FI_PROF_PCIE_RX_BYTES',
'DCGM_FI_PROF_PCIE_TX_BYTES',
'DCGM_FI_DEV_MEM_COPY_UTIL',
'DCGM_FI_PROF_GR_ENGINE_ACTIVE',
'DCGM_FI_PROF_DRAM_ACTIVE',
]
else:
return [
"DCGM_FI_DEV_GPU_UTIL",
"DCGM_FI_DEV_FB_USED",
"DCGM_FI_DEV_MEM_COPY_UTIL",
]
class DcgmMonitor:
def __init__(self, device_model):
self.fields = _get_dcgm_fields_enabled_for(device_model)
self.field_ids = [DCGM_FIELD_IDS[f] for f in self.fields]
self.field_ids_str = ','.join(map(str, self.field_ids))
self.reset()
def reset(self):
self.metrics = {f: [] for f in self.fields}
self.metrics.update({
'timestamp': [],
'cpu_percent': [],
'host_mem_total': [],
'host_mem_available': [],
})
self.to_shutdown = False
def sample_metrics(self, interval=1.0):
cpu = psutil.cpu_percent(interval=interval, percpu=False)
self.metrics['cpu_percent'].append(cpu)
mem = psutil.virtual_memory()
self.metrics['host_mem_total'].append(mem.total)
self.metrics['host_mem_available'].append(mem.available)
self.metrics['timestamp'].append(time.time())
dcgmi_out = run_command('dcgmi dmon -e {} -c 5'.format(self.field_ids_str))
dcgmi_samples = {f: [] for f in self.fields}
for line in dcgmi_out.split('\n')[-4:-1]:
            # NOTE: this assumes each dcgmi dmon output line has the format
            # "GPU <id> <metric1> <metric2> ..."
for idx, val in enumerate(line.split()[2:]):
if val == 'N/A':
continue
dcgmi_samples[self.fields[idx]].append(float(val))
for f, vals in dcgmi_samples.items():
if len(vals) > 0:
self.metrics[f].append(sum(vals) / len(vals))
else:
self.metrics[f].append(float('nan'))
def save(self, output_dir, filename='dcgm_metrics.csv'):
csv_path = os.path.join(output_dir, filename)
        # persist the collected metrics as CSV
        pd.DataFrame(self.metrics).to_csv(csv_path, index=False)
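

# Hedged usage sketch (not from the original module): one way the DcgmMonitor
# above could be driven from a background thread while a GPU workload runs. It
# only uses attributes defined above (sample_metrics, save, to_shutdown); the
# output directory, sampling interval and the presence of a working `dcgmi`
# binary are assumptions.
def _monitor_loop(monitor, output_dir, interval=1.0):
    # Sample until another thread flips the shutdown flag, then persist the CSV.
    while not monitor.to_shutdown:
        monitor.sample_metrics(interval=interval)
    monitor.save(output_dir)


def run_with_monitoring(workload_fn, device_model='v100', output_dir='.'):
    monitor = DcgmMonitor(device_model)
    worker = threading.Thread(target=_monitor_loop, args=(monitor, output_dir))
    worker.start()
    try:
        workload_fn()  # the GPU workload being profiled
    finally:
        monitor.to_shutdown = True
        worker.join()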
import datetime
import pandas as pd
import numpy as np
import os
from sklearn.linear_model import LinearRegression
import time
import pika
class analitica():
ventana = 15
pronostico = 1
    file_name = "data_base.csv"
    servidor = "rabbit"
    desc = {}  # dictionary with the descriptive-analytics data
    pred = {}  # dictionary with the predictive-analytics data
def __init__(self):
self.load_data()
def load_data(self):
if self.file_name not in os.listdir(os.getcwd()):
            self.df = pd.DataFrame(columns=["fecha", "sensor", "valor"])
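

# Hedged sketch (not from the original module): load_data() above only shows the
# branch that creates an empty DataFrame when the CSV is missing. A plausible
# standalone counterpart that also handles an existing file could look like this;
# parsing "fecha" as dates is an assumption.
def load_or_create(file_name, columns=("fecha", "sensor", "valor")):
    """Read the CSV if it exists, otherwise start an empty frame."""
    if os.path.isfile(file_name):
        return pd.read_csv(file_name, parse_dates=["fecha"])
    return pd.DataFrame(columns=list(columns))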
import pandas as pd
import numpy as np
import talib
class Indicators(object):
"""
Input: Price DataFrame, Moving average/lookback period and standard deviation multiplier
This function returns a dataframe with 5 columns
Output: Prices, Moving Average, Upper BB, Lower BB and BB Val
"""
def bb(self, l_sym, df_price, time_period, st_dev_u, st_dev_l):
df_bb_u = pd.DataFrame(columns=l_sym, index=df_price.index)
df_bb_m = pd.DataFrame(columns=l_sym, index=df_price.index)
df_bb_l = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_bb_u[sym], df_bb_m[sym], df_bb_l[sym] = talib.BBANDS(np.asarray(df_price[sym]), timeperiod=time_period, nbdevup=st_dev_u, nbdevdn=st_dev_l)
except:
pass
return df_bb_u, df_bb_m, df_bb_l
def ema(self, l_sym, df_price, time_period):
df_ema = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_ema[sym] = talib.EMA(np.asarray(df_price[sym]), timeperiod=time_period)
except:
pass
return df_ema
def ma(self, l_sym, df_price, time_period):
df_ma = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_ma[sym] = talib.MA(np.asarray(df_price[sym]), timeperiod=time_period)
except:
pass
return df_ma
def sma(self, l_sym, df_price, time_period):
df_sma = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_sma[sym] = talib.SMA(np.asarray(df_price[sym]), timeperiod=time_period)
except:
pass
return df_sma
def adx(self, l_sym, df_high, df_low, df_close, time_period):
df_adx = pd.DataFrame(columns=l_sym, index=df_high.index)
for sym in l_sym:
try:
df_adx[sym] = talib.ADX(high=np.asarray(df_high[sym]), low=np.asarray(df_low[sym]), close=np.asarray(df_close[sym]), timeperiod = time_period)
except:
pass
return df_adx
def mom(self, l_sym, df_price, time_period):
df_mom = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_mom[sym] = talib.MOM(np.asarray(df_price[sym]), timeperiod = time_period)
except:
pass
return df_mom
def atr(self, l_sym, df_high, df_low, df_close, time_period):
df_atr = pd.DataFrame(columns=l_sym, index=df_high.index)
for sym in l_sym:
try:
df_atr[sym] = talib.ATR(high=np.asarray(df_high[sym]), low=np.asarray(df_low[sym]), close=np.asarray(df_close[sym]), timeperiod=time_period)
except:
pass
return df_atr
def macd(self, l_sym, df_price, fast_period, slow_period, signal_period):
df_macd = pd.DataFrame(columns=l_sym, index=df_price.index)
df_macdsignal = pd.DataFrame(columns=l_sym, index=df_price.index)
df_macdhist = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_macd[sym], df_macdsignal[sym], df_macdhist[sym] = talib.MACD(np.asarray(df_price[sym]), fastperiod=fast_period, slowperiod=slow_period, signalperiod=signal_period)
except:
pass
return df_macd, df_macdsignal, df_macdhist
def wavec(self, l_sym, df_three, df_four, df_five):
        df_ca = pd.DataFrame(columns=l_sym, index=df_three.index)
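

# Hedged usage sketch (not from the original module): calling the Bollinger Band
# helper above on synthetic prices. The symbols and random-walk data are made up
# for illustration, and a working TA-Lib installation is assumed.
if __name__ == '__main__':
    symbols = ['AAA', 'BBB']
    prices = pd.DataFrame(
        100 + np.cumsum(np.random.randn(250, len(symbols)), axis=0),
        columns=symbols,
        index=pd.date_range('2020-01-01', periods=250, freq='B'))
    ind = Indicators()
    upper, middle, lower = ind.bb(symbols, prices, time_period=20,
                                  st_dev_u=2, st_dev_l=2)
    print(upper.tail())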
# Functionality to read and store in HDF5 files
import h5py
import pandas as pd
import random
import string
import os
import datetime
import json
from data_science.data_transfer.data_api import Dataset
class HDF5Dataset:
def __init__(self, file_name, file_path, dataset_id,
random_string_in_name=10):
"""
Initialization.
:param self:
:param file_name: name for the file. No ending necessary
:param file_path: location path
:param dataset_id: dataset's id
        :param random_string_in_name: length of the random string to add to
            the name
"""
self.file_name = file_name
self.file_path = file_path
self.file_w_path = os.path.join(self.file_path, self.file_name)
self._dataset_id = dataset_id
self.random_string_in_name = random_string_in_name
@property
def dataset_id(self):
# do something
return self._dataset_id
@dataset_id.setter
def dataset_id(self, value):
self._dataset_id = value
def create_h5_file(self):
"""
Create the h5 file and add the groups.
:param self: self
"""
self.file_name = self.file_name + '-' + \
_generate_random_string(l=self.random_string_in_name) + '.h5'
self.file_w_path = os.path.join(self.file_path, self.file_name)
try:
f = h5py.File(self.file_w_path, 'a')
f.create_group('meta')
f.create_group('meta/columns')
f.create_group('data')
f.close()
return self.file_name
except ValueError as e:
print(e)
return
def add_dataset_meta_to_h5(self, dataset):
"""
Add the meta data of the dataset to the file. Consist of:
A dataframe with the dataset attributes.
A dataframe with the columns of the dataset.
:param self: self
:param dataset: a Dataset instance
"""
if not isinstance(dataset, Dataset):
raise TypeError('A dataset has to be provided.')
# create a df with the metadata
columns = list(dataset.dump_attributes_to_dictionary().keys())
        df = pd.DataFrame(columns=columns)
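

# Hedged usage sketch (not from the original module): create_h5_file() relies on
# a module-level _generate_random_string helper that is not shown in this
# excerpt, so a minimal stand-in is defined here purely so the example runs; the
# real helper may differ. The file name, path and dataset id are illustrative.
def _generate_random_string(l=10):
    # hypothetical stand-in for the missing helper
    return ''.join(random.choices(string.ascii_lowercase, k=l))


if __name__ == '__main__':
    store = HDF5Dataset(file_name='example', file_path='/tmp', dataset_id=42)
    print('created', store.create_h5_file())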
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 25 17:27:29 2019
@author: ryw
"""
import pandas as pd
import numpy as np
from scipy import stats
import time
import statsmodels.tsa.stattools as ts
from datetime import datetime
import matplotlib
from matplotlib.font_manager import FontProperties
from matplotlib.font_manager import _rebuild
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.dates as mdates
from pylab import mpl
myfont = matplotlib.font_manager.FontProperties(fname='/Users/weihuang/Downloads/simheittf/simhei.ttf')
mpl.rcParams['font.sans-serif'] = ['SimHei']  # set the default font (SimHei, for CJK labels)
mpl.rcParams['axes.unicode_minus'] = False  # keep the minus sign from rendering as a box in saved figures
#inFilepathStk='merge-30.csv'
outFileNumbers='D:/18-19/graduationDesign/data/out/resultNumbers.csv'
#timePeriod='20150504'
#inFilepathStk='D:/18-19/graduationDesign/data/Stk_Tick/Stk_Tick_SZ01/SZ000001_20150504.csv'
#inFilepathIdx='D:/18-19/graduationDesign/data/Stk_Tick/Stk_Tick_SZ399107/sz399107_20150504.csv'
#outFilepathCsv='D:/18-19/graduationDesign/data/out/dfFluctuationDeviation_20150504.csv'
#outFilepathPng='D:/18-19/graduationDesign/图/code/fluctuation/最新价+涨跌幅_20150504.png'
#outliersThreshold=0.02
timePeriod='201505'
outliersThreshold=0.04
# 0.05 0.06
inFilepathStk='D:/18-19/graduationDesign/data/Stk_Tick/Stk_Tick_SZ01/mergeSZ000001_Tick_201505.csv'
inFilepathIdx='D:/18-19/graduationDesign/data/Stk_Tick/Stk_Tick_SZ399107/mergeSZ399107_Tick_201505.csv'
outFilepathCsv='D:/18-19/graduationDesign/data/out/dfFluctuationDeviation_201505_'+str(outliersThreshold)+'.csv'
outFilepathPng='D:/18-19/graduationDesign/图/code/fluctuation/最新价+涨跌幅_201505_'+str(outliersThreshold)+'.png'
#timePeriod='201812'
#outliersThreshold=0.02
#inFilepathStk='D:/18-19/graduationDesign/data/Stk_Tick/Stk_Tick_SZ01/mergeSZ000001_Tick_201812.csv'
#inFilepathIdx='D:/18-19/graduationDesign/data/Stk_Tick/Stk_Tick_SZ399107/mergeSZ399107_Tick_201812.csv'
#outFilepathCsv='D:/18-19/graduationDesign/data/out/dfFluctuationDeviation_201812_'+str(outliersThreshold)+'.csv'
#outFilepathPng='D:/18-19/graduationDesign/图/code/fluctuation/最新价+涨跌幅_201812_'+str(outliersThreshold)+'.png'
#%% conversion helpers
dateTimePattern="%Y-%m-%d %H:%M:%S"
minPattern="%Y-%m-%d %H:%M"
zscore = lambda x: (x-x.mean())/x.std()
timestamp=lambda x :int(time.mktime(time.strptime(x,dateTimePattern)))
hour=lambda x :int((int(time.mktime(time.strptime(x,dateTimePattern))))/3600)
minute=lambda x :int((int(time.mktime(time.strptime(x,dateTimePattern))))/60)
threeSecond=lambda x :int(
(int(time.mktime(time.strptime(x,dateTimePattern))))+
(3-int(time.mktime(time.strptime(x,dateTimePattern))))%3)
# int(time.mktime(datetime.strptime(x[:-3],"%Y-%m-%d %H:%M"))
# if ((int(time.mktime(time.strptime(x,dateTimePattern))))%86400-33900)<600
## time.strftime('%H:%M',datetime.strptime(x[11:-3],minPattern))
# else
date23Second=lambda x :time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(x))
date2Min=lambda x :datetime.strptime(x[:-3],"%Y-%m-%d %H:%M")
date2Hour=lambda x :datetime.strptime(x[:-6],"%Y-%m-%d %H")
date2Date=lambda x :datetime.strptime(x[:-9],"%Y-%m-%d").strftime("%Y-%m-%d")
print('start')
#%% read the stock tick data (time, latest price)
dfStk=pd.read_csv(inFilepathStk)
#head="市场代码,证券代码,时间,最新价,成交笔数,成交额,成交量,方向,买一价,买二价,买三价,买四价,买五价,卖一价,卖二价,卖三价,卖四价,卖五价,买一量,买二量,买三量,买四量,买五量,卖一量,卖二量,卖三量,卖四量,卖五量"
#columnHeadTuple=head.split(",")
dfStk['日期']=dfStk['时间'].transform(date2Date)
dfStk['交易时间按3s']=dfStk['时间'].transform(threeSecond)
dfStk['3s']=dfStk['交易时间按3s'].transform(date23Second)
dfStkU=dfStk.loc[:,['时间','交易时间按3s','日期','最新价','3s']]
#print(dfStkU.columns)
print(dfStkU[0:5])
#%% read the stock daily bars (previous close)
dfSDay=pd.read_csv('D:/18-19/graduationDesign/data/stk_day_20190307/SZ000001_day.csv')
#dayHead='代码,时间,开盘价,最高价,最低价,收盘价,成交量(股),成交额(元)'
#dayHeadTuple=dayHead.split(',')
dfSDay.rename(columns={'时间':'当前交易日'}, inplace = True)
lastDay=list(dfSDay['当前交易日'].drop(0))
lastDay.append(lastDay[-1])
dfSDay['下一交易日']=lastDay
dfSDayU=dfSDay.loc[:,['当前交易日','下一交易日','收盘价']]
#print(dfSDayU.columns)
#print(dfSDayU[0:5])
#%% join stock ticks with the previous close
dfStkDay=pd.merge(dfStkU, dfSDayU, how='left', on=None, left_on='日期', right_on='下一交易日',
left_index=False, right_index=False, sort=False,
suffixes=('_x', '_y'), copy=True, indicator=False)
#print(dfStkDay[0:5])
print('joined stock ticks with the previous close')
#%% compute the stock's percent change
dfSPrice=dfStkDay.loc[:,['交易时间按3s','最新价','收盘价']]
dfSPrice.rename(columns={'收盘价':'昨收价'}, inplace = True)
dfSFluctuation=dfSPrice.groupby(dfStk['交易时间按3s']).last()
dfSUniqueTime=dfStk.drop_duplicates(['3s'])['3s']
dfSFluctuation.index = pd.Index(dfSUniqueTime)
dfSFluctuation.dropna(inplace=True)
dfSFluctuation['股票涨跌幅']=(dfSFluctuation['最新价']-dfSFluctuation['昨收价'])/dfSFluctuation['昨收价']
#print(dfSFluctuation[0:5])
#%% read the index tick data (time, latest price)
dfItk=pd.read_csv(inFilepathIdx)
#head="市场代码,证券代码,时间,最新价,成交笔数,成交额,成交量,方向,买一价,买二价,买三价,买四价,买五价,卖一价,卖二价,卖三价,卖四价,卖五价,买一量,买二量,买三量,买四量,买五量,卖一量,卖二量,卖三量,卖四量,卖五量"
#columnHeadTuple=head.split(",")
dfItk['日期']=dfItk['时间'].transform(date2Date)
dfItk['交易时间按3s']=dfItk['时间'].transform(threeSecond)
dfItk['3s']=dfItk['交易时间按3s'].transform(date23Second)
dfItkU=dfItk.loc[:,['时间','交易时间按3s','日期','最新价','3s']]
#print(dfItkU.columns)
#print(dfItkU[0:5])
#%% read the index daily bars (previous close)
dfIDay=pd.read_csv('D:/18-19/graduationDesign/data/Idx_DAY_20190223/SZ399107_day.csv')
import re
import pandas as pd
from lxml import etree
def get_data(file_name,genre):
sentences = []
with (open('./data/ar_PAN/{}.xml'.format(file_name),'r',encoding='utf8')) as f:
doc = etree.parse(f)
root = doc.getroot()
for i in range(len(root[0])):
sentences.append([preprocessing(root[0][i].text),genre])
return sentences
def get_tweets(df):
sentences = []
for index, row in df.iterrows():
sentences +=get_data(row.id,row.genre)
    return pd.DataFrame(sentences, columns=['text', 'label'])
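

# Hedged sketch (not from the original module): get_data() above depends on a
# `preprocessing` helper and on XML files under ./data/ar_PAN/, neither of which
# is shown here. The snippet below mimics the same parsing pattern on an
# in-memory document so the shape of the resulting DataFrame is clear; it is
# illustrative only.
if __name__ == '__main__':
    xml = (b"<author><documents>"
           b"<document>hello WORLD</document>"
           b"<document>second tweet</document>"
           b"</documents></author>")
    root = etree.fromstring(xml)
    rows = [[doc.text.lower(), 'bot'] for doc in root[0]]
    print(pd.DataFrame(rows, columns=['text', 'label']))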
"""
This module contains function to plot smooth ROC curves using KFold
Examples:
result, aucs = roc_curve_cv(xgb.XGBClassifier(), X, y, n_splits=6)
plot_roc_curve_cv(result)
plt.show()
plot_specificity_cv(result)
plt.show()
plot_specificity_cv(result, invert_x=True, invert_y=True)
plt.show()
print(f"AUC: {np.mean(aucs)} (std:{np.std(aucs)})")
Comparing models:
result_xgb, aucs = roc_curve_cv(xgb.XGBClassifier(), X, y, n_splits=6, n_repeats=4)
result_rf, aucs = roc_curve_cv(RandomForestClassifier(), X, y, n_splits=6, n_repeats=4)
plot_specificity_cv({'XGB': result_xgb, 'RF':result_rf})
plt.show()
Comparing hyperparameters
results = []
for max_depth in (3,10):
for max_features in (0.5, 0.9):
result, _ = roc_curve_cv(
RandomForestClassifier(max_depth=max_depth, max_features=max_features),
x_full, y_full, n_repeats=4,
properties={'max features':max_features, 'max depth':max_depth})
results.append(result)
plot_specificity_cv(results, hue='max features', style='max depth', ci=False)
plt.show()
plot_roc_curve_cv(results, hue='max features', style='max depth', ci=False)
plt.show()
"""
from sklearn.model_selection import StratifiedKFold, RepeatedStratifiedKFold
from numpy import interp
import numpy as np
from sklearn.metrics import roc_curve, auc
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import label_binarize
def roc_curve_simple(model, X, y):
y_pred = model.predict_proba(X)[:,1]
fpr, tpr, thres = roc_curve(y, y_pred)
result_df = pd.DataFrame({'fpr':fpr, 'tpr':tpr, 'threshold':thres}, index=range(len(fpr)))
return result_df, auc(fpr,tpr)
def roc_curve_cv(model, X, y, n_splits=5, n_repeats=1, properties=None):
if n_repeats > 1:
cv = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats)
else:
cv = StratifiedKFold(n_splits=n_splits)
auc_list = []
    result_df = pd.DataFrame()
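

# Hedged usage sketch (not from the original module): roc_curve_simple() above is
# fully defined, so a quick call on synthetic data shows the returned
# (DataFrame, AUC) pair. The dataset and classifier below are illustrative.
if __name__ == '__main__':
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression

    X_demo, y_demo = make_classification(n_samples=500, n_features=10,
                                         random_state=0)
    clf = LogisticRegression(max_iter=1000).fit(X_demo, y_demo)
    curve_df, auc_value = roc_curve_simple(clf, X_demo, y_demo)
    print(curve_df.head())
    print('AUC:', auc_value)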
# Copyright 2019-2020 The Lux Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .context import lux
import pytest
import pandas as pd
from lux.vis.Vis import Vis
def test_vary_filter_val(global_var):
lux.config.set_executor_type("Pandas")
df = pytest.olympic
vis = Vis(["Height", "SportType=Ball"], df)
df.set_intent_as_vis(vis)
df._ipython_display_()
assert len(df.recommendation["Filter"]) == len(df["SportType"].unique()) - 1
linechart = list(filter(lambda x: x.mark == "line", df.recommendation["Enhance"]))[0]
assert (
linechart.get_attr_by_channel("x")[0].attribute == "Year"
), "Ensure recommended vis does not inherit input vis channel"
def test_filter_inequality(global_var):
df = pytest.car_df
df["Year"] = pd.to_datetime(df["Year"], format="%Y")
df.set_intent(
[
lux.Clause(attribute="Horsepower"),
lux.Clause(attribute="MilesPerGal"),
lux.Clause(attribute="Acceleration", filter_op=">", value=10),
]
)
df._ipython_display_()
from lux.utils.utils import get_filter_specs
complement_vis = df.recommendation["Filter"][0]
fltr_clause = get_filter_specs(complement_vis._intent)[0]
assert fltr_clause.filter_op == "<="
assert fltr_clause.value == 10
def test_generalize_action(global_var):
# test that generalize action creates all unique visualizations
df = pytest.car_df
df["Year"] = pd.to_datetime(
df["Year"], format="%Y"
) # change pandas dtype for the column "Year" to datetime
df.set_intent(["Acceleration", "MilesPerGal", "Cylinders", "Origin=USA"])
df._ipython_display_()
assert len(df.recommendation["Generalize"]) == 4
v1 = df.recommendation["Generalize"][0]
v2 = df.recommendation["Generalize"][1]
v3 = df.recommendation["Generalize"][2]
v4 = df.recommendation["Generalize"][3]
for clause in v4._inferred_intent:
assert clause.value == "" # No filter value
assert v4.title == "Overall"
check1 = v1 != v2 and v1 != v3 and v1 != v4
check2 = v2 != v3 and v2 != v4
check3 = v3 != v4
assert check1 and check2 and check3
def test_row_column_group(global_var):
df = pd.read_csv(
"https://github.com/lux-org/lux-datasets/blob/master/data/state_timeseries.csv?raw=true"
)
df["Date"] = pd.to_datetime(df["Date"])
tseries = df.pivot(index="State", columns="Date", values="Value")
# Interpolating missing values
tseries[tseries.columns.min()] = tseries[tseries.columns.min()].fillna(0)
tseries[tseries.columns.max()] = tseries[tseries.columns.max()].fillna(tseries.max(axis=1))
tseries = tseries.interpolate("zero", axis=1)
tseries._ipython_display_()
assert list(tseries.recommendation.keys()) == ["Temporal"]
def test_groupby(global_var):
df = pytest.college_df
groupbyResult = df.groupby("Region").sum()
groupbyResult._ipython_display_()
assert list(groupbyResult.recommendation.keys()) == ["Column Groups"]
def test_crosstab():
# Example from http://www.datasciencemadesimple.com/cross-tab-cross-table-python-pandas/
d = {
"Name": [
"Alisa",
"Bobby",
"Cathrine",
"Alisa",
"Bobby",
"Cathrine",
"Alisa",
"Bobby",
"Cathrine",
"Alisa",
"Bobby",
"Cathrine",
],
"Exam": [
"Semester 1",
"Semester 1",
"Semester 1",
"Semester 1",
"Semester 1",
"Semester 1",
"Semester 2",
"Semester 2",
"Semester 2",
"Semester 2",
"Semester 2",
"Semester 2",
],
"Subject": [
"Mathematics",
"Mathematics",
"Mathematics",
"Science",
"Science",
"Science",
"Mathematics",
"Mathematics",
"Mathematics",
"Science",
"Science",
"Science",
],
"Result": [
"Pass",
"Pass",
"Fail",
"Pass",
"Fail",
"Pass",
"Pass",
"Fail",
"Fail",
"Pass",
"Pass",
"Fail",
],
}
df = pd.DataFrame(d, columns=["Name", "Exam", "Subject", "Result"])
result = pd.crosstab([df.Exam], df.Result)
result._ipython_display_()
assert list(result.recommendation.keys()) == ["Row Groups", "Column Groups"]
def test_custom_aggregation(global_var):
import numpy as np
df = pytest.college_df
df.set_intent(["HighestDegree", lux.Clause("AverageCost", aggregation=np.ptp)])
df._ipython_display_()
assert list(df.recommendation.keys()) == ["Enhance", "Filter", "Generalize"]
df.clear_intent()
def test_year_filter_value(global_var):
df = pytest.car_df
df["Year"] = pd.to_datetime(df["Year"], format="%Y")
df.set_intent(["Acceleration", "Horsepower"])
df._ipython_display_()
list_of_vis_with_year_filter = list(
filter(
lambda vis: len(
list(
filter(
lambda clause: clause.value != "" and clause.attribute == "Year",
vis._intent,
)
)
)
!= 0,
df.recommendation["Filter"],
)
)
vis = list_of_vis_with_year_filter[0]
assert (
"T00:00:00.000000000" not in vis.to_altair()
), "Year filter title contains extraneous string, not displayed as summarized string"
df.clear_intent()
def test_similarity(global_var):
lux.config.early_pruning = False
df = pytest.car_df
df["Year"] = pd.to_datetime(df["Year"], format="%Y")
df.set_intent(
[
lux.Clause("Year", channel="x"),
lux.Clause("Displacement", channel="y"),
lux.Clause("Origin=USA"),
]
)
df._ipython_display_()
assert len(df.recommendation["Similarity"]) == 2
ranked_list = df.recommendation["Similarity"]
japan_vis = list(
filter(
lambda vis: vis.get_attr_by_attr_name("Origin")[0].value == "Japan",
ranked_list,
)
)[0]
europe_vis = list(
filter(
lambda vis: vis.get_attr_by_attr_name("Origin")[0].value == "Europe",
ranked_list,
)
)[0]
assert japan_vis.score > europe_vis.score
df.clear_intent()
lux.config.early_pruning = True
def test_similarity2():
df = pd.read_csv(
"https://raw.githubusercontent.com/lux-org/lux-datasets/master/data/real_estate_tutorial.csv"
)
df["Month"] = pd.to_datetime(df["Month"], format="%m")
df["Year"] =
|
pd.to_datetime(df["Year"], format="%Y")
|
pandas.to_datetime
|
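# Hedged aside: pandas.to_datetime with an explicit format string, as used on the "Year"
# and "Month" columns in the tests above; the two-row frame here is invented.
import pandas as pd
df_dates = pd.DataFrame({"Year": ["2010", "2011"]})
df_dates["Year"] = pd.to_datetime(df_dates["Year"], format="%Y")
print(df_dates["Year"])  # 2010-01-01 and 2011-01-01 as datetime64[ns]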
import pandas as pd
import numpy
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn import tree
from sklearn import neighbors
from sklearn.naive_bayes import MultinomialNB,BernoulliNB,GaussianNB
from sklearn.decomposition import PCA
from sklearn import metrics
import matplotlib.pyplot as plt
def readData(filename):
data=[]
with open(filename, "r") as f:
for line in f.readlines():
words = line.split(",")
words[0] = float(words[0])
words[1] = float(words[1])
words[2] = float(words[2])
words[3] = float(words[3])
words[4] = words[4].replace("\n","")
data.append(words)
return data
def iris():
iris_data = readData("iris.data")
iris_x=[] #feature
iris_y=[] #target
for i in range(len(iris_data)):
iris_x.append(iris_data[i][:4])
iris_y.append(iris_data[i][4])
'''
pca=PCA(n_components='mle')
new_iris_x=pca.fit_transform(iris_x)
explained_variance = numpy.var(new_iris_x, axis=0)
explained_variance_ratio = explained_variance / numpy.sum(explained_variance)
print(explained_variance)
print(explained_variance_ratio)
'''
train_x, test_x, train_y, test_y = train_test_split(iris_x, iris_y, test_size=0.3)
#Decision Tree
decision_tree= tree.DecisionTreeClassifier()
iris_decision_tree = decision_tree.fit(train_x, train_y)
DT_test_y_predicted = iris_decision_tree.predict(test_x)
DT_accuracy = metrics.accuracy_score(test_y, DT_test_y_predicted)
#KNN
knn = neighbors.KNeighborsClassifier(n_neighbors = 5)
iris_knn = knn.fit(train_x, train_y)
KNN_test_y_predicted = iris_knn.predict(test_x)
KNN_accuracy = metrics.accuracy_score(test_y, KNN_test_y_predicted)
#Naive Bayes
nb = GaussianNB()
iris_nb = nb.fit(train_x, train_y)
NB_test_y_predicted = iris_nb.predict(test_x)
NB_accuracy = metrics.accuracy_score(test_y, NB_test_y_predicted, normalize=True)
#Accuracy
print("Iris---------------------------------------")
print("DecsionTree = " + str(round(DT_accuracy, 7)))
print("KNN = " + str(round(KNN_accuracy,7)))
print("NaiveBayes = " + str(round(NB_accuracy, 7)))
print("")
def fire():
fire_data =
|
pd.read_csv("forestfires.csv")
|
pandas.read_csv
|
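# Hedged aside: a quick look at the frame returned by the pd.read_csv("forestfires.csv")
# call above; this assumes the UCI forest-fires CSV is present in the working directory.
import pandas as pd
fire_data = pd.read_csv("forestfires.csv")  # assumed local file, same name as above
print(fire_data.shape)
print(fire_data.columns.tolist())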
# import Ipynb_importer
import pandas as pd
from .public_fun import *
# Global variables
class glv:
def _init():
global _global_dict
_global_dict = {}
def set_value(key,value):
_global_dict[key] = value
def get_value(key,defValue=None):
try:
return _global_dict[key]
except KeyError:
return defValue
## fun_01to06
class fun_01to06(object):
def __init__(self, data):
self.cf = [2, 1, 1, 17, 1, 2]
self.cf_a = hexlist2(self.cf)
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"起始符",
"命令标识",
"应答标志",
"唯一识别码",
"数据单元加密方式",
"数据单元长度"
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
"起始符":hex2str(self.oj["起始符"]),
"命令标识":dict_list_replace('02', self.oj['命令标识']),
"应答标志":dict_list_replace('03', self.oj['应答标志']),
"唯一识别码":hex2str(self.oj["唯一识别码"]),
"数据单元加密方式":dict_list_replace('05', self.oj['数据单元加密方式']),
"数据单元长度":hex2dec(self.oj["数据单元长度"]),
}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
self.mo = self.oj["命令标识"]
glv.set_value('data_f', self.next)
glv.set_value('data_mo', self.mo)
glv.set_value('data_01to07', self.o)
print('fun_01to06 done!')
## fun_07
class fun_07:
def __init__(self, data):
self.mo = glv.get_value("data_mo")
if self.mo == '01':
self.o = fun_07_01(glv.get_value('data_f'))
elif self.mo == '02' or self.mo == '03':
self.o = fun_07_02(glv.get_value('data_f'))
elif self.mo == '04':
self.o = fun_07_04(glv.get_value('data_f'))
elif self.mo == '05':
self.o = fun_07_05(glv.get_value('data_f'))
elif self.mo == '06':
self.o = fun_07_06(glv.get_value('data_f'))
else :
print('命令标识 (command identifier):', self.mo, 'is invalid')
self.c = fun_07_cursor(glv.get_value('data_f'))
self.oj = dict(self.o.oj, **self.c.oj)
self.oj2 = {'数据单元':self.oj}
self.ol = pd.merge(self.o.ol, self.c.ol, left_index=True, right_index=True)
self.pj = dict(self.o.pj, **self.c.pj)
self.pj2 = {'数据单元':self.pj}
self.pl = pd.merge(self.o.pl, self.c.pl, left_index=True, right_index=True)
print('fun_07 done!')
## fun_07_01
class fun_07_01(object):
def __init__(self, data):
self.cf = [6, 2, 20, 1, 1]
self.cf_a = hexlist2(self.cf)
self.n = hex2dec(data[self.cf_a[3]:self.cf_a[4]])
self.m = hex2dec(data[self.cf_a[4]:self.cf_a[5]])
self.cf.append(self.n*self.m)
self.cf_a = hexlist2(self.cf)
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"数据采集时间",
"登入流水号",
"ICCID",
"可充电储能子系统数",
"可充电储能系统编码长度",
"可充电储能系统编码",
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
self.oj2 = {'车辆登入': self.oj}
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
"数据采集时间":get_datetime(self.oj['数据采集时间']),
"登入流水号":hex2dec(self.oj['登入流水号']),
"ICCID":hex2str(self.oj['ICCID']),
"可充电储能子系统数":hex2dec(self.oj['可充电储能子系统数']),
"可充电储能系统编码长度":hex2dec(self.oj['可充电储能系统编码长度']),
"可充电储能系统编码":fun_07_01.fun_07_01_06(self.oj['可充电储能系统编码'], self.oj['可充电储能子系统数'], self.oj['可充电储能系统编码长度']),
}
self.pj2 = {'车辆登入': self.pj}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
glv.set_value('data_f', self.next)
glv.set_value('data_07_01', self.o)
print('fun_07_01 done!')
def fun_07_01_06(data, n, m):
if m=='00':
return "NA"
else :
n = hex2dec(n)
m = hex2dec(m) * 2
output = []
for i in range(n):
output_unit = hex2str(data[i * m: i* m +m])
output.append(output_unit)
return output
## fun_07_04
class fun_07_04(object):
def __init__(self, data):
self.cf = [6, 2]
self.cf_a = hexlist2(self.cf)
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"登出时间",
"登出流水号",
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
"登出时间":get_datetime(self.oj['登出时间']),
"登出流水号":hex2dec(self.oj['登出流水号']),
}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
glv.set_value('data_f', self.next)
glv.set_value('data_07_04', self.o)
print('fun_07_04 done!')
## fun_07_05
class fun_07_05(object):
def __init__(self, data):
self.cf = [6, 2, 12, 20, 1]
self.cf_a = hexlist2(self.cf)
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"平台登入时间",
"登入流水号",
"平台用户名",
"平台密码",
"加密规则",
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
"平台登入时间":get_datetime(self.oj['平台登入时间']),
"登入流水号":hex2dec(self.oj['登入流水号']),
"平台用户名":hex2str(self.oj['平台用户名']),
"平台密码":hex2str(self.oj['平台密码']),
"加密规则":dict_list_replace('07_05_05',self.oj['加密规则']),
}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
glv.set_value('data_f', self.next)
glv.set_value('data_07_05', self.o)
print('fun_07_05 done!')
## fun_07_06
class fun_07_06(object):
def __init__(self, data):
self.cf = [6, 2]
self.cf_a = hexlist2(self.cf)
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"登出时间",
"登出流水号",
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
print(self.oj)
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
"登出时间":get_datetime(self.oj['登出时间']),
"登出流水号":hex2dec(self.oj['登出流水号']),
}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
glv.set_value('data_f', self.next)
glv.set_value('data_07_06', self.o)
print('fun_07_06 done!')
## fun_07_02
class fun_07_02:
def __init__(self, data):
self.o = data
self.oj = {'数据采集时间': self.o[:12]}
self.ol = pd.DataFrame({'01':['01']})
self.pj = {'数据采集时间': get_datetime(self.oj['数据采集时间'])}
self.pl = pd.DataFrame({'01':['01']})
glv.set_value('data_f', data[12:])
glv.set_value('m_07_02', data[12:14])
self.mo_list = glv.get_value('model')
self.do_list = []
while(glv.get_value('m_07_02') in self.mo_list):
# record this sub-block identifier as executed
self.do_list.append(glv.get_value('m_07_02'))
# remove the executed identifier from the pending list
self.mo_list.remove(glv.get_value('m_07_02'))
if glv.get_value('m_07_02') == '01':
self.f_01 = fun_07_02_01(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '02':
self.f_02 = fun_07_02_02(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '03':
self.f_03 = fun_07_02_03(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '04':
self.f_04 = fun_07_02_04(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '05':
self.f_05 = fun_07_02_05(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '06':
self.f_06 = fun_07_02_06(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '07':
self.f_07 = fun_07_02_07(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '08':
self.f_08 = fun_07_02_08(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '09':
self.f_09 = fun_07_02_09(glv.get_value('data_f'))
else:
print("fun_07_02 done")
print(glv.get_value('data_f'))
print(glv.get_value('m_07_02'))
self.do_list.sort()
for i in self.do_list:
if i == '01':
self.oj = dict(self.oj,**self.f_01.oj2)
self.ol = pd.merge(self.ol, self.f_01.ol, left_index=True, right_index=True)
self.pj = dict(self.pj,**self.f_01.pj2)
self.pl = pd.merge(self.pl, self.f_01.pl, left_index=True, right_index=True)
elif i == '02':
self.oj = dict(self.oj,**self.f_02.oj2)
self.ol = pd.merge(self.ol, self.f_02.ol, left_index=True, right_index=True)
self.pj = dict(self.pj,**self.f_02.pj2)
self.pl =
|
pd.merge(self.pl, self.f_02.pl, left_index=True, right_index=True)
|
pandas.merge
|
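# Hedged aside: pd.merge with left_index=True/right_index=True joins two frames on their
# shared (positional) index, which is how the parsed field tables are stitched together
# above; the one-row frames here are invented.
import pandas as pd
left = pd.DataFrame({"登出时间": ["2020-01-01 10:00:00"]})
right = pd.DataFrame({"登出流水号": [12]})
merged = pd.merge(left, right, left_index=True, right_index=True)
print(merged)  # one row with both columns side by side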
import unittest
import pandas.util.testing as pdt
import pandas as pd
from qiime2 import Artifact
from qiime2.plugin.testing import TestPluginBase
from qiime2.plugins import gcn_norm
class GcnNormTests(TestPluginBase):
package = 'q2_gcn_norm.tests'
def test_gcn_norm_silva(self):
feature_id = ['taxon_1', 'taxon_2', 'taxon_3', 'taxon_4', 'taxon_5', 'taxon_6']
taxa = ['D_0__Bacteria;D_1__Firmicutes;D_2__Bacilli;D_3__Lactobacillales;'\
'D_4__Lactobacillaceae;D_5__Lactobacillus;D_6__Lactobacillus murinus',
'D_0__Bacteria;D_1__Firmicutes;D_2__Bacilli;D_3__Lactobacillales;'\
'D_4__Lactobacillaceae;D_5__Lactobacillus',
'D_0__Bacteria;D_1__Bacteroidetes;D_2__Bacteroidia;D_3__Bacteroidales;'\
'D_4__Rikenellaceae;D_5__Alistipes;D_6__Alistipes sp. N15.MGS-157',
'D_0__Bacteria;D_1__Bacteroidetes;D_2__Bacteroidia;D_3__Bacteroidales;'\
'D_4__Rikenellaceae;D_5__Alistipes',
'D_0__Bacteria;D_1__Bacteroidetes;D_2__Bacteroidia;D_3__Bacteroidales',
'Unassigned'
]
confidence = [0.99]*len(feature_id)
data_taxonomy = {'Feature ID': feature_id,'Taxon': taxa, 'Confidence': confidence}
df_taxa = pd.DataFrame(data_taxonomy)
df_taxa.set_index('Feature ID', inplace=True)
taxonomy_silva = Artifact.import_data('FeatureData[Taxonomy]', df_taxa)
df_table = pd.DataFrame([[10,10,10,10,10,10],
[ 5, 5, 5, 0, 0, 0]],
index=['sample A','sample B'],
columns=feature_id)
table = Artifact.import_data('FeatureTable[Frequency]', df_table)
table_gcn_normalized = gcn_norm.actions.copy_num_normalize(table, taxonomy_silva ,database='silva')
df_table_normalized = table_gcn_normalized.gcn_norm_table.view(pd.DataFrame)
copy_num = [6, 5.04, 2.12, 2.12, 3.52, 1]
df_true = df_table/copy_num
|
pdt.assert_frame_equal(df_table_normalized, df_true)
|
pandas.util.testing.assert_frame_equal
|
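# Hedged aside: the test above compares frames via the legacy pandas.util.testing path;
# recent pandas versions expose the same helper as pandas.testing.assert_frame_equal,
# sketched here with made-up frames.
import pandas as pd
import pandas.testing as pdt
expected = pd.DataFrame({"a": [1.0, 2.0]})
actual = pd.DataFrame({"a": [1.0, 2.0]})
pdt.assert_frame_equal(actual, expected)  # raises AssertionError on any mismatch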
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
if op == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
# series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
s + op(5)
op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
result = s1 / 2
expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint32', 'uint16',
'uint8']:
s2 =
|
Series([20, 30, 40], dtype=dtype)
|
pandas.Series
|
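# Hedged aside: constructing a pandas.Series with an explicit dtype, as the loop above
# does for several integer dtypes; the values are the same toy numbers.
import pandas as pd
s_uint16 = pd.Series([20, 30, 40], dtype="uint16")
print(s_uint16.dtype)        # uint16
print(s_uint16 + s_uint16)   # elementwise arithmetic on the typed Series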
#!/usr/bin/env python
"""Tests for `arcos_py` package."""
from numpy import int64
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from arcos4py import ARCOS
from arcos4py.tools._errors import noDataError
@pytest.fixture
def no_bin_data():
"""
pytest fixture to generate test data
"""
data = [item for i in range(10) for item in list(range(1, 11))]
m = [0 for i in range(100)]
d = {'id': data, 'time': data, 'm': m, 'x': data}
print(d)
df = pd.DataFrame(d)
return df
def test_empty_data(no_bin_data: pd.DataFrame):
with pytest.raises(noDataError, match='Input is empty'):
test_data = no_bin_data[no_bin_data['m'] > 0]
pos = ['x']
ts = ARCOS(
test_data, posCols=pos, frame_column='time', id_column='id', measurement_column='m', clid_column='clTrackID'
)
ts.trackCollev(eps=1, minClsz=1, nPrev=2)
def test_1_central_1_prev():
df_in = pd.read_csv('tests/testdata/1central_in.csv')
df_true = pd.read_csv('tests/testdata/1central_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true, check_dtype=False)
def test_1_central_2_prev():
df_in = pd.read_csv('tests/testdata/1central_in.csv')
df_true = pd.read_csv('tests/testdata/1central2prev_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1, minClsz=1, nPrev=2)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true, check_dtype=False)
def test_1_central_3D():
df_in = pd.read_csv('tests/testdata/1central3D_in.csv')
df_true = pd.read_csv('tests/testdata/1central3D_res.csv')
pos = ['x', 'y', 'z']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x', 'y', 'z'])
assert_frame_equal(out, df_true)
def test_1_central_growing():
df_in = pd.read_csv('tests/testdata/1centralGrowing_in.csv')
df_true = pd.read_csv('tests/testdata/1centralGrowing_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_2_central_growing():
df_in = pd.read_csv('tests/testdata/2centralGrowing_in.csv')
df_true = pd.read_csv('tests/testdata/2centralGrowing_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_2_with_1_common_symmetric():
df_in = pd.read_csv('tests/testdata/2with1commonSym_in.csv')
df_true = pd.read_csv('tests/testdata/2with1commonSym_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_2_with_1_common_asymmetric():
df_in = pd.read_csv('tests/testdata/2with1commonAsym_in.csv')
df_true = pd.read_csv('tests/testdata/2with1commonAsym_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_3_spreading_1_prev():
df_in = pd.read_csv('tests/testdata/3spreading_in.csv')
df_true = pd.read_csv('tests/testdata/3spreading_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_3_spreading_2_prev():
df_in = pd.read_csv('tests/testdata/3spreading_in.csv')
df_true = pd.read_csv('tests/testdata/3spreading2prev_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=2)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_5_overlapping_1_prev():
df_in = pd.read_csv('tests/testdata/5overlapping_in.csv')
df_true = pd.read_csv('tests/testdata/5overlapping_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_5_overlapping_2_prev():
df_in = pd.read_csv('tests/testdata/5overlapping_in.csv')
df_true = pd.read_csv('tests/testdata/5overlapping2prev_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=2)
out = out.drop(columns=['m', 'x'])
assert_frame_equal(out, df_true)
def test_6_overlapping():
df_in = pd.read_csv('tests/testdata/6overlapping_in.csv')
df_true = pd.read_csv('tests/testdata/6overlapping_res.csv')
pos = ['x']
ts = ARCOS(
df_in, posCols=pos, frame_column='time', id_column='trackID', measurement_column='m', clid_column='clTrackID'
)
ts.bin_col = 'm'
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['m', 'x'])
out['trackID'] = out['trackID'].astype(int64)
assert_frame_equal(out, df_true)
def test_split_from_single():
df_in = pd.read_csv('tests/testdata/1objSplit_in.csv')
df_true = pd.read_csv('tests/testdata/1objSplit_res.csv')
pos = ['pos']
ts = ARCOS(df_in, posCols=pos, frame_column='t', id_column='id', measurement_column=None, clid_column='collid')
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['pos'])
assert_frame_equal(out, df_true)
def test_split_from_2_objects():
df_in = pd.read_csv('tests/testdata/2objSplit_in.csv')
df_true = pd.read_csv('tests/testdata/2objSplit_res.csv')
pos = ['pos']
ts = ARCOS(df_in, posCols=pos, frame_column='t', id_column='id', measurement_column=None, clid_column='collid')
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['pos'])
assert_frame_equal(out, df_true)
def test_cross_2_objects():
df_in = pd.read_csv('tests/testdata/2objCross_in.csv')
df_true = pd.read_csv('tests/testdata/2objCross_res.csv')
pos = ['pos']
ts = ARCOS(df_in, posCols=pos, frame_column='t', id_column='id', measurement_column=None, clid_column='collid')
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['pos'])
assert_frame_equal(out, df_true)
def test_merge_split_2_objects_with_common():
df_in = pd.read_csv('tests/testdata/2objMergeSplitCommon_in.csv')
df_true = pd.read_csv('tests/testdata/2objMergeSplitCommon_res.csv')
pos = ['pos']
ts = ARCOS(df_in, posCols=pos, frame_column='t', id_column='id', measurement_column=None, clid_column='collid')
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['pos'])
assert_frame_equal(out, df_true)
## If the algorithm behaved differently (as it does in R), it would produce a different output for the collective events
def test_merge_split_2_objects_crossing():
df_in = pd.read_csv('tests/testdata/2objMergeSplitCross_in.csv')
df_true = pd.read_csv('tests/testdata/2objMergeSplitCross_res.csv')
pos = ['pos']
ts = ARCOS(df_in, posCols=pos, frame_column='t', id_column='id', measurement_column=None, clid_column='collid')
out = ts.trackCollev(eps=1.0, minClsz=1, nPrev=1)
out = out.drop(columns=['pos'])
|
assert_frame_equal(out, df_true)
|
pandas.testing.assert_frame_equal
|
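# Hedged aside: several tests above pass check_dtype=False so that, e.g., an int64 column
# can match a float64 column holding the same values; a minimal sketch with invented frames.
import pandas as pd
from pandas.testing import assert_frame_equal
out = pd.DataFrame({"collid": [1, 1, 2]})
expected = pd.DataFrame({"collid": [1.0, 1.0, 2.0]})
assert_frame_equal(out, expected, check_dtype=False)  # passes despite the dtype difference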
import torch
import torch.nn as nn
from torch.nn import functional as F
import math
from torch.utils.data import Dataset
import os
import pandas as pd
import pdb
import numpy as np
import math
import pickle
import random
from sklearn.utils import shuffle
class FinalTCGAPCAWG(Dataset):
def __init__(self, dataset_name = None,
data_dir=None,
mode='training',
curr_fold=1,
block_size=5000,
load=False,
addtriplettoken=False,
addpostoken=False,
addgestoken=False,
addrt=False,
nummut = 0,
frac = 0,
crossdata=False,
crossdatadir=None,
pcawg2tgca_class=False,
tcga2pcawg_class=False,
mutratio = '1-0-0-0-0-0',
adddatadir = None):
self.dataset_name = dataset_name
self.data_dir=data_dir
self.mode=mode
self.curr_fold=int(curr_fold)
self.block_size=block_size
self.load=load
self.addtriplettoken=addtriplettoken
self.addpostoken=addpostoken
self.addrt=addrt
self.nummut = nummut
self.frac = frac
self.addgestoken = addgestoken
self.crossdata= crossdata
self.crossdatadir = crossdatadir
self.adddatadir = adddatadir
self.pcawg2tgca_class=pcawg2tgca_class
self.tcga2pcawg_class=tcga2pcawg_class
self.NiSi = False
self.SNV = False
self.indel = False
self.SVMEI = False
self.Normal = False
if self.nummut > 0 :
self.block_size = self.nummut
if self.dataset_name == 'finalpcawg':
self.training_fold = pd.read_csv('./notebookpcawg/pcawg_trainfold' + str(self.curr_fold) + '.csv',index_col=0)
self.validation_fold = pd.read_csv('./notebookpcawg/pcawg_valfold' + str(self.curr_fold) + '.csv',index_col=0)
elif self.dataset_name == 'finaltcga':
self.training_fold = pd.read_csv('./notebookpcawg/tcga_trainfold' + str(self.curr_fold) + '.csv',index_col=0)
self.validation_fold = pd.read_csv('./notebookpcawg/tcga_valfold' + str(self.curr_fold) + '.csv',index_col=0)
elif self.dataset_name == 'westcga':
self.training_fold = pd.read_csv('./notebookpcawg/tcgawes_trainfold' + str(self.curr_fold) + '.csv',index_col=0)
self.validation_fold = pd.read_csv('./notebookpcawg/tcgawes_valfold' + str(self.curr_fold) + '.csv',index_col=0)
elif self.dataset_name == 'wgspcawg':
self.training_fold = pd.read_csv('./notebookpcawg/pcawgwgs_trainfold' + str(self.curr_fold) + '.csv',index_col=0)
self.validation_fold = pd.read_csv('./notebookpcawg/pcawgwgs_valfold' + str(self.curr_fold) + '.csv',index_col=0)
if self.adddatadir is not None:
adddata = pd.DataFrame(columns=self.validation_fold.columns)
adddata.columns = self.validation_fold.columns
folder = os.listdir(self.adddatadir)
for i in folder:
samples = os.listdir(self.adddatadir + i )
for j in samples:
if j[0:3] == 'new':
counter = pd.read_csv(self.adddatadir + i + '/count_new_' + j[4:],index_col=0)
listall = [i,j[4:]] + counter['0'].values.tolist() + [1]
pds = pd.DataFrame(listall)
pds = pds.T
pds.columns=self.validation_fold.columns
adddata = adddata.append(pds)
adddata = adddata.reset_index(drop=True)
self.adddata = adddata
#self.validation_fold = self.validation_fold.append(self.adddata)
self.validation_fold = self.adddata
self.data_dir = self.adddatadir
self.load_classinfo()
self.vocab_mutation = pd.read_csv('./notebookpcawg/dictMutation.csv',index_col=0)
self.allSNV_index = 0
self.mutratio = mutratio.split('-')
self.mutratio = [float(i) for i in self.mutratio]
if self.mutratio[0]>0:
self.NiSi = True
if self.mutratio[1]>0:
self.SNV = True
if self.mutratio[2]>0:
self.indel = True
if self.mutratio[3]>0:
self.SVMEI = True
if self.mutratio[4]>0:
self.Normal = True
if self.NiSi:
vocabsize = len(self.vocab_mutation.loc[self.vocab_mutation['typ']=='NiSi'])
if self.SNV:
vocabsize = vocabsize + len(self.vocab_mutation.loc[self.vocab_mutation['typ']=='SNV'])
if self.indel:
vocabsize = vocabsize + len(self.vocab_mutation.loc[self.vocab_mutation['typ']=='indel'])
if self.SVMEI:
vocabsize = vocabsize + len(self.vocab_mutation.loc[self.vocab_mutation['typ'].isin(['MEI','SV'])])
if self.Normal:
vocabsize = vocabsize + len(self.vocab_mutation.loc[self.vocab_mutation['typ']=='Normal'])
self.vocab_size = vocabsize + 1
#print(self.vocab_size)
#pdb.set_trace()
self.pd_position_vocab = pd.read_csv('./notebookpcawg/dictChpos.csv',index_col=0)
self.pd_ges_vocab = pd.read_csv('./notebookpcawg/dictGES.csv',index_col=0)
self.position_size = len(self.pd_position_vocab) + 1
self.ges_size = len(self.pd_ges_vocab) + 1
self.rt_size = 1
self.midstring = '.' + self.dataset_name + str(mutratio) + str(int(self.addtriplettoken)) + str(int(self.addpostoken)) + str(int(self.addgestoken)) + str(int(self.addrt)) + '/'
if self.mode == 'validation':
if self.crossdata:
os.makedirs(self.crossdatadir + self.midstring, exist_ok=True)
self.data_dir = self.crossdatadir
#pdb.set_trace()
else:
os.makedirs(self.data_dir + self.midstring, exist_ok=True)
def load_classinfo(self):
if self.dataset_name == 'finalpcawg':
num_class = os.listdir(self.data_dir)
name_class = [i for i in num_class if len(i.split('.'))==1]
name_class = sorted(name_class)
n_samples = []
for idx,nm_class in enumerate(name_class):
samples = os.listdir(self.data_dir+nm_class)
samples = [x for x in samples if x[:10]=='count_new_']
n_samples.append(len(samples))
data = list(zip(name_class, np.arange(len(name_class)),n_samples))
self.pd_class_info = pd.DataFrame(data,columns=['class_name','class_index','n_samples'])
else:
num_class = os.listdir(self.data_dir)
name_class = [i for i in num_class if len(i.split('.'))==1]
name_class = sorted(name_class)
n_samples = []
for idx,nm_class in enumerate(name_class):
samples = os.listdir(self.data_dir+nm_class)
samples = [x for x in samples if x[:10]=='count_new_']
n_samples.append(len(samples))
data = list(zip(name_class, np.arange(len(name_class)),n_samples))
self.pd_class_info = pd.DataFrame(data,columns=['class_name','class_index','n_samples'])
if self.crossdata:
self.crossdatadir = self.data_dir
num_class = os.listdir(self.crossdatadir)
name_class = [i for i in num_class if len(i.split('.'))==1]
name_class = sorted(name_class)
n_samples = []
for idx,nm_class in enumerate(name_class):
samples = os.listdir(self.crossdatadir+nm_class)
samples = [x for x in samples if x[:10]=='count_new_']
n_samples.append(len(samples))
data = list(zip(name_class, np.arange(len(name_class)),n_samples))
self.pd_class_infoto = pd.DataFrame(data,columns=['class_name','class_index','n_samples'])
self.pd_class_crossdata = pd.read_csv('./extfile/crossdata.csv',index_col =0)
#pdb.set_trace()
def get_data(self,idx):
if self.mode=='training':
instances=self.training_fold.iloc[idx]
elif self.mode=='validation':
instances=self.validation_fold.iloc[idx]
elif self.mode == 'testing':
instances=self.test_data.iloc[idx]
#if self.prioritize:
# instances=self.training_fold.loc[self.training_fold['samples']=='f8593ac0-9480-22a0-e040-11ac0d48697a.csv']
# instances=instances.iloc[0]
target_name = instances['nm_class']
#if self.crossdata:
# target_name = self.pd_class_crossdata.loc[self.pd_class_crossdata['tcga_class']==target_name]['class_name'].to_list()[0]
samples = instances[1]
avail_count = np.asarray(self.mutratio) * self.block_size
row_count = instances[['NiSi','SNV','indel','SVMEI','Normal']].to_numpy()
diff = avail_count - row_count
pos = diff>0
avail_count1 = row_count * pos
diff = row_count > avail_count
avail_count2 = avail_count * diff
avail_count3 = avail_count1 + avail_count2
shadowavail_count3 = avail_count3
shadowavail_count3[0] = row_count[0]
if sum(shadowavail_count3) > self.block_size:
diff = self.block_size - sum(avail_count3)
shadowavail_count3[0] = diff + avail_count3[0]
avail_count2 = shadowavail_count3.astype(int)
if avail_count2[0]<0:
secondmax = avail_count2[np.argmax(avail_count2)]
avail_count2 = avail_count2 * 0.7
avail_count = avail_count2
diff = avail_count - row_count
pos = diff>0
avail_count1 = row_count * pos
diff = row_count > avail_count
avail_count2 = avail_count * diff
avail_count3 = avail_count1 + avail_count2
shadowavail_count3 = avail_count3
shadowavail_count3[0] = row_count[0]
if sum(shadowavail_count3) > self.block_size:
diff = self.block_size - sum(avail_count3)
shadowavail_count3[0] = diff + avail_count3[0]
avail_count2 = shadowavail_count3.astype(int)
avail_count = avail_count2
def grab(pd_input,grabcol):
return pd_input[grabcol]
def allgrab(grabcol):
if self.NiSi:
#pdb.set_trace()
pd_nisi = pd.read_csv(self.data_dir + target_name + '/' + 'NiSi_new_' + samples,index_col=0)
pd_nisi = pd_nisi.sample(n = avail_count[0], replace = False)
pd_nisi = grab(pd_nisi,grabcol)
if self.SNV:
pd_SNV = pd.read_csv(self.data_dir + target_name + '/' + 'SNV_new_' + samples,index_col=0)
pd_SNV = pd_SNV.sample(n = avail_count[1], replace = False)
pd_SNV = grab(pd_SNV,grabcol)
pd_nisi = pd_nisi.append(pd_SNV)
if self.indel:
pd_indel = pd.read_csv(self.data_dir + target_name + '/' + 'indel_new_' + samples,index_col=0)
pd_indel = pd_indel.sample(n = avail_count[2], replace = False)
pd_indel = grab(pd_indel,grabcol)
pd_nisi = pd_nisi.append(pd_indel)
if self.SVMEI:
pd_meisv = pd.read_csv(self.data_dir + target_name + '/' + 'MEISV_new_' + samples,index_col=0)
pd_meisv = pd_meisv.sample(n = avail_count[3], replace = False)
pd_meisv = grab(pd_meisv,grabcol)
pd_nisi = pd_nisi.append(pd_meisv)
if self.Normal:
pd_normal = pd.read_csv(self.data_dir + target_name + '/' + 'Normal_new_' + samples,index_col=0)
pd_normal = pd_normal.sample(n = avail_count[4], replace = False)
pd_normal = grab(pd_normal,grabcol)
pd_nisi = pd_nisi.append(pd_normal)
pd_nisi = pd_nisi.fillna(0)
return pd_nisi
if self.addtriplettoken:
if self.mode=='training' :
pd_nisi = allgrab(['triplettoken'])
else:
filename = self.data_dir + self.midstring + 'val_' + samples
if os.path.isfile(filename):
try:
pd_nisi =
|
pd.read_csv(filename,index_col=0)
|
pandas.read_csv
|
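# Hedged aside: the caching pattern above writes a per-sample CSV once and re-reads it
# with index_col=0 so the saved index does not come back as an extra column; the file
# name here is a placeholder.
import pandas as pd
df_cached = pd.DataFrame({"triplettoken": [5, 9, 2]})
df_cached.to_csv("val_sample_cache.csv")                      # hypothetical cache file
reloaded = pd.read_csv("val_sample_cache.csv", index_col=0)   # round-trips without a duplicate index column
print(reloaded.equals(df_cached))                             # True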
import datetime
from datetime import datetime as dt
from datetime import timedelta as td
import os
import re as reg
from math import sqrt
import matplotlib.pyplot as plt
import matplotlib.pylab as plb
import numpy as np
from sklearn.ensemble import VotingRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
from plotly.subplots import make_subplots
import plotly.graph_objs as go
from plotly.offline import plot
from fbprophet import Prophet
from fbprophet.plot import plot_plotly
import pandas as pd
#Constant
EPOCH_TIME = datetime.datetime.utcfromtimestamp(0)
#Functions
def access_csv_url(url):
"""This python module downloads the csv from a url passed
and return the dataframe else raises a FileNotFoundError exception"""
df = pd.read_csv(url)
if df.empty is False:
return df
else:
raise FileNotFoundError
def get_company_name(company_df):
"""lvl1 @2 : Following line loads the initial data of list of companies"""
company_symbol = ''
while (company_df.empty is False and company_symbol == ''):
surety_value = input("Are you sure of company symbol you want to search? (Y/N/E(Exit)) : ")
if surety_value.upper() == 'Y' or surety_value.upper() == 'N':
if surety_value.upper() == 'N':
search_dict = company_search('', company_df)
if len(search_dict) == 0:
print("\n No related results found, Give it another Try!!")
continue
elif len(search_dict) > 0:
if len(search_dict) > 10:
print("Showing Top 10 results for your search which gave ", len(search_dict), " results")
else:
print("found ", str(len(search_dict)), "results")
print(" \t Symbol \t Name")
print("\t _________", "\t", "_________")
for index, key in enumerate(search_dict.keys()):
if index+1 == 11:
break
else:
print("\t", key, "\t\t", search_dict[key])
surety_value = input("Have you found your symbol yet ? Y/N : ")
if surety_value.upper() == 'N' or surety_value.upper() == 'Y':
if surety_value.upper() == 'Y':
company_symbol = input("Enter the final symbol : ")
search_dict = company_search(company_symbol, company_df)
if len(search_dict) > 1:
print("Your search resulted into multiple results, please reselect your company!")
company_symbol = '' #resetting the value so that value can be input again
elif len(search_dict) == 0:
print("Your search yielded no results")
company_symbol = ''
else:
continue
else:
print("please choose only Y or N or y or n or E or e")
continue
elif surety_value.upper() == 'Y':
company_symbol = input("Enter the final symbol : ")
search_dict = company_search(company_symbol, company_df)
if len(search_dict) > 1:
print("Your search resulted into multiple results, please reselect your company!")
company_symbol = '' #resetting the value so that value can be input again
elif len(search_dict) == 0:
print("Your search yielded no results")
company_symbol = ''
elif surety_value.upper() == 'E':
company_symbol = ''
break
else:
print("please choose only Y or N or y or n")
continue
return company_symbol.upper()
def file_exists(filename, filepath):
file_tree = [file for file in os.listdir(filepath) if os.path.isfile(file)]
if filename not in file_tree:
return False
else:
return True
def update_company_data(company_symbol):
"""If a file does not exit then data will be downloaded from the website and save in the file with that company name"""
file_name = (str(company_symbol)+'.csv')
existing_file = file_exists(file_name, '.')
end_date = dt.date(dt.utcnow() - td(seconds=14400))
if existing_file is False:
alpha_url = f"http://quotes.wsj.com/{company_symbol}/historical-prices/download?MOD_VIEW=page%20&num_rows=7500&range_days=7500&startDate=11/01/1970%20&endDate={end_date}" #mm/dd/yyyy
company_data = pd.read_csv(alpha_url)
company_data.columns = [col_name.lstrip() for col_name in company_data.columns]
company_data['Date'] = pd.to_datetime(company_data['Date'], format='%m/%d/%y')
company_data['Date'] = company_data['Date'].dt.date
company_data = company_data.sort_values(by='Date')
if not company_data.empty:
company_data.to_csv(f"{company_symbol}.csv")
else:
"""if the file exists, read the last line and update the data until todays date"""
company_data = pd.read_csv(file_name, index_col=0)
company_data['Date'] = company_data['Date'].apply(lambda x: datetime.datetime.strptime(x, '%Y-%m-%d').date())
row = company_data.sort_values('Date').tail(1)
date = row['Date']
date = str(date).split()
date = datetime.datetime.strptime(date[1], '%Y-%m-%d').date() + td(days=1)
if end_date != date:
remaining_df = pd.read_csv('http://quotes.wsj.com/'+company_symbol+'/historical-prices/download?MOD_VIEW=page%20&num_rows=7500&range_days=7500&startDate='+str(date.month)+'/'+str(date.day)+'/'+str(date.year)+'%20&endDate='+str(end_date.month)+'/'+str(end_date.day)+'/'+str(end_date.year))
remaining_df.columns = company_data.columns
remaining_df['Date'] = pd.to_datetime(remaining_df['Date'], format='%m/%d/%y')
remaining_df['Date'] = remaining_df['Date'].dt.date
company_data = company_data.append(remaining_df, sort=False)
company_data.columns = [col_name.lstrip() for col_name in company_data.columns]
company_data['Date'] = pd.to_datetime(company_data['Date'], format='%Y-%m-%d')
company_data['Date'] = company_data['Date'].dt.date
company_data = company_data.sort_values(by='Date')
company_data.reset_index(inplace=True, drop=True)
company_data.to_csv(str(company_symbol)+'.csv')
return company_data
def print_menu(company_symbol):
"""This prints the main user menu with dynamic company name"""
print("/" + "-" * 56 + "\\")
#print(f"\t\t USER MENU: {company_symbol}")
print(f"\t Stock Analysis MENU of {company_symbol}\t\t\t\t ")
print("|" + "-" * 56 + "|")
print("| 1. Current Data\t\t\t\t\t |")
print("| 2. Summary Statistic \t\t\t\t |")
print("| 3. Raw time-series \t\t\t\t\t |")
print("| 4. Linear trend line \t\t\t\t |")
print("| 5. Moving Averages \t\t\t\t\t |")
print("| 6. Predict close price for a day \t\t\t |")
print("| 7. Enhance Prediction \t\t\t\t |")
print("| 8. Predict close price for N-future days\t\t |")
print("| 9. Compare 2 companies using candlestick chart\t |")
print("| 10. Analyse with new start and end date\t\t |")
print("| 11. Search New Company \t\t\t\t |")
print("| 12. Exit \t\t\t\t\t\t |")
print("\\" + "-" * 56 + "/")
def date_validation(start_date, end_date):
try:
#Check the format of the start date; it should be in the format YYYY-MM-DD
datetime.datetime.strptime(start_date, "%Y-%m-%d")
except ValueError:
#If any errors, raise an exception
print("Incorrect Start Date Format")
return '1'
try:
#Check the format of the end date; it should be in the format YYYY-MM-DD
datetime.datetime.strptime(end_date, "%Y-%m-%d")
except ValueError:
#If any errors, raise an exception
print("Incorrect End Date Format")
return '2'
try:
#Start Date cannot be later than today
if datetime.datetime.strptime(start_date, "%Y-%m-%d") >= dt.today():
raise ValueError
except ValueError:
#If any errors, raise an exception
print("Start Date cannot be greater than today's date")
return '3'
try:
#End date cannot be greater than today
if datetime.datetime.strptime(end_date, "%Y-%m-%d") >= dt.today():
raise ValueError
except ValueError:
#If any errors, raise an exception
print("End Date cannot be greater than today's date")
return '4'
try:
#Start date cannot be greater than end date
if datetime.datetime.strptime(start_date, "%Y-%m-%d") >= datetime.datetime.strptime(end_date, "%Y-%m-%d"):
raise ValueError
except ValueError:
print("Start Date should be less than End date")
return '5'
def period_validation(start_date, end_date, period):
try:
#Period should be greater than 0 and less than days between start date and end date
if period < (end_date - start_date).days and period > 0:
return False
else:
raise ValueError
except ValueError:
print('Incorrect value of Window')
return '1'
def current_data(company_symbol):
"""This API gives statistical data of the last working business day"""
last_stats_url = "https://www.alphavantage.co/query?function=GLOBAL_QUOTE&symbol=" + company_symbol + "&apikey=T11CBFXU1UTRD2KG&datatype=csv"
last_stats = pd.read_csv(last_stats_url)
last_stat_transposed = last_stats.T
print(last_stat_transposed)
def norm_values_plot(norm_values, start_date, end_date, company_symbol):
"""Plotting of normalised values"""
fig = go.Figure(data=[go.Scatter(x=norm_values['Date'], y=norm_values['NormValues'], name="Normalised Prices")])
fig.update_layout(title=f"Normalised Prices of {company_symbol}", xaxis_title=f"Date range from {start_date} to {end_date}",
yaxis_title="Normalised Prices", showlegend=True, font=dict(size=18))
plot(fig, filename=f'{company_symbol}_normalised_graph.html')
def raw_time_series(required_data, company_symbol, start_date, end_date):
"""Plotting Closing Values of the company symbol selected"""
required_data.Date = pd.to_datetime(required_data['Date'])
fig = go.Figure(data=go.Scatter(x=required_data['Date'], y=required_data['Close'], name='Closing price'),
layout=go.Layout(title=go.layout.Title(text="Linear Raw Time Series", font=dict(size=18)),
yaxis=go.layout.YAxis(title=go.layout.yaxis.Title(text=f"Closing price of {company_symbol}", font=dict(size=18))),
xaxis=go.layout.XAxis(title=go.layout.xaxis.Title(text=f"Date range from {start_date} to {end_date}", font=dict(size=18)))))
fig.update_layout(showlegend=True)
plot(fig, filename=f'{company_symbol}_raw_time_graph.html')
def linear_trend_line(required_data, start_date, end_date, company_symbol):
"""Plotting linear trend line which indicates if the closing price at the end date
is higher or lower from the closing price during the start date"""
required_data.Date = pd.to_datetime(required_data['Date'])
required_data.reset_index(inplace=True, drop=True)
plt.plot(required_data['Close'], label='Closing Price')
plt.ylabel(f'Closing Price of {company_symbol}')
plt.xlabel(f'Date range from {start_date} to {end_date}')
plt.title(f'Linear Trend line of {company_symbol}', loc='center')
plt.tick_params(
axis='x', # changes apply to the x-axis
which='major', # only major ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False)
z = np.polyfit(required_data.index, required_data.Close, 1)
p = np.poly1d(z)
plb.plot(required_data.index, p(required_data.index), 'm--', label='Linear Trend Line')
plb.legend(loc='upper left')
plb.plot()
required_data[['ds', 'y']] = required_data[['Date', 'Close']]
df = required_data[['ds', 'y']]
m = Prophet(weekly_seasonality=False, yearly_seasonality=False, daily_seasonality=False, n_changepoints=15)
m.fit(df)
future = m.make_future_dataframe(periods=0)
forecast = m.predict(future)
m.plot_components(forecast)
plt.show()
#Create a Subplot showing moving average graphs
#https://plot.ly/python/subplots/
#https://www.learndatasci.com/tutorials/python-finance-part-3-moving-average-trading-strategy/
def moving_average_all(dataframe, window, start_date, end_date, company_symbol):
"""Calculate Simple Moving Average"""
"""System takes above data and uses rolling property"""
"""of dataframe and use plotly over timeseries index to plot the graph"""
subset_df = dataframe
subset_df = subset_df.sort_values(by='Date')
subset_df.drop_duplicates(inplace=True)
subset_df.set_index('Date', inplace=True)
window_df = subset_df.rolling(window=window).mean()
roller = (window + 1)
series = np.arange(1, roller)
WMASeriesData =
|
pd.DataFrame()
|
pandas.DataFrame
|
import datetime
from unittest import TestCase
import numpy as np
import pandas as pd
from mlnext import pipeline
class TestColumnSelector(TestCase):
def setUp(self):
data = np.arange(8).reshape(-1, 2)
cols = ['a', 'b']
self.df = pd.DataFrame(data, columns=cols)
def test_select_columns(self):
t = pipeline.ColumnSelector(keys=['a'])
expected = self.df.loc[:, ['a']]
result = t.fit_transform(self.df)
pd.testing.assert_frame_equal(result, expected)
class TestColumnDropper(TestCase):
def setUp(self):
data = np.arange(8).reshape(-1, 2)
cols = ['a', 'b']
self.df = pd.DataFrame(data, columns=cols)
def test_drop_columns(self):
t = pipeline.ColumnDropper(columns=['b'])
expected = self.df.loc[:, ['a']]
result = t.fit_transform(self.df)
pd.testing.assert_frame_equal(result, expected)
def test_drop_columns_verbose(self):
t = pipeline.ColumnDropper(columns=['b'], verbose=True)
expected = self.df.loc[:, ['a']]
result = t.transform(self.df)
pd.testing.assert_frame_equal(result, expected)
def test_drop_missing_columns(self):
t = pipeline.ColumnDropper(columns=['c'])
with self.assertWarns(Warning):
t.transform(self.df)
class TestColumnRename(TestCase):
def test_rename_columns(self):
t = pipeline.ColumnRename(lambda x: x.split('.')[-1])
df = pd.DataFrame(columns=['a.b.c', 'd.e.f'])
expected = pd.DataFrame(columns=['c', 'f'])
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
class TestNaDropper(TestCase):
def test_drop_na(self):
t = pipeline.NaDropper()
df = pd.DataFrame([1, 0, pd.NA])
expected = pd.DataFrame([1, 0], dtype=object)
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
class TestClip(TestCase):
def test_clip(self):
t = pipeline.Clip(lower=0.5, upper=1.5)
df = pd.DataFrame([[0.1, 0.4, 0.6, 0.8, 1.2, 1.5]])
expected = pd.DataFrame([[0.5, 0.5, 0.6, 0.8, 1.2, 1.5]])
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
class TestDatetimeTransformer(TestCase):
# FIXME: fails in gitlab pipeline but succeeds locally
def test_datetime(self):
t = pipeline.DatetimeTransformer(columns=['time'])
df = pd.DataFrame([['2021-01-04 14:12:31']], columns=['time'])
expected = pd.DataFrame([[datetime.datetime(2021, 1, 4, 14, 12, 31)]],
columns=['time'])
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
def test_datetime_missing_cols(self):
t = pipeline.DatetimeTransformer(columns=['t'])
df = pd.DataFrame([['2021-01-04 14:12:31']], columns=['time'])
with self.assertRaises(ValueError):
t.fit_transform(df)
class TestNumericTransformer(TestCase):
# FIXME: fails in gitlab pipeline but succeeds locally
def test_numeric(self):
t = pipeline.NumericTransformer(columns=['1'])
df = pd.DataFrame([0, 1], columns=['1'], dtype=object)
expected = pd.DataFrame([0, 1], columns=['1'], dtype=np.int64)
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
def test_numeric_missing_column(self):
t = pipeline.NumericTransformer(columns=['2'])
df = pd.DataFrame([0, 1], columns=['1'], dtype=object)
with self.assertRaises(ValueError):
t.fit_transform(df)
def test_numeric_additional_column(self):
t = pipeline.NumericTransformer(columns=['2'])
df = pd.DataFrame([[0, 1]], columns=['1', '2'], dtype=object)
expected = pd.DataFrame([[0, 1]], columns=['1', '2'], dtype=object)
expected['2'] = expected['2'].apply(pd.to_numeric)
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
def test_numeric_multiple_column(self):
t = pipeline.NumericTransformer(columns=['1', '2'])
df = pd.DataFrame([[0, 1]], columns=['1', '2'], dtype=object)
expected = pd.DataFrame([[0, 1]], columns=['1', '2'])
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
def test_numeric_all_column(self):
t = pipeline.NumericTransformer()
df = pd.DataFrame([[0, 1]], columns=['1', '2'], dtype=object)
expected = pd.DataFrame([[0, 1]], columns=['1', '2'])
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
class TestTimeframeExtractor(TestCase):
def setUp(self):
self.dates = [datetime.datetime(2021, 10, 1, 9, 50, 0),
datetime.datetime(2021, 10, 1, 10, 0, 0),
datetime.datetime(2021, 10, 1, 11, 0, 0),
datetime.datetime(2021, 10, 1, 12, 0, 0),
datetime.datetime(2021, 10, 1, 12, 10, 0)]
self.values = np.arange(len(self.dates))
self.df = pd.DataFrame(zip(self.dates, self.values),
columns=['time', 'value'])
def test_timeframe_extractor(self):
t = pipeline.TimeframeExtractor(
time_column='time', start_time=datetime.time(10, 0, 0),
end_time=datetime.time(12, 0, 0), verbose=True)
expected = pd.DataFrame(zip(self.dates[1:-1], np.arange(1, 4)),
columns=['time', 'value'])
result = t.fit_transform(self.df)
pd.testing.assert_frame_equal(result, expected)
def test_timeframe_extractor_invert(self):
t = pipeline.TimeframeExtractor(
time_column='time', start_time=datetime.time(10, 0, 0),
end_time=datetime.time(12, 0, 0), invert=True)
expected = pd.DataFrame(zip([self.dates[0], self.dates[-1]],
np.array([0, 4])),
columns=['time', 'value'])
result = t.fit_transform(self.df)
pd.testing.assert_frame_equal(result, expected)
class TestDateExtractor(TestCase):
def setUp(self):
self.dates = [datetime.datetime(2021, 10, 1, 9, 50, 0),
datetime.datetime(2021, 10, 2, 10, 0, 0),
datetime.datetime(2021, 10, 3, 11, 0, 0),
datetime.datetime(2021, 10, 4, 12, 0, 0),
datetime.datetime(2021, 10, 5, 12, 10, 0)]
self.values = np.arange(len(self.dates))
self.df = pd.DataFrame(zip(self.dates, self.values),
columns=['date', 'value'])
def test_date_extractor(self):
t = pipeline.DateExtractor(
date_column='date', start_date=datetime.date(2021, 10, 2),
end_date=datetime.date(2021, 10, 4), verbose=True)
expected = pd.DataFrame(zip(self.dates[1:-1], np.arange(1, 4)),
columns=['date', 'value'])
result = t.fit_transform(self.df)
pd.testing.assert_frame_equal(result, expected)
def test_date_extractor_invert(self):
t = pipeline.DateExtractor(
date_column='date', start_date=datetime.date(2021, 10, 2),
end_date=datetime.date(2021, 10, 4), invert=True)
expected = pd.DataFrame(zip([self.dates[0], self.dates[-1]],
np.array([0, 4])),
columns=['date', 'value'])
result = t.fit_transform(self.df)
pd.testing.assert_frame_equal(result, expected)
class TestValueMapper(TestCase):
def test_value_mapper_one_column(self):
t = pipeline.ValueMapper(columns=['b'], classes={2.0: 1.0})
df = pd.DataFrame(np.ones((3, 2)) * 2, columns=['a', 'b'])
expected = pd.DataFrame(zip(np.ones((3, 1)) * 2, np.ones((3, 1))),
columns=['a', 'b'], dtype=np.float64)
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
def test_value_mapper_all_columns(self):
t = pipeline.ValueMapper(columns=['a', 'b'], classes={2.0: 1.0})
df = pd.DataFrame(np.ones((3, 2)) * 2, columns=['a', 'b'])
expected = pd.DataFrame(np.ones((3, 2)), columns=['a', 'b'],
dtype=np.float64)
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
def test_value_mapper_missing_value(self):
t = pipeline.ValueMapper(columns=['a', 'b'], classes={2.0: 1.0})
df = pd.DataFrame(np.ones((3, 2)), columns=['a', 'b'])
expected = pd.DataFrame(np.ones((3, 2)), columns=['a', 'b'],
dtype=np.float64)
with self.assertWarns(Warning):
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
class TestSorter(TestCase):
def setUp(self):
self.df = pd.DataFrame({
'a': [2, 3, 1, 4],
'b': ['A', 'D', 'C', 'B']
})
def test_sorter(self):
t = pipeline.Sorter(columns=['a'])
result = t.fit_transform(self.df)
expected = self.df.copy().sort_values(by=['a'])
|
pd.testing.assert_frame_equal(result, expected)
|
pandas.testing.assert_frame_equal
|
import scrapy, re, pdb, pandas as pd, requests
from urllib.parse import urlparse
from bs4 import BeautifulSoup
from sqlalchemy import create_engine
from crawl_htmls.items import RaResult
with open('psql_engine.txt') as f:
psql = create_engine(f.read())
unix_offset = (pd.to_datetime('now') - pd.DateOffset(days=30)).replace(hour=0, minute=0, second=0).value // 10**9
q = f'''
SELECT * FROM
(SELECT fbfeeds.link, fbfeeds.fbfeeds_id FROM fbfeeds
LEFT JOIN htmls ON (fbfeeds.link = htmls.link)
WHERE htmls.link ISNULL AND fbfeeds.unix_time > {unix_offset}
) AS fb
FULL OUTER JOIN
(SELECT rss.link, rss.rss_id FROM rss
LEFT JOIN htmls ON (rss.link = htmls.link)
WHERE htmls.link ISNULL AND rss.published_parsed > {unix_offset}
) AS r
ON (r.link = fb.link);
'''
raw = pd.read_sql(q, psql)
raw.columns = ['link', 'fbfeeds_id', 'link1', 'rss_id']
raw.link = raw.link.fillna(raw.link1)
del(raw['link1'])
raw = raw.sample(frac=1)
# raw = pd.read_sql(f'''
# select distinct link, fbfeeds_id, rss_id from htmls
# where (link ~~ '%%hromadske.ua%%' and is_other isnull)
# or link ~~ '%%alternatio.org%%';
# ''', psql)
class SitesCrawler(scrapy.Spider):
name = 'ra_htmls'
psql = psql
custom_settings = {
'CONCURRENT_ITEMS': 300,
'URLLENGTH_LIMIT': 10000,
}
def start_requests(self):
urls = raw.sample(frac=1).values
blocked = ['tvzvezda.ru', 'tk-union.tv', 'novorossiatv.com', 'ria.ru',
'sputniknews.com', 'rian.com.ua', 'news-front.info',
'armiyadnr.su', 'lug-info.com', 'dnr-live.ru', 'dnr-pravda.ru',
'republic-tv.ru', 'dan-news.info']
for link, fbfeeds_id, rss_id in urls:
domain = urlparse(link).netloc
if domain == 'hromadske.ua':
link = link.replace('hromadske.ua', 'ru.hromadske.ua')
if domain in blocked:
yield scrapy.Request(link, callback=self.parse,
meta={'link':link, 'fbfeeds_id': fbfeeds_id, 'rss_id': rss_id, 'proxy': 'http://localhost:8123',})
else:
yield scrapy.Request(link, callback=self.parse,
meta={'link':link, 'fbfeeds_id': fbfeeds_id, 'rss_id': rss_id,})
def parse(self, r):
link = r.meta['link']
domain = urlparse(link).netloc
page_links = filter(lambda a: 'http' in a,
r.xpath('//a/@href').extract())
page_links = map(lambda a: urlparse(a).netloc.replace('www.', ' '), page_links)
page_links = list(set(filter(lambda a: a != domain, page_links)))
if 'alternatio.org' in link or 'hromadske.ua' in link:
soup = BeautifulSoup(r.body.decode('utf8', 'ignore'), 'lxml')
else:
soup = BeautifulSoup(r.body, 'lxml')
[t.extract() for t in soup.find_all()
if len(t.text.strip()) == 0
or t.name in ['img', 'iframe', 'script', 'link', 'footer', 'meta']
or re.search('nav|p[io]dval|footer|copyright|re[ck]lam', str(list(t.attrs.values())))
]
try:
d = requests.post('http://localhost:3000',
data={'raw_html': str(soup)}
).json()
assert len(d['title']) > 0
except Exception as err:
self.logger.info(f'Failed {r.url}: {err}')
return
item = RaResult()
item['link'] = r.meta['link']
item['real_url'] = r.url
item['ra_title'] = d['title']
item['ra_summary'] = re.sub(r'\s+', ' ', d['content'])
item['rss_id'] = r.meta['rss_id'] if pd.notnull(r.meta['rss_id']) else 'null'
item['fbfeeds_id'] = r.meta['fbfeeds_id'] if
|
pd.notnull(r.meta['fbfeeds_id'])
|
pandas.notnull
|
from __future__ import annotations
import numpy as np
import pandas as pd
from sklearn import datasets
from IMLearn.metrics import mean_square_error
from IMLearn.utils import split_train_test
from IMLearn.model_selection import cross_validate
from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression
from sklearn.linear_model import Lasso
from utils import *
import plotly.graph_objects as go
from plotly.subplots import make_subplots
def select_polynomial_degree(n_samples: int = 100, noise: float = 5):
"""
Simulate data from a polynomial model and use cross-validation to select the best fitting degree
Parameters
----------
n_samples: int, default=100
Number of samples to generate
noise: float, default = 5
Noise level to simulate in responses
"""
# Question 1 - Generate dataset for model f(x)=(x+3)(x+2)(x+1)(x-1)(x-2) + eps for eps Gaussian noise
# and split into training- and testing portions
func = lambda x: (x + 3) * (x + 2) * (x + 1) * (x - 1) * (x - 2)
noise = np.random.normal(0, noise, n_samples)
X = np.linspace(-1.2, 2, n_samples)
y = func(X) + noise
X_train, y_train, X_test, y_test = split_train_test(pd.DataFrame(X),
|
pd.Series(y)
|
pandas.Series
|
import os
os.chdir('seqFISH_AllenVISp/')
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('qt5agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
import matplotlib.pyplot as plt
import scipy.stats as st
import pickle
### Original data
with open ('data/SpaGE_pkl/seqFISH_Cortex.pkl', 'rb') as f:
datadict = pickle.load(f)
seqFISH_data = datadict['seqFISH_data']
del datadict
test_set = pd.read_csv('Results/10SpaGE_New_genes.csv',header=0,index_col=0,sep=',').columns
seqFISH_data = seqFISH_data[test_set]
### SpaGE
#10
SpaGE_imputed_10 = pd.read_csv('Results/10SpaGE_New_genes.csv',header=0,index_col=0,sep=',')
SpaGE_Corr_10 = pd.Series(index = test_set)
for i in test_set:
SpaGE_Corr_10[i] = st.spearmanr(seqFISH_data[i],SpaGE_imputed_10[i])[0]
#30
SpaGE_imputed_30 = pd.read_csv('Results/30SpaGE_New_genes.csv',header=0,index_col=0,sep=',')
SpaGE_Corr_30 = pd.Series(index = test_set)
for i in test_set:
SpaGE_Corr_30[i] = st.spearmanr(seqFISH_data[i],SpaGE_imputed_30[i])[0]
#50
SpaGE_imputed_50 = pd.read_csv('Results/50SpaGE_New_genes.csv',header=0,index_col=0,sep=',')
SpaGE_Corr_50 = pd.Series(index = test_set)
for i in test_set:
SpaGE_Corr_50[i] = st.spearmanr(seqFISH_data[i],SpaGE_imputed_50[i])[0]
#100
SpaGE_imputed_100 =
|
pd.read_csv('Results/100SpaGE_New_genes.csv',header=0,index_col=0,sep=',')
|
pandas.read_csv
|
import pytest
import pandas as pd
import numpy as np
import itertools
from vivarium.interpolation import Interpolation, validate_parameters, check_data_complete, Order0Interp
def make_bin_edges(data: pd.DataFrame, col: str) -> pd.DataFrame:
""" Given a dataframe and a column containing midpoints, construct
equally sized bins around midpoints.
"""
mid_pts = data[[col]].drop_duplicates().sort_values(by=col).reset_index(drop=True)
mid_pts['shift'] = mid_pts[col].shift()
mid_pts['left'] = mid_pts.apply(lambda row: (row[col] if pd.isna(row['shift'])
else 0.5 * (row[col] + row['shift'])), axis=1)
mid_pts['right'] = mid_pts['left'].shift(-1)
mid_pts['right'] = mid_pts.right.fillna(mid_pts.right.max() + mid_pts.left.tolist()[-1] - mid_pts.left.tolist()[-2])
data = data.copy()
idx = data.index
data = data.set_index(col, drop=False)
mid_pts = mid_pts.set_index(col, drop=False)
data[[col, f'{col}_left', f'{col}_right']] = mid_pts[[col, 'left', 'right']]
return data.set_index(idx)
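# Illustrative sketch of make_bin_edges (hypothetical midpoints):
# make_bin_edges(pd.DataFrame({'age': [10, 20, 40]}), 'age') adds age_left/age_right columns
# giving bins [10, 15), [15, 30), [30, 45) -- the first bin starts at the first midpoint and
# the last bin is extended by the width of the previous bin.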
@pytest.mark.skip(reason="only order 0 interpolation currently supported")
def test_1d_interpolation():
df = pd.DataFrame({'a': np.arange(100), 'b': np.arange(100), 'c': np.arange(100, 0, -1)})
df = df.sample(frac=1) # Shuffle table to assure interpolation works given unsorted input
i = Interpolation(df, (), ('a',), 1, True)
query = pd.DataFrame({'a': np.arange(100, step=0.01)})
assert np.allclose(query.a, i(query).b)
assert np.allclose(100-query.a, i(query).c)
@pytest.mark.skip(reason="only order 0 interpolation currently supported")
def test_age_year_interpolation():
years = list(range(1990, 2010))
ages = list(range(0, 90))
pops = np.array(ages)*11.1
data = []
for age, pop in zip(ages, pops):
for year in years:
for sex in ['Male', 'Female']:
data.append({'age': age, 'sex': sex, 'year': year, 'pop': pop})
df = pd.DataFrame(data)
df = df.sample(frac=1) # Shuffle table to assure interpolation works given unsorted input
i = Interpolation(df, ('sex', 'age'), ('year',), 1, True)
query = pd.DataFrame({'year': [1990, 1990], 'age': [35, 35], 'sex': ['Male', 'Female']})
assert np.allclose(i(query), 388.5)
@pytest.mark.skip(reason="only order 0 interpolation currently supported")
def test_interpolation_called_missing_key_col():
a = [range(1990, 1995), range(25, 30), ['Male', 'Female']]
df = pd.DataFrame(list(itertools.product(*a)), columns=['year', 'age', 'sex'])
df['pop'] = df.age * 11.1
df = df.sample(frac=1) # Shuffle table to assure interpolation works given unsorted input
i = Interpolation(df, ['sex',], ['year','age'], 1, True)
query = pd.DataFrame({'year': [1990, 1990], 'age': [35, 35]})
with pytest.raises(ValueError):
i(query)
@pytest.mark.skip(reason="only order 0 interpolation currently supported")
def test_interpolation_called_missing_param_col():
a = [range(1990, 1995), range(25, 30), ['Male', 'Female']]
df = pd.DataFrame(list(itertools.product(*a)), columns=['year', 'age', 'sex'])
df['pop'] = df.age * 11.1
df = df.sample(frac=1) # Shuffle table to assure interpolation works given unsorted input
i = Interpolation(df, ['sex',], ['year','age'], 1, True)
query = pd.DataFrame({'year': [1990, 1990], 'sex': ['Male', 'Female']})
with pytest.raises(ValueError):
i(query)
@pytest.mark.skip(reason="only order 0 interpolation currently supported")
def test_2d_interpolation():
a = np.mgrid[0:5, 0:5][0].reshape(25)
b = np.mgrid[0:5, 0:5][1].reshape(25)
df = pd.DataFrame({'a': a, 'b': b, 'c': b, 'd': a})
df = df.sample(frac=1) # Shuffle table to assure interpolation works given unsorted input
i = Interpolation(df, (), ('a', 'b'), 1, True)
query = pd.DataFrame({'a': np.arange(4, step=0.01), 'b': np.arange(4, step=0.01)})
assert np.allclose(query.b, i(query).c)
assert np.allclose(query.a, i(query).d)
@pytest.mark.skip(reason="only order 0 interpolation currently supported")
def test_interpolation_with_categorical_parameters():
a = ['one']*100 + ['two']*100
b = np.append(np.arange(100), np.arange(100))
c = np.append(np.arange(100), np.arange(100, 0, -1))
df = pd.DataFrame({'a': a, 'b': b, 'c': c})
df = df.sample(frac=1) # Shuffle table to assure interpolation works given unsorted input
i = Interpolation(df, ('a',), ('b',), 1, True)
query_one = pd.DataFrame({'a': 'one', 'b': np.arange(100, step=0.01)})
query_two = pd.DataFrame({'a': 'two', 'b': np.arange(100, step=0.01)})
assert np.allclose(np.arange(100, step=0.01), i(query_one).c)
assert np.allclose(np.arange(100, 0, step=-0.01), i(query_two).c)
def test_order_zero_2d():
a = np.mgrid[0:5, 0:5][0].reshape(25)
b = np.mgrid[0:5, 0:5][1].reshape(25)
df = pd.DataFrame({'a': a + 0.5, 'b': b + 0.5, 'c': b*3, 'garbage': ['test']*len(a)})
df = make_bin_edges(df, 'a')
df = make_bin_edges(df, 'b')
df = df.sample(frac=1) # Shuffle table to assure interpolation works given unsorted input
i = Interpolation(df, ('garbage',), [('a', 'a_left', 'a_right'), ('b', 'b_left', 'b_right')],
order=0, extrapolate=True, validate=True)
column = np.arange(0.5, 4, step=0.011)
query = pd.DataFrame({'a': column, 'b': column, 'garbage': ['test']*(len(column))})
assert np.allclose(query.b.astype(int) * 3, i(query).c)
def test_order_zero_2d_fails_on_extrapolation():
a = np.mgrid[0:5, 0:5][0].reshape(25)
b = np.mgrid[0:5, 0:5][1].reshape(25)
df = pd.DataFrame({'a': a + 0.5, 'b': b + 0.5, 'c': b*3, 'garbage': ['test']*len(a)})
df = make_bin_edges(df, 'a')
df = make_bin_edges(df, 'b')
df = df.sample(frac=1) # Shuffle table to assure interpolation works given unsorted input
i = Interpolation(df, ('garbage',), [('a', 'a_left', 'a_right'), ('b', 'b_left', 'b_right')],
order=0, extrapolate=False, validate=True)
column = np.arange(4, step=0.011)
query = pd.DataFrame({'a': column, 'b': column, 'garbage': ['test']*(len(column))})
with pytest.raises(ValueError) as error:
i(query)
message = error.value.args[0]
assert 'Extrapolation' in message and 'a' in message
def test_order_zero_1d_no_extrapolation():
s = pd.Series({0: 0, 1: 1}).reset_index()
s = make_bin_edges(s, 'index')
f = Interpolation(s, tuple(), [['index', 'index_left', 'index_right']], order=0, extrapolate=False,
validate=True)
assert f(pd.DataFrame({'index': [0]}))[0][0] == 0, 'should be precise at index values'
assert f(pd.DataFrame({'index': [0.999]}))[0][0] == 1
with pytest.raises(ValueError) as error:
f(pd.DataFrame({'index': [1]}))
message = error.value.args[0]
assert 'Extrapolation' in message and 'index' in message
def test_order_zero_1d_constant_extrapolation():
s = pd.Series({0: 0, 1: 1}).reset_index()
s = make_bin_edges(s, 'index')
f = Interpolation(s, tuple(), [['index', 'index_left', 'index_right']], order=0, extrapolate=True,
validate=True)
assert f(pd.DataFrame({'index': [1]}))[0][0] == 1
assert f(pd.DataFrame({'index': [2]}))[0][0] == 1, 'should be constant extrapolation outside of input range'
assert f(pd.DataFrame({'index': [-1]}))[0][0] == 0
def test_validate_parameters__empty_data():
with pytest.raises(ValueError) as error:
validate_parameters(pd.DataFrame(columns=["age_left", "age_right",
"sex", "year_left", "year_right", "value"]), ["sex"],
[("age", "age_left", "age_right"),
["year", "year_left", "year_right"]])
message = error.value.args[0]
assert 'empty' in message
def test_check_data_complete_gaps():
data = pd.DataFrame({'year_start': [1990, 1990, 1995, 1995],
'year_end': [1995, 1995, 2000, 2000],
'age_start': [16, 10, 10, 16],
'age_end': [20, 15, 15, 20],})
with pytest.raises(NotImplementedError) as error:
check_data_complete(data, [('year', 'year_start', 'year_end'), ['age', 'age_start', 'age_end']])
message = error.value.args[0]
assert "age_start" in message and "age_end" in message
def test_check_data_complete_overlap():
data = pd.DataFrame({'year_start': [1995, 1995, 2000, 2005, 2010],
'year_end': [2000, 2000, 2005, 2010, 2015]})
with pytest.raises(ValueError) as error:
check_data_complete(data, [('year', 'year_start', 'year_end')])
message = error.value.args[0]
assert "year_start" in message and "year_end" in message
def test_check_data_missing_combos():
data = pd.DataFrame({'year_start': [1990, 1990, 1995],
'year_end': [1995, 1995, 2000],
'age_start': [10, 15, 10],
'age_end': [15, 20, 15]})
with pytest.raises(ValueError) as error:
check_data_complete(data, [['year', 'year_start', 'year_end'], ('age', 'age_start', 'age_end')])
message = error.value.args[0]
assert 'combination' in message
def test_order0interp():
data = pd.DataFrame({'year_start': [1990, 1990, 1990, 1990, 1995, 1995, 1995, 1995],
'year_end': [1995, 1995, 1995, 1995, 2000, 2000, 2000, 2000],
'age_start': [15, 10, 10, 15, 10, 10, 15, 15],
'age_end': [20, 15, 15, 20, 15, 15, 20, 20],
'height_start': [140, 160, 140, 160, 140, 160, 140, 160],
'height_end': [160, 180, 160, 180, 160, 180, 160, 180],
'value': [5, 3, 1, 7, 8, 6, 4, 2]})
interp = Order0Interp(data, [('age', 'age_start', 'age_end'),
('year', 'year_start', 'year_end'),
('height', 'height_start', 'height_end'),]
, ['value'], True, True)
interpolants = pd.DataFrame({'age': [12, 17, 8, 24, 12],
'year': [1992, 1998, 1985, 1992, 1992],
'height': [160, 145, 140, 179, 160]})
result = interp(interpolants)
assert result.equals(pd.DataFrame({'value': [3, 4, 1, 7, 3]}))
def test_order_zero_1d_with_key_column():
data = pd.DataFrame({'year_start': [1990, 1990, 1995, 1995],
'year_end': [1995, 1995, 2000, 2000],
'sex': ['Male', 'Female', 'Male', 'Female'],
'value_1': [10, 7, 2, 12],
'value_2': [1200, 1350, 1476, 1046]})
i = Interpolation(data, ['sex',], [('year', 'year_start', 'year_end'),], 0, True, True)
query = pd.DataFrame({'year': [1992, 1993,],
'sex': ['Male', 'Female']})
expected_result = pd.DataFrame({'value_1': [10.0, 7.0],
'value_2': [1200.0, 1350.0]})
assert i(query).equals(expected_result)
def test_order_zero_non_numeric_values():
data = pd.DataFrame({'year_start': [1990, 1990],
'year_end': [1995, 1995],
'age_start': [15, 24,],
'age_end': [24, 30],
'value_1': ['blue', 'red']})
i = Interpolation(data, tuple(), [('year', 'year_start', 'year_end'), ('age', 'age_start', 'age_end')], 0,
True, True)
query = pd.DataFrame({'year': [1990, 1990],
'age': [15, 24,]},
index=[1, 0])
expected_result = pd.DataFrame({'value_1': ['blue', 'red']},
index=[1, 0])
assert i(query).equals(expected_result)
def test_order_zero_3d_with_key_col():
data = pd.DataFrame({'year_start': [1990, 1990, 1990, 1990, 1995, 1995, 1995, 1995]*2,
'year_end': [1995, 1995, 1995, 1995, 2000, 2000, 2000, 2000]*2,
'age_start': [15, 10, 10, 15, 10, 10, 15, 15]*2,
'age_end': [20, 15, 15, 20, 15, 15, 20, 20]*2,
'height_start': [140, 160, 140, 160, 140, 160, 140, 160]*2,
'height_end': [160, 180, 160, 180, 160, 180, 160, 180]*2,
'sex': ['Male']*8+['Female']*8,
'value': [5, 3, 1, 7, 8, 6, 4, 2, 6, 4, 2, 8, 9, 7, 5, 3]})
interp = Interpolation(data, ('sex',),
[('age', 'age_start', 'age_end'),
('year', 'year_start', 'year_end'),
('height', 'height_start', 'height_end')], 0, True, True)
interpolants = pd.DataFrame({'age': [12, 17, 8, 24, 12],
'year': [1992, 1998, 1985, 1992, 1992],
'height': [160, 145, 140, 185, 160],
'sex': ['Male', 'Female', 'Female', 'Male', 'Male']},
index=[10, 4, 7, 0, 9])
result = interp(interpolants)
assert result.equals(pd.DataFrame({'value': [3.0, 5.0, 2.0, 7.0, 3.0]}, index=[10, 4, 7, 0, 9]))
def test_order_zero_diff_bin_sizes():
data = pd.DataFrame({'year_start': [1990, 1995, 1996, 2005, 2005.5,],
'year_end': [1995, 1996, 2005, 2005.5, 2010],
'value': [1, 5, 2.3, 6, 100]})
i = Interpolation(data, tuple(), [('year', 'year_start', 'year_end')], 0, False, True)
query = pd.DataFrame({'year': [2007, 1990, 2005.4, 1994, 2004, 1995, 2002, 1995.5, 1996]})
expected_result = pd.DataFrame({'value': [100, 1, 6, 1, 2.3, 5, 2.3, 5, 2.3]})
assert i(query).equals(expected_result)
def test_order_zero_given_call_column():
data = pd.DataFrame({'year_start': [1990, 1995, 1996, 2005, 2005.5,],
'year_end': [1995, 1996, 2005, 2005.5, 2010],
'year': [1992.5, 1995.5, 2000, 2005.25, 2007.75],
'value': [1, 5, 2.3, 6, 100]})
i = Interpolation(data, tuple(), [('year', 'year_start', 'year_end')], 0, False, True)
query = pd.DataFrame({'year': [2007, 1990, 2005.4, 1994, 2004, 1995, 2002, 1995.5, 1996]})
expected_result = pd.DataFrame({'value': [100, 1, 6, 1, 2.3, 5, 2.3, 5, 2.3]})
assert i(query).equals(expected_result)
@pytest.mark.parametrize('validate', [True, False])
def test_interpolation_init_validate_option_invalid_data(validate):
if validate:
with pytest.raises(ValueError, match='You must supply non-empty data to create the interpolation.'):
i = Interpolation(
|
pd.DataFrame()
|
pandas.DataFrame
|
import typing
import os
import time
import sys
import warnings
import traceback
import shlex
import tempfile
from subprocess import CalledProcessError
from .backends import AbstractSlurmBackend, LocalSlurmBackend
from .utils import check_call
from .orchestrator import Orchestrator, BACKENDS, version
import yaml
import pandas as pd
from agutil import status_bar
class Xargs(Orchestrator):
"""
Simplified Orchestrator
Built to work on a local slurm cluster.
No localization is performed. Job-specific commands are dumped to a staging
directory, and jobs are dispatched. Outputs are not gathered or delocalized,
and the only cleanup performed removes the temporary staging dir
"""
def __init__(
self, command: str, inputs: typing.Dict[str, typing.Any],
backend: typing.Union[AbstractSlurmBackend, typing.Dict[str, typing.Any]],
name: typing.Optional[str] = None,
cwd: typing.Optional[str] = None,
resources: typing.Optional[typing.Dict[str, typing.Any]] = None
):
if isinstance(backend, AbstractSlurmBackend):
self.backend = backend
self._slurmconf_path = None
else:
self.backend = BACKENDS[backend['type']](**backend)
self._slurmconf_path = backend['slurm_conf_path'] if 'slurm_conf_path' in backend else None
if not isinstance(self.backend, LocalSlurmBackend):
raise TypeError("Xargs only works on local-based slurm backends")
self.resources = {} if resources is None else resources
self.inputs = inputs
self.cwd = cwd
self.n_jobs = 0 if len(inputs) == 0 else len(inputs['canine_arg0'])
self.name = name if name is not None else 'Canine-Xargs'
self.command = command
def run_pipeline(self, dry_run: bool = False) -> pd.DataFrame:
warnings.warn(
"The Xargs orchestrator is officially deprecated due to lack of use or support. It will be removed soon",
DeprecationWarning
)
print("Preparing a pipeline of", self.n_jobs, "jobs")
print("Connecting to backend...")
start_time = time.monotonic()
with self.backend:
print("Initializing pipeline workspace")
with tempfile.TemporaryDirectory(dir=self.cwd) as tempdir:
for jid in range(self.n_jobs):
# By creating a local tempdir
# but using the backend to write the script
# we ensure that this is a local-based backend
self.backend.pack_batch_script(
*(
'export {}={}'.format(
argname,
shlex.quote(argvalues[jid])
)
for argname, argvalues in self.inputs.items()
),
script_path=os.path.join(
tempdir,
'{}.sh'.format(jid)
)
)
print("Job staged on SLURM controller in:", tempdir)
print("Preparing pipeline script")
self.backend.pack_batch_script(
'export CANINE={}'.format(version),
'export CANINE_BACKEND={}'.format(type(self.backend)),
'export CANINE_ADAPTER=Xargs',
'export CANINE_ROOT={}'.format(tempdir),
'export CANINE_COMMON=""',
'export CANINE_OUTPUT=""',
'export CANINE_JOBS={}'.format(self.cwd),
'source $CANINE_ROOT/$SLURM_ARRAY_TASK_ID.sh',
self.command,
script_path=os.path.join(
tempdir,
'entrypoint.sh'
)
)
if dry_run:
return
print("Waiting for cluster to finish startup...")
self.backend.wait_for_cluster_ready()
if self.backend.hard_reset_on_orch_init and self._slurmconf_path:
active_jobs = self.backend.squeue('all')
if len(active_jobs):
print("There are active jobs. Skipping slurmctld restart")
else:
try:
print("Stopping slurmctld")
rc, stdout, stderr = self.backend.invoke(
'sudo pkill slurmctld',
True
)
check_call('sudo pkill slurmctld', rc, stdout, stderr)
print("Loading configurations", self._slurmconf_path)
rc, stdout, stderr = self.backend.invoke(
'sudo slurmctld -c -f {}'.format(self._slurmconf_path),
True
)
check_call('sudo slurmctld -c -f {}'.format(self._slurmconf_path), rc, stdout, stderr)
print("Restarting slurmctl")
rc, stdout, stderr = self.backend.invoke(
'sudo slurmctld reconfigure',
True
)
check_call('sudo slurmctld reconfigure', rc, stdout, stderr)
except CalledProcessError:
traceback.print_exc()
print("Slurmctld restart failed")
print("Submitting batch job")
batch_id = self.backend.sbatch(
os.path.join(
tempdir,
'entrypoint.sh'
),
**{
'chdir': '~' if self.cwd is None else self.cwd,
'requeue': True,
'job_name': self.name,
'array': "0-{}".format(self.n_jobs-1),
'output': "{}/%A.%a.stdout".format('~' if self.cwd is None else self.cwd),
'error': "{}/%A.%a.stderr".format('~' if self.cwd is None else self.cwd),
**self.resources
}
)
print("Batch id:", batch_id)
completed_jobs = []
cpu_time = {}
uptime = {}
prev_acct = None
try:
waiting_jobs = {
'{}_{}'.format(batch_id, i)
for i in range(self.n_jobs)
}
while len(waiting_jobs):
time.sleep(30)
acct = self.backend.sacct(job=batch_id, format="JobId,State,ExitCode,CPUTimeRAW").astype({'CPUTimeRAW': int})
for jid in [*waiting_jobs]:
if jid in acct.index:
if prev_acct is not None and jid in prev_acct.index and prev_acct['CPUTimeRAW'][jid] > acct['CPUTimeRAW'][jid]:
# Job has restarted since last update:
if jid in cpu_time:
cpu_time[jid] += prev_acct['CPUTimeRAW'][jid]
else:
cpu_time[jid] = prev_acct['CPUTimeRAW'][jid]
if acct['State'][jid] not in {'RUNNING', 'PENDING', 'NODE_FAIL'}:
job = jid.split('_')[1]
print("Job",job, "completed with status", acct['State'][jid], acct['ExitCode'][jid].split(':')[0])
completed_jobs.append((job, jid))
waiting_jobs.remove(jid)
for node in {node for node in self.backend.squeue(jobs=batch_id)['NODELIST(REASON)'] if not node.startswith('(')}:
if node in uptime:
uptime[node] += 1
else:
uptime[node] = 1
if prev_acct is None:
prev_acct = acct
else:
prev_acct = pd.concat([
acct,
prev_acct.loc[[idx for idx in prev_acct.index if idx not in acct.index]]
])
except:
print("Encountered unhandled exception. Cancelling batch job", file=sys.stderr)
self.backend.scancel(batch_id)
raise
runtime = time.monotonic() - start_time
job_spec = {
str(i): {
argname: argvalues[i]
for argname, argvalues in self.inputs.items()
}
for i in range(self.n_jobs)
}
df = pd.DataFrame(
data={
job_id: {
'slurm_state': acct['State'][batch_id+'_'+job_id],
'exit_code': acct['ExitCode'][batch_id+'_'+job_id],
'cpu_hours': (prev_acct['CPUTimeRAW'][batch_id+'_'+job_id] + (
cpu_time[batch_id+'_'+job_id] if batch_id+'_'+job_id in cpu_time else 0
))/3600,
**job_spec[job_id],
}
for job_id in job_spec
}
).T.set_index(
|
pd.Index([*job_spec], name='job_id')
|
pandas.Index
|
import os
import re
import shlex
import numpy as np
import pandas as pd
from scipy.io import mmread, mmwrite
from scipy.sparse import csr_matrix
import tempfile
import subprocess
from typing import List, Dict, Tuple, Union
import logging
logger = logging.getLogger(__name__)
from pegasusio import UnimodalData, CITESeqData, MultimodalData
def _enumerate_files(path: str, parts: List[str], repl_list1: List[str], repl_list2: List[str] = None) -> str:
""" Enumerate all possible file names """
if len(parts) <= 2:
for token in repl_list1:
parts[-1] = token
candidate = os.path.join(path, ''.join(parts))
if os.path.isfile(candidate):
return candidate
else:
assert len(parts) == 4
for p2 in repl_list1:
parts[1] = p2
for p4 in repl_list2:
parts[3] = p4
candidate = os.path.join(path, ''.join(parts))
if os.path.isfile(candidate):
return candidate
return None
def _locate_barcode_and_feature_files(path: str, fname: str) -> Tuple[str, str]:
""" Locate barcode and feature files (with path) based on mtx file name (no suffix)
"""
barcode_file = feature_file = None
if fname == "matrix":
barcode_file = _enumerate_files(path, [''], ["cells.tsv.gz", "cells.tsv", "barcodes.tsv.gz", "barcodes.tsv"])
feature_file = _enumerate_files(path, [''], ["genes.tsv.gz", "genes.tsv", "features.tsv.gz", "features.tsv"])
else:
p1, p2, p3 = fname.partition("matrix")
if p2 == '' and p3 == '':
barcode_file = _enumerate_files(path, [p1, ''], [".barcodes.tsv.gz", ".barcodes.tsv", ".cells.tsv.gz", ".cells.tsv", "_barcode.tsv", ".barcodes.txt"])
feature_file = _enumerate_files(path, [p1, ''], [".genes.tsv.gz", ".genes.tsv", ".features.tsv.gz", ".features.tsv", "_gene.tsv", ".genes.txt"])
else:
barcode_file = _enumerate_files(path, [p1, '', p3, ''], ["barcodes", "cells"], [".tsv.gz", ".tsv"])
feature_file = _enumerate_files(path, [p1, '', p3, ''], ["genes", "features"], [".tsv.gz", ".tsv"])
if barcode_file is None:
raise ValueError("Cannot find barcode file!")
if feature_file is None:
raise ValueError("Cannot find feature file!")
return barcode_file, feature_file
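# Illustrative behaviour (hypothetical file layout): for fname == "matrix" the helper looks in
# `path` for cells.tsv(.gz) or barcodes.tsv(.gz) plus genes/features.tsv(.gz); for a prefixed
# name such as "sample1.matrix" it tries e.g. sample1.barcodes.tsv.gz / sample1.cells.tsv and
# sample1.genes.tsv.gz / sample1.features.tsv, raising ValueError if nothing is found.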
def _load_barcode_metadata(barcode_file: str, sep: str = "\t") -> Tuple[pd.DataFrame, str]:
""" Load cell barcode information """
format_type = None
barcode_metadata = pd.read_csv(barcode_file, sep=sep, header=None)
if "cellkey" in barcode_metadata.iloc[0].values:
# HCA DCP format
barcode_metadata = pd.DataFrame(data = barcode_metadata.iloc[1:].values, columns = barcode_metadata.iloc[0].values)
barcode_metadata.rename(columns={"cellkey": "barcodekey"}, inplace=True)
format_type = "HCA DCP"
elif "barcodekey" in barcode_metadata.iloc[0].values:
# Pegasus format
barcode_metadata =
|
pd.DataFrame(data = barcode_metadata.iloc[1:].values, columns = barcode_metadata.iloc[0].values)
|
pandas.DataFrame
|
"Functions specific to GenX outputs"
from itertools import product
import logging
from pathlib import Path
from typing import Dict
import pandas as pd
from powergenome.external_data import (
load_policy_scenarios,
load_demand_segments,
load_user_genx_settings,
)
from powergenome.load_profiles import make_distributed_gen_profiles
from powergenome.time_reduction import kmeans_time_clustering
from powergenome.util import load_settings
from powergenome.nrelatb import investment_cost_calculator
logger = logging.getLogger(__name__)
INT_COLS = [
"Inv_cost_per_MWyr",
"Fixed_OM_cost_per_MWyr",
"Inv_cost_per_MWhyr",
"Fixed_OM_cost_per_MWhyr",
"Line_Reinforcement_Cost_per_MW_yr",
]
COL_ROUND_VALUES = {
"Var_OM_cost_per_MWh": 2,
"Var_OM_cost_per_MWh_in": 2,
"Start_cost_per_MW": 0,
"Cost_per_MMBtu": 2,
"CO2_content_tons_per_MMBtu": 5,
"Cap_size": 2,
"Heat_rate_MMBTU_per_MWh": 2,
"distance_mile": 4,
"Line_Max_Reinforcement_MW": 0,
"distance_miles": 1,
"distance_km": 1,
}
def add_emission_policies(transmission_df, settings, DistrZones=None):
"""Add emission policies to the transmission dataframe
Parameters
----------
transmission_df : DataFrame
Zone to zone transmission constraints
settings : dict
User-defined parameters from a settings file. Should have keys of `input_folder`
(a Path object of where to find user-supplied data) and
`emission_policies_fn` (the file to load).
DistrZones : [type], optional
Placeholder setting, by default None
Returns
-------
DataFrame
The emission policies provided by user next to the transmission constraints.
"""
model_year = settings["model_year"]
case_id = settings["case_id"]
policies = load_policy_scenarios(settings)
year_case_policy = policies.loc[(case_id, model_year), :]
# Bug where multiple regions for a case will return this as a df, even if the policy
# for this case applies to all regions (code below expects a Series)
ycp_shape = year_case_policy.shape
if ycp_shape[0] == 1 and len(ycp_shape) > 1:
year_case_policy = year_case_policy.squeeze() # convert to series
zones = settings["model_regions"]
zone_num_map = {
zone: f"z{number + 1}" for zone, number in zip(zones, range(len(zones)))
}
zone_cols = ["Region_description", "Network_zones", "Distr_Zones"] + list(
policies.columns
)
zone_df = pd.DataFrame(columns=zone_cols)
zone_df["Region_description"] = zones
zone_df["Network_zones"] = zone_df["Region_description"].map(zone_num_map)
if DistrZones is None:
zone_df["Distr_Zones"] = 0
# Add code here to make DistrZones something else!
# If there is only one region, assume that the policy is applied across all regions.
if isinstance(year_case_policy, pd.Series):
logger.info(
"Only one zone was found in the emissions policy file."
" The same emission policies are being applied to all zones."
)
for col, value in year_case_policy.items():
if col == "CO_2_Max_Mtons":
zone_df.loc[:, col] = 0
if value > 0:
zone_df.loc[0, col] = value
else:
zone_df.loc[:, col] = value
else:
for region, col in product(
year_case_policy["region"].unique(), year_case_policy.columns
):
zone_df.loc[
zone_df["Region_description"] == region, col
] = year_case_policy.loc[year_case_policy.region == region, col].values[0]
zone_df = zone_df.drop(columns="region")
network_df = pd.concat([zone_df, transmission_df], axis=1)
return network_df
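# Illustrative call (a sketch; the settings values shown are assumptions based on the docstring):
# settings = {"model_year": 2030, "case_id": "p1", "model_regions": ["CA_N", "CA_S"],
# "input_folder": Path("inputs"), "emission_policies_fn": "emission_policies.csv"}
# network_df = add_emission_policies(transmission_df, settings)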
def add_misc_gen_values(gen_clusters, settings):
path = Path(settings["input_folder"]) / settings["misc_gen_inputs_fn"]
misc_values = pd.read_csv(path)
misc_values = misc_values.fillna("skip")
for resource in misc_values["Resource"].unique():
# resource_misc_values = misc_values.loc[misc_values["Resource"] == resource, :].dropna()
for col in misc_values.columns:
if col == "Resource":
continue
value = misc_values.loc[misc_values["Resource"] == resource, col].values[0]
if value != "skip":
gen_clusters.loc[
gen_clusters["Resource"].str.contains(resource, case=False), col
] = value
return gen_clusters
def make_genx_settings_file(pudl_engine, settings, calculated_ces=None):
"""Make a copy of the GenX settings file for a specific case.
This function tries to make some intelligent choices about parameter values like
the RPS/CES type and can also read values from a file.
There should be a base-level GenX settings file with parameters like the solver and
solver-specific settings that stay constant across all cases.
Parameters
----------
pudl_engine : sqlalchemy.Engine
A sqlalchemy connection for use by pandas to access IPM load profiles. These
load profiles are needed when DG is calculated as a fraction of load.
settings : dict
User-defined parameters from a settings file. Should have keys of `model_year`
`case_id`, 'case_name', `input_folder` (a Path object of where to find
user-supplied data), `emission_policies_fn`, 'distributed_gen_profiles_fn'
(the files to load in other functions), and 'genx_settings_fn'.
Returns
-------
dict
Dictionary of settings for a GenX run
"""
model_year = settings["model_year"]
case_id = settings["case_id"]
case_name = settings["case_name"]
genx_settings = load_settings(settings["genx_settings_fn"])
policies = load_policy_scenarios(settings)
year_case_policy = policies.loc[(case_id, model_year), :]
# Bug where multiple regions for a case will return this as a df, even if the policy
# for this case applies to all regions (code below expects a Series)
ycp_shape = year_case_policy.shape
if ycp_shape[0] == 1 and len(ycp_shape) > 1:
year_case_policy = year_case_policy.squeeze() # convert to series
if settings.get("distributed_gen_profiles_fn"):
dg_generation = make_distributed_gen_profiles(pudl_engine, settings)
total_dg_gen = dg_generation.sum().sum()
else:
total_dg_gen = 0
if isinstance(year_case_policy, pd.DataFrame):
year_case_policy = year_case_policy.sum()
# If a value isn't supplied to the function use value from file
if calculated_ces is None:
CES = year_case_policy["CES"]
else:
CES = calculated_ces
RPS = year_case_policy["RPS"]
# THIS WILL NEED TO BE MORE FLEXIBLE FOR OTHER SCENARIOS
if float(year_case_policy["CO_2_Max_Mtons"]) >= 0:
genx_settings["CO2Cap"] = 2
else:
genx_settings["CO2Cap"] = 0
if float(year_case_policy["RPS"]) > 0:
# print(total_dg_gen)
# print(year_case_policy["RPS"])
if policies.loc[(case_id, model_year), "region"].all() == "all":
genx_settings["RPS"] = 3
genx_settings["RPS_Adjustment"] = float((1 - RPS) * total_dg_gen)
else:
genx_settings["RPS"] = 2
genx_settings["RPS_Adjustment"] = 0
else:
genx_settings["RPS"] = 0
genx_settings["RPS_Adjustment"] = 0
if float(year_case_policy["CES"]) > 0:
if policies.loc[(case_id, model_year), "region"].all() == "all":
genx_settings["CES"] = 3
# This is a little confusing, but for a partial CES no adjustment is applied
if settings.get("partial_ces"):
genx_settings["CES_Adjustment"] = 0
else:
genx_settings["CES_Adjustment"] = float((1 - CES) * total_dg_gen)
else:
genx_settings["CES"] = 2
genx_settings["CES_Adjustment"] = 0
else:
genx_settings["CES"] = 0
genx_settings["CES_Adjustment"] = 0
# Don't wrap when time domain isn't reduced
if not settings.get("reduce_time_domain"):
genx_settings["OperationWrapping"] = 0
genx_settings["case_id"] = case_id
genx_settings["case_name"] = case_name
genx_settings["year"] = str(model_year)
# This is a new setting, will need to have a way to change.
genx_settings["CapacityReserveMargin"] = 0
genx_settings["LDS"] = 0
# Load user-defined values for the GenX settings file. This overrides the
# complicated logic above.
if settings.get("case_genx_settings_fn"):
user_genx_settings = load_user_genx_settings(settings)
user_case_settings = user_genx_settings.loc[(case_id, model_year), :]
for key, value in user_case_settings.items():
if not pd.isna(value):
genx_settings[key] = value
return genx_settings
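# Illustrative call (a sketch; `pudl_engine` and the settings dict come from the caller, and
# the calculated_ces value here is hypothetical):
# genx_settings = make_genx_settings_file(pudl_engine, settings, calculated_ces=0.45)
# where settings provides model_year, case_id, case_name, input_folder, emission_policies_fn,
# distributed_gen_profiles_fn and genx_settings_fn, as described in the docstring above.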
def reduce_time_domain(
resource_profiles, load_profiles, settings, variable_resources_only=True
):
demand_segments = load_demand_segments(settings)
if settings.get("reduce_time_domain"):
days = settings["time_domain_days_per_period"]
time_periods = settings["time_domain_periods"]
include_peak_day = settings["include_peak_day"]
load_weight = settings["demand_weight_factor"]
results, _, _ = kmeans_time_clustering(
resource_profiles=resource_profiles,
load_profiles=load_profiles,
days_in_group=days,
num_clusters=time_periods,
include_peak_day=include_peak_day,
load_weight=load_weight,
variable_resources_only=variable_resources_only,
)
reduced_resource_profile = results["resource_profiles"]
reduced_resource_profile.index.name = "Resource"
reduced_resource_profile.index = range(1, len(reduced_resource_profile) + 1)
reduced_load_profile = results["load_profiles"]
time_series_mapping = results["time_series_mapping"]
time_index = pd.Series(data=reduced_load_profile.index + 1, name="Time_index")
sub_weights = pd.Series(
data=[x * (days * 24) for x in results["ClusterWeights"]],
name="Sub_Weights",
)
hours_per_period = pd.Series(data=[days * 24], name="Hours_per_period")
subperiods = pd.Series(data=[time_periods], name="Subperiods")
reduced_load_output = pd.concat(
[
demand_segments,
subperiods,
hours_per_period,
sub_weights,
time_index,
reduced_load_profile.round(0),
],
axis=1,
)
return reduced_resource_profile, reduced_load_output, time_series_mapping
else:
time_index = pd.Series(data=range(1, 8761), name="Time_index")
sub_weights =
|
pd.Series(data=[1], name="Sub_Weights")
|
pandas.Series
|
import numpy as np
from sklearn.preprocessing import StandardScaler
import pyswarms as ps
from sklearn.model_selection import cross_val_score,ShuffleSplit
from sklearn.metrics import mean_squared_error,r2_score
import joblib
import pandas as pd
import time
def discretize(x,num):
result = min(num-1, max(0, x))
return int(result)
def ErrorDistribs(y_true,y_pred):
return abs(y_true-y_pred)/y_true
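# Quick illustration of the two helpers above:
# discretize(5.7, 4) -> 3 (clamped to [0, num-1] and truncated to int)
# discretize(-2.0, 4) -> 0
# ErrorDistribs(np.array([100.0]), np.array([90.0])) -> array([0.1]) (relative absolute error)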
class model():
def __init__(self,n_particles=20,c1=0.5,c2=0.5,w=0.9,verbose=1,cv=5,scoring='neg_mean_squared_error'):
self.StandardScaler=StandardScaler()
self.n_particles=n_particles
self.c1=c1
self.c2=c2
self.w=w
self.options={'c1':self.c1,'c2':self.c2,'w':self.w}
self.verbose=verbose
self.cv=cv
self.scoring=scoring
def train(self,X_train,y_train,reg,param_distribs):
start=time.perf_counter()
self.X_train_pre=self.StandardScaler.fit_transform(X_train)
self.y_train=y_train
self.reg=reg
self.param_distribs=param_distribs
self.dimensions=len(param_distribs)
upper=np.zeros(self.dimensions)
lower=np.zeros(self.dimensions)
for count, (key, value) in enumerate(self.param_distribs.items()):
lower[count]=value[1]
upper[count]=value[2]
bounds=(lower,upper)
optimizer=ps.single.GlobalBestPSO(n_particles=self.n_particles,dimensions=self.dimensions,options=self.options,bounds=bounds)
best_cost,best_pos=optimizer.optimize(self.search,iters=50,verbose=self.verbose)
# best_pos=[-0.7811003950341757, 4.736212131795903, 0.3134303131418766]
self.best_params={}
for count, (key, value) in enumerate(self.param_distribs.items()):
if value[0].__name__=='choice':
index=value[0](best_pos[count])
self.best_params[key]=value[3][index]
else:
self.best_params[key]=value[0](best_pos[count])
self.final_model=self.reg(**self.best_params)
self.final_model.fit(self.X_train_pre,self.y_train)
y_pred=self.final_model.predict(self.X_train_pre)
now=time.perf_counter()
RMSE=np.sqrt(mean_squared_error(y_train,y_pred))/10**6
R2=r2_score(y_train,y_pred)
rela_error=ErrorDistribs(y_train,y_pred)*100
error_hist,_=np.histogram(rela_error,bins=50)
error_median=np.median(rela_error)
# with open('{}.txt'.format(self.reg.__name__),'w+') as f:
# f.write('RMSE: {}\r\nR2: {}\r\nError_median: {}\r\n'.format(RMSE,R2,error_median))
self.my_dict={}
self.my_dict['train_time']=now-start
self.my_dict['RMSE']=RMSE
self.my_dict['R2']=R2
self.my_dict['Error_Median']=error_median
self.my_dict['Error_Hist']=error_hist.ravel()
self.my_dict['y_true']=self.y_train.ravel()
self.my_dict['y_pred']=y_pred.ravel()
joblib.dump(self.final_model,'{}.pkl'.format(self.reg.__name__))
# my_model_loaded=joblib.load('{}.pkl'.format(self.reg.__name__))
def search(self,param):
score_array=np.zeros((self.n_particles,self.cv))
fit_params={}
for i in range(self.n_particles):
for count, (key, value) in enumerate(self.param_distribs.items()):
if value[0].__name__=='choice':
index=value[0](param[i,count])
fit_params[key]=value[3][index]
else:
fit_params[key]=value[0](param[i,count])
# cv=ShuffleSplit(n_splits=5,test_size=0.3)
score_array[i,:]=cross_val_score(self.reg(**fit_params),self.X_train_pre,self.y_train,scoring=self.scoring,cv=self.cv)
return -np.mean(score_array,axis=1)
def predict(self,X_test):
"""
X_test: numpy.ndarray of shape (n_particles, dimensions); predict() also doubles as the
PSO objective in optimize(), so it returns the negated prediction (PSO minimizes).
"""
X_test_pre=self.StandardScaler.transform(X_test)
y_pred=self.final_model.predict(X_test_pre)
return -y_pred
def optimize(self):
start=time.perf_counter()
upper=250*np.ones(72)
lower=15*np.ones(72)
bounds=(lower,upper)
optimizer=ps.single.GlobalBestPSO(n_particles=self.n_particles,dimensions=72,options=self.options,bounds=bounds)
best_cost,best_pos=optimizer.optimize(self.predict,iters=100,verbose=self.verbose)
best_pos=np.array(best_pos)
cost_history=np.array(optimizer.cost_history)
now=time.perf_counter()
self.my_dict['opt_time']=now-start
self.my_dict['X_test']=best_pos.ravel()
self.my_dict['cost_history']=cost_history.ravel()
df=pd.DataFrame(dict([ (k,
|
pd.Series(v)
|
pandas.Series
|
import requests
import pandas as pd
import numpy as np
import json
import haversine
import datetime
from haversine import Unit
import os
import prefect
from prefect import task, Flow, Parameter, case
from prefect.tasks.notifications.email_task import EmailTask
from prefect.schedules import IntervalSchedule
ENDPOINT = 'https://www.vaccinespotter.org/api/v0/states'
@task(log_stdout=True)
def load_data(state):
endpoint = f'{ENDPOINT}/{state}.json'
print(f'Checking: {endpoint}')
json_payload = requests.get(endpoint)
data = json.loads(json_payload.content)
df = pd.DataFrame([x['properties'] for x in data['features']])
df['coordinates'] = [(x['geometry']['coordinates'][1], x['geometry']['coordinates'][0]) for x in data['features']]
df['appointments_last_fetched'] =
|
pd.to_datetime(data['metadata']['appointments_last_fetched'])
|
pandas.to_datetime
|
import sys
import pandas as pd
import pytd
from pedestrian_detector import PedestrianDetector
# TODO: fill the following env vars
TD_DATABASE = ""
TD_TABLE = ""
TD_API_KEY = ""
TD_API_SERVER = "https://api.treasuredata.com"
if __name__ == "__main__":
client = pytd.Client(
database=TD_DATABASE, apikey=TD_API_KEY, endpoint=TD_API_SERVER
)
detector = PedestrianDetector()
counts, timestamps = [], []
try:
while True:
cnt, ts = detector.detect()
print(cnt, ts)
counts.append(cnt)
timestamps.append(ts)
if len(counts) == 10:
client.load_table_from_dataframe(
|
pd.DataFrame(data={"time": timestamps, "num_pedestrian": counts})
|
pandas.DataFrame
|
# coding: utf-8
import re
import numpy as np
import pandas as pd
def vnpy_opt_DAY_IN(opt):
"""
Clean up vnpy optimization results; used for DAY_IN and DAY_OUT.
:param opt: raw optimization output text
:return: DataFrame with DAY_IN, DAY_OUT and capital columns
"""
data = re.compile(r'DAY_IN\':\s(\d+),\s\'DAY_OUT\':\s(\d+)\}"\]:\s([\d\.]+)').findall(opt)
data = np.array(data).T
dic = {
"DAY_IN": pd.Series(data[0], dtype=np.int),
"DAY_OUT": pd.Series(data[1], dtype=np.int),
"capital": pd.Series(data[2], dtype=np.float)
}
return
|
pd.DataFrame(dic)
|
pandas.DataFrame
|
from datetime import datetime
from io import StringIO
import itertools
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Period,
Series,
Timedelta,
date_range,
)
import pandas._testing as tm
class TestDataFrameReshape:
def test_stack_unstack(self, float_frame):
df = float_frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({"foo": stacked, "bar": stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
tm.assert_frame_equal(unstacked, df)
tm.assert_frame_equal(unstacked_df["bar"], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
tm.assert_frame_equal(unstacked_cols.T, df)
tm.assert_frame_equal(unstacked_cols_df["bar"].T, df)
def test_stack_mixed_level(self):
# GH 18310
levels = [range(3), [3, "a", "b"], [1, 2]]
# flat columns:
df = DataFrame(1, index=levels[0], columns=levels[1])
result = df.stack()
expected = Series(1, index=MultiIndex.from_product(levels[:2]))
tm.assert_series_equal(result, expected)
# MultiIndex columns:
df = DataFrame(1, index=levels[0], columns=MultiIndex.from_product(levels[1:]))
result = df.stack(1)
expected = DataFrame(
1, index=MultiIndex.from_product([levels[0], levels[2]]), columns=levels[1]
)
tm.assert_frame_equal(result, expected)
# as above, but used labels in level are actually of homogeneous type
result = df[["a", "b"]].stack(1)
expected = expected[["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_unstack_not_consolidated(self, using_array_manager):
# Gh#34708
df = DataFrame({"x": [1, 2, np.NaN], "y": [3.0, 4, np.NaN]})
df2 = df[["x"]]
df2["y"] = df["y"]
if not using_array_manager:
assert len(df2._mgr.blocks) == 2
res = df2.unstack()
expected = df.unstack()
tm.assert_series_equal(res, expected)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack(fill_value=-1)
expected = DataFrame(
{"a": [1, -1, 5], "b": [2, 4, -1]}, index=["x", "y", "z"], dtype=np.int16
)
tm.assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame(
{"a": [1, 0.5, 5], "b": [2, 4, 0.5]}, index=["x", "y", "z"], dtype=float
)
tm.assert_frame_equal(result, expected)
# GH #13971: fill_value when unstacking multiple levels:
df = DataFrame(
{"x": ["a", "a", "b"], "y": ["j", "k", "j"], "z": [0, 1, 2], "w": [0, 1, 2]}
).set_index(["x", "y", "z"])
unstacked = df.unstack(["x", "y"], fill_value=0)
key = ("<KEY>")
expected = unstacked[key]
result = Series([0, 0, 2], index=unstacked.index, name=key)
tm.assert_series_equal(result, expected)
stacked = unstacked.stack(["x", "y"])
stacked.index = stacked.index.reorder_levels(df.index.names)
# Workaround for GH #17886 (unnecessarily casts to float):
stacked = stacked.astype(np.int64)
result = stacked.loc[df.index]
tm.assert_frame_equal(result, df)
# From a series
s = df["w"]
result = s.unstack(["x", "y"], fill_value=0)
expected = unstacked["w"]
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list("AB"), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list("xyz"), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
)
tm.assert_frame_equal(result, expected)
# From a mixed type dataframe
df["A"] = df["A"].astype(np.int16)
df["B"] = df["B"].astype(np.float64)
result = df.unstack(fill_value=-1)
expected["A"] = expected["A"].astype(np.int16)
expected["B"] = expected["B"].astype(np.float64)
tm.assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list("xyz"), dtype=float)
expected.columns = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = date_range("2012-01-01", periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [dv[0], pd.NaT, dv[3]], "b": [dv[1], dv[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame(
{"a": [dv[0], dv[0], dv[3]], "b": [dv[1], dv[2], dv[0]]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [td[0], pd.NaT, td[3]], "b": [td[1], td[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame(
{"a": [td[0], td[1], td[3]], "b": [td[1], td[2], td[1]]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [
Period("2012-01"),
Period("2012-02"),
Period("2012-03"),
Period("2012-04"),
]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [periods[0], None, periods[3]], "b": [periods[1], periods[2], None]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame(
{
"a": [periods[0], periods[1], periods[3]],
"b": [periods[1], periods[2], periods[1]],
},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
data = Series(["a", "b", "c", "a"], dtype="category")
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame(
{
"a": pd.Categorical(list("axa"), categories=list("abc")),
"b": pd.Categorical(list("bcx"), categories=list("abc")),
},
index=list("xyz"),
)
tm.assert_frame_equal(result, expected)
# Fill with a non-category value raises a TypeError
msg = r"'fill_value=d' is not present in"
with pytest.raises(TypeError, match=msg):
data.unstack(fill_value="d")
# Fill with category value replaces missing values as expected
result = data.unstack(fill_value="c")
expected = DataFrame(
{
"a": pd.Categorical(list("aca"), categories=list("abc")),
"b": pd.Categorical(list("bcc"), categories=list("abc")),
},
index=list("xyz"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_tuplename_in_multiindex(self):
# GH 19966
idx = MultiIndex.from_product(
[["a", "b", "c"], [1, 2, 3]], names=[("A", "a"), ("B", "b")]
)
df = DataFrame({"d": [1] * 9, "e": [2] * 9}, index=idx)
result = df.unstack(("A", "a"))
expected = DataFrame(
[[1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2]],
columns=MultiIndex.from_tuples(
[
("d", "a"),
("d", "b"),
("d", "c"),
("e", "a"),
("e", "b"),
("e", "c"),
],
names=[None, ("A", "a")],
),
index=Index([1, 2, 3], name=("B", "b")),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"unstack_idx, expected_values, expected_index, expected_columns",
[
(
("A", "a"),
[[1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2]],
MultiIndex.from_tuples(
[(1, 3), (1, 4), (2, 3), (2, 4)], names=["B", "C"]
),
MultiIndex.from_tuples(
[("d", "a"), ("d", "b"), ("e", "a"), ("e", "b")],
names=[None, ("A", "a")],
),
),
(
(("A", "a"), "B"),
[[1, 1, 1, 1, 2, 2, 2, 2], [1, 1, 1, 1, 2, 2, 2, 2]],
Index([3, 4], name="C"),
MultiIndex.from_tuples(
[
("d", "a", 1),
("d", "a", 2),
("d", "b", 1),
("d", "b", 2),
("e", "a", 1),
("e", "a", 2),
("e", "b", 1),
("e", "b", 2),
],
names=[None, ("A", "a"), "B"],
),
),
],
)
def test_unstack_mixed_type_name_in_multiindex(
self, unstack_idx, expected_values, expected_index, expected_columns
):
# GH 19966
idx = MultiIndex.from_product(
[["a", "b"], [1, 2], [3, 4]], names=[("A", "a"), "B", "C"]
)
df = DataFrame({"d": [1] * 8, "e": [2] * 8}, index=idx)
result = df.unstack(unstack_idx)
expected = DataFrame(
expected_values, columns=expected_columns, index=expected_index
)
tm.assert_frame_equal(result, expected)
def test_unstack_preserve_dtypes(self):
# Checks fix for #11847
df = DataFrame(
{
"state": ["IL", "MI", "NC"],
"index": ["a", "b", "c"],
"some_categories": Series(["a", "b", "c"]).astype("category"),
"A": np.random.rand(3),
"B": 1,
"C": "foo",
"D": pd.Timestamp("20010102"),
"E": Series([1.0, 50.0, 100.0]).astype("float32"),
"F": Series([3.0, 4.0, 5.0]).astype("float64"),
"G": False,
"H": Series([1, 200, 923442], dtype="int8"),
}
)
def unstack_and_compare(df, column_name):
unstacked1 = df.unstack([column_name])
unstacked2 = df.unstack(column_name)
tm.assert_frame_equal(unstacked1, unstacked2)
df1 = df.set_index(["state", "index"])
unstack_and_compare(df1, "index")
df1 = df.set_index(["state", "some_categories"])
unstack_and_compare(df1, "some_categories")
df1 = df.set_index(["F", "C"])
unstack_and_compare(df1, "F")
df1 = df.set_index(["G", "B", "state"])
unstack_and_compare(df1, "B")
df1 = df.set_index(["E", "A"])
unstack_and_compare(df1, "E")
df1 = df.set_index(["state", "index"])
s = df1["A"]
unstack_and_compare(s, "index")
def test_stack_ints(self):
columns = MultiIndex.from_tuples(list(itertools.product(range(3), repeat=3)))
df = DataFrame(np.random.randn(30, 27), columns=columns)
tm.assert_frame_equal(df.stack(level=[1, 2]), df.stack(level=1).stack(level=1))
tm.assert_frame_equal(
df.stack(level=[-2, -1]), df.stack(level=1).stack(level=1)
)
df_named = df.copy()
return_value = df_named.columns.set_names(range(3), inplace=True)
assert return_value is None
tm.assert_frame_equal(
df_named.stack(level=[1, 2]), df_named.stack(level=1).stack(level=1)
)
def test_stack_mixed_levels(self):
columns = MultiIndex.from_tuples(
[
("A", "cat", "long"),
("B", "cat", "long"),
("A", "dog", "short"),
("B", "dog", "short"),
],
names=["exp", "animal", "hair_length"],
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
animal_hair_stacked = df.stack(level=["animal", "hair_length"])
exp_hair_stacked = df.stack(level=["exp", "hair_length"])
# GH #8584: Need to check that stacking works when a number
# is passed that is both a level name and in the range of
# the level numbers
df2 = df.copy()
df2.columns.names = ["exp", "animal", 1]
tm.assert_frame_equal(
df2.stack(level=["animal", 1]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=["exp", 1]), exp_hair_stacked, check_names=False
)
# When mixed types are passed and the ints are not level
# names, raise
msg = (
"level should contain all level names or all level numbers, not "
"a mixture of the two"
)
with pytest.raises(ValueError, match=msg):
df2.stack(level=["animal", 0])
# GH #8584: Having 0 in the level names could raise a
# strange error about lexsort depth
df3 = df.copy()
df3.columns.names = ["exp", "animal", 0]
tm.assert_frame_equal(
df3.stack(level=["animal", 0]), animal_hair_stacked, check_names=False
)
def test_stack_int_level_names(self):
columns = MultiIndex.from_tuples(
[
("A", "cat", "long"),
("B", "cat", "long"),
("A", "dog", "short"),
("B", "dog", "short"),
],
names=["exp", "animal", "hair_length"],
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
exp_animal_stacked = df.stack(level=["exp", "animal"])
animal_hair_stacked = df.stack(level=["animal", "hair_length"])
exp_hair_stacked = df.stack(level=["exp", "hair_length"])
df2 = df.copy()
df2.columns.names = [0, 1, 2]
tm.assert_frame_equal(
df2.stack(level=[1, 2]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=[0, 1]), exp_animal_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=[0, 2]), exp_hair_stacked, check_names=False
)
# Out-of-order int column names
df3 = df.copy()
df3.columns.names = [2, 0, 1]
tm.assert_frame_equal(
df3.stack(level=[0, 1]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df3.stack(level=[2, 0]), exp_animal_stacked, check_names=False
)
tm.assert_frame_equal(
df3.stack(level=[2, 1]), exp_hair_stacked, check_names=False
)
def test_unstack_bool(self):
df = DataFrame(
[False, False],
index=MultiIndex.from_arrays([["a", "b"], ["c", "l"]]),
columns=["col"],
)
rs = df.unstack()
xp = DataFrame(
np.array([[False, np.nan], [np.nan, False]], dtype=object),
index=["a", "b"],
columns=MultiIndex.from_arrays([["col", "col"], ["c", "l"]]),
)
tm.assert_frame_equal(rs, xp)
def test_unstack_level_binding(self):
# GH9856
mi = MultiIndex(
levels=[["foo", "bar"], ["one", "two"], ["a", "b"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
names=["first", "second", "third"],
)
s = Series(0, index=mi)
result = s.unstack([1, 2]).stack(0)
expected_mi = MultiIndex(
levels=[["foo", "bar"], ["one", "two"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=["first", "second"],
)
expected = DataFrame(
np.array(
[[np.nan, 0], [0, np.nan], [np.nan, 0], [0, np.nan]], dtype=np.float64
),
index=expected_mi,
columns=Index(["a", "b"], name="third"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_to_series(self, float_frame):
# check reversibility
data = float_frame.unstack()
assert isinstance(data, Series)
undo = data.unstack().T
tm.assert_frame_equal(undo, float_frame)
# check NA handling
data = DataFrame({"x": [1, 2, np.NaN], "y": [3.0, 4, np.NaN]})
data.index = Index(["a", "b", "c"])
result = data.unstack()
midx = MultiIndex(
levels=[["x", "y"], ["a", "b", "c"]],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
)
expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)
tm.assert_series_equal(result, expected)
# check composability of unstack
old_data = data.copy()
for _ in range(4):
data = data.unstack()
tm.assert_frame_equal(old_data, data)
def test_unstack_dtypes(self):
# GH 2929
rows = [[1, 1, 3, 4], [1, 2, 3, 4], [2, 1, 3, 4], [2, 2, 3, 4]]
df = DataFrame(rows, columns=list("ABCD"))
result = df.dtypes
expected = Series([np.dtype("int64")] * 4, index=list("ABCD"))
tm.assert_series_equal(result, expected)
# single dtype
df2 = df.set_index(["A", "B"])
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("int64")] * 4,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
# mixed
df2 = df.set_index(["A", "B"])
df2["C"] = 3.0
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("float64")] * 2 + [np.dtype("int64")] * 2,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
df2["D"] = "foo"
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("float64")] * 2 + [np.dtype("object")] * 2,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
# GH7405
for c, d in (
(np.zeros(5), np.zeros(5)),
(np.arange(5, dtype="f8"), np.arange(5, 10, dtype="f8")),
):
df = DataFrame(
{
"A": ["a"] * 5,
"C": c,
"D": d,
"B": date_range("2012-01-01", periods=5),
}
)
right = df.iloc[:3].copy(deep=True)
df = df.set_index(["A", "B"])
df["D"] = df["D"].astype("int64")
left = df.iloc[:3].unstack(0)
right = right.set_index(["A", "B"]).unstack(0)
right[("D", "a")] = right[("D", "a")].astype("int64")
assert left.shape == (3, 2)
tm.assert_frame_equal(left, right)
def test_unstack_non_unique_index_names(self):
idx = MultiIndex.from_tuples([("a", "b"), ("c", "d")], names=["c1", "c1"])
df = DataFrame([1, 2], index=idx)
msg = "The name c1 occurs multiple times, use a level number"
with pytest.raises(ValueError, match=msg):
df.unstack("c1")
with pytest.raises(ValueError, match=msg):
df.T.stack("c1")
def test_unstack_unused_levels(self):
# GH 17845: unused codes in index make unstack() cast int to float
idx = MultiIndex.from_product([["a"], ["A", "B", "C", "D"]])[:-1]
df = DataFrame([[1, 0]] * 3, index=idx)
result = df.unstack()
exp_col = MultiIndex.from_product([[0, 1], ["A", "B", "C"]])
expected = DataFrame([[1, 1, 1, 0, 0, 0]], index=["a"], columns=exp_col)
tm.assert_frame_equal(result, expected)
assert (result.columns.levels[1] == idx.levels[1]).all()
# Unused items on both levels
levels = [[0, 1, 7], [0, 1, 2, 3]]
codes = [[0, 0, 1, 1], [0, 2, 0, 2]]
idx = MultiIndex(levels, codes)
block = np.arange(4).reshape(2, 2)
df = DataFrame(np.concatenate([block, block + 4]), index=idx)
result = df.unstack()
expected = DataFrame(
np.concatenate([block * 2, block * 2 + 1], axis=1), columns=idx
)
tm.assert_frame_equal(result, expected)
assert (result.columns.levels[1] == idx.levels[1]).all()
# With mixed dtype and NaN
levels = [["a", 2, "c"], [1, 3, 5, 7]]
codes = [[0, -1, 1, 1], [0, 2, -1, 2]]
idx = MultiIndex(levels, codes)
data = np.arange(8)
df = DataFrame(data.reshape(4, 2), index=idx)
cases = (
(0, [13, 16, 6, 9, 2, 5, 8, 11], [np.nan, "a", 2], [np.nan, 5, 1]),
(1, [8, 11, 1, 4, 12, 15, 13, 16], [np.nan, 5, 1], [np.nan, "a", 2]),
)
for level, idces, col_level, idx_level in cases:
result = df.unstack(level=level)
exp_data = np.zeros(18) * np.nan
exp_data[idces] = data
cols = MultiIndex.from_product([[0, 1], col_level])
expected = DataFrame(exp_data.reshape(3, 6), index=idx_level, columns=cols)
tm.assert_frame_equal(result, expected)
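# Hedged aside (a sketch, not from the pandas test suite): the unused-level situation
# above can also be normalised explicitly with MultiIndex.remove_unused_levels before
# unstacking, which drops level entries that no code refers to.
_idx = MultiIndex.from_product([["a"], ["A", "B", "C", "D"]])[:-1]
_trimmed = _idx.remove_unused_levels()
assert list(_trimmed.levels[1]) == ["A", "B", "C"]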
@pytest.mark.parametrize("cols", [["A", "C"], slice(None)])
def test_unstack_unused_level(self, cols):
# GH 18562 : unused codes on the unstacked level
df = DataFrame([[2010, "a", "I"], [2011, "b", "II"]], columns=["A", "B", "C"])
ind = df.set_index(["A", "B", "C"], drop=False)
selection = ind.loc[(slice(None), slice(None), "I"), cols]
result = selection.unstack()
expected = ind.iloc[[0]][cols]
expected.columns = MultiIndex.from_product(
[expected.columns, ["I"]], names=[None, "C"]
)
expected.index = expected.index.droplevel("C")
tm.assert_frame_equal(result, expected)
def test_unstack_long_index(self):
# GH 32624: Error when using a lot of indices to unstack.
# The error occurred only if a lot of indices were used.
df = DataFrame(
[[1]],
columns=MultiIndex.from_tuples([[0]], names=["c1"]),
index=MultiIndex.from_tuples(
[[0, 0, 1, 0, 0, 0, 1]],
names=["i1", "i2", "i3", "i4", "i5", "i6", "i7"],
),
)
result = df.unstack(["i2", "i3", "i4", "i5", "i6", "i7"])
expected = DataFrame(
[[1]],
columns=MultiIndex.from_tuples(
[[0, 0, 1, 0, 0, 0, 1]],
names=["c1", "i2", "i3", "i4", "i5", "i6", "i7"],
),
index=Index([0], name="i1"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_multi_level_cols(self):
# GH 24729: Unstack a df with multi level columns
df = DataFrame(
[[0.0, 0.0], [0.0, 0.0]],
columns=MultiIndex.from_tuples(
[["B", "C"], ["B", "D"]], names=["c1", "c2"]
),
index=MultiIndex.from_tuples(
[[10, 20, 30], [10, 20, 40]], names=["i1", "i2", "i3"]
),
)
assert df.unstack(["i2", "i1"]).columns.names[-2:] == ["i2", "i1"]
def test_unstack_multi_level_rows_and_cols(self):
# GH 28306: Unstack df with multi level cols and rows
df = DataFrame(
[[1, 2], [3, 4], [-1, -2], [-3, -4]],
columns=MultiIndex.from_tuples([["a", "b", "c"], ["d", "e", "f"]]),
index=MultiIndex.from_tuples(
[
["m1", "P3", 222],
["m1", "A5", 111],
["m2", "P3", 222],
["m2", "A5", 111],
],
names=["i1", "i2", "i3"],
),
)
result = df.unstack(["i3", "i2"])
expected = df.unstack(["i3"]).unstack(["i2"])
tm.assert_frame_equal(result, expected)
def test_unstack_nan_index1(self):
# GH7466
def cast(val):
val_str = "" if val != val else val
return f"{val_str:1}"
def verify(df):
mk_list = lambda a: list(a) if isinstance(a, tuple) else [a]
rows, cols = df.notna().values.nonzero()
for i, j in zip(rows, cols):
left = sorted(df.iloc[i, j].split("."))
right = mk_list(df.index[i]) + mk_list(df.columns[j])
right = sorted(map(cast, right))
assert left == right
df = DataFrame(
{
"jim": ["a", "b", np.nan, "d"],
"joe": ["w", "x", "y", "z"],
"jolie": ["a.w", "b.x", " .y", "d.z"],
}
)
left = df.set_index(["jim", "joe"]).unstack()["jolie"]
right = df.set_index(["joe", "jim"]).unstack()["jolie"].T
tm.assert_frame_equal(left, right)
for idx in itertools.permutations(df.columns[:2]):
mi = df.set_index(list(idx))
for lev in range(2):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == len(df)
verify(udf["jolie"])
df = DataFrame(
{
"1st": ["d"] * 3
+ [np.nan] * 5
+ ["a"] * 2
+ ["c"] * 3
+ ["e"] * 2
+ ["b"] * 5,
"2nd": ["y"] * 2
+ ["w"] * 3
+ [np.nan] * 3
+ ["z"] * 4
+ [np.nan] * 3
+ ["x"] * 3
+ [np.nan] * 2,
"3rd": [
67,
39,
53,
72,
57,
80,
31,
18,
11,
30,
59,
50,
62,
59,
76,
52,
14,
53,
60,
51,
],
}
)
df["4th"], df["5th"] = (
df.apply(lambda r: ".".join(map(cast, r)), axis=1),
df.apply(lambda r: ".".join(map(cast, r.iloc[::-1])), axis=1),
)
for idx in itertools.permutations(["1st", "2nd", "3rd"]):
mi = df.set_index(list(idx))
for lev in range(3):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == 2 * len(df)
for col in ["4th", "5th"]:
verify(udf[col])
def test_unstack_nan_index2(self):
# GH7403
df = DataFrame({"A": list("aaaabbbb"), "B": range(8), "C": range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [
[3, 0, 1, 2, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, 4, 5, 6, 7],
]
vals = list(map(list, zip(*vals)))
idx = Index([np.nan, 0, 1, 2, 4, 5, 6, 7], name="B")
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
df = DataFrame({"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)})
df.iloc[2, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [[2, np.nan], [0, 4], [1, 5], [np.nan, 6], [3, 7]]
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
idx = Index([np.nan, 0, 1, 2, 3], name="B")
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
df = DataFrame({"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [[3, np.nan], [0, 4], [1, 5], [2, 6], [np.nan, 7]]
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
idx = Index([np.nan, 0, 1, 2, 3], name="B")
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
def test_unstack_nan_index3(self, using_array_manager):
# GH7401
df = DataFrame(
{
"A": list("aaaaabbbbb"),
"B": (date_range("2012-01-01", periods=5).tolist() * 2),
"C": np.arange(10),
}
)
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack()
vals = np.array([[3, 0, 1, 2, np.nan, 4], [np.nan, 5, 6, 7, 8, 9]])
idx = Index(["a", "b"], name="A")
cols = MultiIndex(
levels=[["C"], date_range("2012-01-01", periods=5)],
codes=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, "B"],
)
right = DataFrame(vals, columns=cols, index=idx)
if using_array_manager:
# INFO(ArrayManager) with ArrayManager preserve dtype where possible
cols = right.columns[[1, 2, 3, 5]]
right[cols] = right[cols].astype(df["C"].dtype)
tm.assert_frame_equal(left, right)
def test_unstack_nan_index4(self):
# GH4862
vals = [
["Hg", np.nan, np.nan, 680585148],
["U", 0.0, np.nan, 680585148],
["Pb", 7.07e-06, np.nan, 680585148],
["Sn", 2.3614e-05, 0.0133, 680607017],
["Ag", 0.0, 0.0133, 680607017],
["Hg", -0.00015, 0.0133, 680607017],
]
df = DataFrame(
vals,
columns=["agent", "change", "dosage", "s_id"],
index=[17263, 17264, 17265, 17266, 17267, 17268],
)
left = df.copy().set_index(["s_id", "dosage", "agent"]).unstack()
vals = [
[np.nan, np.nan, 7.07e-06, np.nan, 0.0],
[0.0, -0.00015, np.nan, 2.3614e-05, np.nan],
]
idx = MultiIndex(
levels=[[680585148, 680607017], [0.0133]],
codes=[[0, 1], [-1, 0]],
names=["s_id", "dosage"],
)
cols = MultiIndex(
levels=[["change"], ["Ag", "Hg", "Pb", "Sn", "U"]],
codes=[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]],
names=[None, "agent"],
)
right = DataFrame(vals, columns=cols, index=idx)  # api: pandas.DataFrame
"""Miscellaneous spatial and statistical transforms."""
import copy
import logging
import os.path as op
import nibabel as nib
import numpy as np
import pandas as pd
from nilearn.reporting import get_clusters_table
from scipy import stats
from . import references
from .base import Transformer
from .due import due
from .utils import dict_to_coordinates, dict_to_df, get_masker, listify
LGR = logging.getLogger(__name__)
class ImageTransformer(Transformer):
"""A class to create new images from existing ones within a Dataset.
This class is a light wrapper around :func:`nimare.transforms.transform_images`.
.. versionadded:: 0.0.9
Parameters
----------
target : {'z', 'p', 'beta', 'varcope'} or list
Target image type. Multiple target types may be specified as a list.
overwrite : :obj:`bool`, optional
Whether to overwrite existing files or not. Default is False.
See Also
--------
nimare.transforms.transform_images : The function called by this class.
"""
def __init__(self, target, overwrite=False):
self.target = listify(target)
self.overwrite = overwrite
def transform(self, dataset):
"""Generate images of the target type from other image types in a Dataset.
Parameters
----------
dataset : :obj:`nimare.dataset.Dataset`
A Dataset containing images and relevant metadata.
Returns
-------
new_dataset : :obj:`nimare.dataset.Dataset`
A copy of the input Dataset, with new images added to its images attribute.
"""
# Using attribute check instead of type check to allow fake Datasets for testing.
if not hasattr(dataset, "slice"):
raise ValueError(
f"Argument 'dataset' must be a valid Dataset object, not a {type(dataset)}."
)
new_dataset = dataset.copy()
temp_images = dataset.images
for target_type in self.target:
temp_images = transform_images(
temp_images,
target=target_type,
masker=dataset.masker,
metadata_df=dataset.metadata,
out_dir=dataset.basepath,
overwrite=self.overwrite,
)
new_dataset.images = temp_images
return new_dataset
def transform_images(images_df, target, masker, metadata_df=None, out_dir=None, overwrite=False):
"""Generate images of a given type from other image types and write out to files.
.. versionchanged:: 0.0.9
* [ENH] Add overwrite option to transform_images
.. versionadded:: 0.0.4
Parameters
----------
images_df : :class:`pandas.DataFrame`
DataFrame with paths to images for studies in Dataset.
target : {'z', 'p', 'beta', 'varcope'}
Target data type.
masker : :class:`nilearn.input_data.NiftiMasker` or similar
Masker used to define orientation and resolution of images.
Specific voxels defined in mask will not be used, and a new masker
with _all_ voxels in acquisition matrix selected will be created.
metadata_df : :class:`pandas.DataFrame` or :obj:`None`, optional
DataFrame with metadata. Rows in this DataFrame must match those in
``images_df``, including the ``'id'`` column.
out_dir : :obj:`str` or :obj:`None`, optional
Path to output directory. If None, use folder containing first image
for each study in ``images_df``.
overwrite : :obj:`bool`, optional
Whether to overwrite existing files or not. Default is False.
Returns
-------
images_df : :class:`pandas.DataFrame`
DataFrame with paths to new images added.
"""
images_df = images_df.copy()
valid_targets = {"z", "p", "beta", "varcope"}
if target not in valid_targets:
raise ValueError(
f"Target type {target} not supported. Must be one of: {', '.join(valid_targets)}"
)
mask_img = masker.mask_img
new_mask = np.ones(mask_img.shape, int)
new_mask = nib.Nifti1Image(new_mask, mask_img.affine, header=mask_img.header)
new_masker = get_masker(new_mask)
res = masker.mask_img.header.get_zooms()
res = "x".join([str(r) for r in res])
if target not in images_df.columns:
target_ids = images_df["id"].values
else:
target_ids = images_df.loc[images_df[target].isnull(), "id"]
for id_ in target_ids:
row = images_df.loc[images_df["id"] == id_].iloc[0]
# Determine output filename, if file can be generated
if out_dir is None:
options = [r for r in row.values if isinstance(r, str) and op.isfile(r)]
id_out_dir = op.dirname(options[0])
else:
id_out_dir = out_dir
new_file = op.join(id_out_dir, f"{id_}_{res}_{target}.nii.gz")
# Grab columns with actual values
available_data = row[~row.isnull()].to_dict()
if metadata_df is not None:
metadata_row = metadata_df.loc[metadata_df["id"] == id_].iloc[0]
metadata = metadata_row[~metadata_row.isnull()].to_dict()
for k, v in metadata.items():
if k not in available_data.keys():
available_data[k] = v
# Get converted data
img = resolve_transforms(target, available_data, new_masker)
if img is not None:
if overwrite or not op.isfile(new_file):
img.to_filename(new_file)
else:
LGR.debug("Image already exists. Not overwriting.")
images_df.loc[images_df["id"] == id_, target] = new_file
else:
images_df.loc[images_df["id"] == id_, target] = None
return images_df
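# Hedged illustration (not part of nimare): the output filename convention built above,
# reproduced standalone so the "{id}_{res}_{target}.nii.gz" pattern is explicit.
# The voxel sizes and study id below are made up for illustration.
_zooms = (2.0, 2.0, 2.0)
_res = "x".join(str(r) for r in _zooms)  # -> "2.0x2.0x2.0"
_new_file = op.join("/tmp", f"study-01_{_res}_z.nii.gz")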
def resolve_transforms(target, available_data, masker):
"""Determine and apply the appropriate transforms to a target image type from available data.
.. versionchanged:: 0.0.8
* [FIX] Remove unnecessary dimensions from output image object *img_like*. \
Now, the image object only has 3 dimensions.
.. versionadded:: 0.0.4
Parameters
----------
target : {'z', 'p', 't', 'beta', 'varcope'}
Target image type.
available_data : dict
Dictionary mapping data types to their values. Images in the dictionary
are paths to files.
masker : nilearn Masker
Masker used to convert images to arrays and back. Preferably, this mask
should cover the full acquisition matrix (rather than an ROI), given
that the calculated images will be saved and used for the full Dataset.
Returns
-------
img_like or None
Image object with the desired data type, if it can be generated.
Otherwise, None.
"""
if target in available_data.keys():
LGR.warning(f"Target '{target}' already available.")
return available_data[target]
if target == "z":
if ("t" in available_data.keys()) and ("sample_sizes" in available_data.keys()):
dof = sample_sizes_to_dof(available_data["sample_sizes"])
t = masker.transform(available_data["t"])
z = t_to_z(t, dof)
elif "p" in available_data.keys():
p = masker.transform(available_data["p"])
z = p_to_z(p)
else:
return None
z = masker.inverse_transform(z.squeeze())
return z
elif target == "t":
# will return None if no transform/target exists
temp = resolve_transforms("z", available_data, masker)
if temp is not None:
available_data["z"] = temp
if ("z" in available_data.keys()) and ("sample_sizes" in available_data.keys()):
dof = sample_sizes_to_dof(available_data["sample_sizes"])
z = masker.transform(available_data["z"])
t = z_to_t(z, dof)
t = masker.inverse_transform(t.squeeze())
return t
else:
return None
elif target == "beta":
if "t" not in available_data.keys():
# will return None if no transform/target exists
temp = resolve_transforms("t", available_data, masker)
if temp is not None:
available_data["t"] = temp
if "varcope" not in available_data.keys():
temp = resolve_transforms("varcope", available_data, masker)
if temp is not None:
available_data["varcope"] = temp
if ("t" in available_data.keys()) and ("varcope" in available_data.keys()):
t = masker.transform(available_data["t"])
varcope = masker.transform(available_data["varcope"])
beta = t_and_varcope_to_beta(t, varcope)
beta = masker.inverse_transform(beta.squeeze())
return beta
else:
return None
elif target == "varcope":
if "se" in available_data.keys():
se = masker.transform(available_data["se"])
varcope = se_to_varcope(se)
elif ("samplevar_dataset" in available_data.keys()) and (
"sample_sizes" in available_data.keys()
):
sample_size = sample_sizes_to_sample_size(available_data["sample_sizes"])
samplevar_dataset = masker.transform(available_data["samplevar_dataset"])
varcope = samplevar_dataset_to_varcope(samplevar_dataset, sample_size)
elif ("sd" in available_data.keys()) and ("sample_sizes" in available_data.keys()):
sample_size = sample_sizes_to_sample_size(available_data["sample_sizes"])
sd = masker.transform(available_data["sd"])
varcope = sd_to_varcope(sd, sample_size)
varcope = masker.inverse_transform(varcope)
elif ("t" in available_data.keys()) and ("beta" in available_data.keys()):
t = masker.transform(available_data["t"])
beta = masker.transform(available_data["beta"])
varcope = t_and_beta_to_varcope(t, beta)
else:
return None
varcope = masker.inverse_transform(varcope.squeeze())
return varcope
elif target == "p":
if ("t" in available_data.keys()) and ("sample_sizes" in available_data.keys()):
dof = sample_sizes_to_dof(available_data["sample_sizes"])
t = masker.transform(available_data["t"])
z = t_to_z(t, dof)
p = z_to_p(z)
elif "z" in available_data.keys():
z = masker.transform(available_data["z"])
p = z_to_p(z)
else:
return None
p = masker.inverse_transform(p.squeeze())
return p
else:
return None
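# Hedged aside (a rough standalone sketch, not nimare's implementation): the t -> z
# conversion that resolve_transforms relies on can be approximated with scipy.stats,
# assuming a two-sided convention; the real t_to_z helper may differ in detail.
_t_vals = np.array([1.0, 2.5, -3.0])
_dof = 20
_p_two = stats.t.sf(np.abs(_t_vals), _dof) * 2
_z_vals = stats.norm.isf(_p_two / 2) * np.sign(_t_vals)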
class ImagesToCoordinates(Transformer):
"""Transformer from images to coordinates.
.. versionadded:: 0.0.8
Parameters
----------
merge_strategy : {"fill", "replace", "demolish"}, optional
Strategy for how to incorporate the generated coordinates with possible pre-existing
coordinates. Default="fill".
Descriptions of the options:
- ``"fill"``: only add coordinates to study contrasts that do not have coordinates.
If a study contrast has both image and coordinate data, the original coordinate data
will be kept.
- ``"replace"``: replace existing coordinates with coordinates generated by this function.
If a study contrast only has coordinate data and no images, or if the statistical
threshold is too high for nimare to detect any peaks, the original coordinates will be
kept.
- ``"demolish"``: only keep generated coordinates and discard any study contrasts with
coordinate data, but no images.
cluster_threshold : :obj:`int` or `None`, optional
Cluster size threshold, in voxels. Default=None.
remove_subpeaks : :obj:`bool`, optional
If True, removes subpeaks from the cluster results. Default=False.
two_sided : :obj:`bool`, optional
Whether to employ two-sided thresholding or to evaluate positive values only.
Default=False.
min_distance : :obj:`float`, optional
Minimum distance between subpeaks in mm. Default=8mm.
z_threshold : :obj:`float`
Cluster forming z-scale threshold. Default=3.1.
Notes
-----
The raw Z and/or P maps are not corrected for multiple comparisons. Uncorrected z-values and/or
p-values are used for thresholding.
"""
def __init__(
self,
merge_strategy="fill",
cluster_threshold=None,
remove_subpeaks=False,
two_sided=False,
min_distance=8.0,
z_threshold=3.1,
):
self.merge_strategy = merge_strategy
self.cluster_threshold = cluster_threshold
self.remove_subpeaks = remove_subpeaks
self.min_distance = min_distance
self.two_sided = two_sided
self.z_threshold = z_threshold
def transform(self, dataset):
"""Create coordinate peaks from statistical images.
Parameters
----------
dataset : :obj:`nimare.dataset.Dataset`
Dataset with z maps and/or p maps
that can be converted to coordinates.
Returns
-------
dataset : :obj:`nimare.dataset.Dataset`
Dataset with coordinates generated from
images and metadata indicating origin
of coordinates ('original' or 'nimare').
"""
# relevant variables from dataset
space = dataset.space
masker = dataset.masker
images_df = dataset.images
metadata = dataset.metadata.copy()
# conform space specification
if "mni" in space.lower() or "ale" in space.lower():
coordinate_space = "MNI"
elif "tal" in space.lower():
coordinate_space = "TAL"
else:
coordinate_space = None
coordinates_dict = {}
for _, row in images_df.iterrows():
if row["id"] in list(dataset.coordinates["id"]) and self.merge_strategy == "fill":
continue
if row.get("z"):
clusters = get_clusters_table(
nib.funcs.squeeze_image(nib.load(row.get("z"))),
self.z_threshold,
self.cluster_threshold,
self.two_sided,
self.min_distance,
)
elif row.get("p"):
LGR.info(
f"No Z map for {row['id']}, using p map "
"(p-values will be treated as positive z-values)"
)
if self.two_sided:
LGR.warning(f"Cannot use two_sided threshold using a p map for {row['id']}")
p_threshold = 1 - z_to_p(self.z_threshold)
nimg = nib.funcs.squeeze_image(nib.load(row.get("p")))
inv_nimg = nib.Nifti1Image(1 - nimg.get_fdata(), nimg.affine, nimg.header)
clusters = get_clusters_table(
inv_nimg,
p_threshold,
self.cluster_threshold,
self.min_distance,
)
# Peak stat p-values are reported as 1 - p in get_clusters_table
clusters["Peak Stat"] = p_to_z(1 - clusters["Peak Stat"])
else:
LGR.warning(f"No Z or p map for {row['id']}, skipping...")
continue
# skip entry if no clusters are found
if clusters.empty:
LGR.warning(
f"No clusters were found for {row['id']} at a threshold of {self.z_threshold}"
)
continue
if self.remove_subpeaks:
# subpeaks are identified as 1a, 1b, etc
# while peaks are kept as 1, 2, 3, etc,
# so removing all non-int rows will
# keep main peaks while removing subpeaks
clusters = clusters[clusters["Cluster ID"].apply(lambda x: isinstance(x, int))]
coordinates_dict[row["study_id"]] = {
"contrasts": {
row["contrast_id"]: {
"coords": {
"space": coordinate_space,
"x": list(clusters["X"]),
"y": list(clusters["Y"]),
"z": list(clusters["Z"]),
"z_stat": list(clusters["Peak Stat"]),
},
"metadata": {"coordinate_source": "nimare"},
}
}
}
# only the generated coordinates ('demolish')
coordinates_df = dict_to_coordinates(coordinates_dict, masker, space)
meta_df = dict_to_df(
    pd.DataFrame(dataset._ids)  # api: pandas.DataFrame
import cv2
import os
import mediapipe as mp
import pandas as pd
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", help="data file name",
type=str, required=True)
parser.add_argument(
"-p", "--path", help="directory to save the data", type=str, default='.')
args = parser.parse_args()
mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
landmarks = [x.name for x in mp_hands.HandLandmark]
data = []
file_name = args.file
path = args.path
file_path = os.path.join(path, file_name + '.csv')
cnt_mouse, cnt_left, cnt_right = 0, 0, 0
cnt_scrllup, cnt_scrlldown, cnt_zoom = 0, 0, 0
cap = cv2.VideoCapture(0)
with mp_hands.Hands(
max_num_hands=1,
min_detection_confidence=0.5,
min_tracking_confidence=0.5) as hands:
while True:
ret, image = cap.read()
image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
image.flags.writeable = False
results = hands.process(image)
key = (cv2.waitKey(10) & 0xFF)
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
if results.multi_hand_landmarks:
for hand_landmarks, handedness in zip(results.multi_hand_landmarks, results.multi_handedness):
if handedness.classification[0].score <= .9:
continue
new_data = {}
for lm in landmarks:
new_data[lm + '_x'] = hand_landmarks.landmark[mp_hands.HandLandmark[lm]].x
new_data[lm + '_y'] = hand_landmarks.landmark[mp_hands.HandLandmark[lm]].y
new_data[lm + '_z'] = hand_landmarks.landmark[mp_hands.HandLandmark[lm]].z
new_data['hand'] = handedness.classification[0].label
if (key == ord('a')):
new_data['class'] = 'mouse'
data.append(new_data)
cnt_mouse += 1
elif key == ord('s'):
new_data['class'] = 'left_click'
data.append(new_data)
cnt_left += 1
elif key == ord('d'):
new_data['class'] = 'right_click'
data.append(new_data)
cnt_right += 1
elif key == ord('f'):
new_data['class'] = 'scroll_up'
data.append(new_data)
cnt_scrllup += 1
elif key == ord('g'):
new_data['class'] = 'scroll_down'
data.append(new_data)
cnt_scrlldown += 1
elif key == ord('h'):
new_data['class'] = 'zoom'
data.append(new_data)
cnt_zoom += 1
mp_drawing.draw_landmarks(
image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
cv2.imshow('frame', image)
print(f'\rM: {cnt_mouse} - L: {cnt_left} - R: {cnt_right} - UP: {cnt_scrllup} - DOWN: {cnt_scrlldown} - ZOOM: {cnt_zoom} -- TOTAL = {len(data)}', end='', flush=True)
if key == ord('q'):
break
if key == ord('w'):
if data[-1]['class'] == 'mouse':
cnt_mouse -= 1
elif data[-1]['class'] == 'left_click':
cnt_left -= 1
elif data[-1]['class'] == 'right_click':
cnt_right -= 1
elif data[-1]['class'] == 'scroll_up':
cnt_scrllup -= 1
elif data[-1]['class'] == 'scroll_down':
cnt_scrlldown -= 1
elif data[-1]['class'] == 'zoom':
cnt_zoom -= 1
data.pop(-1)
if key == ord('e'):
pd.DataFrame(data).to_csv(file_path, index=False)
print()
pd.DataFrame(data)  # api: pandas.DataFrame
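# Hedged follow-up (not part of the capture script): a minimal sketch of reading the
# collected CSV back for training, assuming the file has been written with the 'e' key
# and therefore has the landmark columns plus 'hand' and 'class'.
_df = pd.read_csv(file_path)
_X = _df[[c for c in _df.columns if c.endswith(("_x", "_y", "_z"))]]
_y = _df["class"]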
import glob
import os
import pandas as pd
import pytz
from dateutil import parser, tz
from matplotlib import pyplot as plt
fp = "C:\\Users\\Robert\\Documents\\Uni\\SOLARNET\\HomogenizationCampaign\\rome\\"
file = os.path.join(fp, "data.csv")
data = pd.read_csv(file, delimiter=" ")
print(data)
converted_data = []
for fits_file, ut in zip(data.file, data.UT):
time = parser.parse(fits_file[-15:-7] +"T" + ut)
time = pytz.utc.localize(time)
type = fits_file[9:14]
if type != "CaIIK":
print(fits_file)
converted_data.append([fits_file, time, type, 1])
converted_data = pd.DataFrame(converted_data, columns=["file", "date", "type", "quality"])  # api: pandas.DataFrame
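# Hedged illustration (not in the original script): the [-15:-7] slice above pulls the
# date out of the FITS filename; shown here on a made-up filename, so the suffix layout
# is an assumption.
_example_name = "rome_CaIIK_20200131.fts.gz"  # hypothetical filename
assert _example_name[-15:-7] == "20200131"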
import pydp as dp # type: ignore
from pydp.algorithms.laplacian import BoundedSum, Count # type: ignore
import math
import statistics as s
import pandas as pd # type: ignore
from collections import defaultdict
# Assumptions:
# Time when visitors start entering the restaurant (900 represents 9:00 AM)
OPENING_HOUR = 900
# Time when visitors finish entering the restaurant (2000 represents 20:00 PM)
CLOSING_HOUR = 2000
# A python list of valid work hours when visitors can come to the restaurant.
VALID_HOURS = list(range(OPENING_HOUR, CLOSING_HOUR + 1))
# Cap the maximum number of visiting days at 3 per visitor (any number above will not be taken into account)
COUNT_MAX_CONTRIBUTED_DAYS = 3
# Cap the maximum number of visiting days at 4 per visitor (any number above will not be taken into account)
SUM_MAX_CONTRIBUTED_DAYS = 4
# Expected minimum amount of money (in Euros) to be spent by a visitor per a single visit
MIN_EUROS_SPENT = 0
# Expected maximum amount of money (in Euros) to be spent by a visitor per a single visit
MAX_EUROS_SPENT_1 = 50
MAX_EUROS_SPENT_2 = 65
LN_3 = math.log(3)
class RestaurantStatistics:
"""Class to replicate the restaurant example from Google's DP library.
Includes both Count and Sum operations.
Original code in java:
- https://github.com/google/differential-privacy/tree/main/examples/java"""
def __init__(self, hours_filename, days_filename, epsilon=LN_3):
# Store the .csv filenames for daily and weekly restaurant data
self.hours_filename = hours_filename
self.days_filename = days_filename
# The privacy threshold, a number between 0 and 1
self._epsilon = epsilon
# The hourly and weekly data as pandas DataFrames
self._hour_visits = pd.read_csv(self.hours_filename, sep=",")
self._day_visits = pd.read_csv(self.days_filename, sep=",")
def count_visits_per_hour(self) -> tuple:
"""Compute the number of visits per hour of day and return two dictionaries
that map an hour to the number of visits in that hour.
The first dictionary is the count calculation without any differential privacy, while the second one uses the PyDP library for a private calculation
"""
non_private_counts = self.get_non_private_counts_per_hour()
private_counts = self.get_private_counts_per_hour()
return non_private_counts, private_counts
def count_visits_per_day(self) -> tuple:
"""Compute the number of visits per hour of day and return two dictionaries
that map a day to the number of visits in that day.
The first dictionary is the count calculation without any differential privacy,
while the second one uses the PyDP library for a private calculation
"""
non_private_counts = self.get_non_private_counts_per_day()
private_counts = self.get_private_counts_per_day()
return non_private_counts, private_counts
def sum_revenue_per_day(self) -> tuple:
"""Compute the restaurant's daily revenue for the whole week.
The first dictionary is the sum calculation without any differential privacy,
while the second one uses the PyDP library for a private calculation.
"""
non_private_sum = self.get_non_private_sum_revenue()
private_sum = self.get_private_sum_revenue()
return non_private_sum, private_sum
def sum_revenue_per_day_with_preaggregation(self) -> tuple:
"""Calculates the restaurant's daily revenue for the whole week, while
pre-agreggating each visitor's spending before calculating the
BoundedSum with PyDP.
The first dictionary is the count calculation without any differential privacy,
while the second one uses the PyDP library for a private calculation
"""
non_private_sum = self.get_non_private_sum_revenue()
private_sum = self.get_private_sum_revenue_with_preaggregation()
return non_private_sum, private_sum
def get_non_private_counts_per_hour(self) -> dict:
"""Compute the number of visits per hour without any differential privacy.
Return a dictionary mapping hours to number of visits
"""
hours_count = dict()
# Parse times so it's easy to check whether they are valid
visits = self._hour_visits.copy()
visits["Time entered"] = (
|
pd.to_datetime(visits["Time entered"])
|
pandas.to_datetime
|
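# Hedged sketch (not part of the original class): a plain-pandas per-hour count, to
# contrast with the differentially private Count from PyDP used in this example. The
# column name and integer hour encoding below are assumptions for illustration only.
_visits = pd.DataFrame({"hour": [900, 1100, 1100, 1300]})
_non_private_counts = _visits["hour"].value_counts().to_dict()  # {1100: 2, 900: 1, 1300: 1}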
from mindsdb.libs.constants.mindsdb import *
from mindsdb.config import *
from mindsdb.libs.helpers.general_helpers import disable_console_output, get_tensorflow_colname
from dateutil.parser import parse as parse_datetime
import os, sys
import shutil
from tensorflow.python.client import device_lib
from ludwig.api import LudwigModel
from ludwig.data.preprocessing import build_metadata
import pandas as pd
from imageio import imread
# @TODO: Define generic interface, similar to 'base_module' in the phases
class LudwigBackend():
def __init__(self, transaction):
self.transaction = transaction
def _translate_df_to_timeseries_format(self, df, model_definition, timeseries_cols, mode='predict'):
timeseries_col_name = timeseries_cols[0]
previous_predict_col_names = []
predict_col_names = []
for feature_def in model_definition['output_features']:
if mode == 'train':
predict_col_names.append(feature_def['name'])
else:
predict_col_names = []
previous_predict_col_name = 'previous_' + feature_def['name']
previous_predict_col_already_in = False
for definition in model_definition['input_features']:
if definition['name'] == previous_predict_col_name:
previous_predict_col_already_in = True
if not previous_predict_col_already_in:
model_definition['input_features'].append({
'name': previous_predict_col_name
,'type': 'sequence'
})
other_col_names = []
for feature_def in model_definition['input_features']:
if feature_def['name'] not in self.transaction.lmd['model_group_by'] and feature_def['name'] not in previous_predict_col_names:
feature_def['type'] = 'sequence'
if feature_def['name'] not in [timeseries_col_name]:
other_col_names.append(feature_def['name'])
previous_predict_col_names.append(previous_predict_col_name)
new_cols = {}
for col in [*other_col_names,*previous_predict_col_names,timeseries_col_name,*predict_col_names,*self.transaction.lmd['model_group_by']]:
new_cols[col] = []
nr_ele = len(df[timeseries_col_name])
i = 0
while i < nr_ele:
new_row = {}
timeseries_row = [df[timeseries_col_name][i]]
for col in other_col_names:
new_row[col] = [df[col][i]]
for col in previous_predict_col_names:
new_row[col] = []
for col in predict_col_names:
new_row[col] = df[col][i]
for col in self.transaction.lmd['model_group_by']:
new_row[col] = df[col][i]
inverted_index_range = list(range(i))
inverted_index_range.reverse()
ii = 0
for ii in inverted_index_range:
if (i - ii) > self.transaction.lmd['window_size']:
break
timeseries_row.append(df[timeseries_col_name][ii])
for col in other_col_names:
new_row[col].append(df[col][ii])
for col in previous_predict_col_names:
try:
new_row[col].append(df[col.replace('previous_', '')][ii])
except:
try:
new_row[col].append(df[col][ii])
except:
self.transaction.log.warning('Missing previous predicted values for output column: {}, these should be included in your input under the name: {}'.format(col.replace('previous_', ''), col))
if mode == 'train':
i = max(i + 1, (i + round((i - ii)/2)))
else:
i = i + 1
new_row[timeseries_col_name] = timeseries_row
for col in new_row:
if col not in predict_col_names and col not in self.transaction.lmd['model_group_by']:
new_row[col].reverse()
new_cols[col].append(new_row[col])
new_df = pd.DataFrame(data=new_cols)  # api: pandas.DataFrame
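# Hedged aside (not from mindsdb): a compact sketch of the sliding-window reshaping the
# method above performs, using a single hypothetical column and a window size of 2; the
# real method also handles group-by columns, previous-prediction columns and train mode.
_series = [10, 20, 30, 40]
_window_size = 2
_windows = [_series[max(0, i - _window_size):i + 1] for i in range(len(_series))]
# -> [[10], [10, 20], [10, 20, 30], [20, 30, 40]]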
import pickle
from typing import Any, Dict, Iterable
import numpy as np
import pandas as pd
from numpy.lib.function_base import iterable
from pandas.api.types import CategoricalDtype
from pandas.core.groupby import DataFrameGroupBy
from scipy.sparse import hstack
from sklearn.naive_bayes import MultinomialNB
from sklearn.preprocessing import OneHotEncoder
from spacy.language import Language
from spacy.strings import StringStore
from spacy.tokens import Doc, Span
from .functional import _convert_series_to_array, _get_label
class EndLinesModel:
"""Model to classify if an end line is a real one or it should be a space.
Parameters
----------
nlp : Language
spaCy nlp pipeline to use for matching.
"""
def __init__(self, nlp: Language):
self.nlp = nlp
def _preprocess_data(self, corpus: Iterable[Doc]) -> pd.DataFrame:
"""
Parameters
----------
corpus : Iterable[Doc]
Corpus of documents
Returns
-------
pd.DataFrame
Preprocessed data
"""
# Extract the vocabulary
string_store = self.nlp.vocab.strings
# Iterate in the corpus and construct a dataframe
train_data_list = []
for i, doc in enumerate(corpus):
train_data_list.append(self._get_attributes(doc, i))
df = pd.concat(train_data_list)
df.reset_index(inplace=True, drop=False)
df.rename(columns={"ORTH": "A1", "index": "original_token_index"}, inplace=True)
# Retrieve string representation of token_id and shape
df["TEXT"] = df.A1.apply(self._get_string, string_store=string_store)
df["SHAPE_"] = df.SHAPE.apply(self._get_string, string_store=string_store)
# Convert new lines as an attribute instead of a row
df = self._convert_line_to_attribute(df, expr="\n", col="END_LINE")
df = self._convert_line_to_attribute(df, expr="\n\n", col="BLANK_LINE")
df = df.loc[~(df.END_LINE | df.BLANK_LINE)]
df = df.drop(columns="END_LINE")
df = df.drop(columns="BLANK_LINE")
df.rename(
columns={"TEMP_END_LINE": "END_LINE", "TEMP_BLANK_LINE": "BLANK_LINE"},
inplace=True,
)
# Construct A2 by shifting
df = self._shift_col(df, "A1", "A2", direction="backward")
# Compute A3 and A4
df = self._compute_a3(df)
df = self._shift_col(df, "A3", "A4", direction="backward")
# SPACE is the class to predict. Set 1 if not an END_LINE
df["SPACE"] = np.logical_not(df["END_LINE"]).astype("int")
df[["END_LINE", "BLANK_LINE"]] = df[["END_LINE", "BLANK_LINE"]].fillna(
True, inplace=False
)
# Assign a sentence id to each token
df = df.groupby("DOC_ID").apply(self._retrieve_lines)
df["SENTENCE_ID"] = df["SENTENCE_ID"].astype("int")
# Compute B1 and B2
df = self._compute_B(df)
# Drop Tokens without info (last token of doc)
df.dropna(subset=["A1", "A2", "A3", "A4"], inplace=True)
# Export the vocabularies to be able to use the model with another corpus
voc_a3a4 = self._create_vocabulary(df.A3_.cat.categories)
voc_B2 = self._create_vocabulary(df.cv_bin.cat.categories)
voc_B1 = self._create_vocabulary(df.l_norm_bin.cat.categories)
vocabulary = {"A3A4": voc_a3a4, "B1": voc_B1, "B2": voc_B2}
self.vocabulary = vocabulary
return df
def fit_and_predict(self, corpus: Iterable[Doc]) -> pd.DataFrame:
"""Fit the model and predict for the training data
Parameters
----------
corpus : Iterable[Doc]
An iterable of Documents
Returns
-------
pd.DataFrame
one line by end_line prediction
"""
# Preprocess data to have a pd DF
df = self._preprocess_data(corpus)
# Train and predict M1
self._fit_M1(df.A1, df.A2, df.A3, df.A4, df.SPACE)
outputs_M1 = self._predict_M1(
df.A1,
df.A2,
df.A3,
df.A4,
)
df["M1"] = outputs_M1["predictions"]
df["M1_proba"] = outputs_M1["predictions_proba"]
# Force Blank lines to 0
df.loc[df.BLANK_LINE, "M1"] = 0
# Train and predict M2
df_endlines = df.loc[df.END_LINE]
self._fit_M2(B1=df_endlines.B1, B2=df_endlines.B2, label=df_endlines.M1)
outputs_M2 = self._predict_M2(B1=df_endlines.B1, B2=df_endlines.B2)
df.loc[df.END_LINE, "M2"] = outputs_M2["predictions"]
df.loc[df.END_LINE, "M2_proba"] = outputs_M2["predictions_proba"]
df["M2"] = df["M2"].astype(
pd.Int64Dtype()
) # cast to pd.Int64Dtype cause there are None values
# M1M2
df = df.loc[df.END_LINE]
df["M1M2_lr"] = (df["M2_proba"] / (1 - df["M2_proba"])) * (
df["M1_proba"] / (1 - df["M1_proba"])
)
df["M1M2"] = (df["M1M2_lr"] > 1).astype("int")
# Force Blank lines to 0
df.loc[df.BLANK_LINE, ["M2", "M1M2"]] = 0
# Make binary col
df["PREDICTED_END_LINE"] = np.logical_not(df["M1M2"].astype(bool))
return df
def predict(self, df: pd.DataFrame) -> pd.DataFrame:
"""Use the model for inference
The df should have the following columns:
`["A1","A2","A3","A4","B1","B2","BLANK_LINE"]`
Parameters
----------
df : pd.DataFrame
The df should have the following columns:
`["A1","A2","A3","A4","B1","B2","BLANK_LINE"]`
Returns
-------
pd.DataFrame
The result is added to the column `PREDICTED_END_LINE`
"""
df = self._convert_raw_data_to_codes(df)
outputs_M1 = self._predict_M1(df.A1, df.A2, df._A3, df._A4)
df["M1"] = outputs_M1["predictions"]
df["M1_proba"] = outputs_M1["predictions_proba"]
outputs_M2 = self._predict_M2(B1=df._B1, B2=df._B2)
df["M2"] = outputs_M2["predictions"]
df["M2_proba"] = outputs_M2["predictions_proba"]
df["M2"] = df["M2"].astype(
pd.Int64Dtype()
) # cast to pd.Int64Dtype cause there are None values
# M1M2
df["M1M2_lr"] = (df["M2_proba"] / (1 - df["M2_proba"])) * (
df["M1_proba"] / (1 - df["M1_proba"])
)
df["M1M2"] = (df["M1M2_lr"] > 1).astype("int")
# Force Blank lines to 0
df.loc[
df.BLANK_LINE,
[
"M1M2",
],
] = 0
# Make binary col
df["PREDICTED_END_LINE"] = np.logical_not(df["M1M2"].astype(bool))
return df
def save(self, path="base_model.pkl"):
"""Save a pickle of the model. It could be read by the pipeline later.
Parameters
----------
path : str, optional
path to file .pkl, by default `base_model.pkl`
"""
with open(path, "wb") as outp:
del self.nlp
pickle.dump(self, outp, pickle.HIGHEST_PROTOCOL)
def _convert_A(self, df: pd.DataFrame, col: str) -> pd.DataFrame:
"""
Parameters
----------
df : pd.DataFrame
col : str
column to translate
Returns
-------
pd.DataFrame
"""
cat_type_A = CategoricalDtype(
categories=self.vocabulary["A3A4"].keys(), ordered=True
)
new_col = "_" + col
df[new_col] = df[col].astype(cat_type_A)
df[new_col] = df[new_col].cat.codes
# Ensure that not known values are coded as OTHER
df.loc[
~df[col].isin(self.vocabulary["A3A4"].keys()), new_col
] = self.vocabulary["A3A4"]["OTHER"]
return df
def _convert_B(self, df: pd.DataFrame, col: str) -> pd.DataFrame:
"""
Parameters
----------
df : pd.DataFrame
[description]
col : str
column to translate
Returns
-------
pd.DataFrame
[description]
"""
# Translate B1
index_B = pd.IntervalIndex(list(self.vocabulary[col].keys()))
new_col = "_" + col
df[new_col] = pd.cut(df[col], index_B)
df[new_col] = df[new_col].cat.codes
df.loc[df[col] >= index_B.right.max(), new_col] = max(
self.vocabulary[col].values()
)
df.loc[df[col] <= index_B.left.min(), new_col] = min(
self.vocabulary[col].values()
)
return df
def _convert_raw_data_to_codes(self, df: pd.DataFrame) -> pd.DataFrame:
"""
Function to translate data as extracted from spacy to the model codes.
`A1` and `A2` are not translated because they are assumed to already be
properly encoded.
Parameters
----------
df : pd.DataFrame
It should have columns `['A3','A4','B1','B2']`
Returns
-------
pd.DataFrame
"""
df = self._convert_A(df, "A3")
df = self._convert_A(df, "A4")
df = self._convert_B(df, "B1")
df = self._convert_B(df, "B2")
return df
def _convert_line_to_attribute(
self, df: pd.DataFrame, expr: str, col: str
) -> pd.DataFrame:
"""
Function to convert a line into an attribute (column) of the
previous row. In particular, we use it to identify "\\n" and "\\n\\n", which
are considered tokens, and to express this information as an attribute
of the previous token.
Parameters
----------
df : pd.DataFrame
expr : str
pattern to search in the text. Ex.: "\\n"
col : str
name of the new column
Returns
-------
pd.DataFrame
"""
idx = df.TEXT.str.contains(expr)
df.loc[idx, col] = True
df[col] = df[col].fillna(False)
df = self._shift_col(df, col, "TEMP_" + col, direction="backward")
return df
def _compute_a3(self, df: pd.DataFrame) -> pd.DataFrame:
"""
A3 (respectively A4): typographic form of the left (respectively right) word:
- All in capital letter
- It starts with a capital letter
- Starts by lowercase
- It's a number
- Strong punctuation
- Soft punctuation
- A number followed or preceded by punctuation (the case of enumerations)
Parameters
----------
df: pd.DataFrame
Returns
-------
df: pd.DataFrame with the columns `A3` and `A3_`
"""
df = self._shift_col(
df, "IS_PUNCT", "IS_PUNCT_+1", direction="backward", fill=False
)
df = self._shift_col(
df, "IS_PUNCT", "IS_PUNCT_-1", direction="forward", fill=False
)
CONDITION1 = df.IS_UPPER
CONDITION2 = df.SHAPE_.str.startswith("Xx", na=False)
CONDITION3 = df.SHAPE_.str.startswith("x", na=False)
CONDITION4 = df.IS_DIGIT
STRONG_PUNCT = [".", ";", "..", "..."]
CONDITION5 = (df.IS_PUNCT) & (df.TEXT.isin(STRONG_PUNCT))
CONDITION6 = (df.IS_PUNCT) & (~df.TEXT.isin(STRONG_PUNCT))
CONDITION7 = (df.IS_DIGIT) & (df["IS_PUNCT_+1"] | df["IS_PUNCT_-1"]) # discuss
df["A3_"] = None
df.loc[CONDITION1, "A3_"] = "UPPER"
df.loc[CONDITION2, "A3_"] = "S_UPPER"
df.loc[CONDITION3, "A3_"] = "LOWER"
df.loc[CONDITION4, "A3_"] = "DIGIT"
df.loc[CONDITION5, "A3_"] = "STRONG_PUNCT"
df.loc[CONDITION6, "A3_"] = "SOFT_PUNCT"
df.loc[CONDITION7, "A3_"] = "ENUMERATION"
df = df.drop(columns=["IS_PUNCT_+1", "IS_PUNCT_-1"])
df["A3_"] = df["A3_"].astype("category")
df["A3_"] = df["A3_"].cat.add_categories("OTHER")
df["A3_"].fillna("OTHER", inplace=True)
df["A3"] = df["A3_"].cat.codes
return df
def _fit_M1(
self,
A1: pd.Series,
A2: pd.Series,
A3: pd.Series,
A4: pd.Series,
label: pd.Series,
):
"""Function to train M1 classifier (Naive Bayes)
Parameters
----------
A1 : pd.Series
[description]
A2 : pd.Series
[description]
A3 : pd.Series
[description]
A4 : pd.Series
[description]
label : pd.Series
[description]
"""
# Encode classes to OneHotEncoder representation
encoder_A1_A2 = self._fit_encoder_2S(A1, A2)
self.encoder_A1_A2 = encoder_A1_A2
encoder_A3_A4 = self._fit_encoder_2S(A3, A4)
self.encoder_A3_A4 = encoder_A3_A4
# M1
m1 = MultinomialNB(alpha=1)
X = self._get_X_for_M1(A1, A2, A3, A4)
m1.fit(X, label)
self.m1 = m1
def _fit_M2(self, B1: pd.Series, B2: pd.Series, label: pd.Series):
"""Function to train M2 classifier (Naive Bayes)
Parameters
----------
B1 : pd.Series
B2 : pd.Series
label : pd.Series
"""
# Encode classes to OneHotEncoder representation
encoder_B1 = self._fit_encoder_1S(B1)
self.encoder_B1 = encoder_B1
encoder_B2 = self._fit_encoder_1S(B2)
self.encoder_B2 = encoder_B2
# Multinomial Naive Bayes
m2 = MultinomialNB(alpha=1)
X = self._get_X_for_M2(B1, B2)
m2.fit(X, label)
self.m2 = m2
def _get_X_for_M1(
self, A1: pd.Series, A2: pd.Series, A3: pd.Series, A4: pd.Series
) -> np.ndarray:
"""Get X matrix for classifier
Parameters
----------
A1 : pd.Series
A2 : pd.Series
A3 : pd.Series
A4 : pd.Series
Returns
-------
np.ndarray
"""
A1_enc = self._encode_series(self.encoder_A1_A2, A1)
A2_enc = self._encode_series(self.encoder_A1_A2, A2)
A3_enc = self._encode_series(self.encoder_A3_A4, A3)
A4_enc = self._encode_series(self.encoder_A3_A4, A4)
X = hstack([A1_enc, A2_enc, A3_enc, A4_enc])
return X
def _get_X_for_M2(self, B1: pd.Series, B2: pd.Series) -> np.ndarray:
"""Get X matrix for classifier
Parameters
----------
B1 : pd.Series
B2 : pd.Series
Returns
-------
np.ndarray
"""
B1_enc = self._encode_series(self.encoder_B1, B1)
B2_enc = self._encode_series(self.encoder_B2, B2)
X = hstack([B1_enc, B2_enc])
return X
def _predict_M1(
self, A1: pd.Series, A2: pd.Series, A3: pd.Series, A4: pd.Series
) -> Dict[str, Any]:
"""Use M1 for prediction
Parameters
----------
A1 : pd.Series
A2 : pd.Series
A3 : pd.Series
A4 : pd.Series
Returns
-------
Dict[str, Any]
"""
X = self._get_X_for_M1(A1, A2, A3, A4)
predictions = self.m1.predict(X)
predictions_proba = self.m1.predict_proba(X)[:, 1]
outputs = {"predictions": predictions, "predictions_proba": predictions_proba}
return outputs
def _predict_M2(self, B1: pd.Series, B2: pd.Series) -> Dict[str, Any]:
"""Use M2 for prediction
Parameters
----------
B1 : pd.Series
B2 : pd.Series
Returns
-------
Dict[str, Any]
"""
X = self._get_X_for_M2(B1, B2)
predictions = self.m2.predict(X)
predictions_proba = self.m2.predict_proba(X)[:, 1]
outputs = {"predictions": predictions, "predictions_proba": predictions_proba}
return outputs
def _fit_encoder_2S(self, S1: pd.Series, S2: pd.Series) -> OneHotEncoder:
"""Fit a one hot encoder with 2 Series. It concatenates the series and after it fits.
Parameters
----------
S1 : pd.Series
S2 : pd.Series
Returns
-------
OneHotEncoder
"""
_S1 = _convert_series_to_array(S1)
_S2 = _convert_series_to_array(S2)
S = np.concatenate([_S1, _S2])
encoder = self._fit_one_hot_encoder(S)
return encoder
def _fit_encoder_1S(self, S1: pd.Series) -> OneHotEncoder:
"""Fit a one hot encoder with 1 Series.
Parameters
----------
S1 : pd.Series
Returns
-------
OneHotEncoder
"""
_S1 = _convert_series_to_array(S1)
encoder = self._fit_one_hot_encoder(_S1)
return encoder
def _encode_series(self, encoder: OneHotEncoder, S: pd.Series) -> np.ndarray:
"""Use the one hot encoder to transform a series.
Parameters
----------
encoder : OneHotEncoder
S : pd.Series
a series to encode (transform)
Returns
-------
np.ndarray
"""
_S = _convert_series_to_array(S)
S_enc = encoder.transform(_S)
return S_enc
def set_spans(self, corpus: Iterable[Doc], df: pd.DataFrame):
"""
Function to set the results of the algorithm (pd.DataFrame)
as spans of the spaCy document.
Parameters
----------
corpus : Iterable[Doc]
Iterable of spaCy Documents
df : pd.DataFrame
It should have the columns:
["DOC_ID","original_token_index","PREDICTED_END_LINE"]
"""
for doc_id, doc in enumerate(corpus):
spans = []
for token_i, pred in df.loc[
df.DOC_ID == doc_id, ["original_token_index", "PREDICTED_END_LINE"]
].values:
s = Span(doc, start=token_i, end=token_i + 1, label=_get_label(pred))
spans.append(s)
doc.spans["new_lines"] = spans
@staticmethod
def _retrieve_lines(dfg: DataFrameGroupBy) -> DataFrameGroupBy:
"""Function to give a sentence_id to each token.
Parameters
----------
dfg : DataFrameGroupBy
Returns
-------
DataFrameGroupBy
Same DataFrameGroupBy with the column `SENTENCE_ID`
"""
sentences_ids = np.arange(dfg.END_LINE.sum())
dfg.loc[dfg.END_LINE, "SENTENCE_ID"] = sentences_ids
dfg["SENTENCE_ID"] = dfg["SENTENCE_ID"].fillna(method="bfill")
return dfg
@staticmethod
def _create_vocabulary(x: Iterable) -> dict:
"""Function to create a vocabulary for attributes in the training set.
Parameters
----------
x : iterable
Returns
-------
dict
"""
v = {}
for i, key in enumerate(x):
v[key] = i
return v
@staticmethod
def _compute_B(df: pd.DataFrame) -> pd.DataFrame:
"""Function to compute B1 and B2
Parameters
----------
df : pd.DataFrame
Returns
-------
pd.DataFrame
"""
data = df.groupby(["DOC_ID", "SENTENCE_ID"]).agg(l=("LENGTH", "sum"))
df_t = df.loc[df.END_LINE, ["DOC_ID", "SENTENCE_ID"]].merge(
data, left_on=["DOC_ID", "SENTENCE_ID"], right_index=True, how="left"
)
stats_doc = df_t.groupby("DOC_ID").agg(mu=("l", "mean"), sigma=("l", "std"))
stats_doc["sigma"].replace(
0.0, 1.0, inplace=True
) # Replace the 0 std by unit std, otherwise it breaks the code.
stats_doc["cv"] = stats_doc["sigma"] / stats_doc["mu"]
df_t = df_t.drop(columns=["DOC_ID", "SENTENCE_ID"])
df2 = df.merge(df_t, left_index=True, right_index=True, how="left")
df2 = df2.merge(stats_doc, on=["DOC_ID"], how="left")
df2["l_norm"] = (df2["l"] - df2["mu"]) / df2["sigma"]
df2["cv_bin"] = pd.cut(df2["cv"], bins=10)
df2["B2"] = df2["cv_bin"].cat.codes
df2["l_norm_bin"] =
|
pd.cut(df2["l_norm"], bins=10)
|
pandas.cut
|
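A minimal sketch of the binning idiom the completion above relies on, with a made-up column standing in for l_norm: pd.cut produces interval categories and .cat.codes turns them into integer bin labels, the same pattern used for B2 earlier in the prompt.
import pandas as pd
import numpy as np
# Hypothetical continuous feature standing in for l_norm.
demo = pd.DataFrame({"l_norm": np.random.default_rng(0).normal(size=100)})
# pd.cut assigns each value to one of 10 equal-width interval bins...
demo["l_norm_bin"] = pd.cut(demo["l_norm"], bins=10)
# ...and .cat.codes maps the interval categorical to integer codes 0..9.
demo["bin_code"] = demo["l_norm_bin"].cat.codes
print(demo[["l_norm", "l_norm_bin", "bin_code"]].head())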
"""
SMALL HELPER FUNCTIONS
"""
import os
from time import time
from scipy import stats
import numpy as np
import pandas as pd
from colorspace.colorlib import HCL
from sklearn.metrics import roc_auc_score
from sklearn.metrics import average_precision_score
import warnings
import itertools
from sklearn.metrics import roc_auc_score as auc
# y, score, groups = df_woy.query('has_woy==False').ili.values, df_woy.query('has_woy==False').ili_score.values, df_woy.query('has_woy==False').woy.values
# Function to decompose the within/between/aggregate AUC
def auc_decomp(y, score, groups):
assert y.shape[0] == score.shape[0] == groups.shape[0]
idx1, idx0 = np.where(y == 1)[0], np.where(y == 0)[0]
# Calculate number of pairs
npairs_agg = len(idx1) * len(idx0)
auc_agg = auc(y, score)
ugroups = np.unique(groups)
# --- Calculate within AUC --- #
df = pd.DataFrame({'y':y,'score':score,'groups':groups})
# api: pandas.DataFrame
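The auc_decomp prompt is cut off just after the DataFrame is built. The sketch below is not the author's continuation; it only illustrates one way a within-group AUC could be computed from that frame, with synthetic y, score and groups.
import numpy as np
import pandas as pd
from sklearn.metrics import roc_auc_score

def within_auc(y, score, groups):
    # Average AUC computed separately inside each group (illustrative only).
    frame = pd.DataFrame({'y': y, 'score': score, 'groups': groups})
    aucs = []
    for _, g in frame.groupby('groups'):
        if g['y'].nunique() == 2:  # AUC is undefined if a group has only one class
            aucs.append(roc_auc_score(g['y'], g['score']))
    return float(np.mean(aucs)) if aucs else float('nan')

rng = np.random.default_rng(0)
y = rng.integers(0, 2, size=200)
score = y + rng.normal(scale=1.0, size=200)   # informative but noisy score
groups = rng.integers(0, 5, size=200)
print(within_auc(y, score, groups))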
import pandas as pd
import numpy as np
import datetime
from itertools import repeat
from astropy.time import Time, TimeDelta
from typing import Union, Tuple, Sequence, List
def convert_day_of_year(date: Union[float, str]) -> Time:
"""Convert day of the year (defined as yyyy.ddd where ddd is the day number of that year) to an astropy Time object.
Some important dates for the COS team were recorded in this format.
"""
return Time(datetime.datetime.strptime(str(date), '%Y.%j'), format='datetime')
def fit_line(x: Sequence, y: Sequence) -> Tuple[np.poly1d, np.ndarray]:
"""Given arrays x and y, fit a line."""
fit = np.poly1d(np.polyfit(x, y, 1))
return fit, fit(x)
def explode_df(df: pd.DataFrame, list_keywords: list) -> pd.DataFrame:
"""If a dataframe contains arrays for the element of a column or columns given by list_keywords, expand the
dataframe to one row per array element. Each row in list_keywords must be the same length.
"""
idx = df.index.repeat(df[list_keywords[0]].str.len()) # Repeat values based on the number of elements in the arrays
unpacked = pd.concat([pd.DataFrame({x: np.concatenate(df[x].values)}) for x in list_keywords], axis=1)
unpacked.index = idx # assigns repeated index to the unpacked dataframe, unpacked.
# Join unpacked df to the original df and drop the old columns
exploded = unpacked.join(df.drop(list_keywords, 1), how='left').reset_index(drop=True)
if exploded.isna().values.any(): # If there are NaNs, then it didn't make sense to "explode" the input df
raise ValueError('Elements in columns to be exploded are not the same length across rows.')
return exploded
def absolute_time(df: pd.DataFrame = None, expstart: Sequence = None, time: Sequence = None, time_key: str = None,
time_format: str = 'sec') -> TimeDelta:
"""Compute the time sequence relative to the start of the exposure (EXPSTART). Can be computed from a DataFrame that
contains an EXPSTART column and some other time array column, or from an EXPSTART array and time array pair.
"""
# If no input is given raise an error
if df is None and expstart is None and time is None:
raise TypeError('Computing an absolute time requires either a dataframe or a set of arrays')
# Check that expstart and time_array are used together
if bool(expstart is not None or time is not None) and not (expstart is not None and time is not None):
raise TypeError('expstart and time must be used together.')
# Ingest given dataframe if one is given and check that it's not used with arrays at the same time
if df is not None:
if bool(expstart is not None or time is not None):
raise ValueError('Cannot use a dataframe and arrays as input at the same time. Use one or the other.')
expstart = df.EXPSTART
time = df.TIME if not time_key else df[time_key]
zero_points = Time(expstart, format='mjd')
time_delta = TimeDelta(time, format=time_format)
return zero_points + time_delta
def create_visibility(trace_lengths: List[int], visible_list: List[bool]) -> List[bool]:
"""Create visibility lists for plotly buttons. trace_lengths and visible_list must be in the correct order.
:param trace_lengths: List of the number of traces in each "button set".
:param visible_list: Visibility setting for each button set (either True or False).
"""
visibility = [] # Total visibility. Length should match the total number of traces in the figure.
for visible, trace_length in zip(visible_list, trace_lengths):
visibility += list(repeat(visible, trace_length)) # Set each trace per button.
return visibility
def v2v3(slew_x: Sequence, slew_y: Sequence) -> Tuple[Union[np.ndarray, pd.Series], Union[np.ndarray, pd.Series]]:
"""Detector coordinates to V2/V3 coordinates."""
# If input are lists, convert to np arrays so that the operations are completed as expected
if isinstance(slew_x, list):
slew_x = np.array(slew_x)
if isinstance(slew_y, list):
slew_y = np.array(slew_y)
rotation_angle = np.radians(45.0) # rotation angle in degrees converted to radians
x_conversion = slew_x * np.cos(rotation_angle)
y_conversion = slew_y * np.sin(rotation_angle)
v2 = x_conversion + y_conversion
v3 = x_conversion - y_conversion
return v2, v3
def get_osm_data(datamodel, detector: str) -> pd.DataFrame:
"""Query for OSM data and append any relevant new data to it."""
data = pd.DataFrame()
# api: pandas.DataFrame
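As a usage sketch for the absolute_time helper above, with made-up MJD start times and elapsed seconds: the astropy arithmetic it relies on is simply Time plus TimeDelta.
import numpy as np
from astropy.time import Time, TimeDelta

# Hypothetical exposure start times (MJD) and elapsed seconds within each exposure.
expstart = np.array([58800.0, 58800.0, 58801.5])
elapsed = np.array([0.0, 60.0, 120.0])

# Mirrors the body of absolute_time: anchor at EXPSTART, then add the offsets.
zero_points = Time(expstart, format='mjd')
abs_times = zero_points + TimeDelta(elapsed, format='sec')
print(abs_times.iso)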
import pandas as pd
import matplotlib.pyplot as plt
bandera_paso = False
iter = 0
lsupAnterior = -5
linfAnterior = -5
licentAnterior = -5
datos = pd.read_csv('data.csv', header=None)
# api: pandas.read_csv
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
class TestSeriesCombine:
def test_combine_scalar(self):
# GH 21248
# Note - combine() with another Series is tested elsewhere because
# it is used when testing operators
s = pd.Series([i * 10 for i in range(5)])
result = s.combine(3, lambda x, y: x + y)
expected = pd.Series([i * 10 + 3 for i in range(5)])
tm.assert_series_equal(result, expected)
result = s.combine(22, lambda x, y: min(x, y))
expected = pd.Series([min(i * 10, 22) for i in range(5)])
tm.assert_series_equal(result, expected)
def test_update(self):
s = Series([1.5, np.nan, 3.0, 4.0, np.nan])
s2 = Series([np.nan, 3.5, np.nan, 5.0])
s.update(s2)
expected = Series([1.5, 3.5, 3.0, 5.0, np.nan])
tm.assert_series_equal(s, expected)
# GH 3217
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df["c"] = np.nan
df["c"].update(Series(["foo"], index=[0]))
expected = DataFrame(
[[1, np.nan, "foo"], [3, 2.0, np.nan]], columns=["a", "b", "c"]
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"other, dtype, expected",
[
# other is int
([61, 63], "int32", pd.Series([10, 61, 12], dtype="int32")),
([61, 63], "int64", pd.Series([10, 61, 12])),
([61, 63], float, pd.Series([10.0, 61.0, 12.0])),
([61, 63], object, pd.Series([10, 61, 12], dtype=object)),
# other is float, but can be cast to int
([61.0, 63.0], "int32", pd.Series([10, 61, 12], dtype="int32")),
([61.0, 63.0], "int64", pd.Series([10, 61, 12])),
([61.0, 63.0], float, pd.Series([10.0, 61.0, 12.0])),
([61.0, 63.0], object, pd.Series([10, 61.0, 12], dtype=object)),
# other is float, cannot be cast to int
([61.1, 63.1], "int32", pd.Series([10.0, 61.1, 12.0])),
([61.1, 63.1], "int64", pd.Series([10.0, 61.1, 12.0])),
([61.1, 63.1], float, pd.Series([10.0, 61.1, 12.0])),
([61.1, 63.1], object, pd.Series([10, 61.1, 12], dtype=object)),
# other is object, cannot be cast
([(61,), (63,)], "int32", pd.Series([10, (61,), 12])),
([(61,), (63,)], "int64", pd.Series([10, (61,), 12])),
([(61,), (63,)], float, pd.Series([10.0, (61,), 12.0])),
([(61,), (63,)], object, pd.Series([10, (61,), 12])),
],
)
def test_update_dtypes(self, other, dtype, expected):
s = Series([10, 11, 12], dtype=dtype)
other = Series(other, index=[1, 3])
s.update(other)
tm.assert_series_equal(s, expected)
def test_concat_empty_series_dtypes_roundtrips(self):
# round-tripping with self & like self
dtypes = list(map(np.dtype, ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"]))
for dtype in dtypes:
assert pd.concat([Series(dtype=dtype)]).dtype == dtype
assert pd.concat([Series(dtype=dtype), Series(dtype=dtype)]).dtype == dtype
def int_result_type(dtype, dtype2):
typs = {dtype.kind, dtype2.kind}
if not len(typs - {"i", "u", "b"}) and (
dtype.kind == "i" or dtype2.kind == "i"
):
return "i"
elif not len(typs - {"u", "b"}) and (
dtype.kind == "u" or dtype2.kind == "u"
):
return "u"
return None
def float_result_type(dtype, dtype2):
typs = {dtype.kind, dtype2.kind}
if not len(typs - {"f", "i", "u"}) and (
dtype.kind == "f" or dtype2.kind == "f"
):
return "f"
return None
def get_result_type(dtype, dtype2):
result = float_result_type(dtype, dtype2)
if result is not None:
return result
result = int_result_type(dtype, dtype2)
if result is not None:
return result
return "O"
for dtype in dtypes:
for dtype2 in dtypes:
if dtype == dtype2:
continue
expected = get_result_type(dtype, dtype2)
result = pd.concat([Series(dtype=dtype), Series(dtype=dtype2)])
# api: pandas.Series
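A quick check that makes the get_result_type rules above concrete; the expected kinds in the comments follow those rules rather than any particular pandas version.
import pandas as pd

# Expected kinds under the rules above: float wins over int/uint ('f'),
# int/uint/bool combine to an integer kind ('i'), everything else is object ('O').
print(pd.concat([pd.Series(dtype="int64"), pd.Series(dtype="float64")]).dtype.kind)  # 'f'
print(pd.concat([pd.Series(dtype="int8"), pd.Series(dtype="bool")]).dtype.kind)      # 'i'
print(pd.concat([pd.Series(dtype="M8[ns]"), pd.Series(dtype="bool")]).dtype.kind)    # 'O'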
from typing import Tuple, List
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from LSSTM.LS_STM import LSSTM
from hottbox.core import Tensor
from pandas._libs.tslibs import timestamps
from LSSTM.aiding_functions import load_obj
from LSSTM.data_makers import create_stm_slice
# Load Data
# This dict has two Keys, "Volume" and "Price"
# "Volume" contains a pd.DataFrame with a DatetimeIndex and the Cols: VIX Price Gold Price SPX Close Label
# "Price" contains a pd.DataFrame with a DatetimeIndex and the Cols: VIX Volume Gold Volume SPX Volume Label
data_stm = load_obj("data_stm")
num_rows = len(data_stm["Price"])
tensor_shape = [2, 3, 2]
slice_width = 250
num_slices = num_rows - slice_width - tensor_shape[0] + 1 # TODO: why?
y_pred = np.zeros(num_slices - 1) # I guess this is because there is no prediction for the last row
stm = LSSTM(C=10, max_iter=100)
success = 0
pred_dates_idcs = []
# iteratively calculate slices
for i in range(num_slices - 1): # called num_slices times with i as the i_th iteration
# print progress
print("\r{0}".format((float(i) / (num_slices - 1)) * 100))
# prepare data ## TODO adapt to my data
stm_slice: Tuple[List[Tensor], np.array, Tensor, np.float64, timestamps.Timestamp] = create_stm_slice(
d=data_stm, start_index=i, slice_width=slice_width, tensor_shape=tensor_shape
)
xs_train, y_train, xs_test, y_test, associated_index = stm_slice
# fit a model
stm.fit(xs_train, y_train)
y_tmp, _ = stm.predict(xs_test)
y_pred[i] = y_tmp[0]
#
pred_dates_idcs.append(associated_index)
if y_test == y_pred[i]:
success += 1
#############
# EVALUATION
#############
# read from file
raw_data = pd.read_csv("./finance_data/raw_data.csv") # Date, SPX Close, SPX Volume, VIX Price, VIX Volume, Gold Price, Gold Volume
# create DatetimeIndex
raw_data["Date"] =
|
pd.to_datetime(raw_data["Date"], format="%Y-%m-%d")
|
pandas.to_datetime
|
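The completion parses the Date column; a small illustration with dummy values, including the follow-up step of promoting the column to a DatetimeIndex, which is an assumption about what the script does next rather than part of the prompt.
import pandas as pd

# Dummy frame mimicking raw_data's Date column; values are placeholders.
demo = pd.DataFrame({"Date": ["2020-01-02", "2020-01-03"], "SPX Close": [1.0, 2.0]})
demo["Date"] = pd.to_datetime(demo["Date"], format="%Y-%m-%d")
demo = demo.set_index("Date")          # the index is now a DatetimeIndex
print(demo.index)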
import os
import numpy as np
import pandas as pd
from itertools import chain, combinations
pd.set_option('display.max_rows', 1000)
bright_dark_csv = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'all_seven_note_modes_sorted_dark_to_bright.csv')
bd = pd.read_csv(bright_dark_csv)
# api: pandas.read_csv
from __future__ import division
from functools import wraps
import pandas as pd
import numpy as np
import time
import csv, sys
import os.path
import logging
from .ted_functions import TedFunctions
from .ted_aggregate_methods import TedAggregateMethods
from base.uber_model import UberModel, ModelSharedInputs
class TedSpeciesProperties(object):
"""
Listing of species properties that will eventually be read in from a SQL db
"""
def __init__(self):
"""Class representing Species properties"""
super(TedSpeciesProperties, self).__init__()
self.sci_name = pd.Series([], dtype='object')
self.com_name = pd.Series([], dtype='object')
self.taxa = pd.Series([], dtype='object')
self.order = pd.Series([], dtype='object')
self.usfws_id = pd.Series([], dtype='object')
self.body_wgt = pd.Series([], dtype='object')
self.diet_item = pd.Series([], dtype='object')
self.h2o_cont = pd.Series([], dtype='float')
def read_species_properties(self):
# this is a temporary method to initiate the species/diet food items lists (this will be replaced with
# a method to access a SQL database containing the properties
#filename = './ted/tests/TEDSpeciesProperties.csv'
filename = os.path.join(os.path.dirname(__file__),'tests/TEDSpeciesProperties.csv')
try:
with open(filename,'rt') as csvfile:
# csv.DictReader uses first line in file for column headings by default
dr = pd.read_csv(csvfile) # comma is default delimiter
except csv.Error as e:
sys.exit('file: %s, %s' % (filename, e))
print(dr)
self.sci_name = dr.loc[:,'Scientific Name']
self.com_name = dr.loc[:,'Common Name']
self.taxa = dr.loc[:,'Taxa']
self.order = dr.loc[:,'Order']
self.usfws_id = dr.loc[:,'USFWS Species ID (ENTITY_ID)']
self.body_wgt = dr.loc[:,'BW (g)']
self.diet_item = dr.loc[:,'Food item']
self.h2o_cont = dr.loc[:,'Water content of diet']
class TedInputs(ModelSharedInputs):
"""
Required inputs class for Ted.
"""
def __init__(self):
"""Class representing the inputs for Ted"""
super(TedInputs, self).__init__()
# Inputs: Assign object attribute variables from the input Pandas DataFrame
self.chemical_name = pd.Series([], dtype="object", name="chemical_name")
# application parameters for min/max application scenarios
self.crop_min = pd.Series([], dtype="object", name="crop")
self.app_method_min = pd.Series([], dtype="object", name="app_method_min")
self.app_rate_min = pd.Series([], dtype="float", name="app_rate_min")
self.num_apps_min = pd.Series([], dtype="int", name="num_apps_min")
self.app_interval_min = pd.Series([], dtype="int", name="app_interval_min")
self.droplet_spec_min = pd.Series([], dtype="object", name="droplet_spec_min")
self.boom_hgt_min = pd.Series([], dtype="object", name="droplet_spec_min")
self.pest_incorp_depth_min = pd.Series([], dtype="object", name="pest_incorp_depth")
self.crop_max = pd.Series([], dtype="object", name="crop")
self.app_method_max = pd.Series([], dtype="object", name="app_method_max")
self.app_rate_max = pd.Series([], dtype="float", name="app_rate_max")
self.num_apps_max = pd.Series([], dtype="int", name="num_app_maxs")
self.app_interval_max = pd.Series([], dtype="int", name="app_interval_max")
self.droplet_spec_max = pd.Series([], dtype="object", name="droplet_spec_max")
self.boom_hgt_max = pd.Series([], dtype="object", name="droplet_spec_max")
self.pest_incorp_depth_max = pd.Series([], dtype="object", name="pest_incorp_depth")
# physical, chemical, and fate properties of pesticide
self.foliar_diss_hlife = pd.Series([], dtype="float", name="foliar_diss_hlife")
self.aerobic_soil_meta_hlife = pd.Series([], dtype="float", name="aerobic_soil_meta_hlife")
self.frac_retained_mamm = pd.Series([], dtype="float", name="frac_retained_mamm")
self.frac_retained_birds = pd.Series([], dtype="float", name="frac_retained_birds")
self.log_kow = pd.Series([], dtype="float", name="log_kow")
self.koc = pd.Series([], dtype="float", name="koc")
self.solubility = pd.Series([], dtype="float", name="solubility")
self.henry_law_const = pd.Series([], dtype="float", name="henry_law_const")
# bio concentration factors (ug active ing/kg-ww) / (ug active ing/liter)
self.aq_plant_algae_bcf_mean = pd.Series([], dtype="float", name="aq_plant_algae_bcf_mean")
self.aq_plant_algae_bcf_upper = pd.Series([], dtype="float", name="aq_plant_algae_bcf_upper")
self.inv_bcf_mean = pd.Series([], dtype="float", name="inv_bcf_mean")
self.inv_bcf_upper = pd.Series([], dtype="float", name="inv_bcf_upper")
self.fish_bcf_mean = pd.Series([], dtype="float", name="fish_bcf_mean")
self.fish_bcf_upper = pd.Series([], dtype="float", name="fish_bcf_upper")
# bounding water concentrations (ug active ing/liter)
self.water_conc_1 = pd.Series([], dtype="float", name="water_conc_1") # lower bound
self.water_conc_2 = pd.Series([], dtype="float", name="water_conc_2") # upper bound
# health value inputs
# naming convention (based on listing from OPP TED Excel spreadsheet 'inputs' worksheet):
# dbt: dose based toxicity
# cbt: concentration-based toxicity
# arbt: application rate-based toxicity
# 1inmill_mort: 1/million mortality (note initial character is numeral 1, not letter l)
# 1inten_mort: 10% mortality (note initial character is numeral 1, not letter l)
# others are self explanatory
# dose based toxicity(dbt): mammals (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort")
self.dbt_mamm_1inten_mort = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort")
self.dbt_mamm_low_ld50 = pd.Series([], dtype="float", name="dbt_mamm_low_ld50")
self.dbt_mamm_rat_oral_ld50 = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort")
self.dbt_mamm_rat_derm_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50")
self.dbt_mamm_rat_inhal_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50")
self.dbt_mamm_sub_direct = pd.Series([], dtype="float", name="dbt_mamm_sub_direct")
self.dbt_mamm_sub_indirect = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect")
self.dbt_mamm_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort_wgt")
self.dbt_mamm_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort_wgt")
self.dbt_mamm_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_low_ld50_wgt")
self.dbt_mamm_rat_oral_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort_wgt")
self.dbt_mamm_rat_derm_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50_wgt")
self.dbt_mamm_rat_inhal_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50_wgt")
self.dbt_mamm_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_direct_wgt")
self.dbt_mamm_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect_wgt")
# dose based toxicity(dbt): birds (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_bird_1inmill_mort = pd.Series([], dtype="float", name="dbt_bird_1inmill_mort")
self.dbt_bird_1inten_mort = pd.Series([], dtype="float", name="dbt_bird_1inten_mort")
self.dbt_bird_low_ld50 = pd.Series([], dtype="float", name="dbt_bird_low_ld50")
self.dbt_bird_hc05 = pd.Series([], dtype="float", name="dbt_bird_hc05")
self.dbt_bird_hc50 = pd.Series([], dtype="float", name="dbt_bird_hc50")
self.dbt_bird_hc95 = pd.Series([], dtype="float", name="dbt_bird_hc95")
self.dbt_bird_sub_direct = pd.Series([], dtype="float", name="dbt_bird_sub_direct")
self.dbt_bird_sub_indirect = pd.Series([], dtype="float", name="dbt_bird_sub_indirect")
self.mineau_sca_fact = pd.Series([], dtype="float", name="mineau_sca_fact")
self.dbt_bird_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_bird_1inmill_mort_wgt")
self.dbt_bird_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_bird_1inten_mort_wgt")
self.dbt_bird_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_bird_low_ld50_wgt")
self.dbt_bird_hc05_wgt = pd.Series([], dtype="float", name="dbt_bird_hc05_wgt")
self.dbt_bird_hc50_wgt = pd.Series([], dtype="float", name="dbt_bird_hc50_wgt")
self.dbt_bird_hc95_wgt = pd.Series([], dtype="float", name="dbt_bird_hc95_wgt")
self.dbt_bird_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_bird_sub_direct_wgt")
self.dbt_bird_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_bird_sub_indirect_wgt")
self.mineau_sca_fact_wgt = pd.Series([], dtype="float", name="mineau_sca_fact_wgt")
# dose based toxicity(dbt): reptiles, terrestrial-phase amphibians (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_reptile_1inmill_mort = pd.Series([], dtype="float", name="dbt_reptile_1inmill_mort")
self.dbt_reptile_1inten_mort = pd.Series([], dtype="float", name="dbt_reptile_1inten_mort")
self.dbt_reptile_low_ld50 = pd.Series([], dtype="float", name="dbt_reptile_low_ld50")
self.dbt_reptile_sub_direct = pd.Series([], dtype="float", name="dbt_reptile_sub_direct")
self.dbt_reptile_sub_indirect = pd.Series([], dtype="float", name="dbt_reptile_sub_indirect")
self.dbt_reptile_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_reptile_1inmill_mort_wgt")
self.dbt_reptile_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_reptile_1inten_mort_wgt")
self.dbt_reptile_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_reptile_low_ld50_wgt")
self.dbt_reptile_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_reptile_sub_direct_wgt")
self.dbt_reptile_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_reptile_sub_indirect_wgt")
# concentration-based toxicity (cbt) : mammals (mg-pest/kg-diet food)
self.cbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="cbt_mamm_1inmill_mort")
self.cbt_mamm_1inten_mort = pd.Series([], dtype="float", name="cbt_mamm_1inten_mort")
self.cbt_mamm_low_lc50 = pd.Series([], dtype="float", name="cbt_mamm_low_lc50")
self.cbt_mamm_sub_direct = pd.Series([], dtype="float", name="cbt_mamm_sub_direct")
self.cbt_mamm_grow_noec = pd.Series([], dtype="float", name="cbt_mamm_grow_noec")
self.cbt_mamm_grow_loec = pd.Series([], dtype="float", name="cbt_mamm_grow_loec")
self.cbt_mamm_repro_noec = pd.Series([], dtype="float", name="cbt_mamm_repro_noec")
self.cbt_mamm_repro_loec = pd.Series([], dtype="float", name="cbt_mamm_repro_loec")
self.cbt_mamm_behav_noec = pd.Series([], dtype="float", name="cbt_mamm_behav_noec")
self.cbt_mamm_behav_loec = pd.Series([], dtype="float", name="cbt_mamm_behav_loec")
self.cbt_mamm_sensory_noec = pd.Series([], dtype="float", name="cbt_mamm_sensory_noec")
self.cbt_mamm_sensory_loec = pd.Series([], dtype="float", name="cbt_mamm_sensory_loec")
self.cbt_mamm_sub_indirect = pd.Series([], dtype="float", name="cbt_mamm_sub_indirect")
# concentration-based toxicity (cbt) : birds (mg-pest/kg-diet food)
self.cbt_bird_1inmill_mort = pd.Series([], dtype="float", name="cbt_bird_1inmill_mort")
self.cbt_bird_1inten_mort = pd.Series([], dtype="float", name="cbt_bird_1inten_mort")
self.cbt_bird_low_lc50 = pd.Series([], dtype="float", name="cbt_bird_low_lc50")
self.cbt_bird_sub_direct = pd.Series([], dtype="float", name="cbt_bird_sub_direct")
self.cbt_bird_grow_noec = pd.Series([], dtype="float", name="cbt_bird_grow_noec")
self.cbt_bird_grow_loec = pd.Series([], dtype="float", name="cbt_bird_grow_loec")
self.cbt_bird_repro_noec = pd.Series([], dtype="float", name="cbt_bird_repro_noec")
self.cbt_bird_repro_loec = pd.Series([], dtype="float", name="cbt_bird_repro_loec")
self.cbt_bird_behav_noec = pd.Series([], dtype="float", name="cbt_bird_behav_noec")
self.cbt_bird_behav_loec = pd.Series([], dtype="float", name="cbt_bird_behav_loec")
self.cbt_bird_sensory_noec = pd.Series([], dtype="float", name="cbt_bird_sensory_noec")
self.cbt_bird_sensory_loec = pd.Series([], dtype="float", name="cbt_bird_sensory_loec")
self.cbt_bird_sub_indirect = pd.Series([], dtype="float", name="cbt_bird_sub_indirect")
# concentration-based toxicity (cbt) : reptiles, terrestrial-phase amphibians (mg-pest/kg-diet food)
self.cbt_reptile_1inmill_mort = pd.Series([], dtype="float", name="cbt_reptile_1inmill_mort")
self.cbt_reptile_1inten_mort = pd.Series([], dtype="float", name="cbt_reptile_1inten_mort")
self.cbt_reptile_low_lc50 = pd.Series([], dtype="float", name="cbt_reptile_low_lc50")
self.cbt_reptile_sub_direct = pd.Series([], dtype="float", name="cbt_reptile_sub_direct")
self.cbt_reptile_grow_noec = pd.Series([], dtype="float", name="cbt_reptile_grow_noec")
self.cbt_reptile_grow_loec = pd.Series([], dtype="float", name="cbt_reptile_grow_loec")
self.cbt_reptile_repro_noec = pd.Series([], dtype="float", name="cbt_reptile_repro_noec")
self.cbt_reptile_repro_loec = pd.Series([], dtype="float", name="cbt_reptile_repro_loec")
self.cbt_reptile_behav_noec = pd.Series([], dtype="float", name="cbt_reptile_behav_noec")
self.cbt_reptile_behav_loec = pd.Series([], dtype="float", name="cbt_reptile_behav_loec")
self.cbt_reptile_sensory_noec = pd.Series([], dtype="float", name="cbt_reptile_sensory_noec")
self.cbt_reptile_sensory_loec = pd.Series([], dtype="float", name="cbt_reptile_sensory_loec")
self.cbt_reptile_sub_indirect = pd.Series([], dtype="float", name="cbt_reptile_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates body weight (mg-pest/kg-bw(ww))
self.cbt_inv_bw_1inmill_mort = pd.Series([], dtype="float", name="cbt_inv_bw_1inmill_mort")
self.cbt_inv_bw_1inten_mort = pd.Series([], dtype="float", name="cbt_inv_bw_1inten_mort")
self.cbt_inv_bw_low_lc50 = pd.Series([], dtype="float", name="cbt_inv_bw_low_lc50")
self.cbt_inv_bw_sub_direct = pd.Series([], dtype="float", name="cbt_inv_bw_sub_direct")
self.cbt_inv_bw_grow_noec = pd.Series([], dtype="float", name="cbt_inv_bw_grow_noec")
self.cbt_inv_bw_grow_loec = pd.Series([], dtype="float", name="cbt_inv_bw_grow_loec")
self.cbt_inv_bw_repro_noec = pd.Series([], dtype="float", name="cbt_inv_bw_repro_noec")
self.cbt_inv_bw_repro_loec = pd.Series([], dtype="float", name="cbt_inv_bw_repro_loec")
self.cbt_inv_bw_behav_noec = pd.Series([], dtype="float", name="cbt_inv_bw_behav_noec")
self.cbt_inv_bw_behav_loec = pd.Series([], dtype="float", name="cbt_inv_bw_behav_loec")
self.cbt_inv_bw_sensory_noec = pd.Series([], dtype="float", name="cbt_inv_bw_sensory_noec")
self.cbt_inv_bw_sensory_loec = pd.Series([], dtype="float", name="cbt_inv_bw_sensory_loec")
self.cbt_inv_bw_sub_indirect = pd.Series([], dtype="float", name="cbt_inv_bw_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates body diet (mg-pest/kg-food(ww))
self.cbt_inv_food_1inmill_mort = pd.Series([], dtype="float", name="cbt_inv_food_1inmill_mort")
self.cbt_inv_food_1inten_mort = pd.Series([], dtype="float", name="cbt_inv_food_1inten_mort")
self.cbt_inv_food_low_lc50 = pd.Series([], dtype="float", name="cbt_inv_food_low_lc50")
self.cbt_inv_food_sub_direct = pd.Series([], dtype="float", name="cbt_inv_food_sub_direct")
self.cbt_inv_food_grow_noec = pd.Series([], dtype="float", name="cbt_inv_food_grow_noec")
self.cbt_inv_food_grow_loec = pd.Series([], dtype="float", name="cbt_inv_food_grow_loec")
self.cbt_inv_food_repro_noec = pd.Series([], dtype="float", name="cbt_inv_food_repro_noec")
self.cbt_inv_food_repro_loec = pd.Series([], dtype="float", name="cbt_inv_food_repro_loec")
self.cbt_inv_food_behav_noec = pd.Series([], dtype="float", name="cbt_inv_food_behav_noec")
self.cbt_inv_food_behav_loec = pd.Series([], dtype="float", name="cbt_inv_food_behav_loec")
self.cbt_inv_food_sensory_noec = pd.Series([], dtype="float", name="cbt_inv_food_sensory_noec")
self.cbt_inv_food_sensory_loec = pd.Series([], dtype="float", name="cbt_inv_food_sensory_loec")
self.cbt_inv_food_sub_indirect = pd.Series([], dtype="float", name="cbt_inv_food_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates soil (mg-pest/kg-soil(dw))
self.cbt_inv_soil_1inmill_mort = pd.Series([], dtype="float", name="cbt_inv_soil_1inmill_mort")
self.cbt_inv_soil_1inten_mort = pd.Series([], dtype="float", name="cbt_inv_soil_1inten_mort")
self.cbt_inv_soil_low_lc50 = pd.Series([], dtype="float", name="cbt_inv_soil_low_lc50")
self.cbt_inv_soil_sub_direct = pd.Series([], dtype="float", name="cbt_inv_soil_sub_direct")
self.cbt_inv_soil_grow_noec = pd.Series([], dtype="float", name="cbt_inv_soil_grow_noec")
self.cbt_inv_soil_grow_loec = pd.Series([], dtype="float", name="cbt_inv_soil_grow_loec")
self.cbt_inv_soil_repro_noec = pd.Series([], dtype="float", name="cbt_inv_soil_repro_noec")
# api: pandas.Series
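A small sketch of the label-based column selection used in read_species_properties; only the column headers follow the source, the CSV contents are placeholders.
import pandas as pd
from io import StringIO

# Placeholder stand-in for tests/TEDSpeciesProperties.csv.
csv_text = StringIO(
    "Scientific Name,Common Name,Taxa,Order,BW (g),Food item,Water content of diet\n"
    "species_a,common_a,Birds,order_a,10,seeds,0.1\n"
)
dr = pd.read_csv(csv_text)
# .loc[:, 'column'] selects a single column as a Series by label.
sci_name = dr.loc[:, 'Scientific Name']
body_wgt = dr.loc[:, 'BW (g)']
print(sci_name.tolist(), body_wgt.tolist())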
import numpy as np
import pandas as pd
from surprise import Reader, Dataset, SVD
class CollabRecSys:
def __init__(self, data):
self.df = pd.read_csv(data)
# api: pandas.read_csv
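The class stops right after loading the ratings file. Below is a hedged sketch of how the surprise pieces it imports are usually wired together; the column names user, item and rating are assumptions, not taken from the source.
import pandas as pd
from surprise import Reader, Dataset, SVD

# Hypothetical ratings frame with the columns surprise expects.
ratings = pd.DataFrame({
    "user":   ["u1", "u1", "u2", "u3"],
    "item":   ["i1", "i2", "i1", "i3"],
    "rating": [4.0, 3.0, 5.0, 2.0],
})

reader = Reader(rating_scale=(1, 5))
data = Dataset.load_from_df(ratings[["user", "item", "rating"]], reader)

algo = SVD()
algo.fit(data.build_full_trainset())
print(algo.predict("u2", "i3").est)   # estimated rating for an unseen user/item pair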
"""
Use the ``MNLDiscreteChoiceModel`` class to train a choice module using
multinomial logit and make subsequent choice predictions.
"""
from __future__ import print_function, division
import abc
import logging
import numpy as np
import pandas as pd
from patsy import dmatrix
from prettytable import PrettyTable
from zbox import toolz as tz
from . import util
from ..exceptions import ModelEvaluationError
from ..urbanchoice import interaction, mnl
from ..utils import yamlio
from ..utils.logutil import log_start_finish
from urbansim_defaults.randomfile import fixedrandomseed,seednum
logger = logging.getLogger(__name__)
def unit_choice(chooser_ids, alternative_ids, probabilities):
"""
Have a set of choosers choose from among alternatives according
to a probability distribution. Choice is binary: each
alternative can only be chosen once.
Parameters
----------
chooser_ids : 1d array_like
Array of IDs of the agents that are making choices.
alternative_ids : 1d array_like
Array of IDs of alternatives among which agents are making choices.
probabilities : 1d array_like
The probability that an agent will choose an alternative.
Must be the same shape as `alternative_ids`. Unavailable
alternatives should have a probability of 0.
Returns
-------
choices : pandas.Series
Mapping of chooser ID to alternative ID. Some choosers
will map to a nan value when there are not enough alternatives
for all the choosers.
"""
chooser_ids = np.asanyarray(chooser_ids)
alternative_ids = np.asanyarray(alternative_ids)
probabilities = np.asanyarray(probabilities)
logger.debug(
'start: unit choice with {} choosers and {} alternatives'.format(
len(chooser_ids), len(alternative_ids)))
choices = pd.Series(index=chooser_ids)
if probabilities.sum() == 0:
# return all nan if there are no available units
return choices
# probabilities need to sum to 1 for np.random.choice
probabilities = probabilities / probabilities.sum()
# need to see if there are as many available alternatives as choosers
n_available = np.count_nonzero(probabilities)
n_choosers = len(chooser_ids)
n_to_choose = n_choosers if n_choosers < n_available else n_available
if fixedrandomseed==0: np.random.seed(seednum)
chosen = np.random.choice(
alternative_ids, size=n_to_choose, replace=False, p=probabilities)
# if there are fewer available units than choosers we need to pick
# which choosers get a unit
if n_to_choose == n_available:
if fixedrandomseed==0: np.random.seed(seednum)
chooser_ids = np.random.choice(
chooser_ids, size=n_to_choose, replace=False)
choices[chooser_ids] = chosen
logger.debug('finish: unit choice')
return choices
# define the minimum interface a class must have in order to
# look like we expect DCMs to look
class DiscreteChoiceModel(object):
"""
Abstract base class for discrete choice models.
"""
__metaclass__ = abc.ABCMeta
@staticmethod
def _check_prob_choice_mode_compat(probability_mode, choice_mode):
"""
Check that the probability and choice modes are compatibly with
each other. Currently 'single_chooser' must be paired with
'aggregate' and 'full_product' must be paired with 'individual'.
"""
if (probability_mode == 'full_product' and
choice_mode == 'aggregate'):
raise ValueError(
"'full_product' probability mode is not compatible with "
"'aggregate' choice mode")
if (probability_mode == 'single_chooser' and
choice_mode == 'individual'):
raise ValueError(
"'single_chooser' probability mode is not compatible with "
"'individual' choice mode")
@staticmethod
def _check_prob_mode_interaction_compat(
probability_mode, interaction_predict_filters):
"""
The 'full_product' probability mode is currently incompatible with
post-interaction prediction filters, so make sure we don't have
both of those.
"""
if (interaction_predict_filters is not None and
probability_mode == 'full_product'):
raise ValueError(
"interaction filters may not be used in "
"'full_product' mode")
@abc.abstractmethod
def apply_fit_filters(self, choosers, alternatives):
choosers = util.apply_filter_query(choosers, self.choosers_fit_filters)
alternatives = util.apply_filter_query(
alternatives, self.alts_fit_filters)
return choosers, alternatives
@abc.abstractmethod
def apply_predict_filters(self, choosers, alternatives):
choosers = util.apply_filter_query(
choosers, self.choosers_predict_filters)
alternatives = util.apply_filter_query(
alternatives, self.alts_predict_filters)
return choosers, alternatives
@abc.abstractproperty
def fitted(self):
pass
@abc.abstractmethod
def probabilities(self):
pass
@abc.abstractmethod
def summed_probabilities(self):
pass
@abc.abstractmethod
def fit(self):
pass
@abc.abstractmethod
def predict(self):
pass
@abc.abstractmethod
def choosers_columns_used(self):
pass
@abc.abstractmethod
def alts_columns_used(self):
pass
@abc.abstractmethod
def interaction_columns_used(self):
pass
@abc.abstractmethod
def columns_used(self):
pass
class MNLDiscreteChoiceModel(DiscreteChoiceModel):
"""
A discrete choice model with the ability to store an estimated
model and predict new data based on the model.
Based on multinomial logit.
Parameters
----------
model_expression : str, iterable, or dict
A patsy model expression. Should contain only a right-hand side.
sample_size : int
Number of choices to sample for estimating the model.
probability_mode : str, optional
Specify the method to use for calculating probabilities
during prediction.
Available string options are 'single_chooser' and 'full_product'.
In "single chooser" mode one agent is chosen for calculating
probabilities across all alternatives. In "full product" mode
probabilities are calculated for every chooser across all alternatives.
Currently "single chooser" mode must be used with a `choice_mode`
of 'aggregate' and "full product" mode must be used with a
`choice_mode` of 'individual'.
choice_mode : str, optional
Specify the method to use for making choices among alternatives.
Available string options are 'individual' and 'aggregate'.
In "individual" mode choices will be made separately for each chooser.
In "aggregate" mode choices are made for all choosers at once.
Aggregate mode implies that an alternative chosen by one agent
is unavailable to other agents and that the same probabilities
can be used for all choosers.
Currently "individual" mode must be used with a `probability_mode`
of 'full_product' and "aggregate" mode must be used with a
`probability_mode` of 'single_chooser'.
choosers_fit_filters : list of str, optional
Filters applied to choosers table before fitting the model.
choosers_predict_filters : list of str, optional
Filters applied to the choosers table before calculating
new data points.
alts_fit_filters : list of str, optional
Filters applied to the alternatives table before fitting the model.
alts_predict_filters : list of str, optional
Filters applied to the alternatives table before calculating
new data points.
interaction_predict_filters : list of str, optional
Filters applied to the merged choosers/alternatives table
before predicting agent choices.
estimation_sample_size : int, optional
Whether to sample choosers during estimation
(needs to be applied after choosers_fit_filters).
prediction_sample_size : int, optional
Whether (and how much) to sample alternatives during prediction.
Note that this can lead to multiple choosers picking the same
alternative.
choice_column : optional
Name of the column in the `alternatives` table that choosers
should choose. e.g. the 'building_id' column. If not provided
the alternatives index is used.
name : optional
Optional descriptive name for this model that may be used
in output.
"""
def __init__(
self, model_expression, sample_size,
probability_mode='full_product', choice_mode='individual',
choosers_fit_filters=None, choosers_predict_filters=None,
alts_fit_filters=None, alts_predict_filters=None,
interaction_predict_filters=None,
estimation_sample_size=None,
prediction_sample_size=None,
choice_column=None, name=None):
self._check_prob_choice_mode_compat(probability_mode, choice_mode)
self._check_prob_mode_interaction_compat(
probability_mode, interaction_predict_filters)
self.model_expression = model_expression
self.sample_size = sample_size
self.probability_mode = probability_mode
self.choice_mode = choice_mode
self.choosers_fit_filters = choosers_fit_filters
self.choosers_predict_filters = choosers_predict_filters
self.alts_fit_filters = alts_fit_filters
self.alts_predict_filters = alts_predict_filters
self.interaction_predict_filters = interaction_predict_filters
self.estimation_sample_size = estimation_sample_size
self.prediction_sample_size = prediction_sample_size
self.choice_column = choice_column
self.name = name if name is not None else 'MNLDiscreteChoiceModel'
self.sim_pdf = None
self.log_likelihoods = None
self.fit_parameters = None
@classmethod
def from_yaml(cls, yaml_str=None, str_or_buffer=None):
"""
Create a DiscreteChoiceModel instance from a saved YAML configuration.
Arguments are mutually exclusive.
Parameters
----------
yaml_str : str, optional
A YAML string from which to load model.
str_or_buffer : str or file like, optional
File name or buffer from which to load YAML.
Returns
-------
MNLDiscreteChoiceModel
"""
cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer)
model = cls(
cfg['model_expression'],
cfg['sample_size'],
probability_mode=cfg.get('probability_mode', 'full_product'),
choice_mode=cfg.get('choice_mode', 'individual'),
choosers_fit_filters=cfg.get('choosers_fit_filters', None),
choosers_predict_filters=cfg.get('choosers_predict_filters', None),
alts_fit_filters=cfg.get('alts_fit_filters', None),
alts_predict_filters=cfg.get('alts_predict_filters', None),
interaction_predict_filters=cfg.get(
'interaction_predict_filters', None),
estimation_sample_size=cfg.get('estimation_sample_size', None),
prediction_sample_size=cfg.get('prediction_sample_size', None),
choice_column=cfg.get('choice_column', None),
name=cfg.get('name', None)
)
if cfg.get('log_likelihoods', None):
model.log_likelihoods = cfg['log_likelihoods']
if cfg.get('fit_parameters', None):
model.fit_parameters = pd.DataFrame(cfg['fit_parameters'])
logger.debug('loaded LCM model {} from YAML'.format(model.name))
return model
@property
def str_model_expression(self):
"""
Model expression as a string suitable for use with patsy/statsmodels.
"""
return util.str_model_expression(
self.model_expression, add_constant=False)
def apply_fit_filters(self, choosers, alternatives):
"""
Filter `choosers` and `alternatives` for fitting.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing,
e.g. buildings.
Returns
-------
filtered_choosers, filtered_alts : pandas.DataFrame
"""
return super(MNLDiscreteChoiceModel, self).apply_fit_filters(
choosers, alternatives)
def apply_predict_filters(self, choosers, alternatives):
"""
Filter `choosers` and `alternatives` for prediction.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing,
e.g. buildings.
Returns
-------
filtered_choosers, filtered_alts : pandas.DataFrame
"""
return super(MNLDiscreteChoiceModel, self).apply_predict_filters(
choosers, alternatives)
def fit(self, choosers, alternatives, current_choice):
"""
Fit and save model parameters based on given data.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing,
e.g. buildings.
current_choice : pandas.Series or any
A Series describing the `alternatives` currently chosen
by the `choosers`. Should have an index matching `choosers`
and values matching the index of `alternatives`.
If a non-Series is given it should be a column in `choosers`.
Returns
-------
log_likelihoods : dict
Dict of log-likelihood values describing the quality of the
model fit. Will have keys 'null', 'convergence', and 'ratio'.
"""
logger.debug('start: fit LCM model {}'.format(self.name))
if not isinstance(current_choice, pd.Series):
current_choice = choosers[current_choice]
choosers, alternatives = self.apply_fit_filters(choosers, alternatives)
if self.estimation_sample_size:
if fixedrandomseed==0: np.random.seed(seednum)
choosers = choosers.loc[np.random.choice(
choosers.index,
min(self.estimation_sample_size, len(choosers)),
replace=False)]
current_choice = current_choice.loc[choosers.index]
_, merged, chosen = interaction.mnl_interaction_dataset(
choosers, alternatives, self.sample_size, current_choice)
model_design = dmatrix(
self.str_model_expression, data=merged, return_type='dataframe')
if len(merged) != model_design.values.shape[0]:
raise ModelEvaluationError(
'Estimated data does not have the same length as input. '
'This suggests there are null values in one or more of '
'the input columns.')
self.log_likelihoods, self.fit_parameters = mnl.mnl_estimate(
model_design.values, chosen, self.sample_size)
self.fit_parameters.index = model_design.columns
logger.debug('finish: fit LCM model {}'.format(self.name))
return self.log_likelihoods
@property
def fitted(self):
"""
True if model is ready for prediction.
"""
return self.fit_parameters is not None
def assert_fitted(self):
"""
Raises `RuntimeError` if the model is not ready for prediction.
"""
if not self.fitted:
raise RuntimeError('Model has not been fit.')
def report_fit(self):
"""
Print a report of the fit results.
"""
if not self.fitted:
print('Model not yet fit.')
return
print('Null Log-likelihood: {0:.3f}'.format(
self.log_likelihoods['null']))
print('Log-likelihood at convergence: {0:.3f}'.format(
self.log_likelihoods['convergence']))
print('Log-likelihood Ratio: {0:.3f}\n'.format(
self.log_likelihoods['ratio']))
tbl = PrettyTable()
tbl.add_column('Component', self.fit_parameters.index.values)
for col in ('Coefficient', 'Std. Error', 'T-Score'):
tbl.add_column(col, self.fit_parameters[col].values)
tbl.align['Component'] = 'l'
tbl.float_format = '.3'
print(tbl)
def probabilities(self, choosers, alternatives, filter_tables=True):
"""
Returns the probabilities for a set of choosers to choose
from among a set of alternatives.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
filter_tables : bool, optional
If True, filter `choosers` and `alternatives` with prediction
filters before calculating probabilities.
Returns
-------
probabilities : pandas.Series
Probability of selection associated with each chooser
and alternative. Index will be a MultiIndex with alternative
IDs in the inner index and chooser IDs in the outer index.
"""
logger.debug('start: calculate probabilities for LCM model {}'.format(
self.name))
self.assert_fitted()
if filter_tables:
choosers, alternatives = self.apply_predict_filters(
choosers, alternatives)
if self.prediction_sample_size is not None:
sample_size = self.prediction_sample_size
else:
sample_size = len(alternatives)
if self.probability_mode == 'single_chooser':
_, merged, _ = interaction.mnl_interaction_dataset(
choosers.head(1), alternatives, sample_size)
elif self.probability_mode == 'full_product':
_, merged, _ = interaction.mnl_interaction_dataset(
choosers, alternatives, sample_size)
else:
raise ValueError(
'Unrecognized probability_mode option: {}'.format(
self.probability_mode))
merged = util.apply_filter_query(
merged, self.interaction_predict_filters)
model_design = dmatrix(
self.str_model_expression, data=merged, return_type='dataframe')
if len(merged) != model_design.values.shape[0]:
raise ModelEvaluationError(
'Simulated data does not have the same length as input. '
'This suggests there are null values in one or more of '
'the input columns.')
# get the order of the coefficients in the same order as the
# columns in the design matrix
coeffs = [self.fit_parameters['Coefficient'][x]
for x in model_design.columns]
# probabilities are returned from mnl_simulate as a 2d array
# with choosers along rows and alternatives along columns
if self.probability_mode == 'single_chooser':
numalts = len(merged)
else:
numalts = sample_size
probabilities = mnl.mnl_simulate(
model_design.values,
coeffs,
numalts=numalts, returnprobs=True)
# want to turn probabilities into a Series with a MultiIndex
# of chooser IDs and alternative IDs.
# indexing by chooser ID will get you the probabilities
# across alternatives for that chooser
mi = pd.MultiIndex.from_arrays(
[merged['join_index'].values, merged.index.values],
names=('chooser_id', 'alternative_id'))
probabilities = pd.Series(probabilities.flatten(), index=mi)
logger.debug('finish: calculate probabilities for LCM model {}'.format(
self.name))
return probabilities
def summed_probabilities(self, choosers, alternatives):
"""
Calculate total probability associated with each alternative.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
Returns
-------
probs : pandas.Series
Total probability associated with each alternative.
"""
def normalize(s):
return s / s.sum()
choosers, alternatives = self.apply_predict_filters(
choosers, alternatives)
probs = self.probabilities(choosers, alternatives, filter_tables=False)
# group by the alternative IDs and sum
if self.probability_mode == 'single_chooser':
return (
normalize(probs) * len(choosers)
).reset_index(level=0, drop=True)
elif self.probability_mode == 'full_product':
return probs.groupby(level=0).apply(normalize)\
.groupby(level=1).sum()
else:
raise ValueError(
'Unrecognized probability_mode option: {}'.format(
self.probability_mode))
def predict(self, choosers, alternatives, debug=False):
"""
Choose from among alternatives for a group of agents.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
debug : bool
If debug is set to true, will set the variable "sim_pdf" on
the object to store the probabilities for mapping of the
outcome.
Returns
-------
choices : pandas.Series
Mapping of chooser ID to alternative ID. Some choosers
will map to a nan value when there are not enough alternatives
for all the choosers.
"""
self.assert_fitted()
logger.debug('start: predict LCM model {}'.format(self.name))
choosers, alternatives = self.apply_predict_filters(
choosers, alternatives)
if len(choosers) == 0:
return pd.Series()
if len(alternatives) == 0:
return pd.Series(index=choosers.index)
# api: pandas.Series
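To make the sampling idiom inside unit_choice concrete, here is a self-contained miniature of the same steps (normalize probabilities, sample alternatives without replacement, map them onto a chooser-indexed Series); the IDs and probabilities are invented.
import numpy as np
import pandas as pd

chooser_ids = np.array([101, 102, 103])
alternative_ids = np.array([1, 2, 3, 4])
probabilities = np.array([0.5, 0.0, 0.3, 0.2])   # unavailable unit -> probability 0

choices = pd.Series(index=chooser_ids, dtype=float)
probabilities = probabilities / probabilities.sum()   # np.random.choice needs probabilities summing to 1
n_available = np.count_nonzero(probabilities)
n_to_choose = min(len(chooser_ids), n_available)

chosen = np.random.choice(alternative_ids, size=n_to_choose, replace=False, p=probabilities)
if n_to_choose < len(chooser_ids):
    # Fewer units than choosers: pick which choosers get one, the rest stay NaN.
    chooser_ids = np.random.choice(chooser_ids, size=n_to_choose, replace=False)
choices[chooser_ids] = chosen
print(choices)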
import pytest
import pandas as pd
from pandas.testing import assert_series_equal
from yeast import Recipe
from yeast.steps import JoinStep, SortStep, RenameColumnsStep
from yeast.errors import YeastValidationError
from tests.data_samples import startrek_starships
from tests.data_samples import startrek_starships_specs
def test_join_on_left_step(startrek_starships, startrek_starships_specs):
"""
Left Join with NA mismatches
"""
recipe = Recipe([
JoinStep(startrek_starships_specs, by="uid", how="left"),
SortStep('uid')
])
baked_data = recipe.prepare(startrek_starships).bake(startrek_starships)
assert baked_data.shape == (5, 3)
row = pd.Series({'uid': 'NCC-1031', 'name': 'USS Discovery', 'warp': 9.9}, name=0)
assert_series_equal(baked_data.loc[0], row)
row = pd.Series({'uid': 'NX-01', 'name': 'Enterprise', 'warp': None}, name=4)
assert_series_equal(baked_data.loc[4], row)
def test_join_on_inner_step(startrek_starships, startrek_starships_specs):
"""
Inner Join with NA mismatches
"""
recipe = Recipe([
JoinStep(startrek_starships_specs, by="uid", how="inner"),
SortStep('uid')
])
baked_data = recipe.prepare(startrek_starships).bake(startrek_starships)
assert baked_data.shape == (4, 3)
row = pd.Series({'uid': 'NCC-1031', 'name': 'USS Discovery', 'warp': 9.9}, name=0)
assert_series_equal(baked_data.loc[0], row)
row = pd.Series({'uid': 'NCC-74656', 'name': 'USS Voyager', 'warp': 9.975}, name=3)
assert_series_equal(baked_data.loc[3], row)
def test_join_on_right_step(startrek_starships, startrek_starships_specs):
"""
Right Join with NA mismatches
"""
recipe = Recipe([
JoinStep(startrek_starships_specs, by="uid", how="right"),
SortStep('uid')
])
baked_data = recipe.prepare(startrek_starships).bake(startrek_starships)
assert baked_data.shape == (4, 3)
row = pd.Series({'uid': 'NCC-1031', 'name': 'USS Discovery', 'warp': 9.9}, name=0)
assert_series_equal(baked_data.loc[0], row)
row = pd.Series({'uid': 'NCC-74656', 'name': 'USS Voyager', 'warp': 9.975}, name=3)
assert_series_equal(baked_data.loc[3], row)
def test_join_on_fullouter_step(startrek_starships, startrek_starships_specs):
"""
Full outer Join with NA mismatches
"""
recipe = Recipe([
JoinStep(startrek_starships_specs, by="uid", how="full"),
SortStep('uid')
])
baked_data = recipe.prepare(startrek_starships).bake(startrek_starships)
assert baked_data.shape == (5, 3)
row = pd.Series({'uid': 'NCC-1031', 'name': 'USS Discovery', 'warp': 9.9}, name=0)
assert_series_equal(baked_data.loc[0], row)
row = pd.Series({'uid': 'NX-01', 'name': 'Enterprise', 'warp': None}, name=4)
# api: pandas.Series
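For readers more familiar with raw pandas than with yeast's JoinStep, the four tests above map directly onto DataFrame.merge with how set to left, inner, right and outer; the fixture contents below are partly assumed, only the uid/name/warp columns and the missing NX-01 spec follow the tests.
import pandas as pd

ships = pd.DataFrame({"uid": ["NCC-1031", "NCC-74656", "NX-01"],
                      "name": ["USS Discovery", "USS Voyager", "Enterprise"]})
specs = pd.DataFrame({"uid": ["NCC-1031", "NCC-74656"],
                      "warp": [9.9, 9.975]})

# how='left' keeps every starship and fills the missing spec with NaN,
# how='inner' drops the NX-01 row because it has no matching spec,
# how='outer' corresponds to the "full" join in the tests above.
for how in ("left", "inner", "right", "outer"):
    joined = ships.merge(specs, on="uid", how=how).sort_values("uid")
    print(how, joined.shape)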
#-*- coding: utf-8 -*-
#!/usr/bin/env python
"""
SPACE GROUP a-CNN
filename: space_group_a_CNN.py version: 1.0
dependencies:
autoXRD version 1.0
autoXRD_vis version 0.2
Code to perform classification of XRD patterns for various space groups using
physics-informed data augmentation and an all-convolutional neural network (a-CNN).
Code to plot class activation maps from a-CNN and global average pooling layer
@authors: <NAME> and <NAME>
MIT Photovoltaics Laboratory / Singapore and MIT Alliance for Research and Technology
All code is under Apache 2.0 license, please cite any use of the code as explained
in the README.rst file, in the GitHub repository.
"""
#################################################################
#Libraries and dependencies
#################################################################
# Loads series of functions for preprocessing and data augmentation
from autoXRD import *
# Loads CAMs visualizations for a-CNN
from autoXRD_vis import *
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn import metrics
from sklearn.model_selection import KFold
# Neural network uses Keras with a TensorFlow backend
import keras as K
from keras.models import Model
from keras.models import Sequential
from keras.callbacks import ReduceLROnPlateau
from keras.callbacks import EarlyStopping
import tensorflow as tf
# Clear Keras and TF session, if run previously
K.backend.clear_session()
tf.reset_default_graph()
# Training Parameters
BATCH_SIZE=128
# Network Parameters
n_input = 1200 # Total angles in XRD pattern
n_classes = 7 # Number of space-group classes
filter_size = 2
kernel_size = 10
################################################################
# Load data and preprocess
################################################################
# Load simulated and anonymized dataset
import os
dirname = os.path.dirname(__file__)
theor = pd.read_csv(os.path.join(dirname, 'Datasets/theor.csv'), index_col=0)
theor = theor.iloc[1:,]
theor_arr=theor.values
# Normalize data for training
ntheor = normdata(theor_arr)
# Load labels for simulated data
label_theo = pd.read_csv(os.path.join(dirname, 'Datasets/label_theo.csv'), header=None, index_col=0)
label_theo = label_theo[1].tolist()
# Load experimental data as dataframe
exp_arr_new = pd.read_csv(os.path.join(dirname, 'Datasets/exp.csv'), index_col=0)
exp_arr_new = exp_arr_new.values
# Load experimental class labels
label_exp= pd.read_csv(os.path.join(dirname, 'Datasets/label_exp.csv'), index_col=0).values
label_exp = label_exp.reshape([len(label_exp),])
# Load class encoding
space_group_enc = pd.read_csv(os.path.join(dirname, 'Datasets/encoding.csv'), index_col=0)
space_group_enc = list(space_group_enc['0'])
# Normalize experimental data
nexp = normdata(exp_arr_new)
# Define spectral range for data augmentation
exp_min = 0
exp_max = 1200
theor_min = 125
#window size for experimental data extraction
window = 20
theor_max = theor_min+exp_max-exp_min
# Preprocess experimental data
post_exp = normdatasingle(exp_data_processing (nexp, exp_min, exp_max, window))
################################################################
# Perform data augmentation
################################################################
# Specify how many data points to augment
th_num = 2000
# Augment data, this may take a bit
augd,pard,crop_augd = augdata(ntheor, th_num, label_theo, theor_min, theor_max)
# Encode theoretical labels
label_t=np.zeros([len(pard),])
for i in range(len(pard)):
label_t[i]=space_group_enc.index(pard[i])
# Input the number of experimental data points
exp_num = 88
# Prepare experimental arrays for training and testing
X_exp = np.transpose(post_exp[:,0:exp_num])
y_exp = label_exp[0:exp_num]
# Prepare simulated arrays for training and testing
X_th = np.transpose(crop_augd )
y_th = label_t
################################################################
# Perform training and cross-validation
################################################################
fold = 5 # Number of k-folds
k_fold = KFold(n_splits=fold, shuffle=True, random_state=3)
# Create arrays to populate metrics
accuracy_exp = np.empty((fold,1))
accuracy_exp_b = np.empty((fold,1))
accuracy_exp_r1 = np.empty((fold,1))
accuracy_exp_p1 = np.empty((fold,1))
accuracy_exp_r2 = np.empty((fold,1))
accuracy_exp_p2 = np.empty((fold,1))
f1=np.empty((fold,1))
f1_m=np.empty((fold,1))
# Create auxiliary arrays
accuracy=[]
logs=[]
ground_truth=[]
predictions_ord=[]
trains=[]
tests=[]
trains_combine=[]
trains_y=[]
# Run cross validation and define a-CNN each time in loop
for k, (train, test) in enumerate(k_fold.split(X_exp, y_exp)):
#Save splits for later use
trains.append(train)
tests.append(test)
#Data augmentation of experimental training dataset, we
# already removed the experimental training dataset
temp_x = X_exp[train]
temp_y = y_exp[train]
exp_train_x,exp_train_y = exp_augdata(temp_x.T,5000,temp_y)
# Combine theoretical and experimenal dataset for training
train_combine = np.concatenate((X_th,exp_train_x.T))
trains_combine.append(train_combine)
# Clear weights and networks state
K.backend.clear_session()
# Network Parameters
BATCH_SIZE=128
n_input = 1200 # Total angles in XRD pattern
n_classes = 7 # Number of space-group classes
filter_size = 2
kernel_size = 10
enc = OneHotEncoder(sparse=False)
train_dim = train_combine.reshape(train_combine.shape[0],1200,1)
train_y = np.concatenate((y_th,exp_train_y))
trains_y.append(train_y)
train_y_hot = enc.fit_transform(train_y.reshape(-1,1))
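# train_y_hot is a dense one-hot matrix of the combined labels, matching the softmax output layer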
# Define network structure
model = Sequential()
model.add(K.layers.Conv1D(32, 8,strides=8, padding='same',input_shape=(1200,1), activation='relu'))
model.add(K.layers.Conv1D(32, 5,strides=5, padding='same', activation='relu'))
model.add(K.layers.Conv1D(32, 3,strides=3, padding='same', activation='relu'))
model.add(K.layers.pooling.GlobalAveragePooling1D())
model.add(K.layers.Dense(n_classes, activation='softmax'))
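# With padding='same', the three strided convolutions reduce the 1200-point spectrum to 150, 30 and then 10 positions; global average pooling collapses these to a 32-dim feature vector before the 7-way softmax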
#Define optimizer
optimizer = K.optimizers.Adam()
# Compile model
model.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['categorical_accuracy'])
# Choose early stop
#early_stop = EarlyStopping(monitor='val_loss', patience=50, verbose=1, restore_best_weights=True)
# Reduce learning rate during optimization
# reduce_lr = ReduceLROnPlateau(monitor = 'loss', factor=0.5,
# patience=50, min_lr=0.00001)
# Define test data
test_x = X_exp[test]
test_x = test_x.reshape(test_x.shape[0],1200,1)
test_y = enc.fit_transform(y_exp.reshape(-1,1))[test]
# Fit model
hist = model.fit(train_dim, train_y_hot, batch_size=BATCH_SIZE, epochs=100,
verbose=1, validation_data=(test_x, test_y))
# hist = model.fit(train_dim, train_y_hot, batch_size=BATCH_SIZE, epochs=100,
# verbose=1, validation_data=(test_x, test_y), callbacks = [early_stop])
#
#Compute model predictions
prediction=model.predict(test_x)
#Go from one-hot to ordinal...
prediction_ord=[np.argmax(element) for element in prediction]
predictions_ord.append(prediction_ord)
# Compute accuracy, recall, precision and F1 with macro and micro averaging
accuracy_exp[k] = metrics.accuracy_score(y_exp[test], prediction_ord)
accuracy_exp_r1[k] = metrics.recall_score(y_exp[test], prediction_ord, average='macro')
accuracy_exp_r2[k] = metrics.recall_score(y_exp[test], prediction_ord, average='micro')
accuracy_exp_p1[k] = metrics.precision_score(y_exp[test], prediction_ord, average='macro')
accuracy_exp_p2[k] = metrics.precision_score(y_exp[test], prediction_ord, average='micro')
f1[k]=metrics.f1_score(y_exp[test], prediction_ord, average='micro')
f1_m[k]=metrics.f1_score(y_exp[test], prediction_ord, average='macro')
#Produce ground_truth: each list element is an array with the fold's test indices (w.r.t. X_exp) in the first column and the true labels in the second
ground_truth.append(np.concatenate([test.reshape(len(test),1),y_exp[test].reshape(len(test),1)],axis=1))
#Compute loss and accuracy for each k validation
accuracy.append(model.evaluate(test_x, test_y, verbose=0))
#Save logs
log = pd.DataFrame(hist.history)
logs.append(log)
#Save each fold's model in the current folder with suffixes 0 to 4
model.save(os.path.join(dirname, 'keras_model')+str(k)+'.h5')
#
accuracy = np.array(accuracy)
# Report final cross-validation accuracy
print('Mean Cross-val accuracy', np.mean(accuracy[:,1]))
################################################################
# Plotting Class Activation Maps
################################################################
# Compute correctly classified and incorrectly classified cases
corrects, incorrects = find_incorrects(ground_truth,predictions_ord)
# Get dataframe of all incorrects and dataframe of all corrects
corrects_df = pd.concat([r for r in corrects], ignore_index=False, axis=0)
incorrects_df = pd.concat([r for r in incorrects], ignore_index=False, axis=0)
#!/usr/bin/env python3.6
"""This module describes functions for analysis of the SNSS Dataset"""
import os
import pandas as pd
from sas7bdat import SAS7BDAT
import numpy as np
import subprocess
from datetime import datetime, date
from csv import DictReader
from shutil import rmtree
from json import load as jsonLoad
import functools
import itertools
from colour import Color
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import matplotlib.patches as patches
import seaborn as sb
import textwrap as txtwrp
import ast
import imageio as imgio
import tqdm
import pickle
import scipy.stats as stats
import statsmodels.stats.api as sms
import scipy.interpolate as interpolate
from statsmodels.stats.weightstats import CompareMeans, DescrStatsW
from statsmodels.discrete.discrete_model import Logit
from statsmodels.tools.tools import add_constant
from sklearn import preprocessing, decomposition, manifold
from sklearn.metrics import confusion_matrix, \
accuracy_score, roc_auc_score, roc_curve, \
classification_report, precision_score, recall_score, explained_variance_score, r2_score, f1_score
from scipy.stats import logistic
from scipy.optimize import curve_fit
import pydot
from tensorflow.keras.metrics import top_k_categorical_accuracy
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense, Dropout, AlphaDropout, LeakyReLU
from tensorflow.keras.utils import plot_model
from tensorflow.keras.callbacks import CSVLogger, TensorBoard, Callback, EarlyStopping, ModelCheckpoint
from tensorflow.keras.backend import clear_session
import tensorflow.compat as tfCompat
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, <NAME>"
__credits__ = ["<NAME>", "<NAME>", "<NAME>", "<NAME>"]
__license__ = "Apache-2.0"
__version__ = "0.1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL> <EMAIL>"
""" Short title
Description
Args:
arg1: arg1 Description
Returns:
output1: output1 description.
Raises:
exception1: exception circumstances.
"""
def loadJSON(fname):
# Load configuration
f = open(fname) # Open config file...
cfg = jsonLoad(f) # Load data...
f.close() # Close config file...
return cfg
def moduleInit():
pd.options.display.max_columns = None
pd.options.display.max_rows = 20
tfCompat.v1.disable_eager_execution()
def rmws(strList):
stripList = []
for s in strList:
stripList.append(s.replace(" ", ""))
return stripList
def timeAppend(varList, T):
timeVarList = []
for v in varList:
timeVarList.append(T + '_' + v)
return timeVarList
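# Example: timeAppend(['HADS', 'SF12_PF'], 'T0') -> ['T0_HADS', 'T0_SF12_PF']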
def autoscale(x):
return (x-np.min(x))/np.max(x)
def normalise(x):
return (x-np.mean(x))/np.std(x)
def import_SNSS(usr, pwd, local_file=0):
""" Mount UoE CMVM smb and import SNSS as dataframe.
Note you must have access permissions to specific share.
Keyword arguments:
usr = Edinburgh University matriculation number
pwd = <PASSWORD>
Location of data is specified in a JSON config file not included.
The SNSS dataset includes confidential patient information and must be
handled according to Caldicott principles.
"""
cfg = loadJSON("config.json")
if local_file:
print('Importing local data file...')
# Open and read SNSS data file
fp = '../../../../../Volumes/mount/SNSSFull.sas7bdat'
f = SAS7BDAT(fp)
rawDf = f.to_data_frame()
print('Dataframe loaded!')
else:
cmd = "mount_smbfs"
mountCmd = cmd+" //'"+cfg['dom']+";"+usr+":"+pwd+"'@"+cfg['shr']+" "+cfg['mnt']
uMountCmd = 'umount raw_data/mount/'
# Send smb mount command..
print('Mounting datashare...')
smbCall = subprocess.call(mountCmd, shell=True)
# Open and read SNSS data file
f = SAS7BDAT(cfg['fpath'])
print('Converting sas7bdat file to pd.dataframe...')
rawDf = f.to_data_frame()
print('Conversion completed! Closing file...')
f.close()
print('Attempting Unmount..')
try:
smbCall = subprocess.call(uMountCmd, shell=True)
print('dataShare Unmounted Successfully!')
except(OSError, EOFError):
print('Unmount failed...')
return rawDf
def SNSSNullity(raw):
""" Assess nullity of raw data import
Takes the raw imported dataset, ensures index integrity, assigns new binary
variables for follow up at each study timepoint and computes attrition numbers
and ratios for each.
Args:
raw: Pandas DataFrame object from SAS7BDAT file.
Returns:
raw: The validated raw dataframe.
retentionTable: A pandas dataframe of counts for use in scripts if required.
Raises:
NONE
"""
# Assign nPatid as index variable.
raw = raw.set_index('nPatid', verify_integrity=True)
# Convert diagnostic nullity into binary variable in dataframe.
raw['All'] = raw.T0_PatID.notna()
raw['T1_HCData'] = raw.T1_HealthChange.notna()
raw['T2_HCData'] = raw.T2_HealthChange.notna()
raw['T1and2_HCData'] = (raw.T2_HealthChange.notna()) & (raw.T1_HealthChange.notna())
# Quantify diagnostic nullity and export
T = []
FULabels = ['T1_HCData', 'T2_HCData', 'T1and2_HCData']
for FU in FULabels:
T.append(raw.groupby(FU)['ExpGroups'].agg([('Total', 'count'),
('Label', lambda x:
tuple(np.unique(x[~np.isnan(x)],
return_counts=True)[0])),
('N(i)', lambda x:
tuple(np.unique(x[~np.isnan(x)],
return_counts=True)[1])),
('%', lambda x:
tuple((np.unique(x[~np.isnan(x)],
return_counts=True)[1]/sum(~np.isnan(x))*100).round(2)))]))
retentionTable = pd.concat(T, keys=FULabels, axis=0)
retentionTable.index = retentionTable.index.rename(['', 'FUDataAvailable'])
retentionTable.to_csv('output/0_SNSS_retention.tsv', sep='\t')
return raw, retentionTable
def SNSSCompoundVariables(df):
"""Produce variable compund measures e.g. SF12, HADS etc.
Adds the specified custom variables normally products or sums of other Variables
or binarisation etc to the provided dataframe. This function also undertakes
SIMD quintile mapping to patient postcodes.
Args:
df: Pandas dataframe.
Returns:
df: The dataframe with new variables added.
Raises:
KeyError, ValueError: If errors in postcode mapping.
"""
# Deactivate assignment warning which slows down SIMD processing.
pd.options.mode.chained_assignment = None
# Declare variable groups
varGroups = {'PHQ13': ['StomachPain', 'BackPain', 'Paininarmslegsjoints',
'Headaches', 'Chestpain', 'Dizziness',
'FaintingSpells', 'HeartPoundingorRacing', 'ShortnessofBreath',
'Constipation', 'NauseaorGas', 'Tired', 'Sleeping'],
'NeuroSymptoms': ['Lackofcoordination', 'MemorConcentration', 'LossofSensation',
'LossofVision', 'LossofHearing', 'Paralysisorweakness',
'DoubleorBlurredVision', 'DifficultySwallowing',
'DifficultySpeaking', 'SeizureorFit',
'AnxietyattackorPanicAttack', 'Littleinterestorpleasure',
'Feelingdownorhopeless', 'Nervesorfeelinganxious',
'Worryingalot'],
'IllnessWorry': ['Wworry', 'Wseriousworry', 'Wattention'],
'Satisfaction': ['Sat1', 'Sat2', 'Sat3', 'Sat4', 'Sat5', 'Sat6', 'Sat7', 'Sat8'],
'other': ['LossofHearing', 'Littleinterestorpleasure', 'Feelingdownorhopeless',
'Nervesorfeelinganxious', 'Worryingalot', 'AnxietyattackorPanicAttack']}
# Time specify certain groups into useful keysets.
T0IllnessWorryKeys = timeAppend(varGroups['IllnessWorry'], 'T0')
T0PHQ13Keys = timeAppend(varGroups['PHQ13'], 'T0')
T1PHQ13Keys = timeAppend(varGroups['PHQ13'], 'T1')
T2PHQ13Keys = timeAppend(varGroups['PHQ13'], 'T2')
T0PHQNeuro28Keys = timeAppend(varGroups['PHQ13'] + varGroups['NeuroSymptoms'], 'T0')
T1PHQNeuro28Keys = timeAppend(varGroups['PHQ13'] + varGroups['NeuroSymptoms'], 'T1')
T2PHQNeuro28Keys = timeAppend(varGroups['PHQ13'] + varGroups['NeuroSymptoms'], 'T2')
T0SatisfactionKeys = timeAppend(varGroups['Satisfaction'], 'T0')
T1SatisfactionKeys = timeAppend(varGroups['Satisfaction'], 'T1')
# Criterion used to define successful follow-up at T1: any satisfaction data available.
# df['T1_Satisfaction_Bool'] = df['T1_Satisfaction_Total'].notna() # Strict
df['T1_Satisfaction_Bool'] = df[T1SatisfactionKeys].notna().any(axis=1) # Loose
# Add binarised ExpGroups.
df['ExpGroups_bin'] = (df['ExpGroups']-2)*-1
# Add binarised gender.
df['Gender_bin'] = df['Gender']-1
# Adding summative compound measures
df['T0_PHQNeuro28_Total'] = df[T0PHQNeuro28Keys].sum(axis=1, skipna=False)
df['T1_PHQNeuro28_Total'] = df[T1PHQNeuro28Keys].sum(axis=1, skipna=False)
df['T2_PHQNeuro28_Total'] = df[T2PHQNeuro28Keys].sum(axis=1, skipna=False)
df['T0_PHQ13_Total'] = df[T0PHQ13Keys].sum(axis=1, skipna=False)
df['T1_PHQ13_Total'] = df[T1PHQ13Keys].sum(axis=1, skipna=False)
df['T2_PHQ13_Total'] = df[T2PHQ13Keys].sum(axis=1, skipna=False)
df['T0_IllnessWorry'] = df[T0IllnessWorryKeys].sum(axis=1, skipna=False)
df['T0_Satisfaction_Total'] = df[T0SatisfactionKeys].sum(axis=1, skipna=False)
df['T1_Satisfaction_Total'] = df[T1SatisfactionKeys].sum(axis=1, skipna=False)
df['T2_Satisfaction_Total'] = df[T1SatisfactionKeys].sum(axis=1, skipna=False)
# Adding boolean compound measures
df['T0_NegExpectation'] = (df['T0_IPQ1'] > 3).astype(int) # Define "Negative Expectation"
df['T0_NegExpectation'].loc[df['T0_IPQ1'].isna()] = np.nan # Boolean operator treats NaN as 0 so replace with NaNs
df['T0_PsychAttribution'] = ((df['T0_C7'] > 3) | (df['T0_C8'] > 3)).astype(int)
df['T0_PsychAttribution'].loc[(df['T0_C7'].isna()) | (df['T0_C8'].isna())] = np.nan
df['T0_LackofPsychAttribution'] = (df['T0_PsychAttribution']-1)*-1
for S in ['T0_Sat1', 'T0_Sat2', 'T0_Sat3',
'T0_Sat4', 'T0_Sat5', 'T0_Sat6', 'T0_Sat7', 'T0_Sat8']:
satNAIdx = df[S].isna()
df[S + '_Poor_Bin'] = df[S] <= 2 # Binarise satisfaction into Poor/Fair or not
df[S + '_Poor_Bin'].loc[satNAIdx] = np.nan
# Add binned measures
df['T0_PHQ13_Binned'] = pd.cut(df['T0_PHQ13_Total'], [0, 2.1, 5.1, 8.1, 13.1],
labels=['0-2', '3-5', '6-8', '9-13'],
right=True, include_lowest=True)
df['T0_PHQ13_BinInt'] = pd.cut(df['T0_PHQ13_Total'], [0, 2.1, 5.1, 8.1, 13.1],
labels=False,
right=True, include_lowest=True)
df['T0_PHQNeuro28_Binned'] = pd.cut(df['T0_PHQNeuro28_Total'], [0, 5.1, 8.1, 13.1, 27.1],
labels=['0-5', '6-8', '9-13', '14-27'],
right=True, include_lowest=True)
df['T0_PHQNeuro28_BinInt'] = pd.cut(df['T0_PHQNeuro28_Total'], [0, 5.1, 8.1, 13.1, 27.1],
labels=False,
right=True, include_lowest=True)
df['AgeBins'] = pd.cut(df['Age'], [0, 36, 46, 56, max(df['Age'])+0.1],
labels=['<=35', '36-45', '46-55', '>=56'],
right=True, include_lowest=True)
df['AgeBinInt'] = pd.cut(df['Age'], [0, 36, 46, 56, max(df['Age'])+0.1],
labels=False,
right=True, include_lowest=True)
df['T0_HADS_Binned'] = pd.cut(df['T0_HADS'], [0, 7.1, 14.1, 21.1, max(df['T0_HADS'])+0.1],
labels=['0-7', '8-14', '15-21', '>=22'],
right=True, include_lowest=True)
df['T0_HADS_BinInt'] = pd.cut(df['T0_HADS'], [0, 7.1, 14.1, 21.1, max(df['T0_HADS'])+0.1],
labels=False,
right=True, include_lowest=True)
df['T0_SF12_PF_Binned'] = pd.cut(df['T0_SF12_PF'], [-0.1, 24.9, 49.9, 74.9, 99.9, 100.1],
labels=['0', '25', '50', '75', '100'],
right=True, include_lowest=True)
df['T0_SF12_PF_BinInt'] = pd.cut(df['T0_SF12_PF'], [-0.1, 24.9, 49.9, 74.9, 99.9, 100.1],
labels=False,
right=True, include_lowest=True)
# Add binarised outcomes
poorOutcomeDict = {0: 1, 1: 1, 2: 1, 3: 0, 4: 0}
strictPoorOutcomeDict = {0: 1, 1: 1, 2: 0, 3: 0, 4: 0}
ternaryPoorOutcomeDict = {0: 2, 1: 2, 2: 1, 3: 0, 4: 0}
df['T1_poorCGI'] = df['T1_HealthChange'].replace(poorOutcomeDict)
df['T1_poorIPS'] = df['T1_SymptomsChange'].replace(poorOutcomeDict)
df['T2_poorCGI'] = df['T2_HealthChange'].replace(poorOutcomeDict)
df['T2_poorIPS'] = df['T2_SymptomsChange'].replace(poorOutcomeDict)
df['T2_strictPoorCGI'] = df['T2_HealthChange'].replace(strictPoorOutcomeDict)
df['T2_strictPoorIPS'] = df['T2_SymptomsChange'].replace(strictPoorOutcomeDict)
df['T2_ternaryCGI'] = df['T2_HealthChange'].replace(ternaryPoorOutcomeDict)
df['T2_ternaryIPS'] = df['T2_SymptomsChange'].replace(ternaryPoorOutcomeDict)
# Add relative secondary outcomes
df['T0T1_SF12_NormedMCS'] = df['T1_SF12_NormedMCS'] - df['T0_SF12_NormedMCS']
df['T1T2_SF12_NormedMCS'] = df['T2_SF12_NormedMCS'] - df['T1_SF12_NormedMCS']
df['T0T2_SF12_NormedMCS'] = df['T2_SF12_NormedMCS'] - df['T0_SF12_NormedMCS']
df['T0T2_SF12_binaryNormedMCS'] = (df['T0T2_SF12_NormedMCS'] < 0).astype(int)
df['T0T2_SF12_binaryNormedMCS'].loc[df['T0T2_SF12_NormedMCS'].isna()] = np.nan
df['T0T1_SF12_NormedPCS'] = df['T1_SF12_NormedPCS'] - df['T0_SF12_NormedPCS']
df['T1T2_SF12_NormedPCS'] = df['T2_SF12_NormedPCS'] - df['T1_SF12_NormedPCS']
df['T0T2_SF12_NormedPCS'] = df['T2_SF12_NormedPCS'] - df['T0_SF12_NormedPCS']
df['T0T2_SF12_binaryNormedPCS'] = (df['T0T2_SF12_NormedPCS'] < 0).astype(int)
df['T0T2_SF12_binaryNormedPCS'].loc[df['T0T2_SF12_NormedPCS'].isna()] = np.nan
df['T0T1_HADS'] = df['T1_HADS'] - df['T0_HADS']
df['T1T2_HADS'] = df['T2_HADS'] - df['T1_HADS']
df['T0T2_HADS'] = df['T2_HADS'] - df['T0_HADS']
df['T0T2_binaryHADS'] = (df['T0T2_HADS'] < 0).astype(int)
df['T0T2_binaryHADS'].loc[df['T0T2_HADS'].isna()] = np.nan
df['T0T1_PHQNeuro28_Total'] = df['T1_PHQNeuro28_Total'] - df['T0_PHQNeuro28_Total']
df['T1T2_PHQNeuro28_Total'] = df['T2_PHQNeuro28_Total'] - df['T1_PHQNeuro28_Total']
df['T0T2_PHQNeuro28_Total'] = df['T2_PHQNeuro28_Total'] - df['T0_PHQNeuro28_Total']
df['T0T2_binaryPHQNeuro28_Total'] = (df['T0T2_PHQNeuro28_Total'] < 0).astype(int)
df['T0T2_binaryPHQNeuro28_Total'].loc[df['T0T2_PHQNeuro28_Total'].isna()] = np.nan
print('SIMD 2004 to 2006 Postcode conversion...')
SIMD04 = pd.read_csv('raw_data/SIMDData/postcode_2006_2_simd2004.csv', index_col=0)
nullIdx = SIMD04['simd2004rank'].str.contains(' ')
domains = ['inc', 'emp', 'hlth', 'educ', 'access', 'house']
for d in domains:
SIMD04['simd2004_' + d + '_quintile'] = 5-pd.qcut(SIMD04['simd2004_' + d + '_rank']
[~nullIdx].astype(float), 5,
retbins=False, labels=False)
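# The qcut above numbers rank quintiles 0-4 from lowest to highest rank; subtracting from 5 flips this to 1-5 so that, with SIMD ranking 1 as most deprived, quintile 5 marks the most deprived fifth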
SIMDDict = dict(zip([str.replace(' ', '') for str in SIMD04.sort_index().index.values.tolist()],
SIMD04[['simd2004_sc_quintile',
'simd2004score',
'simd2004_inc_score',
'simd2004_emp_score',
'simd2004_hlth_score',
'simd2004_educ_score',
'simd2004_access_score',
'simd2004_house_score',
'simd2004_inc_quintile',
'simd2004_emp_quintile',
'simd2004_hlth_quintile',
'simd2004_educ_quintile',
'simd2004_access_quintile',
'simd2004_house_quintile']].values))
# Initialising variables as NaN arrays
df['T0_SIMD04'] = np.nan
df['T0_SIMD04_score'] = np.nan
for d in domains:
df['T0_SIMD04_' + d + '_score'] = np.nan
df['T0_SIMD04_' + d + '_quintile'] = np.nan
print('Constructed SIMD quintiles and Initialised Panda Variables')
print('Iterating through postcodes')
i = 0
for p in df['Postcode']:
if (p == '') | pd.isnull(p):
df['Postcode'].iloc[i] = np.nan
df['T0_SIMD04'].iloc[i] = np.nan
i = i + 1
# print('No Postcode Data')
else:
try:
p = p.replace(' ', '')
# print(p)
df['T0_SIMD04'].iloc[i] = int(SIMDDict[p][0])
df['T0_SIMD04_score'].iloc[i] = float(SIMDDict[p][1])
dd = 2
for d in domains:
df['T0_SIMD04_' + d + '_score'].iloc[i] = float(SIMDDict[p][dd])
df['T0_SIMD04_' + d + '_quintile'].iloc[i] = int(SIMDDict[p][dd+len(domains)])
dd += 1
except (KeyError, ValueError) as err:
# print('%s: Error!' % (p))
df['T0_SIMD04'].iloc[i] = np.nan
# print('No SIMD04 postcode map')
i = i + 1
# Add most deprived binarisation
df['T0_SIMD04_bin'] = df['T0_SIMD04'] >= 4
# Add interaction variables
df['Diagnosis*T0_IncapacityBenefitorDLA'] = df['Diagnosis']*df['T0_IncapacityBenefitorDLA']
df['ExpGroups*T0_IncapacityBenefitorDLA'] = df['ExpGroups']*df['T0_IncapacityBenefitorDLA']
df['ExpGroups_bin*T0_IncapacityBenefitorDLA'] = df['ExpGroups_bin']*df['T0_IncapacityBenefitorDLA']
df['ExpGroups_bin*T0_LackofPsychAttribution'] = df['ExpGroups_bin']*df['T0_LackofPsychAttribution']
df['ExpGroups_bin*T0_SIMD04_bin'] = df['ExpGroups_bin']*df['T0_SIMD04_bin']
df['ExpGroups_bin*T0_SF12_PF_BinInt'] = df['ExpGroups_bin']*df['T0_SF12_PF_BinInt']
df['ExpGroups_bin*T0_NegExpectation'] = df['ExpGroups_bin']*df['T0_NegExpectation']
df['ExpGroups_bin*Gender_bin'] = df['ExpGroups_bin']*df['Gender_bin']
print('Complete!')
return df
def cohen_d(x, y):
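# Cohen's d with pooled SD: d = (mean_x - mean_y) / s_pooled, where
# s_pooled = sqrt(((n_x-1)*s_x^2 + (n_y-1)*s_y^2) / (n_x + n_y - 2)).
# The variance of d below is the usual large-sample approximation, and the
# 95% CIs are computed as estimate +/- 1.96 * SE.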
stats = {}
nx = len(x); meanx = np.mean(x); stdx = np.std(x, ddof=1); semx = stdx/np.sqrt(nx);
ny = len(y); meany = np.mean(y); stdy = np.std(y, ddof=1); semy = stdy/np.sqrt(ny);
meancix = [meanx+(1.96*i*semx) for i in [-1, 1]]
meanciy = [meany+(1.96*i*semy) for i in [-1, 1]]
dof = nx + ny - 2
d = (meanx - meany) / np.sqrt(((nx-1)*stdx ** 2 +
(ny-1)*stdy ** 2) / dof)
vard = (((nx+ny)/(nx*ny))+((d**2)/(2*(nx+ny-2))))*((nx+ny)/(nx+ny-2))
sed = np.sqrt(vard)
cid = [d+(1.96*i*sed) for i in [-1, 1]]
stats['d'] = d
stats['cid'] = cid
stats['mean'] = [meanx, meany]
stats['std'] = [stdx, stdy]
stats['sem'] = [semx, semy]
return d, stats
def cramersV(nrows, ncols, chisquared, correct_bias=True):
nobs = nrows*ncols
if correct_bias is True:
phi = 0
else:
phi = chisquared/nobs
V = np.sqrt((phi**2)/(min(nrows-1, ncols-1)))
return V, phi
def partitionData(df, partitionRatio=0.7):
""" Partition data into training and evaluation sets
Takes a dataframe and returns two arrays with the proportion to use for
training declared as the partition ratio and the other as evaluation of
(1-partitionRatio) size.
Args:
df: Pandas DataFrame to be partitioned.
partitionRatio: Ratio of the data to be used for training.
Returns:
trainIdx: The indices of data assigned to the training set.
evalIdx: The indices of data assigned to the eval set.
Raises:
NONE
"""
randIdx = np.linspace(0, df.shape[0]-1, df.shape[0]).astype(int)
np.random.shuffle(randIdx)
trainIdx = randIdx[0:round(df.shape[0]*partitionRatio)]
evalIdx = randIdx[round(df.shape[0]*(partitionRatio)):len(randIdx)]
return trainIdx, evalIdx
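# Example: trainIdx, evalIdx = partitionData(df, 0.7) gives a random ~70/30 split of positional row indices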
def FollowUpandBaselineComparison(df):
""" A group-wise and follow-up wise comparison of declared Vars
Takes a pandas dataframe and, for the declared variables of interest below,
compares between groups and between those lost to follow-up and those retained.
Args:
df: Pandas DataFrame to be assessed.
Returns:
NONE: All relevant tables are exported to CSV in the function.
Raises:
NONE
"""
def sigTest(G, varList, vType, df):
sigDict = {}
if vType == 'cat':
for v in varList:
T = pd.crosstab(index=df[G], columns=df[v],
margins=False, normalize=False)
chi2Stat, chi2p, _, _ = stats.chi2_contingency(T, correction=True)
cats = np.unique(df[v].dropna())
if len(cats) == 2:
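# For 2x2 tables compute a Wald odds ratio: LOR = log((a*d)/(b*c)), SE = sqrt(1/a + 1/b + 1/c + 1/d), 95% CI = exp(LOR -/+ 1.96*SE)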
LOR = np.log((T.iloc[0,0]*T.iloc[1,1])/(T.iloc[1,0]*T.iloc[0,1]))
SE = np.sqrt((1/T.iloc[0,0])+(1/T.iloc[1,0])+(1/T.iloc[0,1])+(1/T.iloc[1,1]))
CI = [np.exp(LOR-1.96*SE), np.exp(LOR+1.96*SE)]
OR = np.exp(LOR)
else:
OR = np.nan
CI = np.nan
sigDict[v] = [chi2p, chi2Stat, OR, CI]
elif vType == 'cont':
for v in varList:
if G == 'ExpGroups':
Gi = [1, 2]
elif G == 'T2_HCData':
Gi = [0, 1]
elif G == 'T2_poorCGI':
Gi = [0, 1]
cm = CompareMeans.from_data(df[v][(df[G] == Gi[0]) & (df[v].notna())],
df[v][(df[G] == Gi[1]) & (df[v].notna())])
tStat, tp, _ = cm.ttest_ind()
cohend, cohenstat = cohen_d(cm.d1.data, cm.d2.data)
sigDict[v] = [tp, tStat, cohend, cohenstat['cid']]
sigT = pd.DataFrame.from_dict(sigDict, orient='index', columns=['p', 'stat', 'effect', 'effectCI'])
return sigT
def varTables(G, varList, vType, df):
if vType == 'cont':
T = df.groupby(G)[varList].agg([('N', 'count'), ('Mean', 'mean'),
('SD', 'std')])
elif vType == 'cat':
T = df.groupby(G)[varList].\
agg([('N', 'count'),
('i', lambda x:
tuple(np.unique(x[~np.isnan(x)],
return_counts=True)[0])),
('N(i)', lambda x:
tuple(np.unique(x[~np.isnan(x)],
return_counts=True)[1])),
('%', lambda x:
tuple(np.unique(x[~np.isnan(x)],
return_counts=True)[1]/sum(~np.isnan(x))))])
return T
contVars = ['Age', 'T0_PHQ13_Total', 'T0_PHQNeuro28_Total', 'T0_HADS', 'T0_IllnessWorry', 'T0_SF12_PF']
catVars = ['AgeBins', 'Gender', 'ExpGroups', 'T0_PHQ13_Binned', 'T0_SF12_PF', 'T0_HADS_Binned',
'T0_NegExpectation', 'T0_PsychAttribution', 'T0_IllnessWorry', 'T0_IncapacityBenefitorDLA', 'T0_SIMD04_bin',
'ExpGroups_bin*T0_IncapacityBenefitorDLA', 'ExpGroups_bin*T0_LackofPsychAttribution','T0_Inemployment']
groupVar = 'T2_HCData'
catT = varTables(G=groupVar, varList=catVars, vType='cat', df=df)
catStats = sigTest(G=groupVar, varList=catVars, vType='cat', df=df)
catT.transpose().to_csv('output/0_FollowUpCategoricalTable.tsv', sep='\t')
catStats.transpose().to_csv('output/0_FollowUpCategoricalStats.tsv', sep='\t')
contT = varTables(G=groupVar, varList=contVars, vType='cont', df=df)
contStats = sigTest(G=groupVar, varList=contVars, vType='cont', df=df)
contT.transpose().to_csv('output/0_FollowUpContinuousTable.tsv', sep='\t')
contStats.transpose().to_csv('output/0_FollowUpContinuousStats.tsv', sep='\t')
groupVar = 'ExpGroups'
catT = varTables(G=groupVar, varList=catVars, vType='cat', df=df[df.T2_HCData == 1])
catStats = sigTest(G=groupVar, varList=catVars, vType='cat', df=df[df.T2_HCData == 1])
catT.transpose().to_csv('output/0_BaselineCategoricalTable.tsv', sep='\t')
catStats.transpose().to_csv('output/0_BaselineCategoricalStats.tsv', sep='\t')
contT = varTables(G=groupVar, varList=contVars, vType='cont', df=df[df.T2_HCData == 1])
contStats = sigTest(G=groupVar, varList=contVars, vType='cont', df=df[df.T2_HCData == 1])
contT.transpose().to_csv('output/0_BaselineContinuousTable.tsv', sep='\t')
contStats.transpose().to_csv('output/0_BaselineContinuousStats.tsv', sep='\t')
groupVar = 'T2_poorCGI'
catT = varTables(G=groupVar, varList=catVars, vType='cat', df=df[df.T2_HCData == 1])
catStats = sigTest(G=groupVar, varList=catVars, vType='cat', df=df[df.T2_HCData == 1])
catT.transpose().to_csv('output/0_OutcomeCategoricalTable.tsv', sep='\t')
catStats.transpose().to_csv('output/0_OutcomeCategoricalStats.tsv', sep='\t')
contT = varTables(G=groupVar, varList=contVars, vType='cont', df=df[df.T2_HCData == 1])
contStats = sigTest(G=groupVar, varList=contVars, vType='cont', df=df[df.T2_HCData == 1])
contT.transpose().to_csv('output/0_OutcomeContinuousTable.tsv', sep='\t')
contStats.transpose().to_csv('output/0_OutcomeContinuousStats.tsv', sep='\t')
return
def SNSSPrimaryOutcomeMeasures(df):
""" Compare IPS and CGI outcomes between functional groups.
This function compares CGI and IPS both in raw and pooled form between
functional groups. Outputs tables of counts and proportions of reported outcomes.
Args:
df: Pandas DataFrame to be assessed.
Returns:
NONE: All relevant tables are exported to CSV in the function.
Raises:
NONE
"""
outcomes = [['T2_HealthChange', 'T2_SymptomsChange'], ['T2_poorCGI', 'T2_poorIPS']]
outcomeTag = ['', 'Pool']
i = 0
for O in outcomes:
PrimaryOutcomeGroupT = []
PrimaryOutcomeGroupT.append(pd.crosstab(index=df.ExpGroups, columns=df[O[0]],
margins=False, normalize=False,
dropna=True))
PrimaryOutcomeGroupT.append(pd.crosstab(index=df.ExpGroups, columns=df[O[0]],
margins=False, normalize='index',
dropna=True))
PrimaryOutcomeGroupT.append(pd.crosstab(index=df.ExpGroups, columns=df[O[1]],
margins=False, normalize=False,
dropna=True))
PrimaryOutcomeGroupT.append(pd.crosstab(index=df.ExpGroups, columns=df[O[1]],
margins=False, normalize='index',
dropna=True))
PrimaryOutcomeGroupTExport = pd.concat(PrimaryOutcomeGroupT,
keys=['CGI_N', 'CGI_%',
'IPS_N', 'IPS_%'],
axis=0)
if i:
CGIchi2stat, CGIchi2p, _, _ = stats.chi2_contingency(PrimaryOutcomeGroupT[0],
correction=True)
CGIfisherOR, CGIfisherp = stats.fisher_exact(PrimaryOutcomeGroupT[0])
IPSchi2stat, IPSchi2p, _, _ = stats.chi2_contingency(PrimaryOutcomeGroupT[2],
correction=True)
IPSfisherOR, IPSfisherp = stats.fisher_exact(PrimaryOutcomeGroupT[2])
PrimaryOutcomeGroupTExport['chi2p'] = [CGIchi2p]*4 + [IPSchi2p]*4
PrimaryOutcomeGroupTExport['fisher2p'] = [CGIfisherp]*4 + [IPSfisherp]*4
PrimaryOutcomeGroupTExport.to_csv('output/1_PrimaryOutcome' + outcomeTag[i] + 'byGroup.tsv',
sep='\t')
i = i+1
return
def multi_text(ax, x, y, s, txt_params={}):
""" Matplotlib multi-line text plotting
Takes a matplotlib axes, set of strings and positions and plots.
Args:
ax: Matplotlib axes.
x: Array of x values
y: constant y value
s: Array of strings.
txt_params: Dict of text params.
Returns:
NONE: Text is plotted onto provided axes.
Raises:
NONE
"""
for i in range(len(s)):
ax.text(x[i], y, s[i], **txt_params)
def stackedBarPlot(x_var, y_vars, df, featMetaData):
""" Plots stacked bar charts as per declared variables.
Takes the declared variable names, a dataframe and feature metadata and plots a
stacked bar chart with the X variable subdivided by the Y variables.
Args:
x_var: Names of variables on X_axis
y_vars: Names of variables with which to subdivide X variables.
df: Pandas dataframe to be used.
featMetaData: Variable meta data provided in JSON file.
Returns:
NONE: Figure is saved in function.
Raises:
NONE
"""
if not isinstance(y_vars, list):
y_vars = [y_vars]
fig_params={'num': 1,
'figsize': (6*len(y_vars), 6),
'dpi': 200,
'frameon': False}
txt_params={'fontsize': 6,
'ha': 'center',
'va': 'center'}
label_params={'fontsize': 10,
'ha': 'center',
'va': 'top'}
fig = plt.figure(**fig_params)
sp = 1
for y_var in y_vars:
data = df.dropna(subset=[y_var])
ax_params={'title': featMetaData[y_var]['label'],
'ylabel': 'Normalised Frequency',
'xlabel': y_var}
ax = fig.add_subplot(1, len(y_vars), sp, **ax_params)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
x_cats = np.unique(data[x_var])
x_cats = x_cats[~np.isnan(x_cats)]
x_var_meta = featMetaData[x_var]
y_cats = np.unique(data[y_var])
y_cats = y_cats[~np.isnan(y_cats)]
y_var_meta = featMetaData[y_var]
xMinorTicks = []
xMinorLabels = []
x = 0
bw = 0.8
y_bottom=0
for xc in x_cats:
for yc in y_cats:
y = np.nanmean(data[y_var][data[x_var] == xc] == yc)
t = str(int(round(y*100, 0)))+'%'
ax.bar(x=x, height=y, width=bw,
color=ast.literal_eval(y_var_meta['colors'][y_var_meta['values'].index(yc)]),
bottom=y_bottom)
ax.text(x, y_bottom+(y/2), t, **txt_params)
xMinorLabels.append(x_var_meta['truncvaluelabels'][x_var_meta['values'].index(xc)])
xMinorTicks.append(x)
y_bottom = y+y_bottom
y_bottom=0
x += 1
ax.set_xticks(xMinorTicks)
ax.set_xticklabels(xMinorLabels, **label_params)
sp+=1
fig.savefig('output/1_SNSSPrimaryOutcomeStackedBars.pdf', dpi=300,
format='pdf', pad_inches=0.1, bbox_inches='tight')
plt.close()
def subCatBarPlot(x_vars, x_sub_var, df, featMetaData):
""" Plots stacked bar charts as per declared variables.
Takes the declared variable names, a dataframe and feature metadata and plots a
bar chart with the X variables subdivided by the X_sub variable and the
subdivisions plotted side by side.
Args:
x_vars: Names of variables on X_axis
x_sub_var: Names of variables with which to subdivide X variables.
df: Pandas dataframe to be used.
featMetaData: Variable meta data provided in JSON file.
Returns:
NONE: Figure is saved in function.
Raises:
NONE
"""
if not isinstance(x_vars, list):
x_vars = [x_vars]
print('is not list')
fig_params={'num': 1,
'figsize': (6*len(x_vars), 6),
'dpi': 200,
'frameon': False}
txt_params={'fontsize': 6,
'ha': 'center',
'va': 'bottom'}
label_params={'fontsize': 10,
'ha': 'center',
'va': 'top'}
fig = plt.figure(**fig_params)
sp = 1
for x_var in x_vars:
data = df.dropna(subset=[x_var])
ax_params={'title': featMetaData[x_var]['label'],
'ylabel': 'Normalised Frequency',
'xlabel': ''}
ax = fig.add_subplot(1, len(x_vars), sp, **ax_params)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
x_cats = np.unique(data[x_var])
x_cats = x_cats[~np.isnan(x_cats)]
x_var_meta = featMetaData[x_var]
x_sub_cats = np.unique(data[x_sub_var])
x_sub_cats = x_sub_cats[~np.isnan(x_sub_cats)]
x_sub_var_meta = featMetaData[x_sub_var]
xMinorTicks = []
xMajorTicks = []
xMinorLabels = []
xMajorLabels = []
x = 0
bw = 1
for xc in x_cats:
for xsc in x_sub_cats:
y = np.nanmean(data[x_var][data[x_sub_var] == xsc] == xc)
t = str(int(round(y*100, 0)))+'%'
ax.bar(x=x, height=y, width=bw,
color=x_sub_var_meta['colors'][x_sub_var_meta['values'].index(xsc)])
ax.text(x, y, t, **txt_params)
xMinorLabels.append(x_sub_var_meta['truncvaluelabels'][x_sub_var_meta['values'].index(xsc)])
xMinorTicks.append(x)
x += 1
xMajorLabels.append(x_var_meta['truncvaluelabels'][x_var_meta['values'].index(xc)])
xMajorTicks.append(x-1-((len(x_sub_cats)-1)/2))
x += 1
ax.set_xticks(xMinorTicks)
ax.set_xticklabels(xMinorLabels, **label_params)
multi_text(ax, xMajorTicks, ax.get_ylim()[1]*-0.1, xMajorLabels, label_params)
sp+=1
fig.savefig('output/1_SNSSPrimaryOutcomeBars.pdf', dpi=300,
format='pdf', pad_inches=0.1, bbox_inches='tight')
plt.close()
def primaryOutcomePlot(outcome, group_var, data, featMetaData, style='subCat'):
""" Plots bar charts of declared outcome and grouping var.
Takes declared variables and plots bar chart accordingly.
Args:
outcome: name of outcome variable
group_var: Names grouping variable i.e. X variables to be used
data: Pandas dataframe to be used.
featMetaData: Variable meta data provided in JSON file.
style: Defaults to side-by-side vs stacked plotting.
Returns:
NONE: Figure is saved in respective function.
Raises:
NONE
"""
if style == 'subCat':
subCatBarPlot(outcome, group_var, data, featMetaData)
elif style == 'stacked':
stackedBarPlot(group_var, outcome, data, featMetaData)
def SNSSSecondaryOutcomeMeasures(df):
""" Plots line chart and produced table of secondary SNSS outcomes
Takes pandas dataframe and assesses between group differences over time
of secindary outcome measures including depressino scales and physical/mental
functioning.
Args:
df: Pandas dataframe
Returns:
outcomeT: Table of outcome measures grouped by functional diagnosis.
Raises:
NONE
"""
groupVar = 'ExpGroups'
SNSSVars = loadJSON('raw_data/SNSS_vars.json')
rowDict = dict(zip(SNSSVars[groupVar]['values'],
SNSSVars[groupVar]['valuelabels']))
outcomes = ['T0_SF12_NormedMCS', 'T1_SF12_NormedMCS', 'T2_SF12_NormedMCS',
'T0_SF12_NormedPCS', 'T1_SF12_NormedPCS', 'T2_SF12_NormedPCS',
'T0_PHQNeuro28_Total', 'T1_PHQNeuro28_Total', 'T2_PHQNeuro28_Total',
'T0_HADS', 'T1_HADS', 'T2_HADS',
'T0T1_SF12_NormedMCS', 'T1T2_SF12_NormedMCS',
'T0T1_SF12_NormedPCS', 'T1T2_SF12_NormedPCS',
'T0T1_PHQNeuro28_Total', 'T1T2_PHQNeuro28_Total',
'T0T1_HADS', 'T1T2_HADS']
outcomeT = df.groupby(groupVar)[outcomes].agg([('N', 'count'),
('Mean', 'mean'),
('SD', 'std'),
('CI', lambda x:
tuple(np.round(
DescrStatsW(x.dropna()).
tconfint_mean(), 2)))])
# Significance testing
for O in outcomes:
NE = (df.ExpGroups == 1) & (df[O].notna())
E = (df.ExpGroups == 2) & (df[O].notna())
cm = CompareMeans.from_data(df[O].loc[NE], df[O].loc[E])
outcomeT[O, 'tTestp'] = [cm.ttest_ind()[1]]*2
outcomeT[O, 'cohend'], _ = cohen_d(cm.d1.data, cm.d2.data)
outcomeT = outcomeT.sort_index(axis=1)
outcomeT.rename(index=rowDict).transpose().\
to_csv('output/2_SecondaryOutcomeMeasures.tsv', sep='\t')
return outcomeT
def plot_ci(ax, x, y, color, style='t'):
if style == 't':
for i in range(len(y)):
ax.plot([x[i], x[i]], [y[i][0], y[i][1]],
color=color, alpha=0.4,
marker='_', linewidth=2)
def lineTimeSeriesPlot(y_vars, groupVar, df, featMetaData):
fig_params={'num': 1,
'figsize': (6*4, 6),
'dpi': 200,
'frameon': False}
txt_params={'fontsize': 6,
'ha': 'center',
'va': 'center'}
label_params={'fontsize': 10,
'ha': 'center',
'va': 'top'}
fig = plt.figure(**fig_params)
grps = np.unique(df[groupVar])
grps = grps[~np.isnan(grps)]
groupVar_meta = featMetaData[groupVar]
sp = 1
time = [0, 3, 12]
for y_var_group in y_vars:
for y_var in y_var_group:
ax_params={'title': y_var[0],
'ylabel': 'Secondary Measure',
'xlabel': 'Time',
'xticks': [0, 3, 12],
'xticklabels': ['Baseline', '3 Months', '12 Months']}
ax = fig.add_subplot(1, 4, sp, **ax_params)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
grp_jitter = [0.1, 0.1, 0.1]
for grp in grps:
mean_array = []
ci_array = []
for T_var in y_var:
data = df.dropna(subset=[T_var])
mean_array.append(np.nanmean(data[T_var][data[groupVar] == grp]))
ci_array.append(DescrStatsW(data[T_var][data[groupVar] == grp]).tconfint_mean())
ax.plot(time, mean_array, color=groupVar_meta['colors'][groupVar_meta['values'].index(grp)],
alpha=0.9, linewidth=4)
plot_ci(ax, time, ci_array, groupVar_meta['colors'][groupVar_meta['values'].index(grp)],
't')
# ax.set_ylim([0, ax.get_ylim()[1]])
sp += 1
fig.subplots_adjust(wspace=0.3)
fig.savefig('output/2_SNSSSecondaryOutcomePlot.pdf', dpi=300,
format='pdf', pad_inches=0.1, bbox_inches='tight')
plt.close()
# color=groupVar_meta['colors'][groupVar_meta['values'].index(grp)]
def secondaryOutcomePlot(outcome, groupVar, df, featMetaData, style='line'):
if style == 'line':
lineTimeSeriesPlot(outcome, groupVar, df, featMetaData)
def SNSSSocioeconomicAssessment(df):
""" Multiple plots comparing SIMD quintile to functional diagnosis and outcome
Takes pandas dataframe and plots SIMD quintiles as per each functional Diagnosis
and primary and secondary outcomes.
Args:
df: Pandas dataframe
Returns:
NONE: All plots saved within function.
Raises:
NONE
"""
# Figure & Table 1: Are functional vs structural patients from different SIMD quintiles?
SIMDGroupT = []
SIMDGroupT.append(pd.crosstab(index=[df.ExpGroups], columns=df.T0_SIMD04,
margins=False, normalize='index',
dropna=True))
SIMDGroupT.append(pd.crosstab(index=[df.ExpGroups], columns=df.T0_SIMD04,
margins=False, normalize=False,
dropna=True))
SIMDGroupTExport = pd.concat(SIMDGroupT, keys=['N', '%'])
SIMDGroupTExport.to_csv('output/3_DeprivationGroups.tsv', sep='\t')
SIMDOutcomeT = []
SIMDOutcomeT.append(pd.crosstab(index=[df.ExpGroups, df.T2_poorCGI], columns=df.T0_SIMD04,
margins=False, normalize=False,
dropna=True))
SIMDOutcomeT.append(pd.crosstab(index=[df.ExpGroups, df.T2_poorCGI], columns=df.T0_SIMD04,
margins=False, normalize='index',
dropna=True))
SIMDOutcomeT.append(pd.crosstab(index=[df.ExpGroups, df.T2_poorIPS], columns=df.T0_SIMD04,
margins=False, normalize=False,
dropna=True))
SIMDOutcomeT.append(pd.crosstab(index=[df.ExpGroups, df.T2_poorIPS], columns=df.T0_SIMD04,
margins=False, normalize='index',
dropna=True))
SIMDOutcomeTExport = pd.concat(SIMDOutcomeT, keys=['CGI_N', 'CGI_%', 'IPS_N', 'IPS_%'])
SIMDOutcomeTExport.to_csv('output/3_DeprivationOutcomeAndGroup.tsv', sep='\t')
fig1 = plt.figure(num=1, figsize=(5, 5), dpi=200, frameon=False)
ax = fig1.add_subplot(111)
sb.distplot(df.T0_SIMD04[(df.T0_SIMD04.notna()) & (df.ExpGroups == 1)],
ax=ax, kde=False, norm_hist=True, bins=5,
kde_kws={'bw': 0.55}, hist_kws={'rwidth': 0.8},
color='xkcd:blood red')
sb.distplot(df.T0_SIMD04[(df.T0_SIMD04.notna()) & (df.ExpGroups == 2)],
ax=ax, kde=False, norm_hist=True, bins=5,
kde_kws={'bw': 0.55}, hist_kws={'rwidth': 0.8},
color='xkcd:ocean blue')
ax.set_xlabel('SIMD04 Quintile')
ax.set_xticks(np.linspace(start=1.4, stop=4.6, num=5))
ax.set_xticklabels(['1 (Least Deprived)',
'2', '3', '4',
'5 (Most Deprived)'],
rotation=45, ha='right', fontsize=8)
ax.set_ylabel('Proportion')
ax.set_xlim([1, 5])
ax.legend(labels=['Not Explained', 'Explained'],
bbox_to_anchor=(1.25, 1), loc=1)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
fig1.savefig('output/3_SNSSSocioeconomicGroups.pdf', dpi=300,
format='pdf', pad_inches=0.1, bbox_inches='tight')
plt.close()
# Figure 2: Does SIMD correlate with socioeconomic questions in SNSS and are outcomes different?
contOutcomes = ['T0_PHQNeuro28_Total',
'T0_HADS',
'T0_SF12_NormedMCS',
'T0_SF12_NormedPCS',
'T0T2_PHQNeuro28_Total',
'T0T2_HADS',
'T0T2_SF12_NormedMCS',
'T0T2_SF12_NormedPCS']
catOutcomes = ['T0_Inemployment',
'T0_IncapacityBenefitorDLA',
'T2_poorIPS',
'T2_poorCGI']
ylabels = ['Symptom Count (Baseline)',
'HADS Score (Baseline)',
'SF12 MCS (Baseline)',
'SF12 PCS (Baseline)',
'Symptom Count (12 Month Change)',
'HADS Score (12 Month Change)',
'SF12 MCS (12 Month Change)',
'SF12 PCS (12 Month Change)',
'% in Employment (Baseline)',
'% in Receipt of DLA (Baseline)',
'% Reporting Poor IPS (12 Months)',
'% Reporting Poor CGI (12 Months)']
fig2 = plt.figure(num=1, figsize=(16, 12), dpi=200, frameon=False)
i = 0
ax = []
for o in contOutcomes:
ax.append(fig2.add_subplot(3, 4, i+1))
sb.boxplot(x='ExpGroups', y=o, hue='T0_SIMD04',
data=df, ax=ax[i],
palette=sb.cubehelix_palette(5, start=0, reverse=False),
flierprops={'marker': '+'})
ax[i].set_xticklabels(labels=['Unexplained', 'Explained'])
ax[i].set_ylabel(ylabels[i])
if i == 3:
handles, _ = ax[i].get_legend_handles_labels()
ax[i].legend(handles=handles, labels=['1 (Least Deprived)',
'2', '3', '4',
'5 (Most Deprived)'],
bbox_to_anchor=(1.55, 1), loc=1)
else:
ax[i].legend_.remove()
i = i+1
for o in catOutcomes:
ax.append(fig2.add_subplot(3, 4, i+1))
sb.barplot(x='ExpGroups', y=o, hue='T0_SIMD04', data=df,
palette=sb.cubehelix_palette(5, start=0, reverse=False),
ax=ax[i])
ax[i].set_ylabel(ylabels[i])
ax[i].set_xticklabels(labels=['Unexplained', 'Explained'])
ax[i].set_ylim([0, 1])
ax[i].legend_.remove()
i = i+1
fig2.subplots_adjust(wspace=0.3, hspace=0.3)
fig2.savefig('output/3_SNSSSocioeconomicAssessment.pdf', dpi=300,
format='pdf', pad_inches=0.1, bbox_inches='tight')
plt.close()
# Figure 3: Do individual domains differ in outcome (!!! Not population weighted)
for Y in ['T0_Inemployment', 'T0_IncapacityBenefitorDLA', 'T2_poorCGI']:
fig3 = plt.figure(num=1, figsize=(9, 6), dpi=200, frameon=False)
i = 0
ax = []
domains = ['inc', 'emp', 'hlth', 'educ', 'access', 'house']
for d in domains:
ax.append(fig3.add_subplot(2, 3, i+1))
sb.barplot(x='ExpGroups', y=Y, hue='T0_SIMD04_' + d + '_quintile', data=df,
palette=sb.cubehelix_palette(5, start=0, reverse=False),
ax=ax[i])
# ax[i].set_ylabel(ylabels[i])
ax[i].set_xticklabels(labels=['Unexplained', 'Explained'])
ax[i].set_ylim([0, 1])
ax[i].legend_.remove()
ax[i].set_title(d)
i = i+1
fig3.subplots_adjust(wspace=0.3, hspace=0.3)
plt.close()
# sb.violinplot(x='ExpGroups', y='T0_SIMD04_access_score', hue='T2_SymptomsChange',
# palette=sb.cubehelix_palette(5, start=2, reverse=True), data=df)
fig3.savefig('output/3_SNSSSocioeconomicDomainsAssessment_' + Y + '.pdf', dpi=300,
format='pdf', pad_inches=0.1, bbox_inches='tight')
return
def performanceMetrics(trainDat, evalDat):
""" General function for assessing training vs eval performance
Takes two arrays of Nx2 size. Each array is made up of a TRUE label [0] and
a PREDICTED Score [1], the arrays are training and eval. The function computes
binary or multivariate performance metrics and outputs a dictionary.
Args:
trainDat: An Nx2 array of true labels and predicted scores for the training set.
evalDat: An Nx2 array of true labels and predicted scores for the eval set.
Returns:
perfDict: A dictionary which includes the original scores and labels as well as
all computed metrics.
Raises:
NONE
"""
perfDict = {}
nClasses = len(np.unique(trainDat[0]))
dLabels = ['train', 'eval']
i = 0
for d in [trainDat, evalDat]:
true = d[0]
score = d[1]
if nClasses == 2: # If binary classification problem...
perfDict['problemType'] = 'binaryProblem'
# Calculate 'optimal' ROC operating threshold to assign binary pred.
fpr, tpr, t = roc_curve(true, score)
optimalIdx = np.argmax(tpr - fpr)
optimalThreshold = t[optimalIdx]
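# This threshold maximises Youden's J statistic (TPR - FPR, i.e. sensitivity + specificity - 1)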
pred = np.reshape((score >= optimalThreshold).astype(int), [len(score), ])
# Compute Accuracy Scores
Acc = accuracy_score(true, pred, normalize=True)
Auroc = roc_auc_score(true, score)
R2 = r2_score(true, pred)
f1 = f1_score(true, pred)
precision = precision_score(true, pred, average='binary')
recall = recall_score(true, pred, average='binary')
CM = confusion_matrix(true, pred)
TN = CM[0][0]
TP = CM[1][1]
FN = CM[1][0]
FP = CM[0][1]
Sens = TP/(TP+FN)
Spec = TN/(TN+FP)
perfDict[dLabels[i] + 'True'] = true
perfDict[dLabels[i] + 'Pred'] = pred
perfDict[dLabels[i] + 'Score'] = score
perfDict[dLabels[i] + 'Acc'] = Acc
perfDict[dLabels[i] + 'Auroc'] = Auroc
perfDict[dLabels[i] + 'R2'] = R2
perfDict[dLabels[i] + 'F1'] = f1
perfDict[dLabels[i] + 'Precision'] = precision
perfDict[dLabels[i] + 'Recall'] = recall
perfDict[dLabels[i] + 'CM'] = CM
perfDict[dLabels[i] + 'Sens'] = Sens
perfDict[dLabels[i] + 'Spec'] = Spec
perfDict[dLabels[i] + 'OperatingThreshold'] = optimalThreshold
i += 1
else: # If multiclass classification problem...
perfDict['problemType'] = 'multiClassProblem'
pred = np.argmax(score, axis=1)
Acc = accuracy_score(true, pred, normalize=True)
CM = confusion_matrix(true, pred)
microPrecision = precision_score(true, pred, average='micro')
microRecall = recall_score(true, pred, average='micro')
macroPrecision = precision_score(true, pred, average='macro')
macroRecall = recall_score(true, pred, average='macro')
# microAuroc = roc_auc_score(true, score, average='micro')
# macroAuroc = roc_auc_score(true, score, average='macro')
perfDict[dLabels[i] + 'True'] = true
perfDict[dLabels[i] + 'Pred'] = pred
perfDict[dLabels[i] + 'Score'] = score
perfDict[dLabels[i] + 'Acc'] = Acc
perfDict[dLabels[i] + 'CM'] = CM
perfDict[dLabels[i] + 'Precision'] = microPrecision
perfDict[dLabels[i] + 'Recall'] = microRecall
perfDict[dLabels[i] + 'MicroPrecision'] = microPrecision
perfDict[dLabels[i] + 'MicroRecall'] = microRecall
perfDict[dLabels[i] + 'MacroPrecision'] = macroPrecision
perfDict[dLabels[i] + 'MacroRecall'] = macroRecall
# perfDict[dLabels[i] + 'Auroc'] = microAuroc
# perfDict[dLabels[i] + 'MicroAuroc'] = microAuroc
# perfDict[dLabels[i] + 'MacroAuroc'] = macroAuroc
i += 1
return perfDict
def UVLogisticRegression_v2(df, featureSet, outcomeVar, featMetaData, featDataTypeDict,
dummyExceptionDict, trainIdx=[], evalIdx=[]):
""" Conducts univariable logistic regression for every variable in the feature set.
Takes a dataframe, feature set and outcome variable and conducts univariable logistic modelling.
Args:
df: The pandas dataframe object
featureSet: The names of columns to be assess as inputs into the model. (Exog)
outcomeVar: The outcome variable. (Endog)
featMetaData: Feature meta data for constructing legible tables etc.
featDataTypeDict: Feature data type dictionary, e.g. discrete vs continuous. NOTE: variables are treated
accordingly in the models; discrete variables are dummy-encoded automatically.
dummyExceptionDict: A dictionary of variables and the value to use as the dummy variable if not the first.
trainIdx: Dataframe indices for observations to be used for training. If empty all will be used.
evalIdx: Dataframe indices for observations to be used for evaluation. If empty all will be used.
Returns:
UVMdlExportT: Returns the model results in a table of OR CIs and p-values.
mdlArray: Returns an array of statsmodels regression objects.
modelSummaryInfoDict: A dict of dictionaries containing summary statistics about each model.
Raises:
NONE
"""
mdlExportTArray = []
mdlArray = []
modelSummaryInfoDict = {}
for P in featureSet: # For each feature construct k-1 dummy array and construct model.
# Exclude missing data from featureSet subset
rDat = df.dropna(subset=[P] + [outcomeVar])
# Initialise feat & outcome arrays
outcome = np.asarray(rDat[outcomeVar]).astype(int) # Initialise outcome array, MUST BE BINARY
feats = np.ones([len(rDat), 1]).astype(int) # Initialise dummy feat array with constant
featNames = ['constant'] # Initialise dummy featNames array with constant
featNameIndex = ['constant']
sigTestIdx = {}
modelSummaryInfo = {}
if featDataTypeDict[P] in ['nominal', 'binary']:
if rDat[P].dtype.name != 'category': # If not a categorical then convert...
rDat[P] = pd.Categorical(rDat[P])
# Drop single category as constant.
# Decision based on dummy exception dict, defaults to first category.
try:
X = pd.get_dummies(rDat[P], drop_first=False).drop(axis=1,
columns=dummyExceptionDict[P])
except (KeyError) as err:
X = pd.get_dummies(rDat[P], drop_first=True)
import unittest
import qteasy as qt
import pandas as pd
from pandas import Timestamp
import numpy as np
from numpy import int64
import itertools
import datetime
from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list
from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day, prev_market_trade_day
from qteasy.utilfuncs import next_market_trade_day
from qteasy.space import Space, Axis, space_around_centre, ResultPool
from qteasy.core import apply_loop
from qteasy.built_in import SelectingFinanceIndicator
from qteasy.history import stack_dataframes
from qteasy.tsfuncs import income, indicators, name_change, get_bar
from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index
from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite
from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily
from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic
from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv
from qteasy.evaluate import eval_info_ratio, eval_max_drawdown, eval_sharp
from qteasy.evaluate import eval_volatility
from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point
from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr
from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc
from qteasy.tafuncs import macdfix, mfi, minus_di, minus_dm, mom, plus_di, plus_dm
from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi
from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange
from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod
from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows
from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside
from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby
from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu
from qteasy.tafuncs import cdlconcealbabyswall, cdlcounterattack, cdldarkcloudcover
from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing
from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite
from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami
from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod
from qteasy.tafuncs import cdlhomingpigeon, cdlidentical3crows, cdlinneck
from qteasy.tafuncs import cdlinvertedhammer, cdlkicking, cdlkickingbylength
from qteasy.tafuncs import cdlladderbottom, cdllongleggeddoji, cdllongline, cdlmarubozu
from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar
from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlrisefall3methods
from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop
from qteasy.tafuncs import cdlstalledpattern, cdlsticksandwich, cdltakuri, cdltasukigap
from qteasy.tafuncs import cdlthrusting, cdltristar, cdlunique3river, cdlupsidegap2crows
from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle
from qteasy.tafuncs import linearreg_intercept, linearreg_slope, stddev, tsf, var, acos
from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh
from qteasy.tafuncs import sqrt, tan, tanh, add, div, max, maxindex, min, minindex, minmax
from qteasy.tafuncs import minmaxindex, mult, sub, sum
from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data
from qteasy.database import DataSource
from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs
class TestCost(unittest.TestCase):
def setUp(self):
self.amounts = np.array([10000, 20000, 10000])
self.op = np.array([0, 1, -0.33333333])
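# op holds per-asset trade signals: 0 = no action, 1 = buy, and -1/3 sells one third of the 10000-unit position (hence the -3333.33 expected in the selling tests below)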
self.prices = np.array([10, 20, 10])
self.r = qt.Cost()
def test_rate_creation(self):
print('testing rates objects\n')
self.assertIsInstance(self.r, qt.Cost, 'Type should be Cost')
def test_rate_operations(self):
self.assertEqual(self.r['buy_fix'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['sell_fix'], 0.0, 'Item got is wrong')
self.assertEqual(self.r['buy_rate'], 0.003, 'Item got is incorrect')
self.assertEqual(self.r['sell_rate'], 0.001, 'Item got is incorrect')
self.assertEqual(self.r['buy_min'], 5., 'Item got is incorrect')
self.assertEqual(self.r['sell_min'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['slipage'], 0.0, 'Item got is incorrect')
self.assertEqual(np.allclose(self.r(self.amounts), [0.003, 0.003, 0.003]), True, 'fee calculation wrong')
def test_rate_fee(self):
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nSell result with fixed rate = 0.001 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 1:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 1))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect')
def test_min_fee(self):
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 300
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 10))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 10)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33033.333)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33030)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 32700)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
def test_rate_with_min(self):
"""Test transaction cost calculated by rate with min_fee"""
self.r.buy_rate = 0.0153
self.r.sell_rate = 0.01
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 333
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 301.3887520929774, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 10))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 10)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32999.99967)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.33333)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32996.7)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.3)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32667.0)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.0)
def test_fixed_fee(self):
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 200
self.r.sell_fix = 150
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nselling result of fixed cost with fixed fee = 150 and moq=0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 0))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], 33183.333, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150.0, msg='result incorrect')
print('\nselling result of fixed cost with fixed fee = 150 and moq=100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3300.]), True,
f'result incorrect, {test_fixed_fee_result[0]} does not equal [0, 0, -3300]')
self.assertAlmostEqual(test_fixed_fee_result[1], 32850., msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150., msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 990., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18200.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
def test_slipage(self):
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.slipage = 1E-9
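# a tiny slippage raises the effective purchase price, so slightly fewer shares are
# bought for the same 20000 of cash and the total fee rises (about 60.22 below vs 59.82
# for the plain 0.3% rate); the exact slippage model is the library's, not re-derived here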
print('\npurchase result of fixed rate = 0.003 and slippage = 1E-9 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
print('\npurchase result of fixed rate = 0.003 and slippage = 1E-9 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
print('\nselling result with fixed rate = 0.001 and slippage = 1E-9:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True,
f'{test_fixed_fee_result[0]} does not equal [0, 0, -3333.3333]')
self.assertAlmostEqual(test_fixed_fee_result[1], 33298.88855591,
msg=f'{test_fixed_fee_result[1]} does not equal 33298.88855591')
self.assertAlmostEqual(test_fixed_fee_result[2], 34.44444409,
msg=f'{test_fixed_fee_result[2]} does not equal 34.44444409')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 996.98909294, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 60.21814121353513, msg='result incorrect')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18054.36, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 54.36, msg='result incorrect')
class TestSpace(unittest.TestCase):
def test_creation(self):
"""
test if creation of space object is fine
"""
# first group of inputs, output Space with two discr axis from [0,10]
print('testing space objects\n')
# pars_list = [[(0, 10), (0, 10)],
# [[0, 10], [0, 10]]]
#
# types_list = ['discr',
# ['discr', 'discr']]
#
# input_pars = itertools.product(pars_list, types_list)
# for p in input_pars:
# # print(p)
# s = qt.Space(*p)
# b = s.boes
# t = s.types
# # print(s, t)
# self.assertIsInstance(s, qt.Space)
# self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
# self.assertEqual(t, ['discr', 'discr'], 'types incorrect')
#
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = ['foo, bar',
['foo', 'bar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['enum', 'enum'], 'types incorrect')
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = [['discr', 'foobar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['discr', 'enum'], 'types incorrect')
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types='conti, enum')
self.assertEqual(s.types, ['conti', 'enum'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 2))
self.assertEqual(s.shape, (np.inf, 2))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(1, 2), (2, 3), (3, 4)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['discr', 'discr', 'discr'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (2, 2, 2))
self.assertEqual(s.shape, (2, 2, 2))
self.assertEqual(s.count, 8)
self.assertEqual(s.boes, [(1, 2), (2, 3), (3, 4)])
pars_list = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
pars_list = [((1, 2, 3), (2, 3, 4), (3, 4, 5))]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum'])
self.assertEqual(s.dim, 1)
self.assertEqual(s.size, (3,))
self.assertEqual(s.shape, (3,))
self.assertEqual(s.count, 3)
pars_list = ((1, 2, 3), (2, 3, 4), (3, 4, 5))
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
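# note for the next test: extract(n, 'interval') samples each axis with step n, so a
# discrete axis on [0, 10] yields the 4 points 0, 3, 6, 9 and two such axes yield
# 16 combinations, matching the counts asserted in test_extract() below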
def test_extract(self):
"""
:return:
"""
pars_list = [(0, 10), (0, 10)]
types_list = ['discr', 'discr']
s = Space(pars=pars_list, par_types=types_list)
extracted_int, count = s.extract(3, 'interval')
extracted_int_list = list(extracted_int)
print('extracted int\n', extracted_int_list)
self.assertEqual(count, 16, 'extraction count wrong!')
self.assertEqual(extracted_int_list, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
extracted_rand, count = s.extract(10, 'rand')
extracted_rand_list = list(extracted_rand)
self.assertEqual(count, 10, 'extraction count wrong!')
print('extracted rand\n', extracted_rand_list)
for point in list(extracted_rand_list):
self.assertEqual(len(point), 2)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
extracted_int2, count = s.extract(3, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list2 = list(extracted_int2)
self.assertEqual(extracted_int_list2, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
print('extracted int list 2\n', extracted_int_list2)
self.assertIsInstance(extracted_int_list2[0][0], float)
self.assertIsInstance(extracted_int_list2[0][1], (int, np.int64))
extracted_rand2, count = s.extract(10, 'rand')
self.assertEqual(count, 10, 'extraction count wrong!')
extracted_rand_list2 = list(extracted_rand2)
print('extracted rand list 2:\n', extracted_rand_list2)
for point in extracted_rand_list2:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], float)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], (int, np.int64))
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), ('a', 'b')]
s = Space(pars=pars_list, par_types='enum, enum')
extracted_int3, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list3 = list(extracted_int3)
self.assertEqual(extracted_int_list3, [(0., 'a'), (0., 'b'), (10, 'a'), (10, 'b')],
'space extraction wrong!')
print('extracted int list 3\n', extracted_int_list3)
self.assertIsInstance(extracted_int_list3[0][0], float)
self.assertIsInstance(extracted_int_list3[0][1], str)
extracted_rand3, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list3 = list(extracted_rand3)
print('extracted rand list 3:\n', extracted_rand_list3)
for point in extracted_rand_list3:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (float, int))
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], str)
self.assertIn(point[1], ['a', 'b'])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14))]
s = Space(pars=pars_list, par_types='enum')
extracted_int4, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list4 = list(extracted_int4)
expected4 = [(0, 10), (1, 'c'), ('a', 'b'), (1, 14)]
for item, item2 in zip(extracted_int_list4, expected4):
print(item, item2)
self.assertTrue(all([tuple(ext_item) == exp for ext_item, exp in zip(extracted_int_list4, expected4)]))
print('extracted int list 4\n', extracted_int_list4)
self.assertIsInstance(extracted_int_list4[0], tuple)
extracted_rand4, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list4 = list(extracted_rand4)
print('extracted rand list 4:\n', extracted_rand_list4)
for point in extracted_rand_list4:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (int, str))
self.assertIn(point[0], [0, 1, 'a'])
self.assertIsInstance(point[1], (int, str))
self.assertIn(point[1], [10, 14, 'b', 'c'])
self.assertIn(point, [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14)), (1, 4)]
s = Space(pars=pars_list, par_types='enum, discr')
extracted_int5, count = s.extract(1, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list5 = list(extracted_int5)
for item, item2 in extracted_int_list5:
print(item, item2)
self.assertTrue(all([point[0] in [(0, 10), (1, 'c'), ('a', 'b'), (1, 14)] for point in extracted_int_list5]))
self.assertTrue(all([point[1] in [1, 2, 3, 4] for point in extracted_int_list5]))
print('extracted int list 5\n', extracted_int_list5)
self.assertIsInstance(extracted_int_list5[0], tuple)
extracted_rand5, count = s.extract(5, 'rand')
self.assertEqual(count, 5, 'extraction count wrong!')
extracted_rand_list5 = list(extracted_rand5)
print('extracted rand list 5:\n', extracted_rand_list5)
for point in extracted_rand_list5:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], tuple)
print(f'type of point[1] is {type(point[1])}')
self.assertIsInstance(point[1], (int, np.int64))
self.assertIn(point[0], [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
print(f'test incremental extraction')
pars_list = [(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]
s = Space(pars_list)
ext, count = s.extract(64, 'interval')
self.assertEqual(count, 4096)
points = list(ext)
# all points have been extracted; build ten subspaces around ten of them
# check that each subspace is a Space and lies within s, extract points with 32 intervals per axis and verify the counts
for point in points[1000:1010]:
subspace = s.from_point(point, 64)
self.assertIsInstance(subspace, Space)
self.assertTrue(subspace in s)
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
ext, count = subspace.extract(32)
sub_points = list(ext)
self.assertGreaterEqual(count, 512)
self.assertLessEqual(count, 4096)
print(f'\n---------------------------------'
f'\nthe space created around point <{point}> is'
f'\n{subspace.boes}'
f'\nand extracted {count} points, the first 5 are:'
f'\n{sub_points[:5]}')
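# note for the next test: an Axis infers its type from its bounds; a float bound
# gives 'conti', two ints give 'discr', and a longer tuple gives 'enum', as the
# assertions in test_axis_extract() below show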
def test_axis_extract(self):
# test axis object with conti type
axis = Axis((0., 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'conti')
self.assertEqual(axis.axis_boe, (0., 5.))
self.assertEqual(axis.count, np.inf)
self.assertEqual(axis.size, 5.0)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [0., 1., 2., 3., 4.]))
self.assertTrue(np.allclose(axis.extract(0.5, 'int'), [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]))
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(0 <= item <= 5) for item in extracted]))
# test axis object with discrete type
axis = Axis((1, 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'discr')
self.assertEqual(axis.axis_boe, (1, 5))
self.assertEqual(axis.count, 5)
self.assertEqual(axis.size, 5)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [1, 2, 3, 4, 5]))
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 2, 3, 4, 5]) for item in extracted]))
# test axis object with enumerate type
axis = Axis((1, 5, 7, 10, 'A', 'F'))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'enum')
self.assertEqual(axis.axis_boe, (1, 5, 7, 10, 'A', 'F'))
self.assertEqual(axis.count, 6)
self.assertEqual(axis.size, 6)
self.assertEqual(axis.extract(1, 'int'), [1, 5, 7, 10, 'A', 'F'])
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 5, 7, 10, 'A', 'F']) for item in extracted]))
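# note for the next test: from_point(point, distance) builds a sub-space whose bounds
# are point +/- distance on each axis, clipped to the parent space; a sketch based on
# the assertions below:
#   s = Space([(0., 10), (0, 10)])
#   s.from_point((3, 3), 2).boes   # [(1, 5), (1, 5)]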
def test_from_point(self):
"""测试从一个点生成一个space"""
# 生成一个space,指定space中的一个点以及distance,生成一个sub-space
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10., 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
print('create subspace from a point in space')
p = (3, 3)
distance = 2
subspace = s.from_point(p, distance)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'discr'])
self.assertEqual(subspace.dim, 2)
self.assertEqual(subspace.size, (4.0, 5))
self.assertEqual(subspace.shape, (np.inf, 5))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(1, 5), (1, 5)])
print('create subspace from a 6 dimensional discrete space')
s = Space(pars=[(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 65345616)
self.assertEqual(subspace.size, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.shape, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.count, 65345616)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace from a 6 dimensional continuous space')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 48000000)
self.assertEqual(subspace.size, (15.0, 20.0, 20.0, 20.0, 20.0, 20.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace with different distances on each dimension')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = [10, 5, 5, 10, 10, 5]
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 6000000)
self.assertEqual(subspace.size, (15.0, 10.0, 10.0, 20.0, 20.0, 10.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (195, 205), (145, 155), (140, 160), (140, 160), (145, 155)])
class TestCashPlan(unittest.TestCase):
def setUp(self):
self.cp1 = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1)
self.cp1.info()
self.cp2 = qt.CashPlan(['20100501'], 10000)
self.cp2.info()
self.cp3 = qt.CashPlan(pd.date_range(start='2019-01-01',
freq='Y',
periods=12),
[i * 1000 + 10000 for i in range(12)],
0.035)
self.cp3.info()
def test_creation(self):
self.assertIsInstance(self.cp1, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp2, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp3, qt.CashPlan, 'CashPlan object creation wrong')
# test __repr__()
print(self.cp1)
print(self.cp2)
print(self.cp3)
# test __str__()
self.cp1.info()
self.cp2.info()
self.cp3.info()
# test assertion errors
self.assertRaises(AssertionError, qt.CashPlan, '2016-01-01', [10000, 10000])
self.assertRaises(KeyError, qt.CashPlan, '2020-20-20', 10000)
def test_properties(self):
self.assertEqual(self.cp1.amounts, [20000, 10000], 'property wrong')
self.assertEqual(self.cp1.first_day, Timestamp('2010-01-01'))
self.assertEqual(self.cp1.last_day, Timestamp('2012-01-01'))
self.assertEqual(self.cp1.investment_count, 2)
self.assertEqual(self.cp1.period, 730)
self.assertEqual(self.cp1.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01')])
self.assertEqual(self.cp1.ir, 0.1)
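# worked check of cp1.closing_value: each investment is compounded at ir up to the
# last day, 20000 * 1.1 ** 2 + 10000 = 34200, which matches the assertion below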
self.assertAlmostEqual(self.cp1.closing_value, 34200)
self.assertAlmostEqual(self.cp2.closing_value, 10000)
self.assertAlmostEqual(self.cp3.closing_value, 220385.3483685)
self.assertIsInstance(self.cp1.plan, pd.DataFrame)
self.assertIsInstance(self.cp2.plan, pd.DataFrame)
self.assertIsInstance(self.cp3.plan, pd.DataFrame)
def test_operation(self):
cp_self_add = self.cp1 + self.cp1
cp_add = self.cp1 + self.cp2
cp_add_int = self.cp1 + 10000
cp_mul_int = self.cp1 * 2
cp_mul_float = self.cp2 * 1.5
cp_mul_time = 3 * self.cp2
cp_mul_time2 = 2 * self.cp1
cp_mul_time3 = 2 * self.cp3
cp_mul_float2 = 2. * self.cp3
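# semantics implied by the assertions below: '+' merges two plans' dates and amounts,
# 'plan * number' and 'float * plan' scale the invested amounts, while 'int * plan'
# repeats the whole plan forward in time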
self.assertIsInstance(cp_self_add, qt.CashPlan)
self.assertEqual(cp_self_add.amounts, [40000, 20000])
self.assertEqual(cp_add.amounts, [20000, 10000, 10000])
self.assertEqual(cp_add_int.amounts, [30000, 20000])
self.assertEqual(cp_mul_int.amounts, [40000, 20000])
self.assertEqual(cp_mul_float.amounts, [15000])
self.assertEqual(cp_mul_float.dates, [Timestamp('2010-05-01')])
self.assertEqual(cp_mul_time.amounts, [10000, 10000, 10000])
self.assertEqual(cp_mul_time.dates, [Timestamp('2010-05-01'),
Timestamp('2011-05-01'),
Timestamp('2012-04-30')])
self.assertEqual(cp_mul_time2.amounts, [20000, 10000, 20000, 10000])
self.assertEqual(cp_mul_time2.dates, [Timestamp('2010-01-01'),
Timestamp('2012-01-01'),
Timestamp('2014-01-01'),
Timestamp('2016-01-01')])
self.assertEqual(cp_mul_time3.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31'),
Timestamp('2031-12-29'),
Timestamp('2032-12-29'),
Timestamp('2033-12-29'),
Timestamp('2034-12-29'),
Timestamp('2035-12-29'),
Timestamp('2036-12-29'),
Timestamp('2037-12-29'),
Timestamp('2038-12-29'),
Timestamp('2039-12-29'),
Timestamp('2040-12-29'),
Timestamp('2041-12-29'),
Timestamp('2042-12-29')])
self.assertEqual(cp_mul_float2.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31')])
self.assertEqual(cp_mul_float2.amounts, [20000.0,
22000.0,
24000.0,
26000.0,
28000.0,
30000.0,
32000.0,
34000.0,
36000.0,
38000.0,
40000.0,
42000.0])
class TestPool(unittest.TestCase):
def setUp(self):
self.p = ResultPool(5)
self.items = ['first', 'second', (1, 2, 3), 'this', 24]
self.perfs = [1, 2, 3, 4, 5]
self.additional_result1 = ('abc', 12)
self.additional_result2 = ([1, 2], -1)
self.additional_result3 = (12, 5)
def test_create(self):
self.assertIsInstance(self.p, ResultPool)
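# ResultPool keeps item/perf pairs up to its capacity (5 here); cut() trims the pool
# back to capacity, keeping the best performers by default or the worst when
# keep_largest=False, as the assertions below verify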
def test_operation(self):
self.p.in_pool(self.additional_result1[0], self.additional_result1[1])
self.p.cut()
self.assertEqual(self.p.item_count, 1)
self.assertEqual(self.p.items, ['abc'])
for item, perf in zip(self.items, self.perfs):
self.p.in_pool(item, perf)
self.assertEqual(self.p.item_count, 6)
self.assertEqual(self.p.items, ['abc', 'first', 'second', (1, 2, 3), 'this', 24])
self.p.cut()
self.assertEqual(self.p.items, ['second', (1, 2, 3), 'this', 24, 'abc'])
self.assertEqual(self.p.perfs, [2, 3, 4, 5, 12])
self.p.in_pool(self.additional_result2[0], self.additional_result2[1])
self.p.in_pool(self.additional_result3[0], self.additional_result3[1])
self.assertEqual(self.p.item_count, 7)
self.p.cut(keep_largest=False)
self.assertEqual(self.p.items, [[1, 2], 'second', (1, 2, 3), 'this', 24])
self.assertEqual(self.p.perfs, [-1, 2, 3, 4, 5])
class TestCoreSubFuncs(unittest.TestCase):
"""Test all functions in core.py"""
def setUp(self):
pass
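# input_to_list(value, n, pad) broadcasts a scalar to a list of length n, pads a short
# list with 'pad' (None by default), and returns a list that is already long enough
# unchanged; this behaviour is inferred from the assertions below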
def test_input_to_list(self):
print('Testing input_to_list() function')
input_str = 'first'
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 3), ['first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 4), ['first', 'first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 2, None), ['first', 'first'])
input_list = ['first', 'second']
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 3), ['first', 'second', None])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 4, 'padder'), ['first', 'second', 'padder', 'padder'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 1), ['first', 'second'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, -5), ['first', 'second'])
def test_point_in_space(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
p2 = (-1, 3, 10)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
self.assertFalse(p2 in sp)
print(f'point {p2} is not in space {sp}')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)], 'conti, conti, enum')
p1 = (5.5, 3.2, 8)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
def test_space_in_space(self):
print('test if a space is in another space')
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
sp2 = Space([(0., 10.), (0., 10.), (0., 10.)])
self.assertTrue(sp2 in sp)
self.assertTrue(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is in space {sp2}\n'
f'they are equal to each other\n')
sp2 = Space([(0, 5.), (2, 7.), (3., 9.)])
self.assertTrue(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'{sp2} is a sub space of {sp}\n')
sp2 = Space([(0, 5), (2, 7), (3., 9)])
self.assertFalse(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)])
self.assertFalse(sp in sp2)
self.assertFalse(sp2 in sp)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
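# note for the next test: space_around_centre() builds a sub-space of the given radius
# around a centre point, clipped to the parent bounds; enum axes are left untouched by
# default and only trimmed to nearby items when ignore_enums=False, as asserted below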
def test_space_around_centre(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
ssp = space_around_centre(space=sp, centre=p1, radius=1.2)
print(ssp.boes)
print('\ntest multiple diameters:')
self.assertEqual(ssp.boes, [(4.3, 6.7), (2.0, 4.4), (5.8, 8.2)])
ssp = space_around_centre(space=sp, centre=p1, radius=[1, 2, 1])
print(ssp.boes)
self.assertEqual(ssp.boes, [(4.5, 6.5), (1.2000000000000002, 5.2), (6.0, 8.0)])
print('\ntest points on edge:')
p2 = (5.5, 3.2, 10)
ssp = space_around_centre(space=sp, centre=p1, radius=3.9)
print(ssp.boes)
self.assertEqual(ssp.boes, [(1.6, 9.4), (0.0, 7.1), (3.1, 10.0)])
print('\ntest enum spaces')
sp = Space([(0, 100), range(40, 3, -2)], 'discr, enum')
p1 = [34, 12]
ssp = space_around_centre(space=sp, centre=p1, radius=5, ignore_enums=False)
self.assertEqual(ssp.boes, [(29, 39), (22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(ssp.boes)
print('\ntest enum space and ignore enum axis')
ssp = space_around_centre(space=sp, centre=p1, radius=5)
self.assertEqual(ssp.boes, [(29, 39),
(40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(sp.boes)
def test_time_string_format(self):
print('Testing time_str_format() function:')
t = 3.14
self.assertEqual(time_str_format(t), '3s 140.0ms')
self.assertEqual(time_str_format(t, estimation=True), '3s ')
self.assertEqual(time_str_format(t, short_form=True), '3"140')
self.assertEqual(time_str_format(t, estimation=True, short_form=True), '3"')
t = 300.14
self.assertEqual(time_str_format(t), '5min 140.0ms')
self.assertEqual(time_str_format(t, estimation=True), '5min ')
self.assertEqual(time_str_format(t, short_form=True), "5'140")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "5'")
t = 7435.0014
self.assertEqual(time_str_format(t), '2hrs 3min 55s 1.4ms')
self.assertEqual(time_str_format(t, estimation=True), '2hrs ')
self.assertEqual(time_str_format(t, short_form=True), "2H3'55\"001")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "2H")
t = 88425.0509
self.assertEqual(time_str_format(t), '1days 33min 45s 50.9ms')
self.assertEqual(time_str_format(t, estimation=True), '1days ')
self.assertEqual(time_str_format(t, short_form=True), "1D33'45\"051")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "1D")
def test_get_stock_pool(self):
print(f'start testing the stock pool building function\n')
share_basics = stock_basic(fields='ts_code,symbol,name,area,industry,market,list_date,exchange')
print(f'\nselect all stocks by area')
stock_pool = qt.get_stock_pool(area='上海')
print(f'{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "上海"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].eq('上海').all())
print(f'\nselect all stocks by multiple areas')
stock_pool = qt.get_stock_pool(area='贵州,北京,天津')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are in list of ["贵州", "北京", "天津"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['贵州',
'北京',
'天津']).all())
print(f'\nselect all stocks by area and industry')
stock_pool = qt.get_stock_pool(area='四川', industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "四川", and industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['四川']).all())
print(f'\nselect all stocks by industry')
stock_pool = qt.get_stock_pool(industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stocks industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
print(f'\nselect all stocks by market')
stock_pool = qt.get_stock_pool(market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock market is "主板"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
print(f'\nselect all stocks by market and list date')
stock_pool = qt.get_stock_pool(date='2000-01-01', market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock markets are "主板", and list dates are on or before "2000-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('2000-01-01').all())
print(f'\nselect all stocks by list date')
stock_pool = qt.get_stock_pool(date='1997-01-01')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all list dates are on or before "1997-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1997-01-01').all())
print(f'\nselect all stocks by exchange')
stock_pool = qt.get_stock_pool(exchange='SSE')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['exchange'].eq('SSE').all())
print(f'\nselect all stocks by industry, area and list date')
industry_list = ['银行', '全国地产', '互联网', '环境保护', '区域地产',
'酒店餐饮', '运输设备', '综合类', '建筑工程', '玻璃',
'家用电器', '文教休闲', '其他商业', '元器件', 'IT设备',
'其他建材', '汽车服务', '火力发电', '医药商业', '汽车配件',
'广告包装', '轻工机械', '新型电力', '多元金融', '饲料']
area_list = ['深圳', '北京', '吉林', '江苏', '辽宁', '广东',
'安徽', '四川', '浙江', '湖南', '河北', '新疆',
'山东', '河南', '山西', '江西', '青海', '湖北',
'内蒙', '海南', '重庆', '陕西', '福建', '广西',
'上海']
stock_pool = qt.get_stock_pool(date='19980101',
industry=industry_list,
area=area_list)
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if list dates, industries and areas all match the given filters\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1998-01-01').all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(industry_list).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(area_list).all())
self.assertRaises(KeyError, qt.get_stock_pool, industry=25)
self.assertRaises(KeyError, qt.get_stock_pool, share_name='000300.SH')
self.assertRaises(KeyError, qt.get_stock_pool, markets='SSE')
class TestEvaluations(unittest.TestCase):
"""Test all evaluation functions in core.py"""
# the manually calculated reference results below are kept in an Excel file
def setUp(self):
"""用np.random生成测试用数据,使用cumsum()模拟股票走势"""
self.test_data1 = pd.DataFrame([5.34892759, 5.65768696, 5.79227076, 5.56266871, 5.88189632,
6.24795001, 5.92755558, 6.38748165, 6.31331899, 5.86001665,
5.61048472, 5.30696736, 5.40406792, 5.03180571, 5.37886353,
5.78608307, 6.26540339, 6.59348026, 6.90943801, 6.70911677,
6.33015954, 6.06697417, 5.9752499, 6.45786408, 6.95273763,
6.7691991, 6.70355481, 6.28048969, 6.61344541, 6.24620003,
6.47409983, 6.4522311, 6.8773094, 6.99727832, 6.59262674,
6.59014938, 6.63758237, 6.38331869, 6.09902105, 6.35390109,
6.51993567, 6.87244592, 6.83963485, 7.08797815, 6.88003144,
6.83657323, 6.97819483, 7.01600276, 7.12554256, 7.58941523,
7.61014457, 7.21224091, 7.48174399, 7.66490854, 7.51371968,
7.11586198, 6.97147399, 6.67453301, 6.2042138, 6.33967015,
6.22187938, 5.98426993, 6.37096079, 6.55897161, 6.26422645,
6.69363762, 7.12668015, 6.83232926, 7.30524081, 7.4262041,
7.54031383, 7.17545919, 7.20659257, 7.44886016, 7.37094393,
6.88011022, 7.08142491, 6.74992833, 6.5967097, 6.21336693,
6.35565105, 6.82347596, 6.44773408, 6.84538053, 6.47966466,
6.09699528, 5.63927014, 6.01081024, 6.20585303, 6.60528206,
7.01594726, 7.03684251, 6.76574977, 7.08740846, 6.65336462,
7.07126686, 6.80058956, 6.79241977, 6.47843472, 6.39245474],
columns=['value'])
self.test_data2 = pd.DataFrame([5.09276527, 4.83828592, 4.6000911, 4.63170487, 4.63566451,
4.50546921, 4.96390044, 4.64557907, 4.25787855, 3.76585551,
3.38826334, 3.76243422, 4.06365426, 3.87084726, 3.91400935,
4.13438822, 4.27064542, 4.56776104, 5.03800296, 5.31070529,
5.39902276, 5.21186286, 5.05683114, 4.68842046, 5.11895168,
5.27151571, 5.72294993, 6.09961056, 6.26569635, 6.48806151,
6.16058885, 6.2582459, 6.38934791, 6.57831057, 6.19508831,
5.70155153, 5.20435735, 5.36538825, 5.40450056, 5.2227697,
5.37828693, 5.53058991, 6.02996797, 5.76802181, 5.66166713,
6.07988994, 5.61794367, 5.63218151, 6.10728013, 6.0324168,
6.27164431, 6.27551239, 6.52329665, 7.00470007, 7.34163113,
7.33699083, 7.67661334, 8.09395749, 7.68086668, 7.58341161,
7.46219819, 7.58671899, 7.19348298, 7.40088323, 7.47562005,
7.93342043, 8.2286081, 8.3521632, 8.43590025, 8.34977395,
8.57563095, 8.81586328, 9.08738649, 9.01542031, 8.8653815,
9.21763111, 9.04233017, 8.59533999, 8.47590075, 8.70857222,
8.78890756, 8.92697606, 9.35743773, 9.68280866, 10.15622021,
10.55908549, 10.6337894, 10.55197128, 10.65435176, 10.54611045,
10.19432562, 10.48320884, 10.36176768, 10.03186854, 10.23656092,
10.0062843, 10.13669686, 10.30758958, 9.87904176, 10.05126375],
columns=['value'])
self.test_data3 = pd.DataFrame([5.02851874, 5.20700348, 5.02410709, 5.49836387, 5.06834371,
5.10956737, 5.15314979, 5.02256472, 5.09746382, 5.23909247,
4.93410336, 4.96316186, 5.40026682, 5.7353255, 5.53438319,
5.79092139, 5.67528173, 5.89840855, 5.75379463, 6.10855386,
5.77322365, 5.84538021, 5.6103973, 5.7518655, 5.49729695,
5.13610628, 5.30524121, 5.68093462, 5.73251319, 6.04420783,
6.26929843, 6.59610234, 6.09872345, 6.25475121, 6.72927396,
6.91395783, 7.00693283, 7.36217783, 7.71516676, 7.67580263,
7.62477511, 7.73600568, 7.53457914, 7.46170277, 7.83658014,
8.11481319, 8.03705544, 7.64948845, 7.52043731, 7.67247943,
7.46511982, 7.43541798, 7.58856517, 7.9392717, 8.25406287,
7.77031632, 8.03223447, 7.86799055, 7.57630999, 7.33230519,
7.22378732, 6.85972264, 7.17548456, 7.5387846, 7.2392632,
6.8455644, 6.59557185, 6.6496796, 6.73685623, 7.18598015,
7.13619128, 6.88060157, 7.1399681, 7.30308077, 6.94942434,
7.0247815, 7.37567798, 7.50080197, 7.59719284, 7.14520561,
7.29913484, 7.79551341, 8.15497781, 8.40456095, 8.86516528,
8.53042688, 8.94268762, 8.52048006, 8.80036284, 8.91602364,
9.19953385, 8.70828953, 8.24613093, 8.18770453, 7.79548389,
7.68627967, 7.23205036, 6.98302636, 7.06515819, 6.95068113],
columns=['value'])
self.test_data4 = pd.DataFrame([4.97926539, 5.44016005, 5.45122915, 5.74485615, 5.45600553,
5.44858945, 5.2435413, 5.47315161, 5.58464303, 5.36179749,
5.38236326, 5.29614981, 5.76523508, 5.75102892, 6.15316618,
6.03852528, 6.01442228, 5.70510182, 5.22748133, 5.46762379,
5.78926267, 5.8221362, 5.61236849, 5.30615725, 5.24200611,
5.41042642, 5.59940342, 5.28306781, 4.99451932, 5.08799266,
5.38865647, 5.58229139, 5.33492845, 5.48206276, 5.09721379,
5.39190493, 5.29965087, 5.0374415, 5.50798022, 5.43107577,
5.22759507, 4.991809, 5.43153084, 5.39966868, 5.59916352,
5.66412137, 6.00611838, 5.63564902, 5.66723484, 5.29863863,
4.91115153, 5.3749929, 5.75082334, 6.08308148, 6.58091182,
6.77848803, 7.19588758, 7.64862286, 7.99818347, 7.91824794,
8.30341071, 8.45984973, 7.98700002, 8.18924931, 8.60755649,
8.66233396, 8.91018407, 9.0782739, 9.33515448, 8.95870245,
8.98426422, 8.50340317, 8.64916085, 8.93592407, 8.63145745,
8.65322862, 8.39543204, 8.37969997, 8.23394504, 8.04062872,
7.91259763, 7.57252171, 7.72670114, 7.74486117, 8.06908188,
7.99166889, 7.92155906, 8.39956136, 8.80181323, 8.47464091,
8.06557064, 7.87145573, 8.0237959, 8.39481998, 8.68525692,
8.81185461, 8.98632237, 9.0989835, 8.89787405, 8.86508591],
columns=['value'])
self.test_data5 = pd.DataFrame([4.50258923, 4.35142568, 4.07459514, 3.87791297, 3.73715985,
3.98455684, 4.07587908, 4.00042472, 4.28276612, 4.01362051,
4.13713565, 4.49312372, 4.48633159, 4.4641207, 4.13444605,
3.79107217, 4.22941629, 4.56548511, 4.92472163, 5.27723158,
5.67409193, 6.00176917, 5.88889928, 5.55256103, 5.39308314,
5.2610492, 5.30738908, 5.22222408, 4.90332238, 4.57499908,
4.96097146, 4.81531011, 4.39115442, 4.63200662, 5.04588813,
4.67866025, 5.01705123, 4.83562258, 4.60381702, 4.66187576,
4.41292828, 4.86604507, 4.42280124, 4.07517294, 4.16317319,
4.10316596, 4.42913598, 4.06609666, 3.96725913, 4.15965746,
4.12379564, 4.04054068, 3.84342851, 3.45902867, 3.17649855,
3.09773586, 3.5502119, 3.66396995, 3.66306483, 3.29131401,
2.79558533, 2.88319542, 3.03671098, 3.44645857, 3.88167161,
3.57961874, 3.60180276, 3.96702102, 4.05429995, 4.40056979,
4.05653231, 3.59600456, 3.60792477, 4.09989922, 3.73503663,
4.01892626, 3.94597242, 3.81466605, 3.71417992, 3.93767156,
4.42806557, 4.06988106, 4.03713636, 4.34408673, 4.79810156,
5.18115011, 4.89798406, 5.3960077, 5.72504875, 5.61894017,
5.1958197, 4.85275896, 5.17550207, 4.71548987, 4.62408567,
4.55488535, 4.36532649, 4.26031979, 4.25225607, 4.58627048],
columns=['value'])
self.test_data6 = pd.DataFrame([5.08639513, 5.05761083, 4.76160923, 4.62166504, 4.62923183,
4.25070173, 4.13447513, 3.90890013, 3.76687608, 3.43342482,
3.67648224, 3.6274775, 3.9385404, 4.39771627, 4.03199346,
3.93265288, 3.50059789, 3.3851961, 3.29743973, 3.2544872,
2.93692949, 2.70893003, 2.55461976, 2.20922332, 2.29054475,
2.2144714, 2.03726827, 2.39007617, 2.29866155, 2.40607111,
2.40440444, 2.79374649, 2.66541922, 2.27018079, 2.08505127,
2.55478864, 2.22415625, 2.58517923, 2.58802256, 2.94870959,
2.69301739, 2.19991535, 2.69473146, 2.64704637, 2.62753542,
2.14240825, 2.38565154, 1.94592117, 2.32243877, 2.69337246,
2.51283854, 2.62484451, 2.15559054, 2.35410875, 2.31219177,
1.96018265, 2.34711266, 2.58083322, 2.40290041, 2.20439791,
2.31472425, 2.16228248, 2.16439749, 2.20080737, 1.73293206,
1.9264407, 2.25089861, 2.69269101, 2.59296687, 2.1420998,
1.67819153, 1.98419023, 2.14479494, 1.89055376, 1.96720648,
1.9916694, 2.37227761, 2.14446036, 2.34573903, 1.86162546,
2.1410721, 2.39204939, 2.52529064, 2.47079939, 2.9299031,
3.09452923, 2.93276708, 3.21731309, 3.06248964, 2.90413406,
2.67844632, 2.45621213, 2.41463398, 2.7373913, 3.14917045,
3.4033949, 3.82283446, 4.02285451, 3.7619638, 4.10346795],
columns=['value'])
self.test_data7 = pd.DataFrame([4.75233583, 4.47668283, 4.55894263, 4.61765848, 4.622892,
4.58941116, 4.32535872, 3.88112797, 3.47237806, 3.50898953,
3.82530406, 3.6718017, 3.78918195, 4.1800752, 4.01818557,
4.40822582, 4.65474654, 4.89287256, 4.40879274, 4.65505126,
4.36876403, 4.58418934, 4.75687172, 4.3689799, 4.16126498,
4.0203982, 3.77148242, 3.38198096, 3.07261764, 2.9014741,
2.5049543, 2.756105, 2.28779058, 2.16986991, 1.8415962,
1.83319008, 2.20898291, 2.00128981, 1.75747025, 1.26676663,
1.40316876, 1.11126484, 1.60376367, 1.22523829, 1.58816681,
1.49705679, 1.80244138, 1.55128293, 1.35339409, 1.50985759,
1.0808451, 1.05892796, 1.43414812, 1.43039101, 1.73631655,
1.43940867, 1.82864425, 1.71088265, 2.12015154, 2.45417128,
2.84777618, 2.7925612, 2.90975121, 3.25920745, 3.13801182,
3.52733677, 3.65468491, 3.69395211, 3.49862035, 3.24786017,
3.64463138, 4.00331929, 3.62509565, 3.78013949, 3.4174012,
3.76312271, 3.62054004, 3.67206716, 3.60596058, 3.38636199,
3.42580676, 3.32921095, 3.02976759, 3.28258676, 3.45760838,
3.24917528, 2.94618304, 2.86980011, 2.63191259, 2.39566759,
2.53159917, 2.96273967, 3.25626185, 2.97425402, 3.16412191,
3.58280763, 3.23257727, 3.62353556, 3.12806399, 2.92532313],
columns=['value'])
# build a test series of 500 data points to test the evaluation process when there are more than 250 data points
self.long_data = pd.DataFrame([ 9.879, 9.916, 10.109, 10.214, 10.361, 10.768, 10.594, 10.288,
10.082, 9.994, 10.125, 10.126, 10.384, 10.734, 10.4 , 10.87 ,
11.338, 11.061, 11.415, 11.724, 12.077, 12.196, 12.064, 12.423,
12.19 , 11.729, 11.677, 11.448, 11.485, 10.989, 11.242, 11.239,
11.113, 11.075, 11.471, 11.745, 11.754, 11.782, 12.079, 11.97 ,
12.178, 11.95 , 12.438, 12.612, 12.804, 12.952, 12.612, 12.867,
12.832, 12.832, 13.015, 13.315, 13.249, 12.904, 12.776, 12.64 ,
12.543, 12.287, 12.225, 11.844, 11.985, 11.945, 11.542, 11.871,
12.245, 12.228, 12.362, 11.899, 11.962, 12.374, 12.816, 12.649,
12.252, 12.579, 12.3 , 11.988, 12.177, 12.312, 12.744, 12.599,
12.524, 12.82 , 12.67 , 12.876, 12.986, 13.271, 13.606, 13.82 ,
14.161, 13.833, 13.831, 14.137, 13.705, 13.414, 13.037, 12.759,
12.642, 12.948, 13.297, 13.483, 13.836, 14.179, 13.709, 13.655,
13.198, 13.508, 13.953, 14.387, 14.043, 13.987, 13.561, 13.391,
12.923, 12.555, 12.503, 12.292, 11.877, 12.34 , 12.141, 11.687,
11.992, 12.458, 12.131, 11.75 , 11.739, 11.263, 11.762, 11.976,
11.578, 11.854, 12.136, 12.422, 12.311, 12.56 , 12.879, 12.861,
12.973, 13.235, 13.53 , 13.531, 13.137, 13.166, 13.31 , 13.103,
13.007, 12.643, 12.69 , 12.216, 12.385, 12.046, 12.321, 11.9 ,
11.772, 11.816, 11.871, 11.59 , 11.518, 11.94 , 11.803, 11.924,
12.183, 12.136, 12.361, 12.406, 11.932, 11.684, 11.292, 11.388,
11.874, 12.184, 12.002, 12.16 , 11.741, 11.26 , 11.123, 11.534,
11.777, 11.407, 11.275, 11.679, 11.62 , 11.218, 11.235, 11.352,
11.366, 11.061, 10.661, 10.582, 10.899, 11.352, 11.792, 11.475,
11.263, 11.538, 11.183, 10.936, 11.399, 11.171, 11.214, 10.89 ,
10.728, 11.191, 11.646, 11.62 , 11.195, 11.178, 11.18 , 10.956,
11.205, 10.87 , 11.098, 10.639, 10.487, 10.507, 10.92 , 10.558,
10.119, 9.882, 9.573, 9.515, 9.845, 9.852, 9.495, 9.726,
10.116, 10.452, 10.77 , 11.225, 10.92 , 10.824, 11.096, 11.542,
11.06 , 10.568, 10.585, 10.884, 10.401, 10.068, 9.964, 10.285,
10.239, 10.036, 10.417, 10.132, 9.839, 9.556, 9.084, 9.239,
9.304, 9.067, 8.587, 8.471, 8.007, 8.321, 8.55 , 9.008,
9.138, 9.088, 9.434, 9.156, 9.65 , 9.431, 9.654, 10.079,
10.411, 10.865, 10.51 , 10.205, 10.519, 10.367, 10.855, 10.642,
10.298, 10.622, 10.173, 9.792, 9.995, 9.904, 9.771, 9.597,
9.506, 9.212, 9.688, 10.032, 9.723, 9.839, 9.918, 10.332,
10.236, 9.989, 10.192, 10.685, 10.908, 11.275, 11.72 , 12.158,
12.045, 12.244, 12.333, 12.246, 12.552, 12.958, 13.11 , 13.53 ,
13.123, 13.138, 13.57 , 13.389, 13.511, 13.759, 13.698, 13.744,
13.467, 13.795, 13.665, 13.377, 13.423, 13.772, 13.295, 13.073,
12.718, 12.388, 12.399, 12.185, 11.941, 11.818, 11.465, 11.811,
12.163, 11.86 , 11.935, 11.809, 12.145, 12.624, 12.768, 12.321,
12.277, 11.889, 12.11 , 12.606, 12.943, 12.945, 13.112, 13.199,
13.664, 14.051, 14.189, 14.339, 14.611, 14.656, 15.112, 15.086,
15.263, 15.021, 15.346, 15.572, 15.607, 15.983, 16.151, 16.215,
16.096, 16.089, 16.32 , 16.59 , 16.657, 16.752, 16.583, 16.743,
16.373, 16.662, 16.243, 16.163, 16.491, 16.958, 16.977, 17.225,
17.637, 17.344, 17.684, 17.892, 18.036, 18.182, 17.803, 17.588,
17.101, 17.538, 17.124, 16.787, 17.167, 17.138, 16.955, 17.148,
17.135, 17.635, 17.718, 17.675, 17.622, 17.358, 17.754, 17.729,
17.576, 17.772, 18.239, 18.441, 18.729, 18.319, 18.608, 18.493,
18.069, 18.122, 18.314, 18.423, 18.709, 18.548, 18.384, 18.391,
17.988, 17.986, 17.653, 17.249, 17.298, 17.06 , 17.36 , 17.108,
17.348, 17.596, 17.46 , 17.635, 17.275, 17.291, 16.933, 17.337,
17.231, 17.146, 17.148, 16.751, 16.891, 17.038, 16.735, 16.64 ,
16.231, 15.957, 15.977, 16.077, 16.054, 15.797, 15.67 , 15.911,
16.077, 16.17 , 15.722, 15.258, 14.877, 15.138, 15. , 14.811,
14.698, 14.407, 14.583, 14.704, 15.153, 15.436, 15.634, 15.453,
15.877, 15.696, 15.563, 15.927, 16.255, 16.696, 16.266, 16.698,
16.365, 16.493, 16.973, 16.71 , 16.327, 16.605, 16.486, 16.846,
16.935, 17.21 , 17.389, 17.546, 17.773, 17.641, 17.485, 17.794,
17.354, 16.904, 16.675, 16.43 , 16.898, 16.819, 16.921, 17.201,
17.617, 17.368, 17.864, 17.484],
columns=['value'])
self.long_bench = pd.DataFrame([ 9.7 , 10.179, 10.321, 9.855, 9.936, 10.096, 10.331, 10.662,
10.59 , 11.031, 11.154, 10.945, 10.625, 10.233, 10.284, 10.252,
10.221, 10.352, 10.444, 10.773, 10.904, 11.104, 10.797, 10.55 ,
10.943, 11.352, 11.641, 11.983, 11.696, 12.138, 12.365, 12.379,
11.969, 12.454, 12.947, 13.119, 13.013, 12.763, 12.632, 13.034,
12.681, 12.561, 12.938, 12.867, 13.202, 13.132, 13.539, 13.91 ,
13.456, 13.692, 13.771, 13.904, 14.069, 13.728, 13.97 , 14.228,
13.84 , 14.041, 13.963, 13.689, 13.543, 13.858, 14.118, 13.987,
13.611, 14.028, 14.229, 14.41 , 14.74 , 15.03 , 14.915, 15.207,
15.354, 15.665, 15.877, 15.682, 15.625, 15.175, 15.105, 14.893,
14.86 , 15.097, 15.178, 15.293, 15.238, 15. , 15.283, 14.994,
14.907, 14.664, 14.888, 15.297, 15.313, 15.368, 14.956, 14.802,
14.506, 14.257, 14.619, 15.019, 15.049, 14.625, 14.894, 14.978,
15.434, 15.578, 16.038, 16.107, 16.277, 16.365, 16.204, 16.465,
16.401, 16.895, 17.057, 16.621, 16.225, 16.075, 15.863, 16.292,
16.551, 16.724, 16.817, 16.81 , 17.192, 16.86 , 16.745, 16.707,
16.552, 16.133, 16.301, 16.08 , 15.81 , 15.75 , 15.909, 16.127,
16.457, 16.204, 16.329, 16.748, 16.624, 17.011, 16.548, 16.831,
16.653, 16.791, 16.57 , 16.778, 16.928, 16.932, 17.22 , 16.876,
17.301, 17.422, 17.689, 17.316, 17.547, 17.534, 17.409, 17.669,
17.416, 17.859, 17.477, 17.307, 17.245, 17.352, 17.851, 17.412,
17.144, 17.138, 17.085, 16.926, 16.674, 16.854, 17.064, 16.95 ,
16.609, 16.957, 16.498, 16.552, 16.175, 15.858, 15.697, 15.781,
15.583, 15.36 , 15.558, 16.046, 15.968, 15.905, 16.358, 16.783,
17.048, 16.762, 17.224, 17.363, 17.246, 16.79 , 16.608, 16.423,
15.991, 15.527, 15.147, 14.759, 14.792, 15.206, 15.148, 15.046,
15.429, 14.999, 15.407, 15.124, 14.72 , 14.713, 15.022, 15.092,
14.982, 15.001, 14.734, 14.713, 14.841, 14.562, 15.005, 15.483,
15.472, 15.277, 15.503, 15.116, 15.12 , 15.442, 15.476, 15.789,
15.36 , 15.764, 16.218, 16.493, 16.642, 17.088, 16.816, 16.645,
16.336, 16.511, 16.2 , 15.994, 15.86 , 15.929, 16.316, 16.416,
16.746, 17.173, 17.531, 17.627, 17.407, 17.49 , 17.768, 17.509,
17.795, 18.147, 18.63 , 18.945, 19.021, 19.518, 19.6 , 19.744,
19.63 , 19.32 , 18.933, 19.297, 19.598, 19.446, 19.236, 19.198,
19.144, 19.159, 19.065, 19.032, 18.586, 18.272, 18.119, 18.3 ,
17.894, 17.744, 17.5 , 17.083, 17.092, 16.864, 16.453, 16.31 ,
16.681, 16.342, 16.447, 16.715, 17.068, 17.067, 16.822, 16.673,
16.675, 16.592, 16.686, 16.397, 15.902, 15.597, 15.357, 15.162,
15.348, 15.603, 15.283, 15.257, 15.082, 14.621, 14.366, 14.039,
13.957, 14.141, 13.854, 14.243, 14.414, 14.033, 13.93 , 14.104,
14.461, 14.249, 14.053, 14.165, 14.035, 14.408, 14.501, 14.019,
14.265, 14.67 , 14.797, 14.42 , 14.681, 15.16 , 14.715, 14.292,
14.411, 14.656, 15.094, 15.366, 15.055, 15.198, 14.762, 14.294,
13.854, 13.811, 13.549, 13.927, 13.897, 13.421, 13.037, 13.32 ,
13.721, 13.511, 13.999, 13.529, 13.418, 13.881, 14.326, 14.362,
13.987, 14.015, 13.599, 13.343, 13.307, 13.689, 13.851, 13.404,
13.577, 13.395, 13.619, 13.195, 12.904, 12.553, 12.294, 12.649,
12.425, 11.967, 12.062, 11.71 , 11.645, 12.058, 12.136, 11.749,
11.953, 12.401, 12.044, 11.901, 11.631, 11.396, 11.036, 11.244,
10.864, 11.207, 11.135, 11.39 , 11.723, 12.084, 11.8 , 11.471,
11.33 , 11.504, 11.295, 11.3 , 10.901, 10.494, 10.825, 11.054,
10.866, 10.713, 10.875, 10.846, 10.947, 11.422, 11.158, 10.94 ,
10.521, 10.36 , 10.411, 10.792, 10.472, 10.305, 10.525, 10.853,
10.556, 10.72 , 10.54 , 10.583, 10.299, 10.061, 10.004, 9.903,
9.796, 9.472, 9.246, 9.54 , 9.456, 9.177, 9.484, 9.557,
9.493, 9.968, 9.536, 9.39 , 8.922, 8.423, 8.518, 8.686,
8.771, 9.098, 9.281, 8.858, 9.027, 8.553, 8.784, 8.996,
9.379, 9.846, 9.855, 9.502, 9.608, 9.761, 9.409, 9.4 ,
9.332, 9.34 , 9.284, 8.844, 8.722, 8.376, 8.775, 8.293,
8.144, 8.63 , 8.831, 8.957, 9.18 , 9.601, 9.695, 10.018,
9.841, 9.743, 9.292, 8.85 , 9.316, 9.288, 9.519, 9.738,
9.289, 9.785, 9.804, 10.06 , 10.188, 10.095, 9.739, 9.881,
9.7 , 9.991, 10.391, 10.002],
columns=['value'])
def test_performance_stats(self):
"""test the function performance_statistics()
"""
pass
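# eval_fv() returns the final 'value' of the series (each figure below is the last
# entry of the corresponding test DataFrame) and -np.inf for an empty DataFrame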
def test_fv(self):
print(f'test with test data and empty DataFrame')
self.assertAlmostEqual(eval_fv(self.test_data1), 6.39245474)
self.assertAlmostEqual(eval_fv(self.test_data2), 10.05126375)
self.assertAlmostEqual(eval_fv(self.test_data3), 6.95068113)
self.assertAlmostEqual(eval_fv(self.test_data4), 8.86508591)
self.assertAlmostEqual(eval_fv(self.test_data5), 4.58627048)
self.assertAlmostEqual(eval_fv(self.test_data6), 4.10346795)
self.assertAlmostEqual(eval_fv(self.test_data7), 2.92532313)
self.assertAlmostEqual(eval_fv(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
def test_max_drawdown(self):
print(f'test with test data and empty DataFrame')
self.assertAlmostEqual(eval_max_drawdown(self.test_data1)[0], 0.264274308)
self.assertEqual(eval_max_drawdown(self.test_data1)[1], 53)
self.assertEqual(eval_max_drawdown(self.test_data1)[2], 86)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data1)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data2)[0], 0.334690849)
self.assertEqual(eval_max_drawdown(self.test_data2)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data2)[2], 10)
self.assertEqual(eval_max_drawdown(self.test_data2)[3], 19)
self.assertAlmostEqual(eval_max_drawdown(self.test_data3)[0], 0.244452899)
self.assertEqual(eval_max_drawdown(self.test_data3)[1], 90)
self.assertEqual(eval_max_drawdown(self.test_data3)[2], 99)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data3)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data4)[0], 0.201849684)
self.assertEqual(eval_max_drawdown(self.test_data4)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4)[2], 50)
self.assertEqual(eval_max_drawdown(self.test_data4)[3], 54)
self.assertAlmostEqual(eval_max_drawdown(self.test_data5)[0], 0.534206456)
self.assertEqual(eval_max_drawdown(self.test_data5)[1], 21)
self.assertEqual(eval_max_drawdown(self.test_data5)[2], 60)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data5)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data6)[0], 0.670062689)
self.assertEqual(eval_max_drawdown(self.test_data6)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data6)[2], 70)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data6)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data7)[0], 0.783577449)
self.assertEqual(eval_max_drawdown(self.test_data7)[1], 17)
self.assertEqual(eval_max_drawdown(self.test_data7)[2], 51)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data7)[3]))
self.assertEqual(eval_max_drawdown(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
# test max drawdown when the net value dips below zero (drawdown ratio exceeds 1):
# TODO: investigate how the calculation behaves when the peak value approaches zero (division by zero)
self.assertAlmostEqual(eval_max_drawdown(self.test_data4 - 5)[0], 1.0770474121951792)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[2], 50)
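# eval_max_drawdown() returns (max drawdown, peak index, valley index, recovery index);
# the recovery index is NaN when the series never regains its previous peak, as the
# np.isnan checks above show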
def test_info_ratio(self):
reference = self.test_data1
self.assertAlmostEqual(eval_info_ratio(self.test_data2, reference, 'value'), 0.075553316)
self.assertAlmostEqual(eval_info_ratio(self.test_data3, reference, 'value'), 0.018949457)
self.assertAlmostEqual(eval_info_ratio(self.test_data4, reference, 'value'), 0.056328143)
self.assertAlmostEqual(eval_info_ratio(self.test_data5, reference, 'value'), -0.004270068)
self.assertAlmostEqual(eval_info_ratio(self.test_data6, reference, 'value'), 0.009198027)
self.assertAlmostEqual(eval_info_ratio(self.test_data7, reference, 'value'), -0.000890283)
def test_volatility(self):
self.assertAlmostEqual(eval_volatility(self.test_data1), 0.748646166)
self.assertAlmostEqual(eval_volatility(self.test_data2), 0.75527442)
self.assertAlmostEqual(eval_volatility(self.test_data3), 0.654188853)
self.assertAlmostEqual(eval_volatility(self.test_data4), 0.688375814)
self.assertAlmostEqual(eval_volatility(self.test_data5), 1.089989522)
self.assertAlmostEqual(eval_volatility(self.test_data6), 1.775419308)
self.assertAlmostEqual(eval_volatility(self.test_data7), 1.962758406)
self.assertAlmostEqual(eval_volatility(self.test_data1, logarithm=False), 0.750993311)
self.assertAlmostEqual(eval_volatility(self.test_data2, logarithm=False), 0.75571473)
self.assertAlmostEqual(eval_volatility(self.test_data3, logarithm=False), 0.655331424)
self.assertAlmostEqual(eval_volatility(self.test_data4, logarithm=False), 0.692683021)
self.assertAlmostEqual(eval_volatility(self.test_data5, logarithm=False), 1.09602969)
self.assertAlmostEqual(eval_volatility(self.test_data6, logarithm=False), 1.774789504)
self.assertAlmostEqual(eval_volatility(self.test_data7, logarithm=False), 2.003329156)
self.assertEqual(eval_volatility(pd.DataFrame()), -np.inf)
self.assertRaises(AssertionError, eval_volatility, [1, 2, 3])
        # test volatility calculation on long data
expected_volatility = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
0.39955371, 0.39974258, 0.40309866, 0.40486593, 0.4055514 ,
0.40710639, 0.40708157, 0.40609006, 0.4073625 , 0.40835305,
0.41155304, 0.41218193, 0.41207489, 0.41300276, 0.41308415,
0.41292392, 0.41207645, 0.41238397, 0.41229291, 0.41164056,
0.41316317, 0.41348842, 0.41462249, 0.41474574, 0.41652625,
0.41649176, 0.41701556, 0.4166593 , 0.41684221, 0.41491689,
0.41435209, 0.41549087, 0.41849338, 0.41998049, 0.41959106,
0.41907311, 0.41916103, 0.42120773, 0.42052391, 0.42111225,
0.42124589, 0.42356445, 0.42214672, 0.42324022, 0.42476639,
0.42621689, 0.42549439, 0.42533678, 0.42539414, 0.42545038,
0.42593637, 0.42652095, 0.42665489, 0.42699563, 0.42798159,
0.42784512, 0.42898006, 0.42868781, 0.42874188, 0.42789631,
0.4277768 , 0.42776827, 0.42685216, 0.42660989, 0.42563155,
0.42618281, 0.42606281, 0.42505222, 0.42653242, 0.42555378,
0.42500842, 0.42561939, 0.42442059, 0.42395414, 0.42384356,
0.42319135, 0.42397497, 0.42488579, 0.42449729, 0.42508766,
0.42509878, 0.42456616, 0.42535577, 0.42681884, 0.42688552,
0.42779918, 0.42706058, 0.42792887, 0.42762114, 0.42894045,
0.42977398, 0.42919859, 0.42829041, 0.42780946, 0.42825318,
0.42858952, 0.42858315, 0.42805601, 0.42764751, 0.42744107,
0.42775518, 0.42707283, 0.4258592 , 0.42615335, 0.42526286,
0.4248906 , 0.42368986, 0.4232565 , 0.42265079, 0.42263954,
0.42153046, 0.42132051, 0.41995353, 0.41916605, 0.41914271,
0.41876945, 0.41740175, 0.41583884, 0.41614026, 0.41457908,
0.41472411, 0.41310876, 0.41261041, 0.41212369, 0.41211677,
0.4100645 , 0.40852504, 0.40860297, 0.40745338, 0.40698661,
0.40644546, 0.40591375, 0.40640744, 0.40620663, 0.40656649,
0.40727154, 0.40797605, 0.40807137, 0.40808913, 0.40809676,
0.40711767, 0.40724628, 0.40713077, 0.40772698, 0.40765157,
0.40658297, 0.4065991 , 0.405011 , 0.40537645, 0.40432626,
0.40390177, 0.40237701, 0.40291623, 0.40301797, 0.40324145,
0.40312864, 0.40328316, 0.40190955, 0.40246506, 0.40237663,
0.40198407, 0.401969 , 0.40185623, 0.40198313, 0.40005643,
0.39940743, 0.39850438, 0.39845398, 0.39695093, 0.39697295,
0.39663201, 0.39675444, 0.39538699, 0.39331959, 0.39326074,
0.39193287, 0.39157266, 0.39021327, 0.39062591, 0.38917591,
0.38976991, 0.38864187, 0.38872158, 0.38868096, 0.38868377,
0.38842057, 0.38654784, 0.38649517, 0.38600464, 0.38408115,
0.38323049, 0.38260215, 0.38207663, 0.38142669, 0.38003262,
0.37969367, 0.37768092, 0.37732108, 0.37741991, 0.37617779,
0.37698504, 0.37606784, 0.37499276, 0.37533731, 0.37350437,
0.37375172, 0.37385382, 0.37384003, 0.37338938, 0.37212288,
0.37273075, 0.370559 , 0.37038506, 0.37062153, 0.36964661,
0.36818564, 0.3656634 , 0.36539259, 0.36428672, 0.36502487,
0.3647148 , 0.36551435, 0.36409919, 0.36348181, 0.36254383,
0.36166601, 0.36142665, 0.35954942, 0.35846915, 0.35886759,
0.35813867, 0.35642888, 0.35375231, 0.35061783, 0.35078463,
0.34995508, 0.34688918, 0.34548257, 0.34633158, 0.34622833,
0.34652111, 0.34622774, 0.34540951, 0.34418809, 0.34276593,
0.34160916, 0.33811193, 0.33822709, 0.3391685 , 0.33883381])
test_volatility = eval_volatility(self.long_data)
test_volatility_roll = self.long_data['volatility'].values
self.assertAlmostEqual(test_volatility, np.nanmean(expected_volatility))
self.assertTrue(np.allclose(expected_volatility, test_volatility_roll, equal_nan=True))
def test_sharp(self):
self.assertAlmostEqual(eval_sharp(self.test_data1, 5, 0), 0.06135557)
self.assertAlmostEqual(eval_sharp(self.test_data2, 5, 0), 0.167858667)
self.assertAlmostEqual(eval_sharp(self.test_data3, 5, 0), 0.09950547)
self.assertAlmostEqual(eval_sharp(self.test_data4, 5, 0), 0.154928241)
self.assertAlmostEqual(eval_sharp(self.test_data5, 5, 0.002), 0.007868673)
self.assertAlmostEqual(eval_sharp(self.test_data6, 5, 0.002), 0.018306537)
self.assertAlmostEqual(eval_sharp(self.test_data7, 5, 0.002), 0.006259971)
        # test sharpe ratio calculation on long data
expected_sharp = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.02346815, -0.02618783, -0.03763912, -0.03296276, -0.03085698,
-0.02851101, -0.02375842, -0.02016746, -0.01107885, -0.01426613,
-0.00787204, -0.01135784, -0.01164232, -0.01003481, -0.00022512,
-0.00046792, -0.01209378, -0.01278892, -0.01298135, -0.01938214,
-0.01671044, -0.02120509, -0.0244281 , -0.02416067, -0.02763238,
-0.027579 , -0.02372774, -0.02215294, -0.02467094, -0.02091266,
-0.02590194, -0.03049876, -0.02077131, -0.01483653, -0.02488144,
-0.02671638, -0.02561547, -0.01957986, -0.02479803, -0.02703162,
-0.02658087, -0.01641755, -0.01946472, -0.01647757, -0.01280889,
-0.00893643, -0.00643275, -0.00698457, -0.00549962, -0.00654677,
-0.00494757, -0.0035633 , -0.00109037, 0.00750654, 0.00451208,
0.00625502, 0.01221367, 0.01326454, 0.01535037, 0.02269538,
0.02028715, 0.02127712, 0.02333264, 0.02273159, 0.01670643,
0.01376513, 0.01265342, 0.02211647, 0.01612449, 0.00856706,
-0.00077147, -0.00268848, 0.00210993, -0.00443934, -0.00411912,
-0.0018756 , -0.00867461, -0.00581601, -0.00660835, -0.00861137,
-0.00678614, -0.01188408, -0.00589617, -0.00244323, -0.00201891,
-0.01042846, -0.01471016, -0.02167034, -0.02258554, -0.01306809,
-0.00909086, -0.01233746, -0.00595166, -0.00184208, 0.00750497,
0.01481886, 0.01761972, 0.01562886, 0.01446414, 0.01285826,
0.01357719, 0.00967613, 0.01636272, 0.01458437, 0.02280183,
0.02151903, 0.01700276, 0.01597368, 0.02114336, 0.02233297,
0.02585631, 0.02768459, 0.03519235, 0.04204535, 0.04328161,
0.04672855, 0.05046191, 0.04619848, 0.04525853, 0.05381529,
0.04598861, 0.03947394, 0.04665006, 0.05586077, 0.05617728,
0.06495018, 0.06205172, 0.05665466, 0.06500615, 0.0632062 ,
0.06084328, 0.05851466, 0.05659229, 0.05159347, 0.0432977 ,
0.0474047 , 0.04231723, 0.03613176, 0.03618391, 0.03591012,
0.03885674, 0.0402686 , 0.03846423, 0.04534014, 0.04721458,
0.05130912, 0.05026281, 0.05394312, 0.05529349, 0.05949243,
0.05463304, 0.06195165, 0.06767606, 0.06880985, 0.07048996,
0.07078815, 0.07420767, 0.06773439, 0.0658441 , 0.06470875,
0.06302349, 0.06456876, 0.06411282, 0.06216669, 0.067094 ,
0.07055075, 0.07254976, 0.07119253, 0.06173308, 0.05393352,
0.05681246, 0.05250643, 0.06099845, 0.0655544 , 0.06977334,
0.06636514, 0.06177949, 0.06869908, 0.06719767, 0.06178738,
0.05915714, 0.06882277, 0.06756821, 0.06507994, 0.06489791,
0.06553941, 0.073123 , 0.07576757, 0.06805446, 0.06063571,
0.05033801, 0.05206971, 0.05540306, 0.05249118, 0.05755587,
0.0586174 , 0.05051288, 0.0564852 , 0.05757284, 0.06358355,
0.06130082, 0.04925482, 0.03834472, 0.04163981, 0.04648316,
0.04457858, 0.04324626, 0.04328791, 0.04156207, 0.04818652,
0.04972634, 0.06024123, 0.06489556, 0.06255485, 0.06069815,
0.06466389, 0.07081163, 0.07895358, 0.0881782 , 0.09374151,
0.08336506, 0.08764795, 0.09080174, 0.08808926, 0.08641158,
0.07811943, 0.06885318, 0.06479503, 0.06851185, 0.07382819,
0.07047903, 0.06658251, 0.07638379, 0.08667974, 0.08867918,
0.08245323, 0.08961866, 0.09905298, 0.0961908 , 0.08562706,
0.0839014 , 0.0849072 , 0.08338395, 0.08783487, 0.09463609,
0.10332336, 0.11806497, 0.11220297, 0.11589097, 0.11678405])
test_sharp = eval_sharp(self.long_data, 5, 0.00035)
self.assertAlmostEqual(np.nanmean(expected_sharp), test_sharp)
self.assertTrue(np.allclose(self.long_data['sharp'].values, expected_sharp, equal_nan=True))
def test_beta(self):
reference = self.test_data1
self.assertAlmostEqual(eval_beta(self.test_data2, reference, 'value'), -0.017148939)
self.assertAlmostEqual(eval_beta(self.test_data3, reference, 'value'), -0.042204233)
self.assertAlmostEqual(eval_beta(self.test_data4, reference, 'value'), -0.15652986)
self.assertAlmostEqual(eval_beta(self.test_data5, reference, 'value'), -0.049195532)
self.assertAlmostEqual(eval_beta(self.test_data6, reference, 'value'), -0.026995082)
self.assertAlmostEqual(eval_beta(self.test_data7, reference, 'value'), -0.01147809)
self.assertRaises(TypeError, eval_beta, [1, 2, 3], reference, 'value')
self.assertRaises(TypeError, eval_beta, self.test_data3, [1, 2, 3], 'value')
self.assertRaises(KeyError, eval_beta, self.test_data3, reference, 'not_found_value')
        # test beta calculation on long data
expected_beta = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.04988841, -0.05127618, -0.04692104, -0.04272652, -0.04080598,
-0.0493347 , -0.0460858 , -0.0416761 , -0.03691527, -0.03724924,
-0.03678865, -0.03987324, -0.03488321, -0.02567672, -0.02690303,
-0.03010128, -0.02437967, -0.02571932, -0.02455681, -0.02839811,
-0.03358653, -0.03396697, -0.03466321, -0.03050966, -0.0247583 ,
-0.01629325, -0.01880895, -0.01480403, -0.01348783, -0.00544294,
-0.00648176, -0.00467036, -0.01135331, -0.0156841 , -0.02340763,
-0.02615705, -0.02730771, -0.02906174, -0.02860664, -0.02412914,
-0.02066416, -0.01744816, -0.02185133, -0.02145285, -0.02681765,
-0.02827694, -0.02394581, -0.02744096, -0.02778825, -0.02703065,
-0.03160023, -0.03615371, -0.03681072, -0.04265126, -0.04344738,
-0.04232421, -0.04705272, -0.04533344, -0.04605934, -0.05272737,
-0.05156463, -0.05134196, -0.04730733, -0.04425352, -0.03869831,
-0.04159571, -0.04223998, -0.04346747, -0.04229844, -0.04740093,
-0.04992507, -0.04621232, -0.04477644, -0.0486915 , -0.04598224,
-0.04943463, -0.05006391, -0.05362256, -0.04994067, -0.05464769,
-0.05443275, -0.05513493, -0.05173594, -0.04500994, -0.04662891,
-0.03903505, -0.0419592 , -0.04307773, -0.03925718, -0.03711574,
-0.03992631, -0.0433058 , -0.04533641, -0.0461183 , -0.05600344,
-0.05758377, -0.05959874, -0.05605942, -0.06002859, -0.06253002,
-0.06747014, -0.06427915, -0.05931947, -0.05769974, -0.04791515,
-0.05175088, -0.05748039, -0.05385232, -0.05072975, -0.05052637,
-0.05125567, -0.05005785, -0.05325104, -0.04977727, -0.04947867,
-0.05148544, -0.05739156, -0.05742069, -0.06047279, -0.0558414 ,
-0.06086126, -0.06265151, -0.06411129, -0.06828052, -0.06781762,
-0.07083409, -0.07211207, -0.06799162, -0.06913295, -0.06775162,
-0.0696265 , -0.06678248, -0.06867502, -0.06581961, -0.07055823,
-0.06448184, -0.06097973, -0.05795587, -0.0618383 , -0.06130145,
-0.06050652, -0.05936661, -0.05749424, -0.0499 , -0.05050495,
-0.04962687, -0.05033439, -0.05070116, -0.05422009, -0.05369759,
-0.05548943, -0.05907353, -0.05933035, -0.05927918, -0.06227663,
-0.06011455, -0.05650432, -0.05828134, -0.05620949, -0.05715323,
-0.05482478, -0.05387113, -0.05095559, -0.05377999, -0.05334267,
-0.05220438, -0.04001521, -0.03892434, -0.03660782, -0.04282708,
-0.04324623, -0.04127048, -0.04227559, -0.04275226, -0.04347049,
-0.04125853, -0.03806295, -0.0330632 , -0.03155531, -0.03277152,
-0.03304518, -0.03878731, -0.03830672, -0.03727434, -0.0370571 ,
-0.04509224, -0.04207632, -0.04116198, -0.04545179, -0.04584584,
-0.05287341, -0.05417433, -0.05175836, -0.05005509, -0.04268674,
-0.03442321, -0.03457309, -0.03613426, -0.03524391, -0.03629479,
-0.04361312, -0.02626705, -0.02406115, -0.03046384, -0.03181044,
-0.03375164, -0.03661673, -0.04520779, -0.04926951, -0.05726738,
-0.0584486 , -0.06220608, -0.06800563, -0.06797431, -0.07562211,
-0.07481996, -0.07731229, -0.08413381, -0.09031826, -0.09691925,
-0.11018071, -0.11952675, -0.10826026, -0.11173895, -0.10756359,
-0.10775916, -0.11664559, -0.10505051, -0.10606547, -0.09855355,
-0.10004159, -0.10857084, -0.12209301, -0.11605758, -0.11105113,
-0.1155195 , -0.11569505, -0.10513348, -0.09611072, -0.10719791,
-0.10843965, -0.11025856, -0.10247839, -0.10554044, -0.10927647,
-0.10645088, -0.09982498, -0.10542734, -0.09631372, -0.08229695])
test_beta_mean = eval_beta(self.long_data, self.long_bench, 'value')
test_beta_roll = self.long_data['beta'].values
self.assertAlmostEqual(test_beta_mean, np.nanmean(expected_beta))
self.assertTrue(np.allclose(test_beta_roll, expected_beta, equal_nan=True))
def test_alpha(self):
reference = self.test_data1
self.assertAlmostEqual(eval_alpha(self.test_data2, 5, reference, 'value', 0.5), 11.63072977)
self.assertAlmostEqual(eval_alpha(self.test_data3, 5, reference, 'value', 0.5), 1.886590071)
self.assertAlmostEqual(eval_alpha(self.test_data4, 5, reference, 'value', 0.5), 6.827021872)
self.assertAlmostEqual(eval_alpha(self.test_data5, 5, reference, 'value', 0.92), -1.192265168)
self.assertAlmostEqual(eval_alpha(self.test_data6, 5, reference, 'value', 0.92), -1.437142359)
self.assertAlmostEqual(eval_alpha(self.test_data7, 5, reference, 'value', 0.92), -1.781311545)
        # test alpha calculation on long data
expected_alpha = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.09418119, -0.11188463, -0.17938358, -0.15588172, -0.1462678 ,
-0.13089586, -0.10780125, -0.09102891, -0.03987585, -0.06075686,
-0.02459503, -0.04104284, -0.0444565 , -0.04074585, 0.02191275,
0.02255955, -0.05583375, -0.05875539, -0.06055551, -0.09648245,
-0.07913737, -0.10627829, -0.12320965, -0.12368335, -0.1506743 ,
-0.15768033, -0.13638829, -0.13065298, -0.14537834, -0.127428 ,
-0.15504529, -0.18184636, -0.12652146, -0.09190138, -0.14847221,
-0.15840648, -0.1525789 , -0.11859418, -0.14700954, -0.16295761,
-0.16051645, -0.10364859, -0.11961134, -0.10258267, -0.08090148,
-0.05727746, -0.0429945 , -0.04672356, -0.03581408, -0.0439215 ,
-0.03429495, -0.0260362 , -0.01075022, 0.04931808, 0.02779388,
0.03984083, 0.08311951, 0.08995566, 0.10522428, 0.16159058,
0.14238174, 0.14759783, 0.16257712, 0.158908 , 0.11302115,
0.0909566 , 0.08272888, 0.15261884, 0.10546376, 0.04990313,
-0.01284111, -0.02720704, 0.00454725, -0.03965491, -0.03818265,
-0.02186992, -0.06574751, -0.04846454, -0.05204211, -0.06316498,
-0.05095099, -0.08502656, -0.04681162, -0.02362027, -0.02205091,
-0.07706374, -0.10371841, -0.14434688, -0.14797935, -0.09055402,
-0.06739549, -0.08824959, -0.04855888, -0.02291244, 0.04027138,
0.09370505, 0.11472939, 0.10243593, 0.0921445 , 0.07662648,
0.07946651, 0.05450718, 0.10497677, 0.09068334, 0.15462924,
0.14231034, 0.10544952, 0.09980256, 0.14035223, 0.14942974,
0.17624102, 0.19035477, 0.2500807 , 0.30724652, 0.31768915,
0.35007521, 0.38412975, 0.34356521, 0.33614463, 0.41206165,
0.33999177, 0.28045963, 0.34076789, 0.42220356, 0.42314636,
0.50790423, 0.47713348, 0.42520169, 0.50488411, 0.48705211,
0.46252601, 0.44325578, 0.42640573, 0.37986783, 0.30652822,
0.34503393, 0.2999069 , 0.24928617, 0.24730218, 0.24326897,
0.26657905, 0.27861168, 0.26392824, 0.32552649, 0.34177792,
0.37837011, 0.37025267, 0.4030612 , 0.41339361, 0.45076809,
0.40383354, 0.47093422, 0.52505036, 0.53614256, 0.5500943 ,
0.55319293, 0.59021451, 0.52358459, 0.50605947, 0.49359168,
0.47895956, 0.49320243, 0.4908336 , 0.47310767, 0.51821564,
0.55105932, 0.57291504, 0.5599809 , 0.46868842, 0.39620087,
0.42086934, 0.38317217, 0.45934108, 0.50048866, 0.53941991,
0.50676751, 0.46500915, 0.52993663, 0.51668366, 0.46405428,
0.44100603, 0.52726147, 0.51565458, 0.49186248, 0.49001081,
0.49367648, 0.56422294, 0.58882785, 0.51334664, 0.44386256,
0.35056709, 0.36490029, 0.39205071, 0.3677061 , 0.41134736,
0.42315067, 0.35356394, 0.40324562, 0.41340007, 0.46503322,
0.44355762, 0.34854314, 0.26412842, 0.28633753, 0.32335224,
0.30761141, 0.29709569, 0.29570487, 0.28000063, 0.32802547,
0.33967726, 0.42511212, 0.46252357, 0.44244974, 0.42152907,
0.45436727, 0.50482359, 0.57339198, 0.6573356 , 0.70912003,
0.60328917, 0.6395092 , 0.67015805, 0.64241557, 0.62779142,
0.55028063, 0.46448736, 0.43709245, 0.46777983, 0.51789439,
0.48594916, 0.4456216 , 0.52008189, 0.60548684, 0.62792473,
0.56645031, 0.62766439, 0.71829315, 0.69481356, 0.59550329,
0.58133754, 0.59014148, 0.58026655, 0.61719273, 0.67373203,
0.75573056, 0.89501633, 0.8347253 , 0.87964685, 0.89015835])
test_alpha_mean = eval_alpha(self.long_data, 100, self.long_bench, 'value')
test_alpha_roll = self.long_data['alpha'].values
self.assertAlmostEqual(test_alpha_mean, np.nanmean(expected_alpha))
self.assertTrue(np.allclose(test_alpha_roll, expected_alpha, equal_nan=True))
def test_calmar(self):
"""test evaluate function eval_calmar()"""
pass
def test_benchmark(self):
reference = self.test_data1
tr, yr = eval_benchmark(self.test_data2, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data3, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data4, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data5, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data6, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data7, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
def test_evaluate(self):
pass
class TestLoop(unittest.TestCase):
"""通过一个假设但精心设计的例子来测试loop_step以及loop方法的正确性"""
def setUp(self):
self.shares = ['share1', 'share2', 'share3', 'share4', 'share5', 'share6', 'share7']
self.dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08',
'2016/09/09', '2016/09/12', '2016/09/13', '2016/09/14', '2016/09/15',
'2016/09/16', '2016/09/19', '2016/09/20', '2016/09/21', '2016/09/22',
'2016/09/23', '2016/09/26', '2016/09/27', '2016/09/28', '2016/09/29',
'2016/09/30', '2016/10/10', '2016/10/11', '2016/10/12', '2016/10/13',
'2016/10/14', '2016/10/17', '2016/10/18', '2016/10/19', '2016/10/20',
'2016/10/21', '2016/10/23', '2016/10/24', '2016/10/25', '2016/10/26',
'2016/10/27', '2016/10/29', '2016/10/30', '2016/10/31', '2016/11/01',
'2016/11/02', '2016/11/05', '2016/11/06', '2016/11/07', '2016/11/08',
'2016/11/09', '2016/11/12', '2016/11/13', '2016/11/14', '2016/11/15',
'2016/11/16', '2016/11/19', '2016/11/20', '2016/11/21', '2016/11/22']
self.dates = [pd.Timestamp(date_text) for date_text in self.dates]
self.prices = np.array([[5.35, 5.09, 5.03, 4.98, 4.50, 5.09, 4.75],
[5.66, 4.84, 5.21, 5.44, 4.35, 5.06, 4.48],
[5.79, 4.60, 5.02, 5.45, 4.07, 4.76, 4.56],
[5.56, 4.63, 5.50, 5.74, 3.88, 4.62, 4.62],
[5.88, 4.64, 5.07, 5.46, 3.74, 4.63, 4.62],
[6.25, 4.51, 5.11, 5.45, 3.98, 4.25, 4.59],
[5.93, 4.96, 5.15, 5.24, 4.08, 4.13, 4.33],
[6.39, 4.65, 5.02, 5.47, 4.00, 3.91, 3.88],
[6.31, 4.26, 5.10, 5.58, 4.28, 3.77, 3.47],
[5.86, 3.77, 5.24, 5.36, 4.01, 3.43, 3.51],
[5.61, 3.39, 4.93, 5.38, 4.14, 3.68, 3.83],
[5.31, 3.76, 4.96, 5.30, 4.49, 3.63, 3.67],
[5.40, 4.06, 5.40, 5.77, 4.49, 3.94, 3.79],
[5.03, 3.87, 5.74, 5.75, 4.46, 4.40, 4.18],
[5.38, 3.91, 5.53, 6.15, 4.13, 4.03, 4.02],
[5.79, 4.13, 5.79, 6.04, 3.79, 3.93, 4.41],
[6.27, 4.27, 5.68, 6.01, 4.23, 3.50, 4.65],
[6.59, 4.57, 5.90, 5.71, 4.57, 3.39, 4.89],
[6.91, 5.04, 5.75, 5.23, 4.92, 3.30, 4.41],
[6.71, 5.31, 6.11, 5.47, 5.28, 3.25, 4.66],
[6.33, 5.40, 5.77, 5.79, 5.67, 2.94, 4.37],
[6.07, 5.21, 5.85, 5.82, 6.00, 2.71, 4.58],
[5.98, 5.06, 5.61, 5.61, 5.89, 2.55, 4.76],
[6.46, 4.69, 5.75, 5.31, 5.55, 2.21, 4.37],
[6.95, 5.12, 5.50, 5.24, 5.39, 2.29, 4.16],
[6.77, 5.27, 5.14, 5.41, 5.26, 2.21, 4.02],
[6.70, 5.72, 5.31, 5.60, 5.31, 2.04, 3.77],
[6.28, 6.10, 5.68, 5.28, 5.22, 2.39, 3.38],
[6.61, 6.27, 5.73, 4.99, 4.90, 2.30, 3.07],
[6.25, 6.49, 6.04, 5.09, 4.57, 2.41, 2.90],
[6.47, 6.16, 6.27, 5.39, 4.96, 2.40, 2.50],
[6.45, 6.26, 6.60, 5.58, 4.82, 2.79, 2.76],
[6.88, 6.39, 6.10, 5.33, 4.39, 2.67, 2.29],
[7.00, 6.58, 6.25, 5.48, 4.63, 2.27, 2.17],
[6.59, 6.20, 6.73, 5.10, 5.05, 2.09, 1.84],
[6.59, 5.70, 6.91, 5.39, 4.68, 2.55, 1.83],
[6.64, 5.20, 7.01, 5.30, 5.02, 2.22, 2.21],
[6.38, 5.37, 7.36, 5.04, 4.84, 2.59, 2.00],
[6.10, 5.40, 7.72, 5.51, 4.60, 2.59, 1.76],
[6.35, 5.22, 7.68, 5.43, 4.66, 2.95, 1.27],
[6.52, 5.38, 7.62, 5.23, 4.41, 2.69, 1.40],
[6.87, 5.53, 7.74, 4.99, 4.87, 2.20, 1.11],
[6.84, 6.03, 7.53, 5.43, 4.42, 2.69, 1.60],
[7.09, 5.77, 7.46, 5.40, 4.08, 2.65, 1.23],
[6.88, 5.66, 7.84, 5.60, 4.16, 2.63, 1.59],
[6.84, 6.08, 8.11, 5.66, 4.10, 2.14, 1.50],
[6.98, 5.62, 8.04, 6.01, 4.43, 2.39, 1.80],
[7.02, 5.63, 7.65, 5.64, 4.07, 1.95, 1.55],
[7.13, 6.11, 7.52, 5.67, 3.97, 2.32, 1.35],
[7.59, 6.03, 7.67, 5.30, 4.16, 2.69, 1.51],
[7.61, 6.27, 7.47, 4.91, 4.12, 2.51, 1.08],
[7.21, 6.28, 7.44, 5.37, 4.04, 2.62, 1.06],
[7.48, 6.52, 7.59, 5.75, 3.84, 2.16, 1.43],
[7.66, 7.00, 7.94, 6.08, 3.46, 2.35, 1.43],
[7.51, 7.34, 8.25, 6.58, 3.18, 2.31, 1.74],
[7.12, 7.34, 7.77, 6.78, 3.10, 1.96, 1.44],
[6.97, 7.68, 8.03, 7.20, 3.55, 2.35, 1.83],
[6.67, 8.09, 7.87, 7.65, 3.66, 2.58, 1.71],
[6.20, 7.68, 7.58, 8.00, 3.66, 2.40, 2.12],
[6.34, 7.58, 7.33, 7.92, 3.29, 2.20, 2.45],
[6.22, 7.46, 7.22, 8.30, 2.80, 2.31, 2.85],
[5.98, 7.59, 6.86, 8.46, 2.88, 2.16, 2.79],
[6.37, 7.19, 7.18, 7.99, 3.04, 2.16, 2.91],
[6.56, 7.40, 7.54, 8.19, 3.45, 2.20, 3.26],
[6.26, 7.48, 7.24, 8.61, 3.88, 1.73, 3.14],
[6.69, 7.93, 6.85, 8.66, 3.58, 1.93, 3.53],
[7.13, 8.23, 6.60, 8.91, 3.60, 2.25, 3.65],
[6.83, 8.35, 6.65, 9.08, 3.97, 2.69, 3.69],
[7.31, 8.44, 6.74, 9.34, 4.05, 2.59, 3.50],
[7.43, 8.35, 7.19, 8.96, 4.40, 2.14, 3.25],
[7.54, 8.58, 7.14, 8.98, 4.06, 1.68, 3.64],
[7.18, 8.82, 6.88, 8.50, 3.60, 1.98, 4.00],
[7.21, 9.09, 7.14, 8.65, 3.61, 2.14, 3.63],
[7.45, 9.02, 7.30, 8.94, 4.10, 1.89, 3.78],
[7.37, 8.87, 6.95, 8.63, 3.74, 1.97, 3.42],
[6.88, 9.22, 7.02, 8.65, 4.02, 1.99, 3.76],
[7.08, 9.04, 7.38, 8.40, 3.95, 2.37, 3.62],
[6.75, 8.60, 7.50, 8.38, 3.81, 2.14, 3.67],
[6.60, 8.48, 7.60, 8.23, 3.71, 2.35, 3.61],
[6.21, 8.71, 7.15, 8.04, 3.94, 1.86, 3.39],
[6.36, 8.79, 7.30, 7.91, 4.43, 2.14, 3.43],
[6.82, 8.93, 7.80, 7.57, 4.07, 2.39, 3.33],
[6.45, 9.36, 8.15, 7.73, 4.04, 2.53, 3.03],
[6.85, 9.68, 8.40, 7.74, 4.34, 2.47, 3.28],
[6.48, 10.16, 8.87, 8.07, 4.80, 2.93, 3.46],
[6.10, 10.56, 8.53, 7.99, 5.18, 3.09, 3.25],
[5.64, 10.63, 8.94, 7.92, 4.90, 2.93, 2.95],
[6.01, 10.55, 8.52, 8.40, 5.40, 3.22, 2.87],
[6.21, 10.65, 8.80, 8.80, 5.73, 3.06, 2.63],
[6.61, 10.55, 8.92, 8.47, 5.62, 2.90, 2.40],
[7.02, 10.19, 9.20, 8.07, 5.20, 2.68, 2.53],
[7.04, 10.48, 8.71, 7.87, 4.85, 2.46, 2.96],
[6.77, 10.36, 8.25, 8.02, 5.18, 2.41, 3.26],
[7.09, 10.03, 8.19, 8.39, 4.72, 2.74, 2.97],
[6.65, 10.24, 7.80, 8.69, 4.62, 3.15, 3.16],
[7.07, 10.01, 7.69, 8.81, 4.55, 3.40, 3.58],
[6.80, 10.14, 7.23, 8.99, 4.37, 3.82, 3.23],
[6.79, 10.31, 6.98, 9.10, 4.26, 4.02, 3.62],
[6.48, 9.88, 7.07, 8.90, 4.25, 3.76, 3.13],
[6.39, 10.05, 6.95, 8.87, 4.59, 4.10, 2.93]])
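        # Trading signals for each share on each date. Judging from the expected results and the
        # loop_step assertions below (not from qt's documentation), a positive signal appears to
        # buy that fraction of the current total portfolio value, while a negative signal sells
        # that fraction of the existing holding (-1 clears the position).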
self.op_signals = np.array([[0, 0, 0, 0, 0.25, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0.1, 0.15],
[0.2, 0.2, 0, 0, 0, 0, 0],
[0, 0, 0.1, 0, 0, 0, 0],
[0, 0, 0, 0, -0.75, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[-0.333, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, -0.5, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, -1],
[0, 0, 0, 0, 0.2, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[-0.5, 0, 0, 0.15, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0.2, 0, -1, 0.2, 0],
[0.5, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0.2, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, -0.5, 0.2],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0.2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0.15, 0, 0],
[-1, 0, 0.25, 0.25, 0, 0.25, 0],
[0, 0, 0, 0, 0, 0, 0],
[0.25, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0.2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, -1, 0, 0.2],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, -1, 0, 0, 0, 0, 0],
[-1, 0, 0.15, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
self.cash = qt.CashPlan(['2016/07/01', '2016/08/12', '2016/09/23'], [10000, 10000, 10000])
self.rate = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=0,
sell_min=0,
slipage=0)
self.rate2 = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=10,
sell_min=5,
slipage=0)
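        # rate2 differs from self.rate only in the minimum fees (buy_min=10, sell_min=5);
        # it is used further below in the loop test with non-zero moq values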
self.op_signal_df = pd.DataFrame(self.op_signals, index=self.dates, columns=self.shares)
self.history_list = pd.DataFrame(self.prices, index=self.dates, columns=self.shares)
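        # Expected looping results. Judging from the loop_step assertions below, each row appears
        # to hold the holdings of the 7 shares followed by the available cash, the fee charged,
        # and the total portfolio value.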
self.res = np.array([[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 205.065, 321.089, 5059.722, 0.000, 9761.111],
[346.982, 416.679, 0.000, 0.000, 555.556, 205.065, 321.089, 1201.278, 0.000, 9646.112],
[346.982, 416.679, 191.037, 0.000, 555.556, 205.065, 321.089, 232.719, 0.000, 9685.586],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9813.218],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9803.129],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9608.020],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9311.573],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8883.625],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8751.390],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8794.181],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9136.570],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9209.359],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9093.829],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9387.554],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9585.959],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 9928.777],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10060.381],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10281.002],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10095.561],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 0.000, 4506.393, 0.000, 10029.957],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9875.613],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9614.946],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9824.172],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9732.574],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9968.339],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 10056.158],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9921.492],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9894.162],
[115.719, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 6179.774, 0.000, 20067.937],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21133.508],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20988.848],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20596.743],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 19910.773],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20776.707],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20051.797],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20725.388],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20828.880],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21647.181],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21310.169],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20852.099],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21912.395],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21937.828],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21962.458],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21389.402],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22027.453],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 20939.999],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21250.064],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22282.781],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21407.066],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21160.237],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21826.768],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22744.940],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23466.118],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22017.882],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23191.466],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23099.082],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22684.767],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22842.135],
[1073.823, 416.679, 735.644, 269.850, 1785.205, 938.697, 1339.207, 5001.425, 0, 33323.836],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 32820.290],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 33174.614],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35179.466],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 34465.195],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 34712.354],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35755.550],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37895.223],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37854.284],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37198.374],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35916.711],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35806.937],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 36317.592],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37103.973],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35457.883],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 36717.685],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37641.463],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 36794.298],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37073.817],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35244.299],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37062.382],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37420.067],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 38089.058],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 39260.542],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 42609.684],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 43109.309],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 42283.408],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 43622.444],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 42830.254],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 41266.463],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 41164.839],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 41797.937],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 42440.861],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 42113.839],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 43853.588],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 46216.760],
[0.000, 0.000, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 5140.743, 0.000, 45408.737],
[0.000, 0.000, 2027.188, 719.924, 0.000, 2701.488, 4379.099, 0.000, 0.000, 47413.401],
[0.000, 0.000, 2027.188, 719.924, 0.000, 2701.488, 4379.099, 0.000, 0.000, 44603.718],
[0.000, 0.000, 2027.188, 719.924, 0.000, 2701.488, 4379.099, 0.000, 0.000, 44381.544]])
def test_loop_step(self):
cash, amounts, fee, value = qt.core._loop_step(pre_cash=10000,
pre_amounts=np.zeros(7, dtype='float'),
op=self.op_signals[0],
prices=self.prices[0],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
self.assertAlmostEqual(value, 10000.00)
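        # arithmetic behind the expected day-1 numbers: the only non-zero signal is 0.25 for
        # share5, so 0.25 * 10000 = 2500 is spent at a price of 4.50, buying 2500 / 4.50 = 555.56
        # shares and leaving 7500 in cash; with zero fees under self.rate the total value stays 10000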
cash, amounts, fee, value = qt.core._loop_step(pre_cash=5059.722222,
pre_amounts=np.array([0, 0, 0, 0, 555.5555556,
205.0653595, 321.0891813]),
op=self.op_signals[3],
prices=self.prices[3],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 1201.2775195, 5)
self.assertTrue(np.allclose(amounts, np.array([346.9824373, 416.6786936, 0, 0,
555.5555556, 205.0653595, 321.0891813])))
self.assertAlmostEqual(value, 9646.111756, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=6179.77423,
pre_amounts=np.array([115.7186428, 416.6786936, 735.6441811,
269.8495646, 0, 1877.393446, 0]),
op=self.op_signals[31],
prices=self.prices[31],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 0, 5)
self.assertTrue(np.allclose(amounts, np.array([1073.823175, 416.6786936, 735.6441811,
269.8495646, 0, 1877.393446, 0])))
self.assertAlmostEqual(value, 21133.50798, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=10000,
pre_amounts=np.array([1073.823175, 416.6786936, 735.6441811,
269.8495646, 0, 938.6967231, 1339.207325]),
op=self.op_signals[60],
prices=self.prices[60],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 5001.424618, 5)
self.assertTrue(np.allclose(amounts, np.array([1073.823175, 416.6786936, 735.6441811, 269.8495646,
1785.205494, 938.6967231, 1339.207325])))
self.assertAlmostEqual(value, 33323.83588, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=cash,
pre_amounts=amounts,
op=self.op_signals[61],
prices=self.prices[61],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 0, 5)
self.assertTrue(np.allclose(amounts, np.array([0, 416.6786936, 1290.69215, 719.9239224,
1785.205494, 2701.487958, 1339.207325])))
self.assertAlmostEqual(value, 32820.29007, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=915.6208259,
pre_amounts=np.array([0, 416.6786936, 1290.69215, 719.9239224,
0, 2701.487958, 4379.098907]),
op=self.op_signals[96],
prices=self.prices[96],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 5140.742779, 5)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 1290.69215, 719.9239224, 0, 2701.487958, 4379.098907])))
self.assertAlmostEqual(value, 45408.73655, 4)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=cash,
pre_amounts=amounts,
op=self.op_signals[97],
prices=self.prices[97],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 0, 5)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 2027.18825, 719.9239224, 0, 2701.487958, 4379.098907])))
self.assertAlmostEqual(value, 47413.40131, 4)
def test_loop(self):
res = apply_loop(op_list=self.op_signal_df,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res.values, self.res, 5))
        print(f'test assertion errors in apply_loop: detect incompatible moq values')
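        # the two calls below pass (moq_buy, moq_sell) pairs of (0, 1) and (1, 5); both are
        # expected to raise an AssertionError -- presumably because moq_buy and moq_sell must be
        # mutually consistent (this is an assumption about apply_loop's validation, not documented here)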
self.assertRaises(AssertionError,
apply_loop,
self.op_signal_df,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
self.op_signal_df,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
        print(f'test loop results with moq_buy=100 and moq_sell=1')
res = apply_loop(op_list=self.op_signal_df,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
class TestOperatorSubFuncs(unittest.TestCase):
def setUp(self):
mask_list = [[0.0, 0.0, 0.0, 0.0],
[0.5, 0.0, 0.0, 1.0],
[0.5, 0.0, 0.3, 1.0],
[0.5, 0.0, 0.3, 0.5],
[0.5, 0.5, 0.3, 0.5],
[0.5, 0.5, 0.3, 1.0],
[0.3, 0.5, 0.0, 1.0],
[0.3, 1.0, 0.0, 1.0]]
signal_list = [[0.0, 0.0, 0.0, 0.0],
[0.5, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.3, 0.0],
[0.0, 0.0, 0.0, -0.5],
[0.0, 0.5, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.5],
[-0.4, 0.0, -1.0, 0.0],
[0.0, 0.5, 0.0, 0.0]]
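        # signal_list is the expected output of mask_to_signal for mask_list above: an increase of
        # the target position shows up as the added fraction of total value (e.g. 0.0 -> 0.3 gives
        # 0.3), while a decrease shows up as the fraction of the existing holding that is sold
        # (e.g. 1.0 -> 0.5 gives -0.5, 0.3 -> 0.0 gives -1.0); this reading is inferred from the
        # data itself rather than from qt's documentation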
mask_multi = [[[0, 0, 1, 1, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 0, 1, 1],
[0, 1, 0, 0, 1],
[1, 1, 0, 0, 1],
[1, 1, 0, 0, 1],
[1, 0, 0, 0, 1]],
[[0, 0, 1, 0, 1],
[0, 1, 1, 1, 1],
[1, 1, 0, 1, 1],
[1, 1, 1, 0, 0],
[1, 1, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 1, 0, 0, 0],
[1, 1, 0, 1, 0],
[0, 1, 0, 1, 0]],
[[0, 0, 0., 0, 1],
[0, 0, 1., 0, 1],
[0, 0, 1., 0, 1],
[1, 0, 1., 0, 1],
[1, 1, .5, 1, 1],
[1, 0, .5, 1, 0],
[1, 1, .5, 1, 0],
[0, 1, 0., 0, 0],
[1, 0, 0., 0, 0],
[0, 1, 0., 0, 0]]]
signal_multi = [[[0., 0., 1., 1., 0.],
[0., 1., 0., 0., 0.],
[1., 0., 0., 0., 0.],
[0., 0., 0., 0., 1.],
[0., 0., 0., 0., 0.],
[0., 0., -1., 0., 0.],
[-1., 0., 0., -1., 0.],
[1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., -1., 0., 0., 0.]],
[[0., 0., 1., 0., 1.],
[0., 1., 0., 1., 0.],
[1., 0., -1., 0., 0.],
[0., 0., 1., -1., -1.],
[0., 0., -1., 0., 0.],
[0., -1., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 1., 0., 0., 0.],
[0., 0., 0., 1., 0.],
[-1., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 1.],
[0., 0., 1., 0., 0.],
[0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0.],
[0., 1., -0.5, 1., 0.],
[0., -1., 0., 0., -1.],
[0., 1., 0., 0., 0.],
[-1., 0., -1., -1., 0.],
[1., -1., 0., 0., 0.],
[-1., 1., 0., 0., 0.]]]
self.mask = np.array(mask_list)
self.multi_mask = np.array(mask_multi)
self.correct_signal = np.array(signal_list)
self.correct_multi_signal = np.array(signal_multi)
self.op = qt.Operator()
def test_ls_blend(self):
"""测试多空蒙板的混合器,三种混合方式均需要测试"""
ls_mask1 = [[0.0, 0.0, 0.0, -0.0],
[1.0, 0.0, 0.0, -1.0],
[1.0, 0.0, 1.0, -1.0],
[1.0, 0.0, 1.0, -1.0],
[1.0, 1.0, 1.0, -1.0],
[1.0, 1.0, 1.0, -1.0],
[0.0, 1.0, 0.0, -1.0],
[0.0, 1.0, 0.0, -1.0]]
ls_mask2 = [[0.0, 0.0, 0.5, -0.5],
[0.0, 0.0, 0.5, -0.3],
[0.0, 0.5, 0.5, -0.0],
[0.5, 0.5, 0.3, -0.0],
[0.5, 0.5, 0.3, -0.3],
[0.5, 0.5, 0.0, -0.5],
[0.3, 0.5, 0.0, -1.0],
[0.3, 1.0, 0.0, -1.0]]
ls_mask3 = [[0.5, 0.0, 1.0, -0.4],
[0.4, 0.0, 1.0, -0.3],
[0.3, 0.0, 0.8, -0.2],
[0.2, 0.0, 0.6, -0.1],
[0.1, 0.2, 0.4, -0.2],
[0.1, 0.3, 0.2, -0.5],
[0.1, 0.4, 0.0, -0.5],
[0.1, 0.5, 0.0, -1.0]]
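        # The expected arrays below can be reproduced by hand from the three masks above: 'avg' is
        # their element-wise mean and 'combo' their element-wise sum, while 'str-T' presumably
        # thresholds the summed mask at T (long above +T, short below -T, flat otherwise),
        # 'pos-N(-T)' presumably takes a position only where at least N strategies exceed the
        # threshold T, and 'avg_pos-N(-T)' presumably averages the masks but zeroes positions held
        # by fewer than N strategies. The exact semantics belong to qt's ls blender; only these
        # worked examples are asserted here.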
# result with blender 'avg'
ls_blnd_avg = [[0.16666667, 0.00000000, 0.50000000, -0.3],
[0.46666667, 0.00000000, 0.50000000, -0.53333333],
[0.43333333, 0.16666667, 0.76666667, -0.4],
[0.56666667, 0.16666667, 0.63333333, -0.36666667],
[0.53333333, 0.56666667, 0.56666667, -0.5],
[0.53333333, 0.60000000, 0.40000000, -0.66666667],
[0.13333333, 0.63333333, 0.00000000, -0.83333333],
[0.13333333, 0.83333333, 0.00000000, -1.]]
# result with blender 'str-1.5'
ls_blnd_str_15 = [[0, 0, 1, 0],
[0, 0, 1, -1],
[0, 0, 1, 0],
[1, 0, 1, 0],
[1, 1, 1, -1],
[1, 1, 0, -1],
[0, 1, 0, -1],
[0, 1, 0, -1]]
# result with blender 'pos-2' == 'pos-2-0'
ls_blnd_pos_2 = [[0, 0, 1, -1],
[1, 0, 1, -1],
[1, 0, 1, -1],
[1, 0, 1, -1],
[1, 1, 1, -1],
[1, 1, 1, -1],
[1, 1, 0, -1],
[1, 1, 0, -1]]
# result with blender 'pos-2-0.25'
ls_blnd_pos_2_25 = [[0, 0, 1, -1],
[1, 0, 1, -1],
[1, 0, 1, 0],
[1, 0, 1, 0],
[1, 1, 1, -1],
[1, 1, 0, -1],
[0, 1, 0, -1],
[0, 1, 0, -1]]
        # result with blender 'avg_pos-2' == 'avg_pos-2-0'
ls_blnd_avg_pos_2 = [[0.00000000, 0.00000000, 0.50000000, -0.3],
[0.46666667, 0.00000000, 0.50000000, -0.53333333],
[0.43333333, 0.00000000, 0.76666667, -0.4],
[0.56666667, 0.00000000, 0.63333333, -0.36666667],
[0.53333333, 0.56666667, 0.56666667, -0.5],
[0.53333333, 0.60000000, 0.40000000, -0.66666667],
[0.13333333, 0.63333333, 0.00000000, -0.83333333],
[0.13333333, 0.83333333, 0.00000000, -1.]]
# result with blender 'avg_pos-2-0.25'
ls_blnd_avg_pos_2_25 = [[0.00000000, 0.00000000, 0.50000000, -0.3],
[0.46666667, 0.00000000, 0.50000000, -0.53333333],
[0.43333333, 0.00000000, 0.76666667, 0.00000000],
[0.56666667, 0.00000000, 0.63333333, 0.00000000],
[0.53333333, 0.56666667, 0.56666667, -0.5],
[0.53333333, 0.60000000, 0.00000000, -0.66666667],
[0.00000000, 0.63333333, 0.00000000, -0.83333333],
[0.00000000, 0.83333333, 0.00000000, -1.]]
# result with blender 'combo'
ls_blnd_combo = [[0.5, 0., 1.5, -0.9],
[1.4, 0., 1.5, -1.6],
[1.3, 0.5, 2.3, -1.2],
[1.7, 0.5, 1.9, -1.1],
[1.6, 1.7, 1.7, -1.5],
[1.6, 1.8, 1.2, -2.],
[0.4, 1.9, 0., -2.5],
[0.4, 2.5, 0., -3.]]
ls_masks = np.array([np.array(ls_mask1), np.array(ls_mask2), np.array(ls_mask3)])
# test A: the ls_blender 'str-T'
self.op.set_blender('ls', 'str-1.5')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'test A: result of ls_blender: str-1.5: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_str_15))
# test B: the ls_blender 'pos-N-T'
self.op.set_blender('ls', 'pos-2')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test B-1: result of ls_blender: pos-2: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_pos_2))
self.op.set_blender('ls', 'pos-2-0.25')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test B-2: result of ls_blender: pos-2-0.25: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_pos_2_25))
# test C: the ls_blender 'avg_pos-N-T'
self.op.set_blender('ls', 'avg_pos-2')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test C-1: result of ls_blender: avg_pos-2: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_pos_2, 5))
self.op.set_blender('ls', 'avg_pos-2-0.25')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test C-2: result of ls_blender: avg_pos-2-0.25: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_pos_2_25, 5))
# test D: the ls_blender 'avg'
self.op.set_blender('ls', 'avg')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test D: result of ls_blender: avg: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_avg))
# test E: the ls_blender 'combo'
self.op.set_blender('ls', 'combo')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test E: result of ls_blender: combo: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_combo))
def test_sel_blend(self):
"""测试选股蒙板的混合器,包括所有的混合模式"""
# step2, test blending of sel masks
pass
def test_bs_blend(self):
"""测试买卖信号混合模式"""
# step3, test blending of op signals
pass
def test_unify(self):
print('Testing Unify functions\n')
l1 = np.array([[3, 2, 5], [5, 3, 2]])
res = qt.unify(l1)
target = np.array([[0.3, 0.2, 0.5], [0.5, 0.3, 0.2]])
        self.assertIs(np.allclose(res, target), True, 'each row should sum to 1')
l1 = np.array([[1, 1, 1, 1, 1], [2, 2, 2, 2, 2]])
res = qt.unify(l1)
target = np.array([[0.2, 0.2, 0.2, 0.2, 0.2], [0.2, 0.2, 0.2, 0.2, 0.2]])
        self.assertIs(np.allclose(res, target), True, 'each row should sum to 1')
def test_mask_to_signal(self):
signal = qt.mask_to_signal(self.mask)
print(f'Test A: single mask to signal, result: \n{signal}')
self.assertTrue(np.allclose(signal, self.correct_signal))
signal = qt.mask_to_signal(self.multi_mask)
        print(f'Test B: multiple masks to signal, result: \n{signal}')
self.assertTrue(np.allclose(signal, self.correct_multi_signal))
class TestLSStrategy(qt.RollingTiming):
"""用于test测试的简单多空蒙板生成策略。基于RollingTiming滚动择时方法生成
该策略有两个参数,N与Price
N用于计算OHLC价格平均值的N日简单移动平均,判断,当移动平均值大于等于Price时,状态为看多,否则为看空
"""
def __init__(self):
super().__init__(stg_name='test_LS',
stg_text='test long/short strategy',
par_count=2,
par_types='discr, conti',
par_bounds_or_enums=([1, 5], [2, 10]),
data_types='close, open, high, low',
data_freq='d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
n, price = params
h = hist_data.T
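        # data_types is 'close, open, high, low', so after transposing, h[0]..h[3] are the close,
        # open, high and low series; avg below is therefore the mean OHLC price per day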
avg = (h[0] + h[1] + h[2] + h[3]) / 4
ma = sma(avg, n)
if ma[-1] < price:
return 0
else:
return 1
class TestSelStrategy(qt.SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='high, low, close',
data_freq='d',
sample_freq='10d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
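        # data_types is 'high, low, close', so hist_data[..., 2] is the close price; difper is the
        # latest close-to-close change divided by each share's mean price, and the two shares with
        # the highest ratio (argsort()[1:] keeps the top two of three) each receive a 0.5 weight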
avg = np.nanmean(hist_data, axis=(1, 2))
dif = (hist_data[:, :, 2] - np.roll(hist_data[:, :, 2], 1, 1))
dif_no_nan = np.array([arr[~np.isnan(arr)][-1] for arr in dif])
difper = dif_no_nan / avg
large2 = difper.argsort()[1:]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSelStrategyDiffTime(qt.SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='close, low, open',
data_freq='d',
sample_freq='w',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
avg = hist_data.mean(axis=1).squeeze()
difper = (hist_data[:, :, 0] - np.roll(hist_data[:, :, 0], 1))[:, -1] / avg
large2 = difper.argsort()[0:2]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSigStrategy(qt.SimpleTiming):
"""用于Test测试的简单信号生成策略,基于SimpleTiming策略生成
策略有三个参数,第一个参数为ratio,另外两个参数为price1以及price2
ratio是k线形状比例的阈值,定义为abs((C-O)/(H-L))。当这个比值小于ratio阈值时,判断该K线为十字交叉(其实还有丁字等多种情形,但这里做了
简化处理。
信号生成的规则如下:
1,当某个K线出现十字交叉,且昨收与今收之差大于price1时,买入信号
2,当某个K线出现十字交叉,且昨收与今收之差小于price2时,卖出信号
"""
def __init__(self):
super().__init__(stg_name='test_SIG',
stg_text='test signal creation strategy',
par_count=3,
par_types='conti, conti, conti',
par_bounds_or_enums=([2, 10], [0, 3], [0, 3]),
data_types='close, open, high, low',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
r, price1, price2 = params
h = hist_data.T
ratio = np.abs((h[0] - h[1]) / (h[3] - h[2]))
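# Note: with data_types='close, open, high, low', h[0] is close, h[1] open, h[2] high
# and h[3] low, so the denominator here is (low - high); its sign differs from the
# docstring's (H - L), but abs() makes the resulting ratio identical.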
diff = h[0] - np.roll(h[0], 1)
sig = np.where((ratio < r) & (diff > price1),
1,
np.where((ratio < r) & (diff < price2), -1, 0))
return sig
class TestOperator(unittest.TestCase):
"""全面测试Operator对象的所有功能。包括:
1, Strategy 参数的设置
2, 历史数据的获取与分配提取
3, 策略优化参数的批量设置和优化空间的获取
4, 策略输出值的正确性验证
5, 策略结果的混合结果确认
"""
def setUp(self):
"""prepare data for Operator test"""
print('start testing Operator object\n')
# build up test data: a 3-share, 50-day, 4-type matrix of prices that contains NaN
# values on some days for some of the shares
# for share1:
data_rows = 50
share1_close = [10.04, 10, 10, 9.99, 9.97, 9.99, 10.03, 10.03, 10.06, 10.06, 10.11,
10.09, 10.07, 10.06, 10.09, 10.03, 10.03, 10.06, 10.08, 10, 9.99,
10.03, 10.03, 10.06, 10.03, 9.97, 9.94, 9.83, 9.77, 9.84, 9.91, 9.93,
9.96, 9.91, 9.91, 9.88, 9.91, 9.64, 9.56, 9.57, 9.55, 9.57, 9.61, 9.61,
9.55, 9.57, 9.63, 9.64, 9.65, 9.62]
share1_open = [10.02, 10, 9.98, 9.97, 9.99, 10.01, 10.04, 10.06, 10.06, 10.11,
10.11, 10.07, 10.06, 10.09, 10.03, 10.02, 10.06, 10.08, 9.99, 10,
10.03, 10.02, 10.06, 10.03, 9.97, 9.94, 9.83, 9.78, 9.77, 9.91, 9.92,
9.97, 9.91, 9.9, 9.88, 9.91, 9.63, 9.64, 9.57, 9.55, 9.58, 9.61, 9.62,
9.55, 9.57, 9.61, 9.63, 9.64, 9.61, 9.56]
share1_high = [10.07, 10, 10, 10, 10.03, 10.03, 10.04, 10.09, 10.1, 10.14, 10.11, 10.1,
10.09, 10.09, 10.1, 10.05, 10.07, 10.09, 10.1, 10, 10.04, 10.04, 10.06,
10.09, 10.05, 9.97, 9.96, 9.86, 9.77, 9.92, 9.94, 9.97, 9.97, 9.92, 9.92,
9.92, 9.93, 9.64, 9.58, 9.6, 9.58, 9.62, 9.62, 9.64, 9.59, 9.62, 9.63,
9.7, 9.66, 9.64]
share1_low = [9.99, 10, 9.97, 9.97, 9.97, 9.98, 9.99, 10.03, 10.03, 10.04, 10.11, 10.07,
10.05, 10.03, 10.03, 10.01, 9.99, 10.03, 9.95, 10, 9.95, 10, 10.01, 9.99,
9.96, 9.89, 9.83, 9.77, 9.77, 9.8, 9.9, 9.91, 9.89, 9.89, 9.87, 9.85, 9.6,
9.64, 9.53, 9.55, 9.54, 9.55, 9.58, 9.54, 9.53, 9.53, 9.63, 9.64, 9.59, 9.56]
# for share2:
share2_close = [9.68, 9.87, 9.86, 9.87, 9.79, 9.82, 9.8, 9.66, 9.62, 9.58, 9.69, 9.78, 9.75,
9.96, 9.9, 10.04, 10.06, 10.08, 10.24, 10.24, 10.24, 9.86, 10.13, 10.12,
10.1, 10.25, 10.24, 10.22, 10.75, 10.64, 10.56, 10.6, 10.42, 10.25, 10.24,
10.49, 10.57, 10.63, 10.48, 10.37, 10.96, 11.02, np.nan, np.nan, 10.88, 10.87, 11.01,
11.01, 11.58, 11.8]
share2_open = [9.88, 9.88, 9.89, 9.75, 9.74, 9.8, 9.62, 9.65, 9.58, 9.67, 9.81, 9.8, 10,
9.95, 10.1, 10.06, 10.14, 9.9, 10.2, 10.29, 9.86, 9.48, 10.01, 10.24, 10.26,
10.24, 10.12, 10.65, 10.64, 10.56, 10.42, 10.43, 10.29, 10.3, 10.44, 10.6,
10.67, 10.46, 10.39, 10.9, 11.01, 11.01, np.nan, np.nan, 10.82, 11.02, 10.96,
11.55, 11.74, 11.8]
share2_high = [9.91, 10.04, 9.93, 10.04, 9.84, 9.88, 9.99, 9.7, 9.67, 9.71, 9.85, 9.9, 10,
10.2, 10.11, 10.18, 10.21, 10.26, 10.38, 10.47, 10.42, 10.07, 10.24, 10.27,
10.38, 10.43, 10.39, 10.65, 10.84, 10.65, 10.73, 10.63, 10.51, 10.35, 10.46,
10.63, 10.74, 10.76, 10.54, 11.02, 11.12, 11.17, np.nan, np.nan, 10.92, 11.15,
11.11, 11.55, 11.95, 11.93]
share2_low = [9.63, 9.84, 9.81, 9.74, 9.67, 9.72, 9.57, 9.54, 9.51, 9.47, 9.68, 9.63, 9.75,
9.65, 9.9, 9.93, 10.03, 9.8, 10.14, 10.09, 9.78, 9.21, 9.11, 9.68, 10.05,
10.12, 9.89, 9.89, 10.59, 10.43, 10.34, 10.32, 10.21, 10.2, 10.18, 10.36,
10.51, 10.41, 10.32, 10.37, 10.87, 10.95, np.nan, np.nan, 10.65, 10.71, 10.75,
10.91, 11.31, 11.58]
# for share3:
share3_close = [6.64, 7.26, 7.03, 6.87, np.nan, 6.64, 6.85, 6.7, 6.39, 6.22, 5.92, 5.91, 6.11,
5.91, 6.23, 6.28, 6.28, 6.27, np.nan, 5.56, 5.67, 5.16, 5.69, 6.32, 6.14, 6.25,
5.79, 5.26, 5.05, 5.45, 6.06, 6.21, 5.69, 5.46, 6.02, 6.69, 7.43, 7.72, 8.16,
7.83, 8.7, 8.71, 8.88, 8.54, 8.87, 8.87, 8.18, 7.8, 7.97, 8.25]
share3_open = [7.26, 7, 6.88, 6.91, np.nan, 6.81, 6.63, 6.45, 6.16, 6.24, 5.96, 5.97, 5.96,
6.2, 6.35, 6.11, 6.37, 5.58, np.nan, 5.65, 5.19, 5.42, 6.3, 6.15, 6.05, 5.89,
5.22, 5.2, 5.07, 6.04, 6.12, 5.85, 5.67, 6.02, 6.04, 7.07, 7.64, 7.99, 7.59,
8.73, 8.72, 8.97, 8.58, 8.71, 8.77, 8.4, 7.95, 7.76, 8.25, 7.51]
share3_high = [7.41, 7.31, 7.14, 7, np.nan, 6.82, 6.96, 6.85, 6.5, 6.34, 6.04, 6.02, 6.12, 6.38,
6.43, 6.46, 6.43, 6.27, np.nan, 6.01, 5.67, 5.67, 6.35, 6.32, 6.43, 6.36, 5.79,
5.47, 5.65, 6.04, 6.14, 6.23, 5.83, 6.25, 6.27, 7.12, 7.82, 8.14, 8.27, 8.92,
8.76, 9.15, 8.9, 9.01, 9.16, 9, 8.27, 7.99, 8.33, 8.25]
share3_low = [6.53, 6.87, 6.83, 6.7, np.nan, 6.63, 6.57, 6.41, 6.15, 6.07, 5.89, 5.82, 5.73, 5.81,
6.1, 6.06, 6.16, 5.57, np.nan, 5.51, 5.19, 5.12, 5.69, 6.01, 5.97, 5.86, 5.18, 5.19,
4.96, 5.45, 5.84, 5.85, 5.28, 5.42, 6.02, 6.69, 7.28, 7.64, 7.25, 7.83, 8.41, 8.66,
8.53, 8.54, 8.73, 8.27, 7.95, 7.67, 7.8, 7.51]
# for sel_finance test
shares_eps = np.array([[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, 0.2, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[0.1, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.3, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.15, np.nan, np.nan],
[np.nan, 0.1, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.2, np.nan, np.nan],
[np.nan, 0.5, np.nan],
[0.4, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[0.9, np.nan, np.nan],
[np.nan, np.nan, 0.1]])
self.date_indices = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14', '2016-07-15', '2016-07-18',
'2016-07-19', '2016-07-20', '2016-07-21', '2016-07-22',
'2016-07-25', '2016-07-26', '2016-07-27', '2016-07-28',
'2016-07-29', '2016-08-01', '2016-08-02', '2016-08-03',
'2016-08-04', '2016-08-05', '2016-08-08', '2016-08-09',
'2016-08-10', '2016-08-11', '2016-08-12', '2016-08-15',
'2016-08-16', '2016-08-17', '2016-08-18', '2016-08-19',
'2016-08-22', '2016-08-23', '2016-08-24', '2016-08-25',
'2016-08-26', '2016-08-29', '2016-08-30', '2016-08-31',
'2016-09-01', '2016-09-02', '2016-09-05', '2016-09-06',
'2016-09-07', '2016-09-08']
self.shares = ['000010', '000030', '000039']
self.types = ['close', 'open', 'high', 'low']
self.sel_finance_types = ['eps']
self.test_data_3D = np.zeros((3, data_rows, 4))
self.test_data_2D = np.zeros((data_rows, 3))
self.test_data_2D2 = np.zeros((data_rows, 4))
self.test_data_sel_finance = np.empty((3, data_rows, 1))
# Build up 3D data
self.test_data_3D[0, :, 0] = share1_close
self.test_data_3D[0, :, 1] = share1_open
self.test_data_3D[0, :, 2] = share1_high
self.test_data_3D[0, :, 3] = share1_low
self.test_data_3D[1, :, 0] = share2_close
self.test_data_3D[1, :, 1] = share2_open
self.test_data_3D[1, :, 2] = share2_high
self.test_data_3D[1, :, 3] = share2_low
self.test_data_3D[2, :, 0] = share3_close
self.test_data_3D[2, :, 1] = share3_open
self.test_data_3D[2, :, 2] = share3_high
self.test_data_3D[2, :, 3] = share3_low
self.test_data_sel_finance[:, :, 0] = shares_eps.T
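# shares_eps is laid out as (days, shares); transposing it maps the data into the
# (shares, days, htypes) layout expected here, with a single 'eps' htype column.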
self.hp1 = qt.HistoryPanel(values=self.test_data_3D,
levels=self.shares,
columns=self.types,
rows=self.date_indices)
print(f'in test Operator, history panel is created for timing test')
self.hp1.info()
self.hp2 = qt.HistoryPanel(values=self.test_data_sel_finance,
levels=self.shares,
columns=self.sel_finance_types,
rows=self.date_indices)
print(f'in test_Operator, history panel is created for selection finance test:')
self.hp2.info()
self.op = qt.Operator(selecting_types=['all'], timing_types='dma', ricon_types='urgent')
def test_info(self):
"""Test information output of Operator"""
print(f'test printing information of operator object')
# self.op.info()
def test_operator_ready(self):
"""test the method ready of Operator"""
pass
# print(f'operator is ready? "{self.op.ready}"')
def test_operator_add_strategy(self):
"""test adding strategies to Operator"""
pass
# self.assertIsInstance(self.op, qt.Operator)
# self.assertIsInstance(self.op.timing[0], qt.TimingDMA)
# self.assertIsInstance(self.op.selecting[0], qt.SelectingAll)
# self.assertIsInstance(self.op.ricon[0], qt.RiconUrgent)
# self.assertEqual(self.op.selecting_count, 1)
# self.assertEqual(self.op.strategy_count, 3)
# self.assertEqual(self.op.ricon_count, 1)
# self.assertEqual(self.op.timing_count, 1)
# self.assertEqual(self.op.ls_blender, 'pos-1')
# print(f'test adding strategies into existing op')
# print('test adding strategy by string')
# self.op.add_strategy('macd', 'timing')
# self.assertIsInstance(self.op.timing[0], qt.TimingDMA)
# self.assertIsInstance(self.op.timing[1], qt.TimingMACD)
# self.assertEqual(self.op.selecting_count, 1)
# self.assertEqual(self.op.strategy_count, 4)
# self.assertEqual(self.op.ricon_count, 1)
# self.assertEqual(self.op.timing_count, 2)
# self.assertEqual(self.op.ls_blender, 'pos-1')
# self.op.add_strategy('random', 'selecting')
# self.assertIsInstance(self.op.selecting[0], qt.TimingDMA)
# self.assertIsInstance(self.op.selecting[1], qt.TimingMACD)
# self.assertEqual(self.op.selecting_count, 2)
# self.assertEqual(self.op.strategy_count, 5)
# self.assertEqual(self.op.ricon_count, 1)
# self.assertEqual(self.op.timing_count, 2)
# self.assertEqual(self.op.selecting_blender, '0 or 1')
# self.op.add_strategy('none', 'ricon')
# self.assertIsInstance(self.op.ricon[0], qt.TimingDMA)
# self.assertIsInstance(self.op.ricon[1], qt.TimingMACD)
# self.assertEqual(self.op.selecting_count, 2)
# self.assertEqual(self.op.strategy_count, 6)
# self.assertEqual(self.op.ricon_count, 2)
# self.assertEqual(self.op.timing_count, 2)
# print('test adding strategy by list')
# self.op.add_strategy(['dma', 'macd'], 'timing')
# print('test adding strategy by object')
# test_ls = TestLSStrategy()
# self.op.add_strategy(test_ls, 'timing')
def test_operator_remove_strategy(self):
"""test removing strategies from Operator"""
pass
# self.op.remove_strategy(stg='macd')
def test_property_get(self):
self.assertIsInstance(self.op, qt.Operator)
self.assertIsInstance(self.op.timing[0], qt.TimingDMA)
self.assertIsInstance(self.op.selecting[0], qt.SelectingAll)
self.assertIsInstance(self.op.ricon[0], qt.RiconUrgent)
self.assertEqual(self.op.selecting_count, 1)
self.assertEqual(self.op.strategy_count, 3)
self.assertEqual(self.op.ricon_count, 1)
self.assertEqual(self.op.timing_count, 1)
print(self.op.strategies, '\n', [qt.TimingDMA, qt.SelectingAll, qt.RiconUrgent])
print(f'info of Timing strategy: \n{self.op.strategies[0].info()}')
self.assertEqual(len(self.op.strategies), 3)
self.assertIsInstance(self.op.strategies[0], qt.TimingDMA)
self.assertIsInstance(self.op.strategies[1], qt.SelectingAll)
self.assertIsInstance(self.op.strategies[2], qt.RiconUrgent)
self.assertEqual(self.op.strategy_count, 3)
self.assertEqual(self.op.op_data_freq, 'd')
self.assertEqual(self.op.op_data_types, ['close'])
self.assertEqual(self.op.opt_space_par, ([], []))
self.assertEqual(self.op.max_window_length, 270)
self.assertEqual(self.op.ls_blender, 'pos-1')
self.assertEqual(self.op.selecting_blender, '0')
self.assertEqual(self.op.ricon_blender, 'add')
self.assertEqual(self.op.opt_types, [0, 0, 0])
def test_prepare_data(self):
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sig = TestSigStrategy()
self.op = qt.Operator(timing_types=[test_ls],
selecting_types=[test_sel],
ricon_types=[test_sig])
too_early_cash = qt.CashPlan(dates='2016-01-01', amounts=10000)
early_cash = qt.CashPlan(dates='2016-07-01', amounts=10000)
on_spot_cash = qt.CashPlan(dates='2016-07-08', amounts=10000)
no_trade_cash = qt.CashPlan(dates='2016-07-08, 2016-07-30, 2016-08-11, 2016-09-03',
amounts=[10000, 10000, 10000, 10000])
late_cash = qt.CashPlan(dates='2016-12-31', amounts=10000)
multi_cash = qt.CashPlan(dates='2016-07-08, 2016-08-08', amounts=[10000, 10000])
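# The cash plans above set up the scenarios exercised below: on_spot_cash falls on
# a trading day inside the data range and is used for the successful prepare_data()
# call, while too_early_cash, early_cash, late_cash and no_trade_cash are expected
# to make prepare_data() raise, as asserted further down.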
self.op.set_parameter(stg_id='t-0',
pars={'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.op.set_parameter(stg_id='s-0',
pars=())
self.op.set_parameter(stg_id='r-0',
pars=(0.2, 0.02, -0.02))
self.op.prepare_data(hist_data=self.hp1,
cash_plan=on_spot_cash)
self.assertIsInstance(self.op._selecting_history_data, list)
self.assertIsInstance(self.op._timing_history_data, list)
self.assertIsInstance(self.op._ricon_history_data, list)
self.assertEqual(len(self.op._selecting_history_data), 1)
self.assertEqual(len(self.op._timing_history_data), 1)
self.assertEqual(len(self.op._ricon_history_data), 1)
sel_hist_data = self.op._selecting_history_data[0]
tim_hist_data = self.op._timing_history_data[0]
ric_hist_data = self.op._ricon_history_data[0]
print(f'in test_prepare_data in TestOperator:')
print('selecting history data:\n', sel_hist_data)
print('originally passed data in correct sequence:\n', self.test_data_3D[:, :, [2, 3, 0]])
print('difference is \n', sel_hist_data - self.test_data_3D[:, :, [2, 3, 0]])
self.assertTrue(np.allclose(sel_hist_data, self.test_data_3D[:, :, [2, 3, 0]], equal_nan=True))
self.assertTrue(np.allclose(tim_hist_data, self.test_data_3D, equal_nan=True))
self.assertTrue(np.allclose(ric_hist_data, self.test_data_3D[:, 3:, :], equal_nan=True))
# raises ValueError if an empty history panel is given
empty_hp = qt.HistoryPanel()
correct_hp = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 4)),
columns=self.types,
levels=self.shares,
rows=self.date_indices)
too_many_shares = qt.HistoryPanel(values=np.random.randint(10, size=(5, 50, 4)))
too_many_types = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 5)))
# raises Error when history panel is empty
self.assertRaises(ValueError,
self.op.prepare_data,
empty_hp,
on_spot_cash)
# raises Error when first investment date is too early
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
early_cash)
# raises Error when last investment date is too late
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
late_cash)
# raises Error when some of the investment dates are on no-trade-days
self.assertRaises(ValueError,
self.op.prepare_data,
correct_hp,
no_trade_cash)
# raises Error when number of shares in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_shares,
on_spot_cash)
# raises Error when the cash investment date is too early
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
too_early_cash)
# raises Error when number of d_types in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_types,
on_spot_cash)
# test the effect of data type sequence in strategy definition
def test_operator_generate(self):
"""
:return:
"""
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sig = TestSigStrategy()
self.op = qt.Operator(timing_types=[test_ls],
selecting_types=[test_sel],
ricon_types=[test_sig])
self.assertIsInstance(self.op, qt.Operator, 'Operator Creation Error')
self.op.set_parameter(stg_id='t-0',
pars={'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.op.set_parameter(stg_id='s-0',
pars=())
# calling prepare_data before the parameters of all strategies are set raises an AssertionError
self.assertRaises(AssertionError,
self.op.prepare_data,
hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
self.op.set_parameter(stg_id='r-0',
pars=(0.2, 0.02, -0.02))
self.op.prepare_data(hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
self.op.info()
op_list = self.op.create_signal(hist_data=self.hp1)
print(f'operation list is created: as following:\n {op_list}')
self.assertTrue(isinstance(op_list, pd.DataFrame))
self.assertEqual(op_list.shape, (26, 3))
# after removing the code that drops duplicated signals, the signal count grew from 23 to 26 and now
# contains three duplicated signals; dropping duplicates may remove signals that should be kept, see the
# comment around line 836 of create_signal() in operator.py
target_op_dates = ['2016/07/08', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/18', '2016/07/20', '2016/07/22', '2016/07/26',
'2016/07/27', '2016/07/28', '2016/08/02', '2016/08/03',
'2016/08/04', '2016/08/05', '2016/08/08', '2016/08/10',
'2016/08/16', '2016/08/18', '2016/08/24', '2016/08/26',
'2016/08/29', '2016/08/30', '2016/08/31', '2016/09/05',
'2016/09/06', '2016/09/08']
target_op_values = np.array([[0.0, 1.0, 0.0],
[0.5, -1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.5, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, -1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 1.0],
[-1.0, 0.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0]])
target_op = pd.DataFrame(data=target_op_values, index=target_op_dates, columns=['000010', '000030', '000039'])
target_op = target_op.rename(index=pd.Timestamp)
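# rename(index=pd.Timestamp) applies pd.Timestamp to every index label, turning the
# date strings above into Timestamps so the index is directly comparable with the
# datetime index of op_list.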
print(f'target operation list is as following:\n {target_op}')
dates_pairs = [[date1, date2, date1 == date2]
for date1, date2
in zip(target_op.index.strftime('%m-%d'), op_list.index.strftime('%m-%d'))]
signal_pairs = [[list(sig1), list(sig2), all(sig1 == sig2)]
for sig1, sig2
in zip(list(target_op.values), list(op_list.values))]
print(f'dates side by side:\n '
f'{dates_pairs}')
print(f'signals side by side:\n'
f'{signal_pairs}')
print([item[2] for item in dates_pairs])
print([item[2] for item in signal_pairs])
self.assertTrue(np.allclose(target_op.values, op_list.values, equal_nan=True))
self.assertTrue(all([date1 == date2
for date1, date2
in zip(target_op.index.strftime('%m-%d'), op_list.index.strftime('%m-%d'))]))
def test_operator_parameter_setting(self):
"""
:return:
"""
new_op = qt.Operator(selecting_types=['all'], timing_types='dma', ricon_types='urgent')
print(new_op.strategies, '\n', [qt.TimingDMA, qt.SelectingAll, qt.RiconUrgent])
print(f'info of Timing strategy in new op: \n{new_op.strategies[0].info()}')
self.op.set_parameter('t-0',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
self.op.set_parameter(stg_id='s-0',
pars=None,
opt_tag=1,
sample_freq='10d',
window_length=10,
data_types='close, open')
self.op.set_parameter(stg_id='r-0',
pars=None,
opt_tag=0,
sample_freq='d',
window_length=20,
data_types='close, open')
self.assertEqual(self.op.timing[0].pars, (5, 10, 5))
self.assertEqual(self.op.timing[0].par_boes, ((5, 10), (5, 15), (10, 15)))
self.assertEqual(self.op.op_data_freq, 'd')
self.assertEqual(self.op.op_data_types, ['close', 'high', 'open'])
self.assertEqual(self.op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (0, 1)], ['discr', 'discr', 'discr', 'conti']))
self.assertEqual(self.op.max_window_length, 20)
self.assertRaises(AssertionError, self.op.set_parameter, stg_id='t-1', pars=(1, 2))
self.assertRaises(AssertionError, self.op.set_parameter, stg_id='t1', pars=(1, 2))
self.assertRaises(AssertionError, self.op.set_parameter, stg_id=32, pars=(1, 2))
self.op.set_blender('selecting', '0 and 1 or 2')
self.op.set_blender('ls', 'str-1.2')
self.assertEqual(self.op.ls_blender, 'str-1.2')
self.assertEqual(self.op.selecting_blender, '0 and 1 or 2')
self.assertEqual(self.op.selecting_blender_expr, ['or', 'and', '0', '1', '2'])
self.assertEqual(self.op.ricon_blender, 'add')
self.assertRaises(ValueError, self.op.set_blender, 'select', '0and1')
self.assertRaises(TypeError, self.op.set_blender, 35, '0 and 1')
self.assertEqual(self.op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (0, 1)], ['discr', 'discr', 'discr', 'conti']))
self.assertEqual(self.op.opt_types, [1, 1, 0])
def test_exp_to_blender(self):
self.op.set_blender('selecting', '0 and 1 or 2')
self.assertEqual(self.op.selecting_blender_expr, ['or', 'and', '0', '1', '2'])
self.op.set_blender('selecting', '0 and ( 1 or 2 )')
self.assertEqual(self.op.selecting_blender_expr, ['and', '0', 'or', '1', '2'])
self.assertRaises(ValueError, self.op.set_blender, 'selecting', '0 and (1 or 2)')
def test_set_opt_par(self):
self.op.set_parameter('t-0',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
self.op.set_parameter(stg_id='s-0',
pars=(0.5,),
opt_tag=0,
sample_freq='10d',
window_length=10,
data_types='close, open')
self.op.set_parameter(stg_id='r-0',
pars=(9, -0.23),
opt_tag=1,
sample_freq='d',
window_length=20,
data_types='close, open')
self.assertEqual(self.op.timing[0].pars, (5, 10, 5))
self.assertEqual(self.op.selecting[0].pars, (0.5,))
self.assertEqual(self.op.ricon[0].pars, (9, -0.23))
self.assertEqual(self.op.opt_types, [1, 0, 1])
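# set_opt_par appears to distribute the flat tuple below over the strategies whose
# opt_tag is 1, in strategy order: the timing strategy consumes the first three values
# and the ricon strategy the last two, while the selecting strategy (opt_tag 0) keeps
# its original parameters -- this is what the assertions that follow verify.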
self.op.set_opt_par((5, 12, 9, 8, -0.1))
self.assertEqual(self.op.timing[0].pars, (5, 12, 9))
self.assertEqual(self.op.selecting[0].pars, (0.5,))
self.assertEqual(self.op.ricon[0].pars, (8, -0.1))
# test set_opt_par when opt_tag is set to be 2 (enumerate type of parameters)
self.assertRaises(ValueError, self.op.set_opt_par, (5, 12, 9, 8))
def test_stg_attribute_get_and_set(self):
self.stg = qt.TimingCrossline()
self.stg_type = 'TIMING'
self.stg_name = "CROSSLINE STRATEGY"
self.stg_text = 'Moving average crossline strategy, determine long/short position according to the cross ' \
'point' \
' of long and short term moving average prices '
self.pars = (35, 120, 10, 'buy')
self.par_boes = [(10, 250), (10, 250), (1, 100), ('buy', 'sell', 'none')]
self.par_count = 4
self.par_types = ['discr', 'discr', 'conti', 'enum']
self.opt_tag = 0
self.data_types = ['close']
self.data_freq = 'd'
self.sample_freq = 'd'
self.window_length = 270
self.assertEqual(self.stg.stg_type, self.stg_type)
self.assertEqual(self.stg.stg_name, self.stg_name)
self.assertEqual(self.stg.stg_text, self.stg_text)
self.assertEqual(self.stg.pars, self.pars)
self.assertEqual(self.stg.par_types, self.par_types)
self.assertEqual(self.stg.par_boes, self.par_boes)
self.assertEqual(self.stg.par_count, self.par_count)
self.assertEqual(self.stg.opt_tag, self.opt_tag)
self.assertEqual(self.stg.data_freq, self.data_freq)
self.assertEqual(self.stg.sample_freq, self.sample_freq)
self.assertEqual(self.stg.data_types, self.data_types)
self.assertEqual(self.stg.window_length, self.window_length)
self.stg.stg_name = 'NEW NAME'
self.stg.stg_text = 'NEW TEXT'
self.assertEqual(self.stg.stg_name, 'NEW NAME')
self.assertEqual(self.stg.stg_text, 'NEW TEXT')
self.stg.pars = (1, 2, 3, 4)
self.assertEqual(self.stg.pars, (1, 2, 3, 4))
self.stg.par_count = 3
self.assertEqual(self.stg.par_count, 3)
self.stg.par_boes = [(1, 10), (1, 10), (1, 10), (1, 10)]
self.assertEqual(self.stg.par_boes, [(1, 10), (1, 10), (1, 10), (1, 10)])
self.stg.par_types = ['conti', 'conti', 'discr', 'enum']
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'enum'])
self.stg.par_types = 'conti, conti, discr, conti'
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'conti'])
self.stg.data_types = 'close, open'
self.assertEqual(self.stg.data_types, ['close', 'open'])
self.stg.data_types = ['close', 'high', 'low']
self.assertEqual(self.stg.data_types, ['close', 'high', 'low'])
self.stg.data_freq = 'w'
self.assertEqual(self.stg.data_freq, 'w')
self.stg.window_length = 300
self.assertEqual(self.stg.window_length, 300)
def test_rolling_timing(self):
stg = TestLSStrategy()
stg_pars = {'000100': (5, 10),
'000200': (5, 10),
'000300': (5, 6)}
stg.set_pars(stg_pars)
history_data = self.hp1.values
output = stg.generate(hist_data=history_data)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
lsmask = np.array([[0., 0., 1.],
[0., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 0.],
[1., 1., 0.],
[1., 1., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.]])
# TODO: Issue to be solved: the np.nan values are converted to 0 in the lsmask, which may have
# TODO: unintended consequences; the handling of NaN values still needs to be resolved
self.assertEqual(output.shape, lsmask.shape)
self.assertTrue(np.allclose(output, lsmask, equal_nan=True))
def test_sel_timing(self):
stg = TestSelStrategy()
stg_pars = ()
stg.set_pars(stg_pars)
history_data = self.hp1['high, low, close', :, :]
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
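# _seg_periods splits the 50 history dates into sampling segments at the given
# sample_freq; per the assertions below, seg_pos holds the start position of each
# segment (with the final entry marking the end), seg_length the number of dates in
# each segment, and seg_count the number of segments.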
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
def test_simple_timing(self):
stg = TestSigStrategy()
stg_pars = (0.2, 0.02, -0.02)
stg.set_pars(stg_pars)
history_data = self.hp1['close, open, high, low', :, 3:50]
output = stg.generate(hist_data=history_data, shares=self.shares, dates=self.date_indices)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
sigmatrix = np.array([[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, -1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0]])
side_by_side_array = np.array([[i, out_line, sig_line]
for
i, out_line, sig_line
in zip(range(len(output)), output, sigmatrix)])
print(f'output and signal matrix lined up side by side is \n'
f'{side_by_side_array}')
self.assertEqual(sigmatrix.shape, output.shape)
self.assertTrue(np.allclose(output, sigmatrix))
def test_sel_finance(self):
"""Test selecting_finance strategy, test all built-in strategy parameters"""
stg = SelectingFinanceIndicator()
stg_pars = (False, 'even', 'greater', 0, 0, 0.67)
stg.set_pars(stg_pars)
stg.window_length = 5
stg.data_freq = 'd'
stg.sample_freq = '10d'
stg.sort_ascending = False
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg._poq = 0.67
history_data = self.hp2.values
print(f'Start to test financial selection parameter {stg_pars}')
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get minimum factor
stg_pars = (True, 'even', 'less', 1, 1, 0.67)
stg.sort_ascending = True
stg.condition = 'less'
stg.lbound = 1
stg.ubound = 1
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor in linear weight
stg_pars = (False, 'linear', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'linear'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor with proportion weighting
stg_pars = (False, 'proportion', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'proportion'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
# test single factor, get max factor with even weighting, threshold 0.2
stg_pars = (False, 'even', 'greater', 0.2, 0.2, 0.67)
stg.sort_ascending = False
stg.weighting = 'even'
stg.condition = 'greater'
stg.lbound = 0.2
stg.ubound = 0.2
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
class TestLog(unittest.TestCase):
def test_init(self):
pass
class TestConfig(unittest.TestCase):
"""测试Config对象以及QT_CONFIG变量的设置和获取值"""
def test_init(self):
pass
def test_invest(self):
pass
def test_pars_string_to_type(self):
_parse_string_kwargs('000300', 'asset_pool', _valid_qt_kwargs())
class TestHistoryPanel(unittest.TestCase):
def setUp(self):
print('start testing HistoryPanel object\n')
self.data = np.random.randint(10, size=(5, 10, 4))
self.index = pd.date_range(start='20200101', freq='d', periods=10)
self.index2 = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14']
self.index3 = '2016-07-01, 2016-07-04, 2016-07-05, 2016-07-06, 2016-07-07, ' \
'2016-07-08, 2016-07-11, 2016-07-12, 2016-07-13, 2016-07-14'
self.shares = '000100,000101,000102,000103,000104'
self.htypes = 'close,open,high,low'
self.data2 = np.random.randint(10, size=(10, 5))
self.data3 = np.random.randint(10, size=(10, 4))
self.data4 = np.random.randint(10, size=(10))
self.hp = qt.HistoryPanel(values=self.data, levels=self.shares, columns=self.htypes, rows=self.index)
self.hp2 = qt.HistoryPanel(values=self.data2, levels=self.shares, columns='close', rows=self.index)
self.hp3 = qt.HistoryPanel(values=self.data3, levels='000100', columns=self.htypes, rows=self.index2)
self.hp4 = qt.HistoryPanel(values=self.data4, levels='000100', columns='close', rows=self.index3)
self.hp5 = qt.HistoryPanel(values=self.data)
self.hp6 = qt.HistoryPanel(values=self.data, levels=self.shares, rows=self.index3)
def test_properties(self):
""" test all properties of HistoryPanel
"""
self.assertFalse(self.hp.is_empty)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.shape, (5, 10, 4))
self.assertSequenceEqual(self.hp.htypes, ['close', 'open', 'high', 'low'])
self.assertSequenceEqual(self.hp.shares, ['000100', '000101', '000102', '000103', '000104'])
self.assertSequenceEqual(list(self.hp.hdates), list(self.index))
self.assertDictEqual(self.hp.columns, {'close': 0, 'open': 1, 'high': 2, 'low': 3})
self.assertDictEqual(self.hp.levels, {'000100': 0, '000101': 1, '000102': 2, '000103': 3, '000104': 4})
row_dict = {Timestamp('2020-01-01 00:00:00', freq='D'): 0,
Timestamp('2020-01-02 00:00:00', freq='D'): 1,
Timestamp('2020-01-03 00:00:00', freq='D'): 2,
Timestamp('2020-01-04 00:00:00', freq='D'): 3,
Timestamp('2020-01-05 00:00:00', freq='D'): 4,
Timestamp('2020-01-06 00:00:00', freq='D'): 5,
Timestamp('2020-01-07 00:00:00', freq='D'): 6,
Timestamp('2020-01-08 00:00:00', freq='D'): 7,
Timestamp('2020-01-09 00:00:00', freq='D'): 8,
Timestamp('2020-01-10 00:00:00', freq='D'): 9}
self.assertDictEqual(self.hp.rows, row_dict)
def test_len(self):
""" test the function len(HistoryPanel)
:return:
"""
self.assertEqual(len(self.hp), 10)
def test_empty_history_panel(self):
"""测试空HP或者特殊HP如维度标签为纯数字的HP"""
test_hp = qt.HistoryPanel(self.data)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
self.assertEqual(test_hp.level_count, 5)
self.assertEqual(test_hp.row_count, 10)
self.assertEqual(test_hp.column_count, 4)
self.assertEqual(test_hp.shares, list(range(5)))
self.assertEqual(test_hp.hdates, list(pd.date_range(start='20200730', periods=10, freq='d')))
self.assertEqual(test_hp.htypes, list(range(4)))
self.assertTrue(np.allclose(test_hp.values, self.data))
print(f'shares: {test_hp.shares}\nhtypes: {test_hp.htypes}')
print(test_hp)
empty_hp = qt.HistoryPanel()
self.assertTrue(empty_hp.is_empty)
self.assertIsInstance(empty_hp, qt.HistoryPanel)
self.assertEqual(empty_hp.shape[0], 0)
self.assertEqual(empty_hp.shape[1], 0)
self.assertEqual(empty_hp.shape[2], 0)
self.assertEqual(empty_hp.level_count, 0)
self.assertEqual(empty_hp.row_count, 0)
self.assertEqual(empty_hp.column_count, 0)
def test_create_history_panel(self):
""" test the creation of a HistoryPanel object by passing all data explicitly
"""
self.assertIsInstance(self.hp, qt.HistoryPanel)
self.assertEqual(self.hp.shape[0], 5)
self.assertEqual(self.hp.shape[1], 10)
self.assertEqual(self.hp.shape[2], 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(list(self.hp.levels.keys()), self.shares.split(','))
self.assertEqual(list(self.hp.columns.keys()), self.htypes.split(','))
self.assertEqual(list(self.hp.rows.keys())[0], pd.Timestamp('20200101'))
self.assertIsInstance(self.hp2, qt.HistoryPanel)
self.assertEqual(self.hp2.shape[0], 5)
self.assertEqual(self.hp2.shape[1], 10)
self.assertEqual(self.hp2.shape[2], 1)
self.assertEqual(self.hp2.level_count, 5)
self.assertEqual(self.hp2.row_count, 10)
self.assertEqual(self.hp2.column_count, 1)
self.assertEqual(list(self.hp2.levels.keys()), self.shares.split(','))
self.assertEqual(list(self.hp2.columns.keys()), ['close'])
self.assertEqual(list(self.hp2.rows.keys())[0], pd.Timestamp('20200101'))
self.assertIsInstance(self.hp3, qt.HistoryPanel)
self.assertEqual(self.hp3.shape[0], 1)
self.assertEqual(self.hp3.shape[1], 10)
self.assertEqual(self.hp3.shape[2], 4)
self.assertEqual(self.hp3.level_count, 1)
self.assertEqual(self.hp3.row_count, 10)
self.assertEqual(self.hp3.column_count, 4)
self.assertEqual(list(self.hp3.levels.keys()), ['000100'])
self.assertEqual(list(self.hp3.columns.keys()), self.htypes.split(','))
self.assertEqual(list(self.hp3.rows.keys())[0], pd.Timestamp('2016-07-01'))
self.assertIsInstance(self.hp4, qt.HistoryPanel)
self.assertEqual(self.hp4.shape[0], 1)
self.assertEqual(self.hp4.shape[1], 10)
self.assertEqual(self.hp4.shape[2], 1)
self.assertEqual(self.hp4.level_count, 1)
self.assertEqual(self.hp4.row_count, 10)
self.assertEqual(self.hp4.column_count, 1)
self.assertEqual(list(self.hp4.levels.keys()), ['000100'])
self.assertEqual(list(self.hp4.columns.keys()), ['close'])
self.assertEqual(list(self.hp4.rows.keys())[0], pd.Timestamp('2016-07-01'))
self.hp5.info()
self.assertIsInstance(self.hp5, qt.HistoryPanel)
self.assertTrue(np.allclose(self.hp5.values, self.data))
self.assertEqual(self.hp5.shape[0], 5)
self.assertEqual(self.hp5.shape[1], 10)
self.assertEqual(self.hp5.shape[2], 4)
self.assertEqual(self.hp5.level_count, 5)
self.assertEqual(self.hp5.row_count, 10)
self.assertEqual(self.hp5.column_count, 4)
self.assertEqual(list(self.hp5.levels.keys()), [0, 1, 2, 3, 4])
self.assertEqual(list(self.hp5.columns.keys()), [0, 1, 2, 3])
self.assertEqual(list(self.hp5.rows.keys())[0], pd.Timestamp('2020-07-30'))
self.hp6.info()
self.assertIsInstance(self.hp6, qt.HistoryPanel)
self.assertTrue(np.allclose(self.hp6.values, self.data))
self.assertEqual(self.hp6.shape[0], 5)
self.assertEqual(self.hp6.shape[1], 10)
self.assertEqual(self.hp6.shape[2], 4)
self.assertEqual(self.hp6.level_count, 5)
self.assertEqual(self.hp6.row_count, 10)
self.assertEqual(self.hp6.column_count, 4)
self.assertEqual(list(self.hp6.levels.keys()), ['000100', '000101', '000102', '000103', '000104'])
self.assertEqual(list(self.hp6.columns.keys()), [0, 1, 2, 3])
self.assertEqual(list(self.hp6.rows.keys())[0], pd.Timestamp('2016-07-01'))
# Error testing during HistoryPanel creation
# shape does not match
self.assertRaises(AssertionError,
qt.HistoryPanel,
self.data,
levels=self.shares, columns='close', rows=self.index)
# values is not an np.ndarray
self.assertRaises(AssertionError,
qt.HistoryPanel,
list(self.data))
# dimension/shape does not match
self.assertRaises(AssertionError,
qt.HistoryPanel,
self.data2,
levels='000100', columns=self.htypes, rows=self.index)
# value dimension over 3
self.assertRaises(AssertionError,
qt.HistoryPanel,
np.random.randint(10, size=(5, 10, 4, 2)))
# label values are not valid
self.assertRaises(ValueError,
qt.HistoryPanel,
self.data2,
levels=self.shares, columns='close',
rows='a,b,c,d,e,f,g,h,i,j')
def test_history_panel_slicing(self):
"""测试HistoryPanel的各种切片方法
包括通过标签名称切片,通过数字切片,通过逗号分隔的标签名称切片,通过冒号分隔的标签名称切片等切片方式"""
self.assertTrue(np.allclose(self.hp['close'], self.data[:, :, 0:1]))
self.assertTrue(np.allclose(self.hp['close,open'], self.data[:, :, 0:2]))
self.assertTrue(np.allclose(self.hp[['close', 'open']], self.data[:, :, 0:2]))
self.assertTrue(np.allclose(self.hp['close:high'], self.data[:, :, 0:3]))
self.assertTrue(np.allclose(self.hp['close,high'], self.data[:, :, [0, 2]]))
self.assertTrue(np.allclose(self.hp[:, '000100'], self.data[0:1, :, ]))
self.assertTrue(np.allclose(self.hp[:, '000100,000101'], self.data[0:2, :]))
self.assertTrue(np.allclose(self.hp[:, ['000100', '000101']], self.data[0:2, :]))
self.assertTrue(np.allclose(self.hp[:, '000100:000102'], self.data[0:3, :]))
self.assertTrue(np.allclose(self.hp[:, '000100,000102'], self.data[[0, 2], :]))
self.assertTrue(np.allclose(self.hp['close,open', '000100,000102'], self.data[[0, 2], :, 0:2]))
print('start testing HistoryPanel')
data = np.random.randint(10, size=(10, 5))
# index = pd.date_range(start='20200101', freq='d', periods=10)
shares = '000100,000101,000102,000103,000104'
dtypes = 'close'
df = pd.DataFrame(data)
print('=========================\nTesting HistoryPanel creation from DataFrame')
hp = qt.dataframe_to_hp(df=df, shares=shares, htypes=dtypes)
hp.info()
hp = qt.dataframe_to_hp(df=df, shares='000100', htypes='close, open, high, low, middle', column_type='htypes')
hp.info()
print('=========================\nTesting HistoryPanel creation from initialization')
data = np.random.randint(10, size=(5, 10, 4)).astype('float')
index = pd.date_range(start='20200101', freq='d', periods=10)
dtypes = 'close, open, high,low'
data[0, [5, 6, 9], [0, 1, 3]] = np.nan
data[1:4, [4, 7, 6, 2], [1, 1, 3, 0]] = np.nan
data[4:5, [2, 9, 1, 2], [0, 3, 2, 1]] = np.nan
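# The three fancy-indexing assignments above punch NaN holes into specific
# (share, date, htype) cells so that the equal_nan=True comparisons below also
# exercise slicing of data that contains missing values.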
hp = qt.HistoryPanel(data, levels=shares, columns=dtypes, rows=index)
hp.info()
print('==========================\noutput all history data of the close htype\n')
self.assertTrue(np.allclose(hp['close', :, :], data[:, :, 0:1], equal_nan=True))
print(f'==========================\noutput all history data of the close and open htypes\n')
self.assertTrue(np.allclose(hp[[0, 1], :, :], data[:, :, 0:2], equal_nan=True))
print(f'==========================\noutput all htypes of the first share\n')
self.assertTrue(np.allclose(hp[:, [0], :], data[0:1, :, :], equal_nan=True))
print('==========================\noutput all shares and all history data for htypes 0, 1 and 2\n')
self.assertTrue(np.allclose(hp[[0, 1, 2]], data[:, :, 0:3], equal_nan=True))
print('==========================\noutput all history data of the close and high htypes\n')
self.assertTrue(np.allclose(hp[['close', 'high']], data[:, :, [0, 2]], equal_nan=True))
print('==========================\noutput all history data of htypes 0 and 1\n')
self.assertTrue(np.allclose(hp[[0, 1]], data[:, :, 0:2], equal_nan=True))
print('==========================\noutput all history data of the close and high htypes\n')
self.assertTrue(np.allclose(hp['close,high'], data[:, :, [0, 2]], equal_nan=True))
print('==========================\noutput the three htypes from close through high\n')
self.assertTrue(np.allclose(hp['close:high'], data[:, :, 0:3], equal_nan=True))
print('==========================\noutput all history data of shares 0, 1 and 3\n')
self.assertTrue(np.allclose(hp[:, [0, 1, 3]], data[[0, 1, 3], :, :], equal_nan=True))
print('==========================\noutput all history data of shares 000100 and 000102\n')
self.assertTrue(np.allclose(hp[:, ['000100', '000102']], data[[0, 2], :, :], equal_nan=True))
print('==========================\noutput history data of shares 0, 1 and 2\n', hp[:, 0: 3])
self.assertTrue(np.allclose(hp[:, 0: 3], data[0:3, :, :], equal_nan=True))
print('==========================\noutput all history data of shares 000100 and 000102\n')
self.assertTrue(np.allclose(hp[:, '000100, 000102'], data[[0, 2], :, :], equal_nan=True))
print('==========================\noutput days 0-7 of history data for all shares\n')
self.assertTrue(np.allclose(hp[:, :, 0:8], data[:, 0:8, :], equal_nan=True))
print('==========================\noutput days 0-7 of history data for share 000100\n')
self.assertTrue(np.allclose(hp[:, '000100', 0:8], data[0, 0:8, :], equal_nan=True))
print('==========================\nstart testing multi-axis slicing of HistoryPanel object')
print('==========================\noutput close and open data of shares 000100 and 000102\n',
hp['close,open', ['000100', '000102']])
print('==========================\noutput close and open data of shares 000100 and 000102\n',
hp['close,open', '000100, 000102'])
print(f'historyPanel: hp:\n{hp}')
print(f'data is:\n{data}')
hp.htypes = 'open,high,low,close'
hp.info()
hp.shares = ['000300', '600227', '600222', '000123', '000129']
hp.info()
def test_relabel(self):
new_shares_list = ['000001', '000002', '000003', '000004', '000005']
new_shares_str = '000001, 000002, 000003, 000004, 000005'
new_htypes_list = ['close', 'volume', 'value', 'exchange']
new_htypes_str = 'close, volume, value, exchange'
temp_hp = self.hp.copy()
temp_hp.re_label(shares=new_shares_list)
print(temp_hp.info())
print(temp_hp.htypes)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.htypes, temp_hp.htypes)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.shares, new_shares_list)
temp_hp = self.hp.copy()
temp_hp.re_label(shares=new_shares_str)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.htypes, temp_hp.htypes)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.shares, new_shares_list)
temp_hp = self.hp.copy()
temp_hp.re_label(htypes=new_htypes_list)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.shares, temp_hp.shares)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.htypes, new_htypes_list)
temp_hp = self.hp.copy()
temp_hp.re_label(htypes=new_htypes_str)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.shares, temp_hp.shares)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.htypes, new_htypes_list)
print(f'test errors raising')
temp_hp = self.hp.copy()
self.assertRaises(AssertionError, temp_hp.re_label, htypes=new_shares_str)
self.assertRaises(TypeError, temp_hp.re_label, htypes=123)
self.assertRaises(AssertionError, temp_hp.re_label, htypes='wrong input!')
def test_csv_to_hp(self):
pass
def test_hdf_to_hp(self):
pass
def test_hp_join(self):
# TODO: this needs to be strengthened: use concrete examples to confirm that the result of hp_join is correct,
# TODO: especially for different shares, htypes and hdates, and check whether they can still be combined
# TODO: correctly when they appear in different orders
print(f'join two simple HistoryPanels with same shares')
temp_hp = self.hp.join(self.hp2, same_shares=True)
self.assertIsInstance(temp_hp, qt.HistoryPanel)
def test_df_to_hp(self):
print(f'test converting DataFrame to HistoryPanel')
data = np.random.randint(10, size=(10, 5))
df1 = pd.DataFrame(data)
df2 = pd.DataFrame(data, columns=qt.str_to_list(self.shares))
df3 = pd.DataFrame(data[:, 0:4])
df4 = pd.DataFrame(data[:, 0:4], columns=qt.str_to_list(self.htypes))
hp = qt.dataframe_to_hp(df1, htypes='close')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, [0, 1, 2, 3, 4])
self.assertEqual(hp.htypes, ['close'])
self.assertEqual(hp.hdates, [pd.Timestamp('1970-01-01 00:00:00'),
pd.Timestamp('1970-01-01 00:00:00.000000001'),
pd.Timestamp('1970-01-01 00:00:00.000000002'),
pd.Timestamp('1970-01-01 00:00:00.000000003'),
pd.Timestamp('1970-01-01 00:00:00.000000004'),
pd.Timestamp('1970-01-01 00:00:00.000000005'),
pd.Timestamp('1970-01-01 00:00:00.000000006'),
pd.Timestamp('1970-01-01 00:00:00.000000007'),
pd.Timestamp('1970-01-01 00:00:00.000000008'),
pd.Timestamp('1970-01-01 00:00:00.000000009')])
hp = qt.dataframe_to_hp(df2, shares=self.shares, htypes='close')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, qt.str_to_list(self.shares))
self.assertEqual(hp.htypes, ['close'])
hp = qt.dataframe_to_hp(df3, shares='000100', column_type='htypes')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, ['000100'])
self.assertEqual(hp.htypes, [0, 1, 2, 3])
hp = qt.dataframe_to_hp(df4, shares='000100', htypes=self.htypes, column_type='htypes')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, ['000100'])
self.assertEqual(hp.htypes, qt.str_to_list(self.htypes))
hp.info()
self.assertRaises(KeyError, qt.dataframe_to_hp, df1)
def test_to_dataframe(self):
""" 测试HistoryPanel对象的to_dataframe方法
"""
print(f'START TEST == test_to_dataframe')
print(f'test converting test hp to dataframe with share == "000102":')
df_test = self.hp.to_dataframe(share='000102')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.htypes), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp[:, '000102'], values))
print(f'test DataFrame conversion with share == "000100"')
df_test = self.hp.to_dataframe(share='000100')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.htypes), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp[:, '000100'], values))
print(f'test DataFrame conversion error: type incorrect')
self.assertRaises(AssertionError, self.hp.to_dataframe, share=3.0)
print(f'test DataFrame error raising with share not found error')
self.assertRaises(KeyError, self.hp.to_dataframe, share='000300')
print(f'test DataFrame conversion with htype == "close"')
df_test = self.hp.to_dataframe(htype='close')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp['close'].T, values))
print(f'test DataFrame conversion with htype == "high"')
df_test = self.hp.to_dataframe(htype='high')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp['high'].T, values))
print(f'test DataFrame conversion with htype == "high" and dropna')
v = self.hp.values.astype('float')
v[:, 3, :] = np.nan
v[:, 4, :] = np.inf
test_hp = qt.HistoryPanel(v, levels=self.shares, columns=self.htypes, rows=self.index)
df_test = test_hp.to_dataframe(htype='high', dropna=True)
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates[:3]) + list(self.hp.hdates[4:]), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
target_values = test_hp['high'].T
target_values = target_values[np.where(~np.isnan(target_values))].reshape(9, 5)
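# Dropping the all-NaN row (hdates[3]) leaves 9 of the 10 dates for the 5 shares,
# hence the reshape to (9, 5); the np.inf row is kept because dropna only removes NaNs.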
self.assertTrue(np.allclose(target_values, values))
print(f'test DataFrame conversion with htype == "high", dropna and treat infs as na')
v = self.hp.values.astype('float')
v[:, 3, :] = np.nan
v[:, 4, :] = np.inf
test_hp = qt.HistoryPanel(v, levels=self.shares, columns=self.htypes, rows=self.index)
df_test = test_hp.to_dataframe(htype='high', dropna=True, inf_as_na=True)
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates[:3]) + list(self.hp.hdates[5:]), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
target_values = test_hp['high'].T
target_values = target_values[np.where(~np.isnan(target_values) & ~np.isinf(target_values))].reshape(8, 5)
self.assertTrue(np.allclose(target_values, values))
print(f'test DataFrame conversion error: type incorrect')
self.assertRaises(AssertionError, self.hp.to_dataframe, htype=pd.DataFrame())
print(f'test DataFrame error raising with share not found error')
self.assertRaises(KeyError, self.hp.to_dataframe, htype='non_type')
print(f'Raises KeyError when both parameters or neither parameter is given')
self.assertRaises(KeyError, self.hp.to_dataframe)
self.assertRaises(KeyError, self.hp.to_dataframe, share='000100', htype='close')
def test_to_df_dict(self):
"""测试HistoryPanel公有方法to_df_dict"""
print('test convert history panel slice by share')
df_dict = self.hp.to_df_dict('share')
self.assertEqual(self.hp.shares, list(df_dict.keys()))
df_dict = self.hp.to_df_dict()
self.assertEqual(self.hp.shares, list(df_dict.keys()))
print('test convert historypanel slice by htype ')
df_dict = self.hp.to_df_dict('htype')
self.assertEqual(self.hp.htypes, list(df_dict.keys()))
print('test raise assertion error')
self.assertRaises(AssertionError, self.hp.to_df_dict, by='random text')
self.assertRaises(AssertionError, self.hp.to_df_dict, by=3)
print('test empty hp')
df_dict = qt.HistoryPanel().to_df_dict('share')
self.assertEqual(df_dict, {})
def test_stack_dataframes(self):
print('test stack dataframes in a list')
df1 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [2, 3, 4, 5], 'c': [3, 4, 5, 6]})
df1.index = ['20200101', '20200102', '20200103', '20200104']
df2 = pd.DataFrame({'b': [4, 3, 2, 1], 'd': [1, 1, 1, 1], 'c': [6, 5, 4, 3]})
df2.index = ['20200101', '20200102', '20200104', '20200105']
df3 = pd.DataFrame({'a': [6, 6, 6, 6], 'd': [4, 4, 4, 4], 'b': [2, 4, 6, 8]})
df3.index = ['20200101', '20200102', '20200103', '20200106']
values1 = np.array([[[1., 2., 3., np.nan],
[2., 3., 4., np.nan],
[3., 4., 5., np.nan],
[4., 5., 6., np.nan],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan]],
[[np.nan, 4., 6., 1.],
[np.nan, 3., 5., 1.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 2., 4., 1.],
[np.nan, 1., 3., 1.],
[np.nan, np.nan, np.nan, np.nan]],
[[6., 2., np.nan, 4.],
[6., 4., np.nan, 4.],
[6., 6., np.nan, 4.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[6., 8., np.nan, 4.]]])
values2 = np.array([[[1., np.nan, 6.],
[2., np.nan, 6.],
[3., np.nan, 6.],
[4., np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 6.]],
[[2., 4., 2.],
[3., 3., 4.],
[4., np.nan, 6.],
[5., 2., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 8.]],
[[3., 6., np.nan],
[4., 5., np.nan],
[5., np.nan, np.nan],
[6., 4., np.nan],
[np.nan, 3., np.nan],
[np.nan, np.nan, np.nan]],
[[np.nan, 1., 4.],
[np.nan, 1., 4.],
[np.nan, np.nan, 4.],
[np.nan, 1., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 4.]]])
print(df1.rename(index=pd.to_datetime))
print(df2.rename(index=pd.to_datetime))
print(df3.rename(index=pd.to_datetime))
hp1 = stack_dataframes([df1, df2, df3], stack_along='shares',
shares=['000100', '000200', '000300'])
hp2 = stack_dataframes([df1, df2, df3], stack_along='shares',
shares='000100, 000300, 000200')
print('hp1 is:\n', hp1)
print('hp2 is:\n', hp2)
self.assertEqual(hp1.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp1.shares, ['000100', '000200', '000300'])
self.assertTrue(np.allclose(hp1.values, values1, equal_nan=True))
self.assertEqual(hp2.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp2.shares, ['000100', '000300', '000200'])
self.assertTrue(np.allclose(hp2.values, values1, equal_nan=True))
hp3 = stack_dataframes([df1, df2, df3], stack_along='htypes',
htypes=['close', 'high', 'low'])
hp4 = stack_dataframes([df1, df2, df3], stack_along='htypes',
htypes='open, close, high')
print('hp3 is:\n', hp3.values)
print('hp4 is:\n', hp4.values)
self.assertEqual(hp3.htypes, ['close', 'high', 'low'])
self.assertEqual(hp3.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp3.values, values2, equal_nan=True))
self.assertEqual(hp4.htypes, ['open', 'close', 'high'])
self.assertEqual(hp4.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp4.values, values2, equal_nan=True))
print('test stack dataframes in a dict')
df1 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [2, 3, 4, 5], 'c': [3, 4, 5, 6]})
df1.index = ['20200101', '20200102', '20200103', '20200104']
df2 = pd.DataFrame({'b': [4, 3, 2, 1], 'd': [1, 1, 1, 1], 'c': [6, 5, 4, 3]})
df2.index = ['20200101', '20200102', '20200104', '20200105']
df3 = pd.DataFrame({'a': [6, 6, 6, 6], 'd': [4, 4, 4, 4], 'b': [2, 4, 6, 8]})
df3.index = ['20200101', '20200102', '20200103', '20200106']
values1 = np.array([[[1., 2., 3., np.nan],
[2., 3., 4., np.nan],
[3., 4., 5., np.nan],
[4., 5., 6., np.nan],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan]],
[[np.nan, 4., 6., 1.],
[np.nan, 3., 5., 1.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 2., 4., 1.],
[np.nan, 1., 3., 1.],
[np.nan, np.nan, np.nan, np.nan]],
[[6., 2., np.nan, 4.],
[6., 4., np.nan, 4.],
[6., 6., np.nan, 4.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[6., 8., np.nan, 4.]]])
values2 = np.array([[[1., np.nan, 6.],
[2., np.nan, 6.],
[3., np.nan, 6.],
[4., np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 6.]],
[[2., 4., 2.],
[3., 3., 4.],
[4., np.nan, 6.],
[5., 2., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 8.]],
[[3., 6., np.nan],
[4., 5., np.nan],
[5., np.nan, np.nan],
[6., 4., np.nan],
[np.nan, 3., np.nan],
[np.nan, np.nan, np.nan]],
[[np.nan, 1., 4.],
[np.nan, 1., 4.],
[np.nan, np.nan, 4.],
[np.nan, 1., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 4.]]])
print(df1.rename(index=pd.to_datetime))
print(df2.rename(index=pd.to_datetime))
print(df3.rename(index=pd.to_datetime))
hp1 = stack_dataframes(dfs={'000001.SZ': df1, '000002.SZ': df2, '000003.SZ': df3},
stack_along='shares')
hp2 = stack_dataframes(dfs={'000001.SZ': df1, '000002.SZ': df2, '000003.SZ': df3},
stack_along='shares',
shares='000100, 000300, 000200')
print('hp1 is:\n', hp1)
print('hp2 is:\n', hp2)
self.assertEqual(hp1.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp1.shares, ['000001.SZ', '000002.SZ', '000003.SZ'])
self.assertTrue(np.allclose(hp1.values, values1, equal_nan=True))
self.assertEqual(hp2.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp2.shares, ['000100', '000300', '000200'])
self.assertTrue(np.allclose(hp2.values, values1, equal_nan=True))
hp3 = stack_dataframes(dfs={'close': df1, 'high': df2, 'low': df3},
stack_along='htypes')
hp4 = stack_dataframes(dfs={'close': df1, 'low': df2, 'high': df3},
stack_along='htypes',
htypes='open, close, high')
print('hp3 is:\n', hp3.values)
print('hp4 is:\n', hp4.values)
self.assertEqual(hp3.htypes, ['close', 'high', 'low'])
self.assertEqual(hp3.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp3.values, values2, equal_nan=True))
self.assertEqual(hp4.htypes, ['open', 'close', 'high'])
self.assertEqual(hp4.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp4.values, values2, equal_nan=True))
def test_to_csv(self):
pass
def test_to_hdf(self):
pass
def test_fill_na(self):
print(self.hp)
new_values = self.hp.values.astype(float)
new_values[[0, 1, 3, 2], [1, 3, 0, 2], [1, 3, 2, 2]] = np.nan
print(new_values)
temp_hp = qt.HistoryPanel(values=new_values, levels=self.hp.levels, rows=self.hp.rows, columns=self.hp.columns)
self.assertTrue(np.allclose(temp_hp.values[[0, 1, 3, 2], [1, 3, 0, 2], [1, 3, 2, 2]], np.nan, equal_nan=True))
temp_hp.fillna(2.3)
new_values[[0, 1, 3, 2], [1, 3, 0, 2], [1, 3, 2, 2]] = 2.3
self.assertTrue(np.allclose(temp_hp.values,
new_values, equal_nan=True))
def test_get_history_panel(self):
# TODO: implement this test case
# test get only one line of data
pass
def test_get_price_type_raw_data(self):
shares = '000039.SZ, 600748.SH, 000040.SZ'
start = '20200101'
end = '20200131'
htypes = 'open, high, low, close'
target_price_000039 = [[9.45, 9.49, 9.12, 9.17],
[9.46, 9.56, 9.4, 9.5],
[9.7, 9.76, 9.5, 9.51],
[9.7, 9.75, 9.7, 9.72],
[9.73, 9.77, 9.7, 9.73],
[9.83, 9.85, 9.71, 9.72],
[9.85, 9.85, 9.75, 9.79],
[9.96, 9.96, 9.83, 9.86],
[9.87, 9.94, 9.77, 9.93],
[9.82, 9.9, 9.76, 9.87],
[9.8, 9.85, 9.77, 9.82],
[9.84, 9.86, 9.71, 9.72],
[9.83, 9.93, 9.81, 9.86],
[9.7, 9.87, 9.7, 9.82],
[9.83, 9.86, 9.69, 9.79],
[9.8, 9.94, 9.8, 9.86]]
target_price_600748 = [[5.68, 5.68, 5.32, 5.37],
[5.62, 5.68, 5.46, 5.65],
[5.72, 5.72, 5.61, 5.62],
[5.76, 5.77, 5.6, 5.73],
[5.78, 5.84, 5.73, 5.75],
[5.89, 5.91, 5.76, 5.77],
[6.03, 6.04, 5.87, 5.89],
[5.94, 6.07, 5.94, 6.02],
[5.96, 5.98, 5.88, 5.97],
[6.04, 6.06, 5.95, 5.96],
[5.98, 6.04, 5.96, 6.03],
[6.1, 6.11, 5.89, 5.94],
[6.02, 6.12, 6., 6.1],
[5.96, 6.05, 5.88, 6.01],
[6.03, 6.03, 5.95, 5.99],
[6.02, 6.12, 5.99, 5.99]]
target_price_000040 = [[3.63, 3.83, 3.63, 3.65],
[3.99, 4.07, 3.97, 4.03],
[4.1, 4.11, 3.93, 3.95],
[4.12, 4.13, 4.06, 4.11],
[4.13, 4.19, 4.07, 4.13],
[4.27, 4.28, 4.11, 4.12],
[4.37, 4.38, 4.25, 4.29],
[4.34, 4.5, 4.32, 4.41],
[4.28, 4.35, 4.2, 4.34],
[4.41, 4.43, 4.29, 4.31],
[4.42, 4.45, 4.36, 4.41],
[4.51, 4.56, 4.33, 4.35],
[4.35, 4.55, 4.31, 4.55],
[4.3, 4.41, 4.22, 4.36],
[4.27, 4.44, 4.23, 4.34],
[4.23, 4.27, 4.18, 4.25]]
print(f'test get price type raw data with single thread')
df_list = get_price_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, freq='d')
self.assertIsInstance(df_list, dict)
self.assertEqual(len(df_list), 3)
self.assertTrue(np.allclose(df_list['000039.SZ'].values, np.array(target_price_000039)))
self.assertTrue(np.allclose(df_list['600748.SH'].values, np.array(target_price_600748)))
self.assertTrue(np.allclose(df_list['000040.SZ'].values, np.array(target_price_000040)))
print(f'in get price type raw data, got DataFrames: \n"000039.SZ":\n'
f'{df_list["000039.SZ"]}\n"600748.SH":\n'
f'{df_list["600748.SH"]}\n"000040.SZ":\n{df_list["000040.SZ"]}')
print(f'test get price type raw data with multi threads')
df_list = get_price_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, freq='d', parallel=10)
self.assertIsInstance(df_list, dict)
self.assertEqual(len(df_list), 3)
self.assertTrue(np.allclose(df_list['000039.SZ'].values, np.array(target_price_000039)))
self.assertTrue(np.allclose(df_list['600748.SH'].values, np.array(target_price_600748)))
self.assertTrue(np.allclose(df_list['000040.SZ'].values, np.array(target_price_000040)))
print(f'in get price type raw data, got DataFrames: \n"000039.SZ":\n'
f'{df_list["000039.SZ"]}\n"600748.SH":\n'
f'{df_list["600748.SH"]}\n"000040.SZ":\n{df_list["000040.SZ"]}')
def test_get_financial_report_type_raw_data(self):
shares = '000039.SZ, 600748.SH, 000040.SZ'
start = '20160101'
end = '20201231'
htypes = 'eps,basic_eps,diluted_eps,total_revenue,revenue,total_share,' \
'cap_rese,undistr_porfit,surplus_rese,net_profit'
target_eps_000039 = [[1.41],
[0.1398],
[-0.0841],
[-0.1929],
[0.37],
[0.1357],
[0.1618],
[0.1191],
[1.11],
[0.759],
[0.3061],
[0.1409],
[0.81],
[0.4187],
[0.2554],
[0.1624],
[0.14],
[-0.0898],
[-0.1444],
[0.1291]]
target_eps_600748 = [[0.41],
[0.22],
[0.22],
[0.09],
[0.42],
[0.23],
[0.22],
[0.09],
[0.36],
[0.16],
[0.15],
[0.07],
[0.47],
[0.19],
[0.12],
[0.07],
[0.32],
[0.22],
[0.14],
[0.07]]
target_eps_000040 = [[-0.6866],
[-0.134],
[-0.189],
[-0.036],
[-0.6435],
[0.05],
[0.062],
[0.0125],
[0.8282],
[1.05],
[0.985],
[0.811],
[0.41],
[0.242],
[0.113],
[0.027],
[0.19],
[0.17],
[0.17],
[0.064]]
target_basic_eps_000039 = [[1.3980000e-01, 1.3980000e-01, 6.3591954e+10, 6.3591954e+10],
[-8.4100000e-02, -8.4100000e-02, 3.9431807e+10, 3.9431807e+10],
[-1.9290000e-01, -1.9290000e-01, 1.5852177e+10, 1.5852177e+10],
[3.7000000e-01, 3.7000000e-01, 8.5815341e+10, 8.5815341e+10],
[1.3570000e-01, 1.3430000e-01, 6.1660271e+10, 6.1660271e+10],
[1.6180000e-01, 1.6040000e-01, 4.2717729e+10, 4.2717729e+10],
[1.1910000e-01, 1.1900000e-01, 1.9099547e+10, 1.9099547e+10],
[1.1100000e+00, 1.1000000e+00, 9.3497622e+10, 9.3497622e+10],
[7.5900000e-01, 7.5610000e-01, 6.6906147e+10, 6.6906147e+10],
[3.0610000e-01, 3.0380000e-01, 4.3560398e+10, 4.3560398e+10],
[1.4090000e-01, 1.4050000e-01, 1.9253639e+10, 1.9253639e+10],
[8.1000000e-01, 8.1000000e-01, 7.6299930e+10, 7.6299930e+10],
[4.1870000e-01, 4.1710000e-01, 5.3962706e+10, 5.3962706e+10],
[2.5540000e-01, 2.5440000e-01, 3.3387152e+10, 3.3387152e+10],
[1.6240000e-01, 1.6200000e-01, 1.4675987e+10, 1.4675987e+10],
[1.4000000e-01, 1.4000000e-01, 5.1111652e+10, 5.1111652e+10],
[-8.9800000e-02, -8.9800000e-02, 3.4982614e+10, 3.4982614e+10],
[-1.4440000e-01, -1.4440000e-01, 2.3542843e+10, 2.3542843e+10],
[1.2910000e-01, 1.2860000e-01, 1.0412416e+10, 1.0412416e+10],
[7.2000000e-01, 7.1000000e-01, 5.8685804e+10, 5.8685804e+10]]
target_basic_eps_600748 = [[2.20000000e-01, 2.20000000e-01, 5.29423397e+09, 5.29423397e+09],
[2.20000000e-01, 2.20000000e-01, 4.49275653e+09, 4.49275653e+09],
[9.00000000e-02, 9.00000000e-02, 1.59067065e+09, 1.59067065e+09],
[4.20000000e-01, 4.20000000e-01, 8.86555586e+09, 8.86555586e+09],
[2.30000000e-01, 2.30000000e-01, 5.44850143e+09, 5.44850143e+09],
[2.20000000e-01, 2.20000000e-01, 4.34978927e+09, 4.34978927e+09],
[9.00000000e-02, 9.00000000e-02, 1.73793793e+09, 1.73793793e+09],
[3.60000000e-01, 3.60000000e-01, 8.66375241e+09, 8.66375241e+09],
[1.60000000e-01, 1.60000000e-01, 4.72875116e+09, 4.72875116e+09],
[1.50000000e-01, 1.50000000e-01, 3.76879016e+09, 3.76879016e+09],
[7.00000000e-02, 7.00000000e-02, 1.31785454e+09, 1.31785454e+09],
[4.70000000e-01, 4.70000000e-01, 7.23391685e+09, 7.23391685e+09],
[1.90000000e-01, 1.90000000e-01, 3.76072215e+09, 3.76072215e+09],
[1.20000000e-01, 1.20000000e-01, 2.35845364e+09, 2.35845364e+09],
[7.00000000e-02, 7.00000000e-02, 1.03831865e+09, 1.03831865e+09],
[3.20000000e-01, 3.20000000e-01, 6.48880919e+09, 6.48880919e+09],
[2.20000000e-01, 2.20000000e-01, 3.72209142e+09, 3.72209142e+09],
[1.40000000e-01, 1.40000000e-01, 2.22563924e+09, 2.22563924e+09],
[7.00000000e-02, 7.00000000e-02, 8.96647052e+08, 8.96647052e+08],
[4.80000000e-01, 4.80000000e-01, 6.61917508e+09, 6.61917508e+09]]
target_basic_eps_000040 = [[-1.34000000e-01, -1.34000000e-01, 2.50438755e+09, 2.50438755e+09],
[-1.89000000e-01, -1.89000000e-01, 1.32692347e+09, 1.32692347e+09],
[-3.60000000e-02, -3.60000000e-02, 5.59073338e+08, 5.59073338e+08],
[-6.43700000e-01, -6.43700000e-01, 6.80576162e+09, 6.80576162e+09],
[5.00000000e-02, 5.00000000e-02, 6.38891620e+09, 6.38891620e+09],
[6.20000000e-02, 6.20000000e-02, 5.23267082e+09, 5.23267082e+09],
[1.25000000e-02, 1.25000000e-02, 2.22420874e+09, 2.22420874e+09],
[8.30000000e-01, 8.30000000e-01, 8.67628947e+09, 8.67628947e+09],
[1.05000000e+00, 1.05000000e+00, 5.29431716e+09, 5.29431716e+09],
[9.85000000e-01, 9.85000000e-01, 3.56822382e+09, 3.56822382e+09],
[8.11000000e-01, 8.11000000e-01, 1.06613439e+09, 1.06613439e+09],
[4.10000000e-01, 4.10000000e-01, 8.13102532e+09, 8.13102532e+09],
[2.42000000e-01, 2.42000000e-01, 5.17971521e+09, 5.17971521e+09],
[1.13000000e-01, 1.13000000e-01, 3.21704120e+09, 3.21704120e+09],
[2.70000000e-02, 2.70000000e-02, 8.41966738e+08, 8.24272235e+08],
[1.90000000e-01, 1.90000000e-01, 3.77350171e+09, 3.77350171e+09],
[1.70000000e-01, 1.70000000e-01, 2.38643892e+09, 2.38643892e+09],
[1.70000000e-01, 1.70000000e-01, 1.29127117e+09, 1.29127117e+09],
[6.40000000e-02, 6.40000000e-02, 6.03256858e+08, 6.03256858e+08],
[1.30000000e-01, 1.30000000e-01, 1.66572918e+09, 1.66572918e+09]]
target_total_share_000039 = [[3.5950140e+09, 4.8005360e+09, 2.1573660e+10, 3.5823430e+09],
[3.5860750e+09, 4.8402300e+09, 2.0750827e+10, 3.5823430e+09],
[3.5860750e+09, 4.9053550e+09, 2.0791307e+10, 3.5823430e+09],
[3.5845040e+09, 4.8813110e+09, 2.1482857e+10, 3.5823430e+09],
[3.5831490e+09, 4.9764250e+09, 2.0926816e+10, 3.2825850e+09],
[3.5825310e+09, 4.8501270e+09, 2.1020418e+10, 3.2825850e+09],
[2.9851110e+09, 5.4241420e+09, 2.2438350e+10, 3.2825850e+09],
[2.9849890e+09, 4.1284000e+09, 2.2082769e+10, 3.2825850e+09],
[2.9849610e+09, 4.0838010e+09, 2.1045994e+10, 3.2815350e+09],
[2.9849560e+09, 4.2491510e+09, 1.9694345e+10, 3.2815350e+09],
[2.9846970e+09, 4.2351600e+09, 2.0016361e+10, 3.2815350e+09],
[2.9828890e+09, 4.2096630e+09, 1.9734494e+10, 3.2815350e+09],
[2.9813960e+09, 3.4564240e+09, 1.8562738e+10, 3.2793790e+09],
[2.9803530e+09, 3.0759650e+09, 1.8076208e+10, 3.2793790e+09],
[2.9792680e+09, 3.1376690e+09, 1.7994776e+10, 3.2793790e+09],
[2.9785770e+09, 3.1265850e+09, 1.7495053e+10, 3.2793790e+09],
[2.9783640e+09, 3.1343850e+09, 1.6740840e+10, 3.2035780e+09],
[2.9783590e+09, 3.1273880e+09, 1.6578389e+10, 3.2035780e+09],
[2.9782780e+09, 3.1169280e+09, 1.8047639e+10, 3.2035780e+09],
[2.9778200e+09, 3.1818630e+09, 1.7663145e+10, 3.2035780e+09]]
target_total_share_600748 = [[1.84456289e+09, 2.60058426e+09, 5.72443733e+09, 4.58026529e+08],
[1.84456289e+09, 2.60058426e+09, 5.72096899e+09, 4.58026529e+08],
[1.84456289e+09, 2.60058426e+09, 5.65738237e+09, 4.58026529e+08],
[1.84456289e+09, 2.60058426e+09, 5.50257806e+09, 4.58026529e+08],
[1.84456289e+09, 2.59868164e+09, 5.16741523e+09, 4.44998882e+08],
[1.84456289e+09, 2.59684471e+09, 5.14677280e+09, 4.44998882e+08],
[1.84456289e+09, 2.59684471e+09, 4.94955591e+09, 4.44998882e+08],
[1.84456289e+09, 2.59684471e+09, 4.79001451e+09, 4.44998882e+08],
[1.84456289e+09, 3.11401684e+09, 4.46326988e+09, 4.01064256e+08],
[1.84456289e+09, 3.11596723e+09, 4.45419136e+09, 4.01064256e+08],
[1.84456289e+09, 3.11596723e+09, 4.39652948e+09, 4.01064256e+08],
[1.84456289e+09, 3.18007783e+09, 4.26608403e+09, 4.01064256e+08],
[1.84456289e+09, 3.10935622e+09, 3.78417688e+09, 3.65651701e+08],
[1.84456289e+09, 3.10935622e+09, 3.65806574e+09, 3.65651701e+08],
[1.84456289e+09, 3.10935622e+09, 3.62063090e+09, 3.65651701e+08],
[1.84456289e+09, 3.10935622e+09, 3.50063915e+09, 3.65651701e+08],
[1.41889453e+09, 3.55940850e+09, 3.22272993e+09, 3.62124939e+08],
[1.41889453e+09, 3.56129650e+09, 3.11477476e+09, 3.62124939e+08],
[1.41889453e+09, 3.59632888e+09, 3.06836903e+09, 3.62124939e+08],
[1.08337087e+09, 3.37400726e+07, 3.00918704e+09, 3.62124939e+08]]
target_total_share_000040 = [[1.48687387e+09, 1.06757900e+10, 8.31900755e+08, 2.16091994e+08],
[1.48687387e+09, 1.06757900e+10, 7.50177302e+08, 2.16091994e+08],
[1.48687387e+09, 1.06757899e+10, 9.90255974e+08, 2.16123282e+08],
[1.48687387e+09, 1.06757899e+10, 1.03109866e+09, 2.16091994e+08],
[1.48687387e+09, 1.06757910e+10, 2.07704745e+09, 2.16123282e+08],
[1.48687387e+09, 1.06757910e+10, 2.09608665e+09, 2.16123282e+08],
[1.48687387e+09, 1.06803833e+10, 2.13354083e+09, 2.16123282e+08],
[1.48687387e+09, 1.06804090e+10, 2.11489364e+09, 2.16123282e+08],
[1.33717327e+09, 8.87361727e+09, 2.42939924e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 2.34220254e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 2.16390368e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 1.07961915e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 8.58866066e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 6.87024393e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 5.71554565e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 5.54241222e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361726e+09, 5.10059576e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361726e+09, 4.59351639e+08, 1.88489589e+08],
[4.69593364e+08, 2.78355875e+08, 4.13430814e+08, 1.88489589e+08],
[4.69593364e+08, 2.74235459e+08, 3.83557678e+08, 1.88489589e+08]]
target_net_profit_000039 = [[np.nan],
[2.422180e+08],
[np.nan],
[2.510113e+09],
[np.nan],
[1.102220e+09],
[np.nan],
[4.068455e+09],
[np.nan],
[1.315957e+09],
[np.nan],
[3.158415e+09],
[np.nan],
[1.066509e+09],
[np.nan],
[7.349830e+08],
[np.nan],
[-5.411600e+08],
[np.nan],
[2.271961e+09]]
target_net_profit_600748 = [[np.nan],
[4.54341757e+08],
[np.nan],
[9.14476670e+08],
[np.nan],
[5.25360283e+08],
[np.nan],
[9.24502415e+08],
[np.nan],
[4.66560302e+08],
[np.nan],
[9.15265285e+08],
[np.nan],
[2.14639674e+08],
[np.nan],
[7.45093049e+08],
[np.nan],
[2.10967312e+08],
[np.nan],
[6.04572711e+08]]
target_net_profit_000040 = [[np.nan],
[-2.82458846e+08],
[np.nan],
[-9.57130872e+08],
[np.nan],
[9.22114527e+07],
[np.nan],
[1.12643819e+09],
[np.nan],
[1.31715269e+09],
[np.nan],
[5.39940093e+08],
[np.nan],
[1.51440838e+08],
[np.nan],
[1.75339071e+08],
[np.nan],
[8.04740415e+07],
[np.nan],
[6.20445815e+07]]
print('test get financial data, in multi thread mode')
df_list = get_financial_report_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, parallel=4)
self.assertIsInstance(df_list, tuple)
self.assertEqual(len(df_list), 4)
self.assertEqual(len(df_list[0]), 3)
self.assertEqual(len(df_list[1]), 3)
self.assertEqual(len(df_list[2]), 3)
self.assertEqual(len(df_list[3]), 3)
# Confirm that every returned item has the correct type
self.assertTrue(all(isinstance(item, pd.DataFrame) for subdict in df_list for item in subdict.values()))
# Check whether any of the returned frames are empty
print(all(item.empty for subdict in df_list for item in subdict.values()))
# Verify each group of data is correct and consistently ordered; skip groups that came back empty
if df_list[0]['000039.SZ'].empty:
print(f'income data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000039.SZ'].values, target_basic_eps_000039))
if df_list[0]['600748.SH'].empty:
print(f'income data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['600748.SH'].values, target_basic_eps_600748))
if df_list[0]['000040.SZ'].empty:
print(f'income data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000040.SZ'].values, target_basic_eps_000040))
if df_list[1]['000039.SZ'].empty:
print(f'indicator data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000039.SZ'].values, target_eps_000039))
if df_list[1]['600748.SH'].empty:
print(f'indicator data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['600748.SH'].values, target_eps_600748))
if df_list[1]['000040.SZ'].empty:
print(f'indicator data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000040.SZ'].values, target_eps_000040))
if df_list[2]['000039.SZ'].empty:
print(f'balance data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000039.SZ'].values, target_total_share_000039))
if df_list[2]['600748.SH'].empty:
print(f'balance data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['600748.SH'].values, target_total_share_600748))
if df_list[2]['000040.SZ'].empty:
print(f'balance data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000040.SZ'].values, target_total_share_000040))
if df_list[3]['000039.SZ'].empty:
print(f'cash flow data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000039.SZ'].values, target_net_profit_000039, equal_nan=True))
if df_list[3]['600748.SH'].empty:
print(f'cash flow data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['600748.SH'].values, target_net_profit_600748, equal_nan=True))
if df_list[3]['000040.SZ'].empty:
print(f'cash flow data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000040.SZ'].values, target_net_profit_000040, equal_nan=True))
print('test get financial data, in single thread mode')
df_list = get_financial_report_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, parallel=0)
self.assertIsInstance(df_list, tuple)
self.assertEqual(len(df_list), 4)
self.assertEqual(len(df_list[0]), 3)
self.assertEqual(len(df_list[1]), 3)
self.assertEqual(len(df_list[2]), 3)
self.assertEqual(len(df_list[3]), 3)
# Confirm that every returned item has the correct type
self.assertTrue(all(isinstance(item, pd.DataFrame) for subdict in df_list for item in subdict.values()))
# Check for empty data; network issues can occasionally return empty frames
self.assertFalse(all(item.empty for subdict in df_list for item in subdict.values()))
# Verify each group of data is correct and consistently ordered; skip groups that came back empty
if df_list[0]['000039.SZ'].empty:
print(f'income data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000039.SZ'].values, target_basic_eps_000039))
if df_list[0]['600748.SH'].empty:
print(f'income data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['600748.SH'].values, target_basic_eps_600748))
if df_list[0]['000040.SZ'].empty:
print(f'income data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000040.SZ'].values, target_basic_eps_000040))
if df_list[1]['000039.SZ'].empty:
print(f'indicator data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000039.SZ'].values, target_eps_000039))
if df_list[1]['600748.SH'].empty:
print(f'indicator data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['600748.SH'].values, target_eps_600748))
if df_list[1]['000040.SZ'].empty:
print(f'indicator data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000040.SZ'].values, target_eps_000040))
if df_list[2]['000039.SZ'].empty:
print(f'balance data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000039.SZ'].values, target_total_share_000039))
if df_list[2]['600748.SH'].empty:
print(f'balance data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['600748.SH'].values, target_total_share_600748))
if df_list[2]['000040.SZ'].empty:
print(f'balance data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000040.SZ'].values, target_total_share_000040))
if df_list[3]['000039.SZ'].empty:
print(f'cash flow data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000039.SZ'].values, target_net_profit_000039, equal_nan=True))
if df_list[3]['600748.SH'].empty:
print(f'cash flow data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['600748.SH'].values, target_net_profit_600748, equal_nan=True))
if df_list[3]['000040.SZ'].empty:
print(f'cash flow data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000040.SZ'].values, target_net_profit_000040, equal_nan=True))
def test_get_composite_type_raw_data(self):
pass
class TestUtilityFuncs(unittest.TestCase):
def setUp(self):
pass
def test_str_to_list(self):
self.assertEqual(str_to_list('a,b,c,d,e'), ['a', 'b', 'c', 'd', 'e'])
self.assertEqual(str_to_list('a, b, c '), ['a', 'b', 'c'])
self.assertEqual(str_to_list('a, b: c', sep_char=':'), ['a,b', 'c'])
def test_list_or_slice(self):
str_dict = {'close': 0, 'open': 1, 'high': 2, 'low': 3}
self.assertEqual(qt.list_or_slice(slice(1, 2, 1), str_dict), slice(1, 2, 1))
self.assertEqual(qt.list_or_slice('open', str_dict), [1])
self.assertEqual(list(qt.list_or_slice('close, high, low', str_dict)), [0, 2, 3])
self.assertEqual(list(qt.list_or_slice('close:high', str_dict)), [0, 1, 2])
self.assertEqual(list(qt.list_or_slice(['open'], str_dict)), [1])
self.assertEqual(list(qt.list_or_slice(['open', 'high'], str_dict)), [1, 2])
self.assertEqual(list(qt.list_or_slice(0, str_dict)), [0])
self.assertEqual(list(qt.list_or_slice([0, 2], str_dict)), [0, 2])
self.assertEqual(list(qt.list_or_slice([True, False, True, False], str_dict)), [0, 2])
def test_label_to_dict(self):
target_list = [0, 1, 10, 100]
target_dict = {'close': 0, 'open': 1, 'high': 2, 'low': 3}
target_dict2 = {'close': 0, 'open': 2, 'high': 1, 'low': 3}
self.assertEqual(qt.labels_to_dict('close, open, high, low', target_list), target_dict)
self.assertEqual(qt.labels_to_dict(['close', 'open', 'high', 'low'], target_list), target_dict)
self.assertEqual(qt.labels_to_dict('close, high, open, low', target_list), target_dict2)
self.assertEqual(qt.labels_to_dict(['close', 'high', 'open', 'low'], target_list), target_dict2)
def test_regulate_date_format(self):
self.assertEqual(regulate_date_format('2019/11/06'), '20191106')
self.assertEqual(regulate_date_format('2019-11-06'), '20191106')
self.assertEqual(regulate_date_format('20191106'), '20191106')
self.assertEqual(regulate_date_format('191106'), '20061119')
self.assertEqual(regulate_date_format('830522'), '19830522')
self.assertEqual(regulate_date_format(datetime.datetime(2010, 3, 15)), '20100315')
self.assertEqual(regulate_date_format(pd.Timestamp('2010.03.15')), '20100315')
self.assertRaises(ValueError, regulate_date_format, 'abc')
self.assertRaises(ValueError, regulate_date_format, '2019/13/43')
def test_list_to_str_format(self):
self.assertEqual(list_to_str_format(['close', 'open', 'high', 'low']),
'close,open,high,low')
self.assertEqual(list_to_str_format(['letters', ' ', '123 4', 123, ' kk l']),
'letters,,1234,kkl')
self.assertEqual(list_to_str_format('a string input'),
'a,string,input')
self.assertEqual(list_to_str_format('already,a,good,string'),
'already,a,good,string')
self.assertRaises(AssertionError, list_to_str_format, 123)
def test_is_trade_day(self):
"""test if the funcion maybe_trade_day() and is_market_trade_day() works properly
"""
date_trade = '20210401'
date_holiday = '20210102'
date_weekend = '20210424'
date_seems_trade_day = '20210217'
date_too_early = '19890601'
date_too_late = '20230105'
date_christmas = '20201225'
self.assertTrue(maybe_trade_day(date_trade))
self.assertFalse(maybe_trade_day(date_holiday))
self.assertFalse(maybe_trade_day(date_weekend))
self.assertTrue(maybe_trade_day(date_seems_trade_day))
self.assertTrue(maybe_trade_day(date_too_early))
self.assertTrue(maybe_trade_day(date_too_late))
self.assertTrue(maybe_trade_day(date_christmas))
self.assertTrue(is_market_trade_day(date_trade))
self.assertFalse(is_market_trade_day(date_holiday))
self.assertFalse(is_market_trade_day(date_weekend))
self.assertFalse(is_market_trade_day(date_seems_trade_day))
self.assertFalse(is_market_trade_day(date_too_early))
self.assertFalse(is_market_trade_day(date_too_late))
self.assertTrue(is_market_trade_day(date_christmas))
self.assertFalse(is_market_trade_day(date_christmas, exchange='XHKG'))
date_trade = pd.to_datetime('20210401')
date_holiday = pd.to_datetime('20210102')
date_weekend = pd.to_datetime('20210424')
self.assertTrue(maybe_trade_day(date_trade))
self.assertFalse(maybe_trade_day(date_holiday))
self.assertFalse(maybe_trade_day(date_weekend))
def test_prev_trade_day(self):
"""test the function prev_trade_day()
"""
date_trade = '20210401'
date_holiday = '20210102'
prev_holiday = pd.to_datetime(date_holiday) - pd.Timedelta(2, 'd')
date_weekend = '20210424'
prev_weekend = pd.to_datetime(date_weekend) - pd.Timedelta(1, 'd')
date_seems_trade_day = '20210217'
prev_seems_trade_day = '20210217'
date_too_early = '19890601'
date_too_late = '20230105'
date_christmas = '20201225'
self.assertEqual(pd.to_datetime(prev_trade_day(date_trade)),
pd.to_datetime(date_trade))
self.assertEqual(pd.to_datetime(prev_trade_day(date_holiday)),
pd.to_datetime(prev_holiday))
self.assertEqual(pd.to_datetime(prev_trade_day(date_weekend)),
pd.to_datetime(prev_weekend))
self.assertEqual(pd.to_datetime(prev_trade_day(date_seems_trade_day)),
pd.to_datetime(prev_seems_trade_day))
self.assertEqual(pd.to_datetime(prev_trade_day(date_too_early)),
pd.to_datetime(date_too_early))
self.assertEqual(pd.to_datetime(prev_trade_day(date_too_late)),
pd.to_datetime(date_too_late))
self.assertEqual(pd.to_datetime(prev_trade_day(date_christmas)),
pd.to_datetime(date_christmas))
def test_next_trade_day(self):
""" test the function next_trade_day()
"""
date_trade = '20210401'
date_holiday = '20210102'
next_holiday = pd.to_datetime(date_holiday) + pd.Timedelta(2, 'd')
date_weekend = '20210424'
next_weekend = pd.to_datetime(date_weekend) + pd.Timedelta(2, 'd')
date_seems_trade_day = '20210217'
next_seems_trade_day = '20210217'
date_too_early = '19890601'
date_too_late = '20230105'
date_christmas = '20201225'
self.assertEqual(pd.to_datetime(next_trade_day(date_trade)),
pd.to_datetime(date_trade))
self.assertEqual(pd.to_datetime(next_trade_day(date_holiday)),
pd.to_datetime(next_holiday))
self.assertEqual(pd.to_datetime(next_trade_day(date_weekend)),
pd.to_datetime(next_weekend))
self.assertEqual(pd.to_datetime(next_trade_day(date_seems_trade_day)),
pd.to_datetime(next_seems_trade_day))
self.assertEqual(pd.to_datetime(next_trade_day(date_too_early)),
pd.to_datetime(date_too_early))
self.assertEqual(pd.to_datetime(next_trade_day(date_too_late)),
pd.to_datetime(date_too_late))
self.assertEqual(pd.to_datetime(next_trade_day(date_christmas)),
pd.to_datetime(date_christmas))
def test_prev_market_trade_day(self):
""" test the function prev_market_trade_day()
"""
date_trade = '20210401'
date_holiday = '20210102'
prev_holiday = pd.to_datetime(date_holiday) - pd.Timedelta(2, 'd')
date_weekend = '20210424'
prev_weekend = pd.to_datetime(date_weekend) - pd.Timedelta(1, 'd')
date_seems_trade_day = '20210217'
prev_seems_trade_day = pd.to_datetime(date_seems_trade_day) - pd.Timedelta(7, 'd')
date_too_early = '19890601'
date_too_late = '20230105'
date_christmas = '20201225'
prev_christmas_xhkg = '20201224'
self.assertEqual(pd.to_datetime(prev_market_trade_day(date_trade)),
pd.to_datetime(date_trade))
self.assertEqual(pd.to_datetime(prev_market_trade_day(date_holiday)),
pd.to_datetime(prev_holiday))
self.assertEqual(pd.to_datetime(prev_market_trade_day(date_weekend)),
pd.to_datetime(prev_weekend))
self.assertEqual(pd.to_datetime(prev_market_trade_day(date_seems_trade_day)),
pd.to_datetime(prev_seems_trade_day))
self.assertEqual(pd.to_datetime(prev_market_trade_day(date_too_early)),
None)
self.assertEqual(pd.to_datetime(prev_market_trade_day(date_too_late)),
None)
self.assertEqual(pd.to_datetime(prev_market_trade_day(date_christmas, 'SSE')),
pd.to_datetime(date_christmas))
self.assertEqual(pd.to_datetime(prev_market_trade_day(date_christmas, 'XHKG')),
pd.to_datetime(prev_christmas_xhkg))
def test_next_market_trade_day(self):
""" test the function next_market_trade_day()
"""
date_trade = '20210401'
date_holiday = '20210102'
next_holiday = pd.to_datetime(date_holiday) + pd.Timedelta(2, 'd')
date_weekend = '20210424'
next_weekend = pd.to_datetime(date_weekend) + pd.Timedelta(2, 'd')
date_seems_trade_day = '20210217'
next_seems_trade_day = pd.to_datetime(date_seems_trade_day) + pd.Timedelta(1, 'd')
date_too_early = '19890601'
date_too_late = '20230105'
date_christmas = '20201225'
next_christmas_xhkg = '20201228'
self.assertEqual(pd.to_datetime(next_market_trade_day(date_trade)),
pd.to_datetime(date_trade))
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import math
from pathlib import Path
import numpy as np
import pandas as pd
from pyspark.sql import SparkSession
import pytest
from pysarplus import SARPlus, SARModel
def assert_compare(expected_id, expected_score, actual_prediction):
assert expected_id == actual_prediction.id
assert math.isclose(
expected_score, actual_prediction.score, rel_tol=1e-3, abs_tol=1e-3
)
@pytest.fixture(scope="module")
def spark(tmp_path_factory, app_name="Sample", url="local[*]", memory="1G"):
"""Start Spark if not started
Args:
app_name (str): sets name of the application
url (str): url for spark master
memory (str): size of memory for spark driver
"""
try:
sarplus_jar_path = next(
Path(__file__)
.parents[2]
.joinpath("scala", "target")
.glob("**/sarplus*.jar")
).absolute()
except StopIteration:
raise Exception("Could not find Sarplus JAR file")
spark = (
SparkSession.builder.appName(app_name)
.master(url)
.config("spark.jars", sarplus_jar_path)
.config("spark.driver.memory", memory)
.config("spark.sql.shuffle.partitions", "1")
.config("spark.default.parallelism", "1")
.config("spark.sql.crossJoin.enabled", True)
.config("spark.ui.enabled", False)
.config("spark.sql.warehouse.dir", str(tmp_path_factory.mktemp("spark")))
# .config("spark.eventLog.enabled", True) # only for local debugging, breaks on build server
.getOrCreate()
)
return spark
@pytest.fixture(scope="module")
def sample_cache(spark):
df = spark.read.csv("tests/sample-input.txt", header=True, inferSchema=True)
path = "tests/sample-output.sar"
df.coalesce(1).write.format("com.microsoft.sarplus").mode("overwrite").save(path)
return path
@pytest.fixture(scope="module")
def pandas_dummy_dataset(header):
"""Load sample dataset in pandas for testing; can be used to create a Spark dataframe
Returns:
single Pandas dataframe
"""
ratings_dict = {
header["col_user"]: [1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3],
header["col_item"]: [1, 2, 3, 4, 1, 2, 7, 8, 9, 10, 1, 2],
header["col_rating"]: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
}
return pd.DataFrame(ratings_dict)
@pytest.mark.spark
def test_good(spark, sample_cache):
model = SARModel(sample_cache)
y = model.predict([0, 1], [10, 20], top_k=10, remove_seen=False)
assert_compare(0, 5, y[0])
assert_compare(1, 44, y[1])
assert_compare(2, 64, y[2])
@pytest.mark.spark
def test_good_less(spark, sample_cache):
model = SARModel(sample_cache)
y = model.predict([0, 2], [10, 3], top_k=5, remove_seen=False)
assert_compare(0, 1, y[0])
assert_compare(1, 11.6, y[1])
assert_compare(2, 12.3, y[2])
@pytest.mark.spark
def test_good_require_sort(spark, sample_cache):
model = SARModel(sample_cache)
y = model.predict([1, 0], [20, 10], top_k=10, remove_seen=False)
assert_compare(0, 5, y[0])
assert_compare(1, 44, y[1])
assert_compare(2, 64, y[2])
assert 3 == len(y)
@pytest.mark.spark
def test_good_require_sort_remove_seen(spark, sample_cache):
model = SARModel(sample_cache)
y = model.predict([1, 0], [20, 10], top_k=10, remove_seen=True)
assert_compare(2, 64, y[0])
assert 1 == len(y)
@pytest.mark.spark
def test_pandas(spark, sample_cache):
item_scores = pd.DataFrame([(0, 2.3), (1, 3.1)], columns=["itemID", "score"])
model = SARModel(sample_cache)
y = model.predict(
item_scores["itemID"].values,
item_scores["score"].values,
top_k=10,
remove_seen=False,
)
assert_compare(0, 0.85, y[0])
assert_compare(1, 6.9699, y[1])
assert_compare(2, 9.92, y[2])
@pytest.mark.spark
def test_e2e(spark, pandas_dummy_dataset, header):
sar = SARPlus(spark, **header, cache_path="tests/test_e2e_cache")
df = spark.createDataFrame(pandas_dummy_dataset)
sar.fit(df)
test_df = spark.createDataFrame(
pd.DataFrame({header["col_user"]: [3], header["col_item"]: [2]}))
'''
@ Author : <NAME>
@ E-mail : <EMAIL>
@ Github : https://github.com/WooilJeong/PublicDataReader
@ Blog : https://wooiljeong.github.io
'''
import pandas as pd
import numpy as np
import datetime
import requests
from bs4 import BeautifulSoup
from PublicDataReader.PublicDataPortal.__init__ import *
class AptTradeReader(Common):
def __init__(self, serviceKey):
super().__init__(serviceKey)
# Validate the ServiceKey
api_url = "http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcAptTrade?serviceKey=" + self.serviceKey
super().test(api_url)
def CodeFinder(self, name):
'''
The MOLIT real-transaction-price Open API identifies districts by a region code: the first 5 digits (the district, "gu") of the 10-digit legal-dong code.
This method looks up the per-district codes to use with the API; it takes a region name string and returns the matching results as a pandas DataFrame.
'''
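# Example usage (hypothetical district name): AptTradeReader(serviceKey).CodeFinder('종로구')
# returns the matching 법정동명 / 법정구코드 rows as a DataFrame.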
result = self.code[self.code['법정동명'].str.contains(name)][['법정동명','법정구코드']]
result.index = range(len(result))
return result
def DataReader(self, LAWD_CD, DEAL_YMD):
'''
Takes a region code (LAWD_CD) and a contract month (DEAL_YMD) and returns apartment real-transaction records as a pandas DataFrame.
'''
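# Example usage (hypothetical arguments): AptTradeReader(serviceKey).DataReader(LAWD_CD='11110', DEAL_YMD='202012')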
# URL
url_1="http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcAptTrade?LAWD_CD="+LAWD_CD
url_2="&DEAL_YMD=" + DEAL_YMD
url_3="&serviceKey=" + self.serviceKey
url_4="&numOfRows=99999"
url = url_1+url_2+url_3+url_4
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, 'lxml-xml')
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = ['법정동','지역코드','아파트','지번','년','월','일','건축년도','전용면적','층','거래금액']
for t in te:
for variable in variables:
try :
globals()[variable] = t.find(variable).text
except :
globals()[variable] = np.nan
data = pd.DataFrame(
[[법정동,지역코드,아파트,지번,년,월,일,건축년도,전용면적,층,거래금액]],
columns = variables
)
df = pd.concat([df, data])
# Set Columns
colNames = ['지역코드','법정동','거래일','아파트','지번','전용면적','층','건축년도','거래금액']
# Feature Engineering
try:
if (len(df['년']) != 0) and (len(df['월']) != 0) and (len(df['일']) != 0):
df['거래일'] = df['년'] + '-' + df['월'] + '-' + df['일']
df['거래일'] = pd.to_datetime(df['거래일'])
df['거래금액'] = pd.to_numeric(df['거래금액'].str.replace(',',''))
except:
df = pd.DataFrame(columns=colNames)
print("조회할 자료가 없습니다.")
# Arange Columns
df = df[colNames]
df = df.sort_values(['법정동','거래일'])
df['법정동'] = df['법정동'].str.strip()
df.index = range(len(df))
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, 'lxml-xml')
# Filtering
te = xmlsoup.findAll("header")
# An error was raised even though the request itself succeeded -> bug in this Python code
if te[0].find('resultCode').text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Error reported by the Open API service provider
else:
print(">>> Open API Error: {}".format(te[0].find['resultMsg']))
def DataCollector(self, LAWD_CD, start_date, end_date):
'''
Collects data over a specified period (start_date to end_date, month by month).
'''
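# Example usage (hypothetical arguments): AptTradeReader(serviceKey).DataCollector('11110', '2020-01', '2020-12')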
end_date = datetime.datetime.strptime(end_date, "%Y-%m")
end_date = end_date + datetime.timedelta(days=31)
end_date = datetime.datetime.strftime(end_date, "%Y-%m")
ts = pd.date_range(start=start_date, end=end_date, freq='m')
date_list = list(ts.strftime('%Y%m'))
df = pd.DataFrame()
df_sum = pd.DataFrame()
import lightgbm as lgb
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import numpy as np
import pandas as pd
df_x = pd.read_csv('../../resource/Term2/train_processed.csv') # x: explanatory variables (features)
df_test = pd.read_csv('../../resource/Term2/test_processed.csv') # test data
df = pd.concat([df_x, df_test], axis=0)
from core.models.metrics import cross_compute, avg_gain_ratio, gain_mean, rejection_ratio, loss_sum, MAX_GAIN
def get_infos(min_offer, offer, metrics=None, do_cross_compute=False):
if metrics is None:
metrics = [avg_gain_ratio, gain_mean, rejection_ratio, loss_sum]
#df = pd.DataFrame()
size1, size2 = len(min_offer), len(offer)
if size1 != size2:
print("WARNING: different shapes!!!", size1, size2)
min_size = min(size1, size2)
min_offer = min_offer[:min_size]
offer = offer[:min_size]
infos = dict()
for idx, metric in enumerate(metrics):
if do_cross_compute:
infos[metric.__name__] = cross_compute(min_offer, offer, metric)
else:
infos[metric.__name__] = metric(min_offer, offer)
return infos
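# Example (hypothetical offer lists): get_infos(min_offer=[10, 20, 30], offer=[15, 20, 25])
# returns a dict keyed by metric name, e.g. {'avg_gain_ratio': ..., 'rejection_ratio': ...}.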
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
import pandas as pd
import numpy as np
def get_dfs_full_prop():
dfs = {}
dfs_full = {}
result_df = pd.DataFrame(index=range(105))
index=["Proposer", "Proposer + DSS"]
stats = pd.DataFrame(index=index)
#TREATMENTS = {"t00", "t10a", "t10b", "t11a", "t11b", "t11c"}
TREATMENTS_MAPPING = {
"t00": "T0",
"t10a": "t1.0",
"t10b": "t1.1",
"t11a": "t1.2",
"t11b": "t1.3",
"t11c": "t1.4",
}
TREATMENTS = TREATMENTS_MAPPING.values()
for treatment, new_treatment in TREATMENTS_MAPPING.items():
# Read and sanitize the data
df_full = pd.read_csv(f"../data/{treatment}/export/result__{treatment}_prop.csv")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 13 10:08:05 2021
@author: babraham
"""
from django.db.models import IntegerField
from django.db.models.functions import Cast
from django.db.models import F
import pandas as pd
import numpy as np
import json
import os
import time
from va_explorer.va_data_management.models import Location
from va_explorer.va_data_management.utils.loading import get_va_summary_stats
# ============ GEOJSON Data (for map) =================
# load geojson data from flat file (will likely migrate to a database later)
def load_geojson_data(json_file):
geojson = None
if os.path.isfile(json_file):
raw_json = open(json_file, "r")
geojson = json.loads(raw_json.read())
raw_json.close()
# add min and max coordinates for mapping
for i, g in enumerate(geojson["features"]):
coordinate_list = g["geometry"]["coordinates"]
coordinate_stat_tables = []
for coords in coordinate_list:
if len(coords) == 1:
coords = coords[0]
coordinate_stat_tables.append(
pd.DataFrame(coords, columns=["lon", "lat"]).describe()
)
g["properties"]["area_name"] += " {}".format(
g["properties"]["area_level_label"]
)
g["properties"]["min_x"] = min(
[stat_df["lon"]["min"] for stat_df in coordinate_stat_tables]
)
g["properties"]["max_x"] = max(
[stat_df["lon"]["max"] for stat_df in coordinate_stat_tables]
)
g["properties"]["min_y"] = min(
[stat_df["lat"]["min"] for stat_df in coordinate_stat_tables]
)
g["properties"]["max_y"] = max(
[stat_df["lat"]["max"] for stat_df in coordinate_stat_tables]
)
geojson["features"][i] = g
# save total districts and provinces for future use
geojson["district_count"] = len(
[
f
for f in geojson["features"]
if f["properties"]["area_level_label"] == "District"
]
)
geojson["province_count"] = len(
[
f
for f in geojson["features"]
if f["properties"]["area_level_label"] == "Province"
]
)
return geojson
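# Example usage (hypothetical path): geojson = load_geojson_data("static/data/regions.geojson")
# Each feature then carries min_x/max_x/min_y/max_y bounds, and the top level gains
# district_count / province_count, which the map view can use directly.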
# ============ VA Data =================
def load_va_data(user, geographic_levels=None, date_cutoff="1901-01-01"):
# the dashboard requires date of death, exclude if the date is unknown
# Using .values at the end lets us do select_related("causes") which drastically speeds up the query.
user_vas = user.verbal_autopsies(date_cutoff=date_cutoff)
# get stats on last update and last va submission date
update_stats = get_va_summary_stats(user_vas)
all_vas = user_vas\
.only(
"id",
"Id10019",
"Id10058",
"Id10023",
"ageInYears",
"age_group",
"isNeonatal1",
"isChild1",
"isAdult1",
"location",
) \
.exclude(Id10023__in=["dk", "DK"]) \
.exclude(location__isnull=True) \
.select_related("location") \
.select_related("causes") \
.values(
"id",
"Id10019",
"Id10058",
"age_group",
"isNeonatal1",
"isChild1",
"isAdult1",
'location__id',
'location__name',
'ageInYears',
date=F("Id10023"),
cause=F("causes__cause"),
)
if not all_vas:
return json.dumps({"data": {"valid": pd.DataFrame().to_json(), "invalid": pd.DataFrame().to_json()}, "update_stats": {update_stats}})
# Build a dictionary of location ancestors for each facility
# TODO: This is not efficient (though it's better than 2 DB queries per VA)
# TODO: This assumes that all VAs will occur in a facility, ok?
# TODO: if there is no location data, we could use the location associated with the interviewer
location_types = dict()
locations = {}
location_ancestors = {
location.id: location.get_ancestors()
for location in Location.objects.filter(location_type="facility")
}
for va in all_vas:
# Find parents (likely district and province).
for ancestor in location_ancestors[va['location__id']]:
va[ancestor.location_type] = ancestor.name
#location_types.add(ancestor.location_type)
location_types[ancestor.depth] = ancestor.location_type
locations[ancestor.name] = ancestor.location_type
# Clean up location fields.
va["location"] = va["location__name"]
del va["location__name"]
del va["location__id"]
# Convert list to dataframe.
va_df = pd.DataFrame.from_records(all_vas)
# convert dates to datetimes
va_df["date"] = pd.to_datetime(va_df["date"])
va_df["age"] =
|
pd.to_numeric(va_df["ageInYears"], errors="coerce")
|
pandas.to_numeric
|
import streamlit as st
import pandas as pd
import numpy as np
import datetime
import plotly.express as px
import base64
def app():
LOGO_IMAGE_IBM = "apps/ibm.png"
LOGO_IMAGE_U_OF_F = "apps/u_of_f.svg.png"
LOGO_IMAGE_BRIGHTER = "apps/brighter_potential_logo.png"
st.markdown(
"""
<style>
.container {
display: flex;
}
.logo-text {
font-weight:700 !important;
font-size:50px !important;
color: #f9a01b !important;
padding-top: 75px !important;
}
.logo-img {
float: left;
position: relative;
margin-top: 600px;
}
#logo {
position: absolute;
float: right;
}
</style>
""",
unsafe_allow_html=True
)
st.markdown(
f"""
<img class="logo-img" src="data:image/png;base64,{base64.b64encode(open(LOGO_IMAGE_IBM, "rb").read()).decode()}" width="100x`" height="40" style="border:20px;margin:0px" />
<img class="logo-img" src="data:image/png;base64,{base64.b64encode(open(LOGO_IMAGE_U_OF_F, "rb").read()).decode()}" width="200" height="40" style="border:20px;margin:0px"/>
              
          
<img class="logo" src="data:image/png;base64,{base64.b64encode(open(LOGO_IMAGE_BRIGHTER, "rb").read()).decode()}" width="100" height="100" />
""",
unsafe_allow_html=True
)
st.markdown('---')
st.header("Solar Rooftop Potential Prediction")
# Sidebar
st.sidebar.header('Choose Time Range to View:')
min_date = st.sidebar.date_input('Min Date', datetime.datetime(2019, 1, 1))
min_date = min_date.strftime('%m/%d/%y')
max_date = st.sidebar.date_input('Max Date', datetime.datetime(2019, 12, 31))
max_date = max_date.strftime('%m/%d/%y')
st.sidebar.header('Choose Zipcode to View:')
# Declare zipcode list
zipcodes = [33131,33040,34112,33916,33407,33935,33471,33950,
34266,34994,34972,34236,34950,34205,33873,32960,33830,33606,33755,34741,33525,32806,34601,
32796,33513,32778,32771,34453,32720,34471,32621,32110,32601,32177,32456,32080,32091,32054,
32066,32347,32401,32327,
32025,32064,32063,32202,32502,32503,32424,32321,32304,32340,32344,
32351,32570,32034,32433,32536,32428,32448,32425,32602,32603,32604,32605,32606,
32607,32608,32609,32610,32611,32612,32614,32627,32641,32653,32402,32404,32405,32406,
32412,32073,32081,32099,32201,32203,32204,32205,32206,32207,32208,32209,
32210,32211,32212,32214,32216,32217,32218,32219,32220,32221,32222,32223,32224,
32225,32226,32227,32228,32229,32233,32234,32235,32236,32237,32238,32239,
32241,32244,32245,32246,32247,32250,32254,32255,32256,32257,32258,32266,32277,
32501,32504,32505,32514,32520,32522,32523,32524,32591,33601,33602,33603,33604,
33605,33607,33608,33609,33610,33611,33612,33613,33615,33616,33617,33619,
33620,33621,33622,33623,33629,33630,33631,33633,33634,33637,33646,33647,33650,33655,33660,33661,
33662,33664,33672,33673,33674,33675,33677,33679,33680,33681,33686,33901,
33902,33903,33905,33906,33907,33911,33912,33913,33917,33919,33966,33971,33990,32301,32302,32303,32305,32306,
32307,32308,32309,32310,32311,32312,32313,32314,32316,32317,32395,32399,
33101,33109,33111,33114,33125,33126,33127,33128,33129,33130,33132,33133,33134,33135,33136,33137,33138,
33139,33140,33142,33144,33145,33146,33147,33149,33150,33151,33159,33222,33233,33234,
33238,33242,33245,33255,32789,32801,32802,32803,32804,32805,32807,32808,32809,
32810,32811,32812,32814,32819,32822,32824,32827,32829,32832,32834,
32835,32839,32853,32854,32855,32856,32861,32862,32878,32885,32886,
32891,33401,33402,33403,33405,33409,33411,33412,33417,33756,33757,33758,
33759,33761,33763,33764,33765,33766,33767,33769,33302,
33303,33304,33305,33306,33307,33308,33309,33311,33312,33315,33316,33334,33338,33339,33348,
33394
]
# Put client and date options in the sidebar
selected_zip = st.sidebar.selectbox(
'Choose Zipcode:',
zipcodes,
key='zipcodes'
)
st.markdown("""
* Renewables currently account for roughly only 4% of energy production in Florida.
* Stakeholders need to know how solar energy sources can supplement the power grid.
* The sunburst chart below shows the daily potential of energy demand that could be supplied by rooftop solar energy for 2019.
* This projection for 2019 is based on predictive modeling that predicts the daily rooftop solar energy potential and the energy demand based on the weather.
""")
# area_stats = pd.read_csv('data/RPMSZips.csv', dtype={'zip':str})
area_stats = pd.read_csv('apps/florida_weather_w_predictions_and_zip_codes.csv', dtype={'zipcode':str})
|
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import pytest
import numpy as np
import pandas
import matplotlib
import modin.pandas as pd
import io
from modin.pandas.test.utils import (
random_state,
RAND_LOW,
RAND_HIGH,
df_equals,
test_data_values,
test_data_keys,
create_test_dfs,
test_data,
)
from modin.config import NPartitions
NPartitions.put(4)
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
@pytest.mark.parametrize("method", ["items", "iteritems", "iterrows"])
def test_items_iteritems_iterrows(method):
data = test_data["float_nan_data"]
modin_df, pandas_df = pd.DataFrame(data), pandas.DataFrame(data)
for modin_item, pandas_item in zip(
getattr(modin_df, method)(), getattr(pandas_df, method)()
):
modin_index, modin_series = modin_item
pandas_index, pandas_series = pandas_item
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("name", [None, "NotPandas"])
def test_itertuples_name(name):
data = test_data["float_nan_data"]
modin_df, pandas_df = pd.DataFrame(data), pandas.DataFrame(data)
modin_it_custom = modin_df.itertuples(name=name)
pandas_it_custom = pandas_df.itertuples(name=name)
for modin_row, pandas_row in zip(modin_it_custom, pandas_it_custom):
np.testing.assert_equal(modin_row, pandas_row)
def test_itertuples_multiindex():
data = test_data["int_data"]
modin_df, pandas_df = pd.DataFrame(data), pandas.DataFrame(data)
new_idx = pd.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))]
)
modin_df.columns = new_idx
pandas_df.columns = new_idx
modin_it_custom = modin_df.itertuples()
pandas_it_custom = pandas_df.itertuples()
for modin_row, pandas_row in zip(modin_it_custom, pandas_it_custom):
np.testing.assert_equal(modin_row, pandas_row)
def test___iter__():
modin_df = pd.DataFrame(test_data_values[0])
pandas_df = pandas.DataFrame(test_data_values[0])
modin_iterator = modin_df.__iter__()
# Check that modin_iterator implements the iterator interface
assert hasattr(modin_iterator, "__iter__")
assert hasattr(modin_iterator, "next") or hasattr(modin_iterator, "__next__")
pd_iterator = pandas_df.__iter__()
assert list(modin_iterator) == list(pd_iterator)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___contains__(request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 15:39:02 2018
@author: joyce
"""
import pandas as pd
import numpy as np
from numpy.matlib import repmat
from stats import get_stockdata_from_sql,get_tradedate,Corr,Delta,Rank,Cross_max,\
Cross_min,Delay,Sum,Mean,STD,TsRank,TsMax,TsMin,DecayLinear,Count,SMA,Cov,DTM,DBM,\
Highday,Lowday,HD,LD,RegBeta,RegResi,SUMIF,get_indexdata_from_sql,timer,get_fama
class stAlpha(object):
def __init__(self,begin,end):
self.begin = begin
self.end = end
self.close = get_stockdata_from_sql(1,self.begin,self.end,'Close')
self.open = get_stockdata_from_sql(1,self.begin,self.end,'Open')
self.high = get_stockdata_from_sql(1,self.begin,self.end,'High')
self.low = get_stockdata_from_sql(1,self.begin,self.end,'Low')
self.volume = get_stockdata_from_sql(1,self.begin,self.end,'Vol')
self.amt = get_stockdata_from_sql(1,self.begin,self.end,'Amount')
self.vwap = get_stockdata_from_sql(1,self.begin,self.end,'Vwap')
self.ret = get_stockdata_from_sql(1,begin,end,'Pctchg')
self.close_index = get_indexdata_from_sql(1,begin,end,'close','000001.SH')
self.open_index = get_indexdata_from_sql(1,begin,end,'open','000001.SH')
# self.mkt = get_fama_from_sql()
@timer
def alpha1(self):
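# Note: as implemented below, alpha1 correlates, over a 6-day window, the Rank of the
# 1-day change in log volume with the Rank of the intraday return (Close - Open) / Open.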
volume = self.volume
ln_volume = np.log(volume)
ln_volume_delta = Delta(ln_volume,1)
close = self.close
Open = self.open
price_temp = pd.concat([close,Open],axis = 1,join = 'outer')
price_temp['ret'] = (price_temp['Close'] - price_temp['Open'])/price_temp['Open']
del price_temp['Close'],price_temp['Open']
r_ln_volume_delta = Rank(ln_volume_delta)
r_ret = Rank(price_temp)
rank = pd.concat([r_ln_volume_delta,r_ret],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,6)
alpha = corr
alpha.columns = ['alpha1']
return alpha
@timer
def alpha2(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
temp['alpha'] = (2 * temp['Close'] - temp['Low'] - temp['High']) \
/ (temp['High'] - temp['Low'])
del temp['Close'],temp['Low'],temp['High']
alpha = -1 * Delta(temp,1)
alpha.columns = ['alpha2']
return alpha
@timer
def alpha3(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
close_delay = Delay(pd.DataFrame(temp['Close']),1)
close_delay.columns = ['close_delay']
temp = pd.concat([temp,close_delay],axis = 1,join = 'inner')
temp['min'] = Cross_max(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['Low']))
temp['max'] = Cross_min(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['High']))
temp['alpha_temp'] = 0
temp['alpha_temp'][temp['Close'] > temp['close_delay']] = temp['Close'] - temp['min']
temp['alpha_temp'][temp['Close'] < temp['close_delay']] = temp['Close'] - temp['max']
alpha = Sum(pd.DataFrame(temp['alpha_temp']),6)
alpha.columns = ['alpha3']
return alpha
@timer
def alpha4(self):
close = self.close
volume = self.volume
close_mean_2 = Mean(close,2)
close_mean_8 = Mean(close,8)
close_std = STD(close,8)
volume_mean_20 = Mean(volume,20)
data = pd.concat([close_mean_2,close_mean_8,close_std,volume_mean_20,volume],axis = 1,join = 'inner')
data.columns = ['close_mean_2','close_mean_8','close_std','volume_mean_20','volume']
data['alpha'] = -1
data['alpha'][data['close_mean_2'] < data['close_mean_8'] - data['close_std']] = 1
data['alpha'][data['volume']/data['volume_mean_20'] >= 1] = 1
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha4']
return alpha
@timer
def alpha5(self):
volume = self.volume
high = self.high
r1 = TsRank(volume,5)
r2 = TsRank(high,5)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(corr,5)
alpha.columns = ['alpha5']
return alpha
@timer
def alpha6(self):
Open = self.open
high = self.high
df = pd.concat([Open,high],axis = 1,join = 'inner')
df['price'] = df['Open'] * 0.85 + df['High'] * 0.15
df_delta = Delta(pd.DataFrame(df['price']),1)
alpha = Rank(np.sign(df_delta))
alpha.columns = ['alpha6']
return alpha
@timer
def alpha7(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_delta = Delta(volume,3)
data = pd.concat([close,vwap],axis = 1,join = 'inner')
data['diff'] = data['Vwap'] - data['Close']
r1 = Rank(TsMax(pd.DataFrame(data['diff']),3))
r2 = Rank(TsMin(pd.DataFrame(data['diff']),3))
r3 = Rank(volume_delta)
rank = pd.concat([r1,r2,r3],axis = 1,join = 'inner')
rank.columns = ['r1','r2','r3']
alpha = (rank['r1'] + rank['r2'])* rank['r3']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha7']
return alpha
@timer
def alpha8(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
data_price = (data['High'] + data['Low'])/2 * 0.2 + data['Vwap'] * 0.8 # standard weights: 0.2 * midpoint + 0.8 * vwap
data_price_delta = Delta(pd.DataFrame(data_price),4) * -1
alpha = Rank(data_price_delta)
alpha.columns = ['alpha8']
return alpha
@timer
def alpha9(self):
high = self.high
low = self.low
volume = self.volume
data = pd.concat([high,low,volume],axis = 1,join = 'inner')
data['price']= (data['High'] + data['Low'])/2
data['price_delay'] = Delay(pd.DataFrame(data['price']),1)
alpha_temp = (data['price'] - data['price_delay']) * (data['High'] - data['Low'])/data['Vol']
alpha_temp_unstack = alpha_temp.unstack(level = 'ID')
alpha = alpha_temp_unstack.ewm(span = 7, ignore_na = True, min_periods = 7).mean()
alpha_final = alpha.stack()
alpha = pd.DataFrame(alpha_final)
alpha.columns = ['alpha9']
return alpha
@timer
def alpha10(self):
ret = self.ret
close = self.close
ret_std = STD(pd.DataFrame(ret),20)
ret_std.columns = ['ret_std']
data = pd.concat([ret,close,ret_std],axis = 1, join = 'inner')
temp1 = pd.DataFrame(data['ret_std'][data['Pctchg'] < 0])
temp2 = pd.DataFrame(data['Close'][data['Pctchg'] >= 0])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0,join = 'outer')
temp_order = pd.concat([data,temp],axis = 1)
temp_square = pd.DataFrame(np.power(temp_order['temp'],2))
alpha_temp = TsMax(temp_square,5)
alpha = Rank(alpha_temp)
alpha.columns = ['alpha10']
return alpha
@timer
def alpha11(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume],axis = 1,join = 'inner')
data_temp = ((data['Close'] - data['Low']) - (data['High'] - data['Close']))\
/(data['High'] - data['Low']) * data['Vol']
alpha = Sum(pd.DataFrame(data_temp),6)
alpha.columns = ['alpha11']
return alpha
@timer
def alpha12(self):
Open = self.open
vwap = self.vwap
close = self.close
data = pd.concat([Open,vwap,close],axis = 1, join = 'inner')
data['p1'] = data['Open'] - Mean(data['Open'],10)
data['p2'] = data['Close'] - data['Vwap']
r1 = Rank(pd.DataFrame(data['p1']))
r2 = Rank(pd.DataFrame(np.abs(data['p2'])))
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
alpha = rank['r1'] - rank['r2']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha12']
return alpha
@timer
def alpha13(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
alpha = (data['High'] + data['Low'])/2 - data['Vwap']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha13']
return alpha
@timer
def alpha14(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close'] - data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha14']
return alpha
@timer
def alpha15(self):
Open = self.open
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([Open,close_delay],axis = 1,join = 'inner')
alpha = data['Open']/data['close_delay'] - 1
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha15']
return alpha
@timer
def alpha16(self):
vwap = self.vwap
volume = self.volume
data = pd.concat([vwap,volume],axis = 1, join = 'inner')
r1 = Rank(pd.DataFrame(data['Vol']))
r2 = Rank(pd.DataFrame(data['Vwap']))
rank = pd.concat([r1,r2],axis = 1, join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(Rank(corr),5)
alpha.columns = ['alpha16']
return alpha
@timer
def alpha17(self):
vwap = self.vwap
close = self.close
data = pd.concat([vwap,close],axis = 1, join = 'inner')
data['vwap_max15'] = TsMax(data['Vwap'],15)
data['close_delta5'] = Delta(data['Close'],5)
temp = np.power(data['vwap_max15'],data['close_delta5'])
alpha = Rank(pd.DataFrame(temp))
alpha.columns = ['alpha17']
return alpha
@timer
def alpha18(self):
"""
this one is similar with alpha14
"""
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close']/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha18']
return alpha
@timer
def alpha19(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
data['temp1'] = (data['Close'] - data['close_delay'])/data['close_delay']
data['temp2'] = (data['Close'] - data['close_delay'])/data['Close']
temp1 = pd.DataFrame(data['temp1'][data['Close'] < data['close_delay']])
temp2 = pd.DataFrame(data['temp2'][data['Close'] >= data['close_delay']])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0)
data = pd.concat([data,temp],axis = 1,join = 'outer')
alpha = pd.DataFrame(data['temp'])
alpha.columns = ['alpha19']
return alpha
@timer
def alpha20(self):
close = self.close
close_delay = Delay(close,6)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha20']
return alpha
@timer
def alpha21(self):
close = self.close
close_mean = Mean(close,6)
alpha = RegBeta(0,close_mean,None,6)
alpha.columns = ['alpha21']
return alpha
@timer
def alpha22(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1,join = 'inner')
data.columns = ['close','close_mean']
temp = pd.DataFrame((data['close'] - data['close_mean'])/data['close_mean'])
temp_delay = Delay(temp,3)
data_temp = pd.concat([temp,temp_delay],axis = 1,join = 'inner')
data_temp.columns = ['temp','temp_delay']
temp2 = pd.DataFrame(data_temp['temp'] - data_temp['temp_delay'])
alpha = SMA(temp2,12,1)
alpha.columns = ['alpha22']
return alpha
@timer
def alpha23(self):
close = self.close
close_std = STD(close,20)
close_delay = Delay(close,1)
data = pd.concat([close,close_std,close_delay],axis = 1, join = 'inner')
data.columns = ['Close','close_std','close_delay']
data['temp'] = data['close_std']
data['temp'][data['Close'] <= data['close_delay']] = 0
temp = pd.DataFrame(data['temp'])
sma1 = SMA(temp,20,1)
sma2 = SMA(pd.DataFrame(data['close_std']),20,1)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'])
alpha.columns = ['alpha23']
return alpha
@timer
def alpha24(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis=1 ,join = 'inner' )
temp = data['Close'] - data['close_delay']
temp = pd.DataFrame(temp)
alpha = SMA(temp,5,1)
alpha.columns = ['alpha24']
return alpha
@timer
def alpha25(self):
close = self.close
close_delta = Delta(close,7)
ret = self.ret
r1 = Rank(close_delta)
r3 = Rank(Sum(ret,250))
volume = self.volume
volume_mean = Mean(pd.DataFrame(volume['Vol']),20)
volume_mean.columns = ['volume_mean']
data = pd.concat([volume,volume_mean],axis = 1,join = 'inner')
temp0 = pd.DataFrame(data['Vol']/data['volume_mean'])
temp = DecayLinear(temp0,9)
r2 = Rank(temp)
rank = pd.concat([r1,r2,r3],axis = 1, join = 'inner')
rank.columns = ['r1','r2','r3']
alpha = pd.DataFrame(-1 * rank['r1'] * (1 - rank['r2']) * rank['r3'])
alpha.columns = ['alpha25']
return alpha
@timer
def alpha26(self):
close = self.close
vwap = self.vwap
close_mean7 = Mean(close,7)
close_mean7.columns = ['close_mean7']
close_delay5 = Delay(close,5)
close_delay5.columns = ['close_delay5']
data = pd.concat([vwap,close_delay5],axis = 1,join = 'inner')
corr = Corr(data,230)
corr.columns = ['corr']
data_temp = pd.concat([corr,close_mean7,close],axis = 1,join = 'inner')
alpha = data_temp['close_mean7'] - data_temp['Close'] + data_temp['corr']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha26']
return alpha
@timer
def alpha27(self):
"""
uncompleted
"""
close = self.close
close_delay3 = Delay(close,3)
close_delay6 = Delay(close,6)
data = pd.concat([close,close_delay3,close_delay6],axis = 1,join = 'inner')
data.columns = ['close','close_delay3','close_delay6']
temp1 = pd.DataFrame((data['close'] - data['close_delay3'])/data['close_delay3'] * 100)
temp2 = pd.DataFrame((data['close'] - data['close_delay6'])/data['close_delay6'] * 100)
data_temp = pd.concat([temp1,temp2],axis = 1,join = 'inner')
data_temp.columns = ['temp1','temp2']
temp = pd.DataFrame(data_temp['temp1'] + data_temp['temp2'])
alpha = DecayLinear(temp,12)
alpha.columns = ['alpha27']
return alpha
@timer
def alpha28(self):
close = self.close
low = self.low
high = self.high
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['Close','low_min','high_max']
temp1 = pd.DataFrame((data['Close'] - data['low_min']) /(data['high_max'] - data['low_min']))
sma1 = SMA(temp1,3,1)
sma2 = SMA(sma1,3,1)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1'] * 2 - sma['sma2'] * 3)
alpha.columns = ['alpha28']
return alpha
@timer
def alpha29(self):
close = self.close
volume = self.volume
close_delay = Delay(close,6)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay'] * data['Vol']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha29']
return alpha
@timer
def alpha30(self):
"""
uncompleted: needs the market factor data (see the commented-out self.mkt / get_fama in __init__)
"""
close = self.close
close_delay = Delay(close,1)
@timer
def alpha31(self):
close = self.close
close_delay = Delay(close,12)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha31']
return alpha
@timer
def alpha32(self):
volume = self.volume
high = self.high
r1 = Rank(volume)
r2 = Rank(high)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
corr = Corr(rank,3)
r = Rank(corr)
alpha = -1 * Sum(r,3)
alpha.columns = ['alpha32']
return alpha
@timer
def alpha33(self):
low = self.low
volume = self.volume
ret = self.ret
low_min = TsMin(low,5)
low_min_delay = Delay(low_min,5)
data1 = pd.concat([low_min,low_min_delay],axis = 1,join = 'inner')
data1.columns = ['low_min','low_min_delay']
ret_sum240 = Sum(ret,240)
ret_sum20 = Sum(ret,20)
ret_temp = pd.concat([ret_sum240,ret_sum20],axis = 1, join = 'inner')
ret_temp.columns = ['ret240','ret20']
temp1 = pd.DataFrame(data1['low_min_delay'] - data1['low_min'])
temp2 = pd.DataFrame((ret_temp['ret240'] - ret_temp['ret20'])/220)
r_temp2 = Rank(temp2)
r_volume = TsRank(volume,5)
temp = pd.concat([temp1,r_temp2,r_volume],axis = 1,join = 'inner')
temp.columns = ['temp1','r_temp2','r_volume']
alpha = temp['temp1'] * temp['r_temp2'] * temp['r_volume']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha33']
return alpha
@timer
def alpha34(self):
close = self.close
close_mean = Mean(close,12)
close_mean.columns = ['close_mean']
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
alpha = pd.DataFrame(data['close_mean']/data['Close'])
alpha.columns = ['alpha34']
return alpha
@timer
def alpha35(self):
volume = self.volume
Open = self.open
open_delay = Delay(Open,1)
open_delay.columns = ['open_delay']
open_linear = DecayLinear(Open,17)
open_linear.columns = ['open_linear']
open_delay_temp = DecayLinear(open_delay,15)
r1 = Rank(open_delay_temp)
data = pd.concat([Open,open_linear],axis = 1,join = 'inner')
Open_temp = data['Open'] * 0.65 + 0.35 * data['open_linear']
rank = pd.concat([volume,Open_temp],axis = 1, join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,7)
r2 = Rank(-1 * corr)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = Cross_min(pd.DataFrame(r['r1']),pd.DataFrame(r['r2']))
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha35']
return alpha
@timer
def alpha36(self):
volume = self.volume
vwap = self.vwap
r1 = Rank(volume)
r2 = Rank(vwap)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
corr = Corr(rank,6)
temp = Sum(corr,2)
alpha = Rank(temp)
alpha.columns = ['alpha36']
return alpha
@timer
def alpha37(self):
Open = self.open
ret = self.ret
open_sum = Sum(Open,5)
ret_sum = Sum(ret,5)
data = pd.concat([open_sum,ret_sum],axis = 1,join = 'inner')
data.columns = ['open_sum','ret_sum']
temp = data['open_sum'] * data['ret_sum']
temp_delay = Delay(temp,10)
data_temp = pd.concat([temp,temp_delay],axis = 1,join = 'inner')
data_temp.columns = ['temp','temp_delay']
alpha = -1 * Rank(pd.DataFrame(data_temp['temp'] - data_temp['temp_delay']))
alpha.columns = ['alpha37']
return alpha
@timer
def alpha38(self):
high = self.high
high_mean = Mean(high,20)
high_delta = Delta(high,2)
data = pd.concat([high,high_mean,high_delta],axis = 1,join = 'inner')
data.columns = ['high','high_mean','high_delta']
data['alpha'] = -1 * data['high_delta']
data['alpha'][data['high_mean'] >= data['high']] = 0
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha38']
return alpha
@timer
def alpha39(self):
close = self.close
Open = self.open
vwap = self.vwap
volume = self.volume
close_delta2 = Delta(close,2)
close_delta2_decay = DecayLinear(close_delta2,8)
r1 = Rank(close_delta2_decay)
price_temp = pd.concat([vwap,Open],axis = 1,join = 'inner')
price = pd.DataFrame(price_temp['Vwap'] * 0.3 + price_temp['Open'] * 0.7)
volume_mean = Mean(volume,180)
volume_mean_sum = Sum(volume_mean,37)
rank = pd.concat([price,volume_mean_sum],axis = 1,join = 'inner')
corr = Corr(rank,14)
corr_decay = DecayLinear(corr,12)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r2'] - r['r1'])
alpha.columns = ['alpha39']
return alpha
@timer
def alpha40(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
data = pd.concat([close,volume,close_delay],axis = 1, join = 'inner')
data.columns = ['close','volume','close_delay']
data['temp1'] = data['volume']
data['temp2'] = data['volume']
data['temp1'][data['close'] <= data['close_delay']] = 0
data['temp2'][data['close'] > data['close_delay']] = 0
s1 = Sum(pd.DataFrame(data['temp1']),26)
s2 = Sum(pd.DataFrame(data['temp2']),26)
s = pd.concat([s1,s2], axis = 1, join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'] * 100)
alpha.columns = ['alpha40']
return alpha
@timer
def alpha41(self):
vwap = self.vwap
vwap_delta = Delta(vwap,3)
vwap_delta_max = TsMax(vwap_delta,5)
alpha = -1 * Rank(vwap_delta_max)
alpha.columns = ['alpha41']
return alpha
@timer
def alpha42(self):
high = self.high
volume = self.volume
high_std = STD(high,10)
r1 = Rank(high_std)
data = pd.concat([high,volume],axis = 1,join = 'inner')
corr = Corr(data,10)
r = pd.concat([r1,corr],axis = 1,join = 'inner')
r.columns = ['r1','corr']
alpha = pd.DataFrame(-1 * r['r1'] * r['corr'])
alpha.columns = ['alpha42']
return alpha
@timer
def alpha43(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,6)
alpha.columns = ['alpha43']
return alpha
@timer
def alpha44(self):
volume = self.volume
vwap = self.vwap
low = self.low
volume_mean = Mean(volume,10)
rank = pd.concat([low,volume_mean],axis = 1,join = 'inner')
corr = Corr(rank,7)
corr_decay = DecayLinear(corr,6)
r1 = TsRank(corr_decay,4)
vwap_delta = Delta(vwap,3)
vwap_delta_decay = DecayLinear(vwap_delta,10)
r2 = TsRank(vwap_delta_decay,15)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha44']
return alpha
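# --- Usage sketch (illustrative only, not part of the original factor code) ---
# Assumes the SQL helpers imported above are configured for your database and that
# begin/end are date strings in whatever format those helpers expect; the dates and
# the concat of two factors below are placeholders.
if __name__ == '__main__':
    factors = stAlpha('2018-01-01', '2018-12-31')
    a1 = factors.alpha1()
    a14 = factors.alpha14()
    combined = pd.concat([a1, a14], axis=1, join='inner')
    print(combined.head())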
from datetime import datetime
import json
from os.path import join, exists
from tempfile import TemporaryDirectory
import numpy as np
import pandas as pd
from delphi_utils import read_params
from delphi_cdc_covidnet.update_sensor import update_sensor
params = read_params()
STATIC_DIR = params["static_file_dir"]
class TestUpdateSensor:
def test_syn_update_sensor(self):
with TemporaryDirectory() as temp_dir:
# Create synthetic data
state_1 = {"datadownload": [
{
"catchment": "California", "network": "Network A", "age_category": "Overall",
"year": "2020", "mmwr-year": "2020", "mmwr-week": "10",
"cumulative-rate": 2.5, "weekly-rate": 0.7
}, {
"catchment": "California", "network": "Network A", "age_category": "Overall",
"year": "2020", "mmwr-year": "2020", "mmwr-week": "11",
"cumulative-rate": 3.5, "weekly-rate": 1.4
}, {
"catchment": "California", "network": "Network A", "age_category": "Overall",
"year": "2020", "mmwr-year": "2020", "mmwr-week": "12",
"cumulative-rate": 4.2, "weekly-rate": 1.9
}]}
state_2 = {"datadownload": [
{
"catchment": "Pennsylvania", "network": "Network B", "age_category": "Overall",
"year": "2020", "mmwr-year": "2020", "mmwr-week": "10",
"cumulative-rate": 10.3, "weekly-rate": 0.9
}, {
"catchment": "Pennsylvania", "network": "Network B", "age_category": "Overall",
"year": "2020", "mmwr-year": "2020", "mmwr-week": "11",
"cumulative-rate": 11.2, "weekly-rate": 4.5
}, {
"catchment": "Pennsylvania", "network": "Network B", "age_category": "Overall",
"year": "2020", "mmwr-year": "2020", "mmwr-week": "12",
"cumulative-rate": 11.8, "weekly-rate": 1.2
}]}
state_files = [join(temp_dir, state) for state in ["state_1.json", "state_2.json"]]
with open(state_files[0], "w") as f_json:
json.dump(state_1, f_json)
with open(state_files[1], "w") as f_json:
json.dump(state_2, f_json)
for state_file in state_files:
assert exists(state_file)
mmwr_info = pd.DataFrame([
{
"mmwrid": 3036, "weekend": "2020-03-07", "weeknumber": 10,
"weekstart": "2020-03-01", "year": 2020, "seasonid": 59
}, {
"mmwrid": 3037, "weekend": "2020-03-14", "weeknumber": 11,
"weekstart": "2020-03-08", "year": 2020, "seasonid": 59
}, {
"mmwrid": 3038, "weekend": "2020-03-21", "weeknumber": 12,
"weekstart": "2020-03-15", "year": 2020, "seasonid": 59
}])
mmwr_info["weekstart"] = pd.to_datetime(mmwr_info["weekstart"])
mmwr_info["weekend"] =
|
pd.to_datetime(mmwr_info["weekend"])
|
pandas.to_datetime
|
"""
@ author: Maiyaozong
@ date: 2020.12.02
@ function: labeling; creates the downsampled BCG_down, ECG_down, etc.
@ output: BCG_down, ECG_down, label_down (aligned in 10 s segments)
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from Preprocessing import BCG_Operation
import os
data_dir = "D:/研究生/数据统计/玮玮师姐/对齐/result201912/新增数据/" #修改这个路径就可以了
def read_data(test_person,posture):
"""
:param test_person: subject name (folder name)
:param posture: subject posture (folder name)
:return: BCG, ECG, location_R, location_J
"""
filename = data_dir
file_dir = os.path.join(filename, test_person)
file_dir = os.path.join(file_dir, posture)
BCG_dir = os.path.join(file_dir, "new_orgData.txt")
ECG_dir = os.path.join(file_dir, "new_ECG.txt")
# RR_dir = os.path.join(file_dir, "RR.txt")
location_R_dir = os.path.join(file_dir,"location_R.txt")
location_J_dir = os.path.join(file_dir, "location_J.txt")
# label_dir = os.path.join(file_dir,"label.txt")
BCG = np.array(pd.read_csv(BCG_dir, header=None)).reshape(-1)
ECG = np.array(pd.read_csv(ECG_dir, header=None)).reshape(-1)
# RR = np.array(pd.read_csv(RR_dir, delimiter='\t', header=None)).reshape(-1)
location_R = np.array(pd.read_csv(location_R_dir)["Rpeak"]).reshape(-1)
location_J = np.array(pd.read_csv(location_J_dir)["Jpeak"]).reshape(-1)
# label = np.array(pd.read_csv(label_dir, header=None).reshape(-1))
return BCG, ECG, location_R,location_J
def get_Rpeak(test_person, posture, d=0, sample=1000, write_data=False):
"""
Read the R-peak locations for the given subject/posture from the label file.
:param test_person: subject name (folder name)
:param posture: subject posture (folder name)
:param d: index of the column to read from label.txt
:param sample: sampling frequency (Hz)
:param write_data: whether to write the peaks out as a csv (location_R.txt)
:return: sorted list of R-peak sample indices
"""
# ecg_lable = pd.read_csv(lable_dir,encoding='utf-8',sep='\s+',error_bad_lines=False)['Time'].to_numpy().reshape(-1)
lable_dir = data_dir
file_dir = os.path.join(lable_dir, test_person)
file_dir = os.path.join(file_dir, posture)
ECGlabel_dir = os.path.join(file_dir, "label.txt")
location_R_dir = data_dir+"/%s/%s/location_R.txt" % (test_person, posture)
print(location_R_dir)
ecg_lable = []
f = open(ECGlabel_dir, encoding='gbk') # txt files containing Chinese need an explicit encoding such as 'gbk' or 'utf-8'
for line in f.readlines()[1:]:
c = line.strip().split() # strip() removes leading/trailing whitespace, split() splits the line on whitespace into a list
e = c[d] # c is a list; c[d] selects the d-th column
ecg_lable.append(int(float(e) * sample))
ecg_lable.sort(key=None, reverse=False)
if write_data:
data = {
"Rpeak": ecg_lable
}
pd.DataFrame(data).to_csv(location_R_dir, index=False)
# print(ecg_lable)
return ecg_lable
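# Example call (illustrative; the __main__ block below reads pre-exported peaks via read_data instead):
#   rpeaks = get_Rpeak("MAJIAXIN", "YOU", d=0, sample=1000, write_data=False)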
def find_TPeak(data, peaks, th=50):
"""
Locate the true J-peaks or R-peaks around coarse peak positions.
:param data: BCG or ECG signal
:param peaks: preliminary peak positions (e.g. location_R exported from the labels)
:param th: half-width of the search window, in samples
:return: refined peak positions
"""
return_peak = []
for peak in peaks:
if peak > len(data): continue
min_win, max_win = max(0, int(peak - th)), min(len(data), int(peak + th))
return_peak.append(np.argmax(data[min_win:max_win]) + min_win)
return return_peak
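# Small self-contained sketch of find_TPeak on synthetic data (numpy only; the
# numbers are illustrative, not taken from any recording):
#
#   sig = np.zeros(300)
#   sig[[48, 152, 251]] = 1.0          # "true" peaks
#   coarse = [50, 150, 248]            # rough labels
#   find_TPeak(sig, coarse, th=10)     # -> [48, 152, 251]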
if __name__ == '__main__':
write_data = 0 # write_data = 1: write the downsampled peak/label files
write_data1 = 0 # write_data1 = 1: write the downsampled BCG and ECG signals; normally both flags are set to 1 together
test_person = "MAJIAXIN"
posture = "YOU"
location_R_dir = data_dir+"%s/%s/location_R_down.txt" % (test_person, posture)
location_J_dir = data_dir+"%s/%s/location_J_down.txt" % (test_person, posture)
orgData_down_dir = data_dir+"%s/%s/orgData_down.txt" % (test_person, posture)
label_down_dir = data_dir+"%s/%s/label_down.txt" % (test_person, posture)
ECG_down_dir = data_dir+"%s/%s/ECG_down.txt" % (test_person, posture)
preprocessing = BCG_Operation(sample_rate=100)
preprocessing_down = BCG_Operation(sample_rate=1000)
BCG, ECG, location_R,location_J = read_data(test_person,posture)
BCG_down = preprocessing_down.down_sample(BCG, down_radio=10)
ECG_down = preprocessing_down.down_sample(ECG, down_radio=10)
data_end = len(ECG_down) // 1000 * 1000
BCG_down1 = BCG_down[:data_end]
ECG_down1 = ECG_down[:data_end]
print("BCG长度:",BCG.shape,ECG.shape,"BCG_down长度:",BCG_down1.shape,ECG_down1.shape)
print("时长:",len(BCG)/60000,"min")
if write_data1:
pd.DataFrame(BCG_down1.reshape(-1)).to_csv(orgData_down_dir, index=False,header=None)
pd.DataFrame(ECG_down1.reshape(-1)).to_csv(ECG_down_dir, index=False, header=None)
# location_J = np.array([num for num in location_J if num < len(BCG)])
print(location_J.shape,location_R.shape)
# location_R = np.array([num for num in location_R if num < len(ECG)])
location_J = location_J // 10
location_R = location_R // 10
location_R_down = find_TPeak(ECG_down1, location_R, th=3)
location_J_down = find_TPeak(BCG_down1, location_J, th=3) # re-locate the J-peaks on the downsampled signal
location_R_down = np.array(location_R_down)
location_J_down = np.array(location_J_down)
print(location_J_down.shape, location_R_down.shape)
if write_data:
pd.DataFrame(location_R_down.reshape(-1)).to_csv(location_R_dir, index=False,header=None)
pd.DataFrame(location_J_down.reshape(-1)).to_csv(location_J_dir, index=False, header=None)
bcg = preprocessing.Butterworth(BCG_down1, "bandpass", low_cut=1, high_cut=10, order=2)
label = np.zeros(len(BCG_down))
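# Build a binary label at the downsampled rate: a 6-sample window (J-3..J+2, about 60 ms
# at 100 Hz after the 10x downsampling) around each J-peak is set to 1.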
for J in location_J_down:
if J < 3 or J > len(BCG_down) - 3:
continue
label[J - 3:J + 3] = 1
if write_data:
pd.DataFrame(label.reshape(-1)).to_csv(label_down_dir, index=False, header=None)
"""
Functions for comparing and visualizing model performance
"""
import os
import sys
import pdb
import pandas as pd
import numpy as np
import matplotlib
import logging
import json
from collections import OrderedDict
from atomsci.ddm.utils import datastore_functions as dsf
from atomsci.ddm.pipeline import mlmt_client_wrapper
from atomsci.ddm.pipeline import model_tracker as trkr
import atomsci.ddm.pipeline.model_pipeline as mp
#matplotlib.style.use('ggplot')
matplotlib.rc('xtick', labelsize=12)
matplotlib.rc('ytick', labelsize=12)
matplotlib.rc('axes', labelsize=12)
logging.basicConfig(format='%(asctime)-15s %(message)s')
nan = np.float32('nan')
client_wrapper = mlmt_client_wrapper.MLMTClientWrapper(ds_client=dsf.config_client())
client_wrapper.instantiate_mlmt_client()
#------------------------------------------------------------------------------------------------------------------
def get_collection_datasets(collection_name):
"""
Returns a list of training (dataset_key, bucket) tuples for models in the given collection.
"""
model_filter = {}
#models = list(trkr.get_full_metadata(model_filter, client_wrapper,
# collection_name=collection_name))
#if models == []:
# print("No matching models returned")
# return
#else:
# print("Found %d matching models" % len(models))
dataset_set = set()
models = trkr.get_metadata(model_filter, client_wrapper,
collection_name=collection_name)
for i, metadata_dict in enumerate(models):
if i % 10 == 0:
print("Looking at model %d" % i)
dataset_key = metadata_dict['ModelMetadata']['TrainingDataset']['dataset_key']
bucket = metadata_dict['ModelMetadata']['TrainingDataset']['bucket']
dataset_set.add((dataset_key, bucket))
return sorted(dataset_set)
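# Example (hypothetical collection name):
#   dsets = get_collection_datasets('pilot_fixed')
#   for dataset_key, bucket in dsets:
#       print(bucket, dataset_key)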
#------------------------------------------------------------------------------------------------------------------
def extract_collection_perf_metrics(collection_name, output_dir, pred_type='regression'):
"""
Obtain list of training datasets with models in the given collection. Get performance metrics for
models on each dataset and save them as CSV files in the given output directory.
"""
datasets = get_collection_datasets(collection_name)
os.makedirs(output_dir, exist_ok=True)
for dset_key, bucket in datasets:
dset_perf_df = get_training_perf_table(dset_key, bucket, collection_name, pred_type=pred_type)
dset_perf_file = '%s/%s_%s_model_perf_metrics.csv' % (output_dir, os.path.basename(dset_key).replace('.csv', ''), collection_name)
dset_perf_df.to_csv(dset_perf_file, index=False)
print('Wrote file %s' % dset_perf_file)
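# Example (hypothetical collection name and output directory, matching the
# '<collection>_perf' layout used further below in get_best_models_info):
#   extract_collection_perf_metrics('pilot_fixed', '/usr/local/data/pilot_fixed_perf',
#                                   pred_type='regression')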
#------------------------------------------------------------------------------------------------------------------
def get_training_perf_table(dataset_key, bucket, collection_name, pred_type='regression', other_filters = {}):
"""
Load performance metrics from the model tracker for all models saved in the model tracker DB under
a given collection that were trained against a particular dataset, and return them as a DataFrame
with one row per model, including the training parameters that vary between models.
"""
model_filter = {"ModelMetadata.TrainingDataset.dataset_key" : dataset_key,
"ModelMetadata.TrainingDataset.bucket" : bucket,
"ModelMetrics.TrainingRun.label" : "best",}
model_filter.update(other_filters)
print("Finding models trained on %s dataset %s" % (bucket, dataset_key))
models = list(trkr.get_full_metadata(model_filter, client_wrapper,
collection_name=collection_name))
if models == []:
print("No matching models returned")
return
else:
print("Found %d matching models" % len(models))
model_uuid_list = []
model_type_list = []
max_epochs_list = []
learning_rate_list = []
dropouts_list = []
layer_sizes_list = []
featurizer_list = []
splitter_list = []
rf_estimators_list = []
rf_max_features_list = []
rf_max_depth_list = []
xgb_learning_rate_list = []
xgb_gamma_list = []
best_epoch_list = []
subsets = ['train', 'valid', 'test']
score_dict = {}
for subset in subsets:
score_dict[subset] = []
if pred_type == 'regression':
metric_type = 'r2_score'
else:
metric_type = 'roc_auc_score'
for metadata_dict in models:
model_uuid = metadata_dict['model_uuid']
#print("Got metadata for model UUID %s" % model_uuid)
# Get model metrics for this model
metrics_dicts = metadata_dict['ModelMetrics']['TrainingRun']
#print("Got %d metrics dicts for model %s" % (len(metrics_dicts), model_uuid))
if len(metrics_dicts) < 3:
print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
continue
# TODO: get_full_metadata() seems to ignore label='best' constraint; below is workaround
#if len(metrics_dicts) > 3:
# raise Exception('Got more than one set of best epoch metrics for model %s' % model_uuid)
subset_metrics = {}
for metrics_dict in metrics_dicts:
if metrics_dict['label'] == 'best':
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['PredictionResults']
model_uuid_list.append(model_uuid)
model_params = metadata_dict['ModelMetadata']['ModelParameters']
model_type = model_params['model_type']
model_type_list.append(model_type)
featurizer = model_params['featurizer']
featurizer_list.append(featurizer)
split_params = metadata_dict['ModelMetadata']['SplittingParameters']['Splitting']
splitter_list.append(split_params['splitter'])
dataset_key = metadata_dict['ModelMetadata']['TrainingDataset']['dataset_key']
if model_type == 'NN':
nn_params = metadata_dict['ModelMetadata']['NNSpecific']
max_epochs_list.append(nn_params['max_epochs'])
best_epoch_list.append(nn_params['best_epoch'])
learning_rate_list.append(nn_params['learning_rate'])
layer_sizes_list.append(','.join(['%d' % s for s in nn_params['layer_sizes']]))
dropouts_list.append(','.join(['%.2f' % d for d in nn_params['dropouts']]))
rf_estimators_list.append(nan)
rf_max_features_list.append(nan)
rf_max_depth_list.append(nan)
xgb_learning_rate_list.append(nan)
xgb_gamma_list.append(nan)
if model_type == 'RF':
rf_params = metadata_dict['ModelMetadata']['RFSpecific']
rf_estimators_list.append(rf_params['rf_estimators'])
rf_max_features_list.append(rf_params['rf_max_features'])
rf_max_depth_list.append(rf_params['rf_max_depth'])
max_epochs_list.append(nan)
best_epoch_list.append(nan)
learning_rate_list.append(nan)
layer_sizes_list.append(nan)
dropouts_list.append(nan)
xgb_learning_rate_list.append(nan)
xgb_gamma_list.append(nan)
if model_type == 'xgboost':
xgb_params = metadata_dict['ModelMetadata']['xgbSpecific']
rf_estimators_list.append(nan)
rf_max_features_list.append(nan)
rf_max_depth_list.append(nan)
max_epochs_list.append(nan)
best_epoch_list.append(nan)
learning_rate_list.append(nan)
layer_sizes_list.append(nan)
dropouts_list.append(nan)
xgb_learning_rate_list.append(xgb_params["xgb_learning_rate"])
xgb_gamma_list.append(xgb_params["xgb_gamma"])
for subset in subsets:
score_dict[subset].append(subset_metrics[subset][metric_type])
perf_df = pd.DataFrame(dict(
model_uuid=model_uuid_list,
model_type=model_type_list,
dataset_key=dataset_key,
featurizer=featurizer_list,
splitter=splitter_list,
max_epochs=max_epochs_list,
best_epoch=best_epoch_list,
learning_rate=learning_rate_list,
layer_sizes=layer_sizes_list,
dropouts=dropouts_list,
rf_estimators=rf_estimators_list,
rf_max_features=rf_max_features_list,
rf_max_depth=rf_max_depth_list,
xgb_learning_rate = xgb_learning_rate_list,
xgb_gamma = xgb_gamma_list))
for subset in subsets:
metric_col = '%s_%s' % (metric_type, subset)
perf_df[metric_col] = score_dict[subset]
sort_metric = '%s_valid' % metric_type
perf_df = perf_df.sort_values(sort_metric, ascending=False)
return perf_df
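# Example (placeholder dataset_key/bucket/collection; use a dataset that actually has models
# in the collection):
#   perf_df = get_training_perf_table('my_dataset.csv', 'my_bucket', 'pilot_fixed')
#   print(perf_df[['model_uuid', 'model_type', 'featurizer', 'r2_score_valid']].head())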
# ------------------------------------------------------------------------------------------------------------------
def get_best_perf_table(col_name, metric_type, model_uuid=None, metadata_dict=None, PK_pipe=False):
"""
Retrieve metadata and performance metrics for a single model, identified either by model_uuid
(looked up in the given collection) or by an already-retrieved metadata_dict, and return them
as a flat dict with one entry per parameter/metric, suitable for assembling a summary table.
"""
if metadata_dict is None:
if model_uuid is None:
print("Have to specify either metadatadict or model_uuid")
return
# Right now this subsetting of metrics does not work, so need to do manually below.
model_filter = {"model_uuid": model_uuid,
# "ModelMetrics.TrainingRun.label" : "best"
}
models = list(trkr.get_full_metadata(model_filter, client_wrapper, collection_name=col_name))
if models == []:
print("No matching models returned")
return
elif len(models) > 1:
print("Found %d matching models, which is too many" % len(models))
return
metadata_dict = models[0]
model_info = {}
model_info['model_uuid'] = metadata_dict['model_uuid']
#print("Got metadata for model UUID %s" % model_info['model_uuid'])
# Get model metrics for this model
metrics_dicts = metadata_dict['ModelMetrics']['TrainingRun']
# workaround for now
# metrics_dicts = [m for m in metrics_dicts if m['label'] == 'best']
# print("Got %d metrics dicts for model %s" % (len(metrics_dicts), model_uuid))
if len(metrics_dicts) < 3:
print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
return
if len(metrics_dicts) > 3:
metrics_dicts = [m for m in metrics_dicts if m['label'] == 'best']
# raise Exception('Got more than one set of best epoch metrics for model %s' % model_uuid)
model_params = metadata_dict['ModelMetadata']['ModelParameters']
model_info['model_type'] = model_params['model_type']
model_info['featurizer'] = model_params['featurizer']
split_params = metadata_dict['ModelMetadata']['SplittingParameters']['Splitting']
model_info['splitter'] = split_params['splitter']
if 'split_uuid' in split_params:
model_info['split_uuid'] = split_params['split_uuid']
model_info['dataset_key'] = metadata_dict['ModelMetadata']['TrainingDataset']['dataset_key']
model_info['bucket'] = metadata_dict['ModelMetadata']['TrainingDataset']['bucket']
if PK_pipe:
model_info['collection_name']=col_name
model_info['assay_name'] = metadata_dict['ModelMetadata']['TrainingDataset']['DatasetMetadata'][
'assay_category']
model_info['response_col'] = metadata_dict['ModelMetadata']['TrainingDataset']['DatasetMetadata'][
'response_col']
# Look up the descriptor type once; default to None when the model has no DescriptorSpecific section.
try:
model_info['descriptor_type'] = metadata_dict['ModelMetadata']['DescriptorSpecific']['descriptor_type']
except KeyError:
model_info['descriptor_type'] = None
try:
model_info['num_samples'] = metadata_dict['ModelMetadata']['TrainingDataset']['DatasetMetadata']['num_row']
except:
tmp_df = dsf.retrieve_dataset_by_datasetkey(model_info['dataset_key'], model_info['bucket'])
model_info['num_samples'] = tmp_df.shape[0]
if model_info['model_type'] == 'NN':
nn_params = metadata_dict['ModelMetadata']['NNSpecific']
model_info['max_epochs'] = nn_params['max_epochs']
model_info['best_epoch'] = nn_params['best_epoch']
model_info['learning_rate'] = nn_params['learning_rate']
model_info['layer_sizes'] = ','.join(['%d' % s for s in nn_params['layer_sizes']])
model_info['dropouts'] = ','.join(['%.2f' % d for d in nn_params['dropouts']])
model_info['rf_estimators'] = nan
model_info['rf_max_features'] = nan
model_info['rf_max_depth'] = nan
if model_info['model_type'] == 'RF':
rf_params = metadata_dict['ModelMetadata']['RFSpecific']
model_info['rf_estimators'] = rf_params['rf_estimators']
model_info['rf_max_features'] = rf_params['rf_max_features']
model_info['rf_max_depth'] = rf_params['rf_max_depth']
model_info['max_epochs'] = nan
model_info['best_epoch'] = nan
model_info['learning_rate'] = nan
model_info['layer_sizes'] = nan
model_info['dropouts'] = nan
for metrics_dict in metrics_dicts:
subset = metrics_dict['subset']
metric_col = '%s_%s' % (metric_type, subset)
model_info[metric_col] = metrics_dict['PredictionResults'][metric_type]
metric_col = 'rms_score_%s' % subset
model_info[metric_col] = metrics_dict['PredictionResults']['rms_score']
return model_info
# ---------------------------------------------------------------------------------------------------------
def get_best_models_info(col_names, bucket, pred_type, PK_pipeline=False, output_dir='/usr/local/data',
shortlist_key=None, input_dset_keys=None, save_results=False, subset='valid',
metric_type=None, selection_type='max', other_filters={}):
"""
For each dataset (from shortlist_key or input_dset_keys), find the model with the best value of the
chosen metric on the chosen subset across the given collections, and return their metadata as a DataFrame.
"""
top_models_info = []
if metric_type is None:
if pred_type == 'regression':
metric_type = 'r2_score'
else:
metric_type = 'roc_auc_score'
if other_filters is None:
other_filters = {}
if type(col_names) == str:
col_names = [col_names]
for col_name in col_names:
res_dir = os.path.join(output_dir, '%s_perf' % col_name)
plt_dir = '%s/Plots' % res_dir
os.makedirs(plt_dir, exist_ok=True)
res_files = os.listdir(res_dir)
suffix = '_%s_model_perf_metrics.csv' % col_name
if input_dset_keys is None:
dset_keys = dsf.retrieve_dataset_by_datasetkey(shortlist_key, bucket)
# Need to figure out how to handle an unknown column name for dataset_keys
if 'dataset_key' in dset_keys.columns:
dset_keys = dset_keys['dataset_key']
elif 'task_name' in dset_keys.columns:
dset_keys = dset_keys['task_name']
else:
dset_keys = dset_keys.values
else:
if type(input_dset_keys) == str:
dset_keys = [input_dset_keys]
else:
dset_keys = input_dset_keys
for dset_key in dset_keys:
dset_key = dset_key.strip()
try:
# TODO: get dataset bucket
model_filter = {"ModelMetadata.TrainingDataset.dataset_key": dset_key,
"ModelMetadata.TrainingDataset.bucket": bucket,
"ModelMetrics.TrainingRun.label": "best",
'ModelMetrics.TrainingRun.subset': subset,
'ModelMetrics.TrainingRun.PredictionResults.%s' % metric_type: [selection_type, None]
}
model_filter.update(other_filters)
try:
models = list(trkr.get_full_metadata(model_filter, client_wrapper, collection_name=col_name))
except Exception as e:
print("Error returned when querying the best model for dataset %s" % dset_key)
print(e)
continue
if models == []:
#print("No matching models returned for dset_key {0} and bucket {1}".format(dset_key, bucket))
continue
elif len(models) > 1:
print("Found %d models with the same %s value, saving all." % (len(models), metric_type))
for model in models:
res_df = pd.DataFrame.from_records(
[get_best_perf_table(col_name, metric_type, metadata_dict=model, PK_pipe=PK_pipeline)])
top_models_info.append(res_df)
except Exception as e:
print(e)
continue
if top_models_info == []:
print("No metadata found")
return
top_models_df = pd.concat(top_models_info, ignore_index=True)
selection_col = '%s_%s' % (metric_type, subset)
if selection_type == 'max':
top_models_df = top_models_df.loc[top_models_df.groupby('dataset_key')[selection_col].idxmax()]
else:
top_models_df = top_models_df.loc[top_models_df.groupby('dataset_key')[selection_col].idxmin()]
#TODO: Update res_dirs
if save_results:
if shortlist_key is not None:
# Not including shortlist key right now because some are weirdly formed and have .csv in the middle
top_models_df.to_csv(os.path.join(res_dir, 'best_models_metadata.csv'), index=False)
else:
for dset_key in dset_keys:
# Use basename and strip the .csv extension (rstrip('.csv') would drop any trailing '.', 'c', 's' or 'v' characters)
shortened_key = os.path.basename(dset_key.strip()).replace('.csv', '')
top_models_df.to_csv(os.path.join(res_dir, 'best_models_metadata_%s.csv' % shortened_key), index=False)
return top_models_df
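# Example (placeholder arguments; shortlist_key or input_dset_keys must point at datasets
# that have models in the collection):
#   best_df = get_best_models_info('pilot_fixed', 'my_bucket', 'regression',
#                                  input_dset_keys='my_dataset.csv',
#                                  subset='valid', selection_type='max')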
'''
#---------------------------------------------------------------------------------------------------------
def get_best_grouped_models_info(collection='pilot_fixed', pred_type='regression', top_n=1, subset='test'):
"""
Get results for models in the given collection.
"""
res_dir = '/usr/local/data/%s_perf' % collection
plt_dir = '%s/Plots' % res_dir
os.makedirs(plt_dir, exist_ok=True)
res_files = os.listdir(res_dir)
suffix = '_%s_model_perf_metrics.csv' % collection
if pred_type == 'regression':
metric_type = 'r2_score'
else:
metric_type = 'roc_auc_score'
for res_file in res_files:
try:
if not res_file.endswith(suffix):
continue
res_path = os.path.join(res_dir, res_file)
res_df = pd.read_csv(res_path, index_col=False)
res_df['combo'] = ['%s/%s' % (m,f) for m, f in zip(res_df.model_type.values, res_df.featurizer.values)]
dset_name = res_file.replace(suffix, '')
datasets.append(dset_name)
res_df['dataset'] = dset_name
print(dset_name)
res_df = res_df.sort_values('{0}_{1}'.format(metric_type, subset), ascending=False)
res_df['model_type/feat'] = ['%s/%s' % (m,f) for m, f in zip(res_df.model_type.values, res_df.featurizer.values)]
res_df = res_df.sort_values('{0}_{1}'.format(metric_type, subset), ascending=False)
grouped_df = res_df.groupby('model_type/feat').apply(
lambda t: t.head(top_n)
).reset_index(drop=True)
top_grouped_models.append(grouped_df)
top_combo = res_df['model_type/feat'].values[0]
top_combo_dsets.append(top_combo + dset_name.lstrip('ATOM_GSK_dskey'))
top_score = res_df['{0}_{1}'.format(metric_type, subset)].values[0]
top_model_feat.append(top_combo)
top_scores.append(top_score)
num_samples.append(res_df['Dataset Size'][0])
'''
#------------------------------------------------------------------------------------------------------------------
def get_umap_nn_model_perf_table(dataset_key, bucket, collection_name, pred_type='regression'):
"""
Load performance metrics from model tracker for all NN models with the given prediction_type saved in
the model tracker DB under a given collection that were trained against a particular dataset. Show
parameter settings for UMAP transformer for models where they are available.
"""
model_filter = {"ModelMetadata.TrainingDataset.dataset_key" : dataset_key,
"ModelMetadata.TrainingDataset.bucket" : bucket,
"ModelMetrics.TrainingRun.label" : "best",
"ModelMetadata.ModelParameters.model_type" : "NN",
"ModelMetadata.ModelParameters.prediction_type" : pred_type
}
print("Finding models trained on %s dataset %s" % (bucket, dataset_key))
models = list(trkr.get_full_metadata(model_filter, client_wrapper,
collection_name=collection_name))
if models == []:
print("No matching models returned")
return
else:
print("Found %d matching models" % len(models))
model_uuid_list = []
learning_rate_list = []
dropouts_list = []
layer_sizes_list = []
featurizer_list = []
best_epoch_list = []
max_epochs_list = []
feature_transform_type_list = []
umap_dim_list = []
umap_targ_wt_list = []
umap_neighbors_list = []
umap_min_dist_list = []
subsets = ['train', 'valid', 'test']
if pred_type == 'regression':
sort_metric = 'r2_score'
metrics = ['r2_score', 'rms_score', 'mae_score']
else:
sort_metric = 'roc_auc_score'
metrics = ['roc_auc_score', 'prc_auc_score', 'matthews_cc', 'kappa', 'confusion_matrix']
score_dict = {}
for subset in subsets:
score_dict[subset] = {}
for metric in metrics:
score_dict[subset][metric] = []
for metadata_dict in models:
model_uuid = metadata_dict['model_uuid']
#print("Got metadata for model UUID %s" % model_uuid)
# Get model metrics for this model
metrics_dicts = metadata_dict['ModelMetrics']['TrainingRun']
#print("Got %d metrics dicts for model %s" % (len(metrics_dicts), model_uuid))
if len(metrics_dicts) < 3:
print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
continue
if len(metrics_dicts) > 3:
raise Exception('Got more than one set of best epoch metrics for model %s' % model_uuid)
subset_metrics = {}
for metrics_dict in metrics_dicts:
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['PredictionResults']
model_params = metadata_dict['ModelMetadata']['ModelParameters']
model_type = model_params['model_type']
if model_type != 'NN':
continue
# Only append once we know this is an NN model, so the per-model lists stay the same length
model_uuid_list.append(model_uuid)
featurizer = model_params['featurizer']
featurizer_list.append(featurizer)
feature_transform_type = metadata_dict['ModelMetadata']['TrainingDataset']['feature_transform_type']
feature_transform_type_list.append(feature_transform_type)
nn_params = metadata_dict['ModelMetadata']['NNSpecific']
max_epochs_list.append(nn_params['max_epochs'])
best_epoch_list.append(nn_params['best_epoch'])
learning_rate_list.append(nn_params['learning_rate'])
layer_sizes_list.append(','.join(['%d' % s for s in nn_params['layer_sizes']]))
dropouts_list.append(','.join(['%.2f' % d for d in nn_params['dropouts']]))
for subset in subsets:
for metric in metrics:
score_dict[subset][metric].append(subset_metrics[subset][metric])
if 'UmapSpecific' in metadata_dict['ModelMetadata']:
umap_params = metadata_dict['ModelMetadata']['UmapSpecific']
umap_dim_list.append(umap_params['umap_dim'])
umap_targ_wt_list.append(umap_params['umap_targ_wt'])
umap_neighbors_list.append(umap_params['umap_neighbors'])
umap_min_dist_list.append(umap_params['umap_min_dist'])
else:
umap_dim_list.append(nan)
umap_targ_wt_list.append(nan)
umap_neighbors_list.append(nan)
umap_min_dist_list.append(nan)
perf_df = pd.DataFrame(dict(
model_uuid=model_uuid_list,
learning_rate=learning_rate_list,
dropouts=dropouts_list,
layer_sizes=layer_sizes_list,
featurizer=featurizer_list,
best_epoch=best_epoch_list,
max_epochs=max_epochs_list,
feature_transform_type=feature_transform_type_list,
umap_dim=umap_dim_list,
umap_targ_wt=umap_targ_wt_list,
umap_neighbors=umap_neighbors_list,
umap_min_dist=umap_min_dist_list ))
for subset in subsets:
for metric in metrics:
metric_col = '%s_%s' % (metric, subset)
perf_df[metric_col] = score_dict[subset][metric]
sort_by = '%s_valid' % sort_metric
perf_df = perf_df.sort_values(sort_by, ascending=False)
return perf_df
#------------------------------------------------------------------------------------------------------------------
def get_filesystem_perf_results(result_dir, hyper_id=None, dataset_name='GSK_Amgen_Combined_BSEP_PIC50',
pred_type='classification'):
"""
Retrieve model metadata and performance metrics stored in the filesystem from a hyperparameter search run.
"""
model_uuid_list = []
model_type_list = []
max_epochs_list = []
learning_rate_list = []
dropouts_list = []
layer_sizes_list = []
featurizer_list = []
splitter_list = []
rf_estimators_list = []
rf_max_features_list = []
rf_max_depth_list = []
best_epoch_list = []
model_score_type_list = []
feature_transform_type_list = []
umap_dim_list = []
umap_targ_wt_list = []
umap_neighbors_list = []
umap_min_dist_list = []
subsets = ['train', 'valid', 'test']
if pred_type == 'regression':
metrics = ['r2_score', 'r2_std', 'rms_score', 'mae_score']
else:
metrics = ['roc_auc_score', 'roc_auc_std', 'prc_auc_score', 'precision', 'recall_score',
'accuracy_score', 'npv', 'matthews_cc', 'kappa', 'cross_entropy', 'confusion_matrix']
score_dict = {}
for subset in subsets:
score_dict[subset] = {}
for metric in metrics:
score_dict[subset][metric] = []
score_dict['valid']['model_choice_score'] = []
# Navigate the results directory tree
model_list = []
metrics_list = []
if hyper_id is None:
# hyper_id not specified, so let's do all that exist under the given result_dir
subdirs = os.listdir(result_dir)
hyper_ids = list(set(subdirs) - {'logs', 'slurm_files'})
else:
hyper_ids = [hyper_id]
for hyper_id in hyper_ids:
topdir = os.path.join(result_dir, hyper_id, dataset_name)
if not os.path.isdir(topdir):
continue
# Next component of path is a random UUID added by hyperparam script for each run. Iterate over runs.
run_uuids = [fname for fname in os.listdir(topdir) if not fname.startswith('.')]
for run_uuid in run_uuids:
run_path = os.path.join(topdir, run_uuid, dataset_name)
# Next path component is a combination of various model parameters
param_dirs = os.listdir(run_path)
for param_str in param_dirs:
new_path = os.path.join(topdir, run_uuid, dataset_name, param_str)
model_dirs = [d for d in os.listdir(new_path) if not d.startswith('.')]
if not model_dirs:
continue
model_uuid = model_dirs[0]
meta_path = os.path.join(new_path, model_uuid, 'model_metadata.json')
metrics_path = os.path.join(new_path, model_uuid, 'training_model_metrics.json')
if not (os.path.exists(meta_path) and os.path.exists(metrics_path)):
continue
with open(meta_path, 'r') as meta_fp:
meta_dict = json.load(meta_fp)
model_list.append(meta_dict)
with open(metrics_path, 'r') as metrics_fp:
metrics_dict = json.load(metrics_fp)
metrics_list.append(metrics_dict)
print("Found data for %d models under %s" % (len(model_list), result_dir))
for metadata_dict, metrics_dict in zip(model_list, metrics_list):
model_uuid = metadata_dict['model_uuid']
#print("Got metadata for model UUID %s" % model_uuid)
# Get list of prediction run metrics for this model
pred_dicts = metrics_dict['ModelMetrics']['TrainingRun']
#print("Got %d metrics dicts for model %s" % (len(pred_dicts), model_uuid))
if len(pred_dicts) < 3:
print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
continue
subset_metrics = {}
for pred_dict in pred_dicts:
if pred_dict['label'] == 'best':
subset = pred_dict['subset']
subset_metrics[subset] = pred_dict['PredictionResults']
model_uuid_list.append(model_uuid)
model_params = metadata_dict['ModelMetadata']['ModelParameters']
model_type = model_params['model_type']
model_type_list.append(model_type)
model_score_type = model_params['model_choice_score_type']
model_score_type_list.append(model_score_type)
featurizer = model_params['featurizer']
featurizer_list.append(featurizer)
split_params = metadata_dict['ModelMetadata']['SplittingParameters']['Splitting']
splitter_list.append(split_params['splitter'])
feature_transform_type = metadata_dict['ModelMetadata']['TrainingDataset']['feature_transform_type']
feature_transform_type_list.append(feature_transform_type)
if model_type == 'NN':
nn_params = metadata_dict['ModelMetadata']['NNSpecific']
max_epochs_list.append(nn_params['max_epochs'])
best_epoch_list.append(nn_params['best_epoch'])
learning_rate_list.append(nn_params['learning_rate'])
layer_sizes_list.append(','.join(['%d' % s for s in nn_params['layer_sizes']]))
dropouts_list.append(','.join(['%.2f' % d for d in nn_params['dropouts']]))
rf_estimators_list.append(nan)
rf_max_features_list.append(nan)
rf_max_depth_list.append(nan)
if model_type == 'RF':
rf_params = metadata_dict['ModelMetadata']['RFSpecific']
rf_estimators_list.append(rf_params['rf_estimators'])
rf_max_features_list.append(rf_params['rf_max_features'])
rf_max_depth_list.append(rf_params['rf_max_depth'])
max_epochs_list.append(nan)
best_epoch_list.append(nan)
learning_rate_list.append(nan)
layer_sizes_list.append(nan)
dropouts_list.append(nan)
for subset in subsets:
for metric in metrics:
score_dict[subset][metric].append(subset_metrics[subset][metric])
score_dict['valid']['model_choice_score'].append(subset_metrics['valid']['model_choice_score'])
if 'UmapSpecific' in metadata_dict['ModelMetadata']:
umap_params = metadata_dict['ModelMetadata']['UmapSpecific']
umap_dim_list.append(umap_params['umap_dim'])
umap_targ_wt_list.append(umap_params['umap_targ_wt'])
umap_neighbors_list.append(umap_params['umap_neighbors'])
umap_min_dist_list.append(umap_params['umap_min_dist'])
else:
umap_dim_list.append(nan)
umap_targ_wt_list.append(nan)
umap_neighbors_list.append(nan)
umap_min_dist_list.append(nan)
perf_df = pd.DataFrame(dict(
model_uuid=model_uuid_list,
model_type=model_type_list,
featurizer=featurizer_list,
splitter=splitter_list,
model_score_type=model_score_type_list,
feature_transform_type=feature_transform_type_list,
umap_dim=umap_dim_list,
umap_targ_wt=umap_targ_wt_list,
umap_neighbors=umap_neighbors_list,
umap_min_dist=umap_min_dist_list,
learning_rate=learning_rate_list,
dropouts=dropouts_list,
layer_sizes=layer_sizes_list,
best_epoch=best_epoch_list,
max_epochs=max_epochs_list,
rf_estimators=rf_estimators_list,
rf_max_features=rf_max_features_list,
rf_max_depth=rf_max_depth_list))
perf_df['model_choice_score'] = score_dict['valid']['model_choice_score']
for subset in subsets:
for metric in metrics:
metric_col = '%s_%s' % (metric, subset)
perf_df[metric_col] = score_dict[subset][metric]
sort_by = 'model_choice_score'
perf_df = perf_df.sort_values(sort_by, ascending=False)
return perf_df
#------------------------------------------------------------------------------------------------------------------
def get_summary_perf_tables(collection_names, filter_dict={}, prediction_type='regression'):
"""
Load model parameters and performance metrics from model tracker for all models saved in the model tracker DB under
the given collection names. Generate a summary table for the requested prediction type (regression or classification), listing:
dataset (assay name, target, parameter, key, bucket)
dataset size (train/valid/test/total)
number of training folds
model type (NN or RF)
featurizer
transformation type
metrics: r2_score, mae_score and rms_score for regression, or ROC AUC for classification
"""
collection_list = []
model_uuid_list = []
time_built_list = []
model_type_list = []
dataset_key_list = []
bucket_list = []
param_list = []
featurizer_list = []
desc_type_list = []
transform_list = []
dset_size_list = []
splitter_list = []
split_strategy_list = []
split_uuid_list = []
rf_estimators_list = []
rf_max_features_list = []
rf_max_depth_list = []
best_epoch_list = []
max_epochs_list = []
learning_rate_list = []
layer_sizes_list = []
dropouts_list = []
umap_dim_list = []
umap_targ_wt_list = []
umap_neighbors_list = []
umap_min_dist_list = []
if prediction_type == 'regression':
score_types = ['r2_score', 'mae_score', 'rms_score']
else:
# TODO: add more classification metrics later
score_types = ['roc_auc_score', 'prc_auc_score', 'accuracy_score', 'precision', 'recall_score', 'npv', 'matthews_cc']
subsets = ['train', 'valid', 'test']
score_dict = {}
ncmpd_dict = {}
for subset in subsets:
score_dict[subset] = {}
for score_type in score_types:
score_dict[subset][score_type] = []
ncmpd_dict[subset] = []
filter_dict['ModelMetadata.ModelParameters.prediction_type'] = prediction_type
for collection_name in collection_names:
print("Finding models in collection %s" % collection_name)
models = trkr.get_full_metadata(filter_dict, client_wrapper, collection_name=collection_name)
for i, metadata_dict in enumerate(models):
if i % 10 == 0:
print('Processing collection %s model %d' % (collection_name, i))
# Check that model has metrics before we go on
if not 'ModelMetrics' in metadata_dict:
continue
collection_list.append(collection_name)
model_uuid = metadata_dict['model_uuid']
model_uuid_list.append(model_uuid)
time_built = metadata_dict['time_built']
time_built_list.append(time_built)
#print("Got metadata for model UUID %s" % model_uuid)
model_params = metadata_dict['ModelMetadata']['ModelParameters']
model_type = model_params['model_type']
model_type_list.append(model_type)
featurizer = model_params['featurizer']
featurizer_list.append(featurizer)
if 'DescriptorSpecific' in metadata_dict['ModelMetadata']:
desc_type = metadata_dict['ModelMetadata']['DescriptorSpecific']['descriptor_type']
else:
desc_type = ''
desc_type_list.append(desc_type)
dataset_key = metadata_dict['ModelMetadata']['TrainingDataset']['dataset_key']
bucket = metadata_dict['ModelMetadata']['TrainingDataset']['bucket']
dataset_key_list.append(dataset_key)
bucket_list.append(bucket)
dset_metadata = metadata_dict['ModelMetadata']['TrainingDataset']['DatasetMetadata']
param = metadata_dict['ModelMetadata']['TrainingDataset']['response_cols'][0]
param_list.append(param)
transform_type = metadata_dict['ModelMetadata']['TrainingDataset']['feature_transform_type']
transform_list.append(transform_type)
split_params = metadata_dict['ModelMetadata']['SplittingParameters']['Splitting']
splitter_list.append(split_params['splitter'])
split_uuid_list.append(split_params['split_uuid'])
split_strategy = split_params['split_strategy']
split_strategy_list.append(split_strategy)
if 'UmapSpecific' in metadata_dict['ModelMetadata']:
umap_params = metadata_dict['ModelMetadata']['UmapSpecific']
umap_dim_list.append(umap_params['umap_dim'])
umap_targ_wt_list.append(umap_params['umap_targ_wt'])
umap_neighbors_list.append(umap_params['umap_neighbors'])
umap_min_dist_list.append(umap_params['umap_min_dist'])
else:
umap_dim_list.append(nan)
umap_targ_wt_list.append(nan)
umap_neighbors_list.append(nan)
umap_min_dist_list.append(nan)
if model_type == 'NN':
nn_params = metadata_dict['ModelMetadata']['NNSpecific']
max_epochs_list.append(nn_params['max_epochs'])
best_epoch_list.append(nn_params['best_epoch'])
learning_rate_list.append(nn_params['learning_rate'])
layer_sizes_list.append(','.join(['%d' % s for s in nn_params['layer_sizes']]))
dropouts_list.append(','.join(['%.2f' % d for d in nn_params['dropouts']]))
rf_estimators_list.append(nan)
rf_max_features_list.append(nan)
rf_max_depth_list.append(nan)
elif model_type == 'RF':
rf_params = metadata_dict['ModelMetadata']['RFSpecific']
rf_estimators_list.append(rf_params['rf_estimators'])
rf_max_features_list.append(rf_params['rf_max_features'])
rf_max_depth_list.append(rf_params['rf_max_depth'])
max_epochs_list.append(nan)
best_epoch_list.append(nan)
learning_rate_list.append(nan)
layer_sizes_list.append(nan)
dropouts_list.append(nan)
elif model_type == 'xgboost':
# TODO: Add xgboost parameters
max_epochs_list.append(nan)
best_epoch_list.append(nan)
learning_rate_list.append(nan)
layer_sizes_list.append(nan)
dropouts_list.append(nan)
rf_estimators_list.append(nan)
rf_max_features_list.append(nan)
rf_max_depth_list.append(nan)
else:
raise Exception('Unexpected model type %s' % model_type)
# Get model metrics for this model
metrics_dicts = metadata_dict['ModelMetrics']['TrainingRun']
#print("Got %d metrics dicts for model %s" % (len(metrics_dicts), model_uuid))
subset_metrics = {}
for metrics_dict in metrics_dicts:
if metrics_dict['label'] == 'best':
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['PredictionResults']
if split_strategy == 'k_fold_cv':
dset_size = subset_metrics['train']['num_compounds'] + subset_metrics['test']['num_compounds']
else:
dset_size = subset_metrics['train']['num_compounds'] + subset_metrics['valid']['num_compounds'] + subset_metrics['test']['num_compounds']
for subset in subsets:
subset_size = subset_metrics[subset]['num_compounds']
for score_type in score_types:
try:
score = subset_metrics[subset][score_type]
except KeyError:
score = float('nan')
score_dict[subset][score_type].append(score)
ncmpd_dict[subset].append(subset_size)
dset_size_list.append(dset_size)
col_dict = dict(
collection=collection_list,
model_uuid=model_uuid_list,
time_built=time_built_list,
model_type=model_type_list,
featurizer=featurizer_list,
descr_type=desc_type_list,
transformer=transform_list,
splitter=splitter_list,
split_strategy=split_strategy_list,
split_uuid=split_uuid_list,
umap_dim=umap_dim_list,
umap_targ_wt=umap_targ_wt_list,
umap_neighbors=umap_neighbors_list,
umap_min_dist=umap_min_dist_list,
layer_sizes=layer_sizes_list,
dropouts=dropouts_list,
learning_rate=learning_rate_list,
best_epoch=best_epoch_list,
max_epochs=max_epochs_list,
rf_estimators=rf_estimators_list,
rf_max_features=rf_max_features_list,
rf_max_depth=rf_max_depth_list,
dataset_bucket=bucket_list,
dataset_key=dataset_key_list,
dataset_size=dset_size_list,
parameter=param_list
)
perf_df = pd.DataFrame(col_dict)
for subset in subsets:
ncmpds_col = '%s_size' % subset
perf_df[ncmpds_col] = ncmpd_dict[subset]
for score_type in score_types:
metric_col = '%s_%s' % (subset, score_type)
perf_df[metric_col] = score_dict[subset][score_type]
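# At this point perf_df holds one row per model; the loop above has appended,
# for each subset, a '<subset>_size' column plus one '<subset>_<score_type>'
# column per requested score type.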
return perf_df
#------------------------------------------------------------------------------------------------------------------
def get_summary_metadata_table(uuids, collections=None):
if isinstance(uuids,str):
uuids = [uuids]
if isinstance(collections,str):
collections = [collections] * len(uuids)
mlist = []
for idx,uuid in enumerate(uuids):
if collections is not None:
collection_name = collections[idx]
else:
collection_name = trkr.get_model_collection_by_uuid(uuid,client_wrapper)
model_meta = trkr.get_metadata_by_uuid(uuid,client_wrapper=client_wrapper,collection_name=collection_name)
mdl_params = model_meta['ModelMetadata']['ModelParameters']
data_params = model_meta['ModelMetadata']['TrainingDataset']
# Get model metrics for this model
metrics = pd.DataFrame(model_meta['ModelMetrics']['TrainingRun'])
metrics = metrics[metrics['label']=='best']
train_metrics = metrics[metrics['subset']=='train']['PredictionResults'].values[0]
valid_metrics = metrics[metrics['subset']=='valid']['PredictionResults'].values[0]
test_metrics = metrics[metrics['subset']=='test']['PredictionResults'].values[0]
# Try to name the model something intelligible in the table
name = 'NA'
if 'target' in data_params['DatasetMetadata']:
name = data_params['DatasetMetadata']['target']
if (name == 'NA') and ('assay_endpoint' in data_params['DatasetMetadata']):
name = data_params['DatasetMetadata']['assay_endpoint']
if (name == 'NA') and ('response_col' in data_params['DatasetMetadata']):
name = data_params['DatasetMetadata']['response_col']
if name != 'NA':
if 'param' in data_params['DatasetMetadata'].keys():
name = name + ' ' + data_params['DatasetMetadata']['param']
else:
name = 'unknown'
transform = 'None'
if 'transformation' in data_params['DatasetMetadata'].keys():
transform = data_params['DatasetMetadata']['transformation']
if mdl_params['featurizer'] == 'computed_descriptors':
featurizer = model_meta['ModelMetadata']['DescriptorSpecific']['descriptor_type']
else:
featurizer = mdl_params['featurizer']
try:
split_uuid = model_meta['ModelMetadata']['SplittingParameters']['Splitting']['split_uuid']
except KeyError:
split_uuid = 'Not Available'
if mdl_params['model_type'] == 'NN':
nn_params = model_meta['ModelMetadata']['NNSpecific']
minfo = {'Name': name,
'Transformation': transform,
'Model Type (Featurizer)': '%s (%s)' % (mdl_params['model_type'],featurizer),
'r^2 (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['r2_score'], valid_metrics['r2_score'], test_metrics['r2_score']),
'MAE (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['mae_score'], valid_metrics['mae_score'], test_metrics['mae_score']),
'RMSE(Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['rms_score'], valid_metrics['rms_score'], test_metrics['rms_score']),
'Data Size (Train/Valid/Test)': '%i/%i/%i' % (train_metrics["num_compounds"],valid_metrics["num_compounds"],test_metrics["num_compounds"]),
'Splitter': model_meta['ModelMetadata']['SplittingParameters']['Splitting']['splitter'],
'Layer Sizes': nn_params['layer_sizes'],
'Optimizer': nn_params['optimizer_type'],
'Learning Rate': nn_params['learning_rate'],
'Dropouts': nn_params['dropouts'],
'Best Epoch (Max)': '%i (%i)' % (nn_params['best_epoch'],nn_params['max_epochs']),
'Collection': collection_name,
'UUID': model_meta['model_uuid'],
'Split UUID': split_uuid,
'Dataset Key': data_params['dataset_key']}
elif mdl_params['model_type'] == 'RF':
rf_params = model_meta['ModelMetadata']['RFSpecific']
minfo = {'Name': name,
'Transformation': transform,
'Model Type (Featurizer)': '%s (%s)' % (mdl_params['model_type'],featurizer),
'Max Depth': rf_params['rf_max_depth'],
'Max Features': rf_params['rf_max_features'],
'RF Estimators': rf_params['rf_estimators'],
'r^2 (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['r2_score'], valid_metrics['r2_score'], test_metrics['r2_score']),
'MAE (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['mae_score'], valid_metrics['mae_score'], test_metrics['mae_score']),
'RMSE(Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['rms_score'], valid_metrics['rms_score'], test_metrics['rms_score']),
'Data Size (Train/Valid/Test)': '%i/%i/%i' % (train_metrics["num_compounds"],valid_metrics["num_compounds"],test_metrics["num_compounds"]),
'Splitter': model_meta['ModelMetadata']['SplittingParameters']['Splitting']['splitter'],
'Collection': collection_name,
'UUID': model_meta['model_uuid'],
'Split UUID': split_uuid,
'Dataset Key': data_params['dataset_key']}
else:
# Unrecognized model type: skip this model rather than appending stale or undefined metadata
continue
mlist.append(OrderedDict(minfo))
return pd.DataFrame(mlist).set_index('Name').transpose()
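# Example usage (a minimal sketch; the UUIDs and collection name below are
# hypothetical placeholders, not values from this project):
#
#   summary = get_summary_metadata_table(
#       ['model-uuid-1', 'model-uuid-2'], collections='pilot_models')
#   print(summary)
#
# Each model becomes one column of the transposed table, keyed by the
# human-readable row labels built above (r^2, MAE, RMSE, sizes, etc.).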
#------------------------------------------------------------------------------------------------------------------
def get_model_datasets(collection_names, filter_dict={}):
"""
Query the model tracker for all models saved in the model tracker DB under the given collection names. Returns a dictionary
mapping (dataset_key,bucket) pairs to the list of model_uuids trained on the corresponding datasets.
"""
result_dict = {}
for collection_name in collection_names:
if collection_name.endswith('_metrics'):
continue
models = trkr.get_full_metadata(filter_dict, client_wrapper, collection_name=collection_name)
for i, metadata_dict in enumerate(models):
if i % 10 == 0:
print('Processing collection %s model %d' % (collection_name, i))
# Check that model has metrics before we go on
if 'ModelMetrics' not in metadata_dict:
continue
try:
model_uuid = metadata_dict['model_uuid']
dataset_key = metadata_dict['ModelMetadata']['TrainingDataset']['dataset_key']
bucket = metadata_dict['ModelMetadata']['TrainingDataset']['bucket']
result_dict.setdefault((dataset_key,bucket), []).append(model_uuid)
except KeyError:
continue
return result_dict
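# Example usage (sketch; the collection names here are hypothetical):
#
#   dset_models = get_model_datasets(['pilot_models', 'production_models'])
#   for (dataset_key, bucket), uuids in dset_models.items():
#       print('%s (%s): %d models' % (dataset_key, bucket, len(uuids)))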
#-------------------------------------------------------------------------------------------------------------------
def aggregate_predictions(datasets, bucket, col_names, client_wrapper, result_dir):
results = []
for dset_key, bucket in datasets:
for model_type in ['NN', 'RF']:
for split_type in ['scaffold', 'random']:
for descriptor_type in ['mordred_filtered', 'moe']:
model_filter = {"ModelMetadata.TrainingDataset.dataset_key" : dset_key,
"ModelMetadata.TrainingDataset.bucket" : bucket,
"ModelMetrics.TrainingRun.label" : "best",
'ModelMetrics.TrainingRun.subset': 'valid',
'ModelMetrics.TrainingRun.PredictionResults.r2_score': ['max', None],
'ModelMetadata.ModelParameters.model_type': model_type,
'ModelMetadata.ModelParameters.featurizer': 'descriptors',
'ModelMetadata.DescriptorSpecific.descriptor_type': descriptor_type,
'ModelMetadata.SplittingParameters.Splitting.splitter': split_type
}
for col_name in col_names:
model = list(trkr.get_full_metadata(model_filter, client_wrapper, collection_name=col_name))
if model:
model = model[0]
# drop the '.csv' extension from the dataset key
result_dir = '/usr/local/data/%s/%s' % (col_name, os.path.splitext(dset_key)[0])
result_df = mp.regenerate_results(result_dir, metadata_dict=model)
result_df['dset_key'] = dset_key
actual_col = [col for col in result_df.columns if 'actual' in col][0]
pred_col = [col for col in result_df.columns if 'pred' in col][0]
result_df['error'] = abs(result_df[actual_col] - result_df[pred_col])
result_df['cind'] = pd.Categorical(result_df['dset_key']).codes
results.append(result_df)
results_df = pd.concat(results).reset_index(drop=True)
results_df.to_csv(os.path.join(result_dir, 'predictions_%s_%s_%s_%s.csv' % (dset_key, model_type, split_type, descriptor_type)), index=False)
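# Note: the 'bucket' and 'result_dir' arguments are shadowed/overwritten inside
# the loops above, so the CSV lands under the per-model result_dir computed in
# the innermost loop rather than the directory passed by the caller.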
for featurizer in ['graphconv', 'ecfp']:
model_filter = {"ModelMetadata.TrainingDataset.dataset_key" : dset_key,
"ModelMetadata.TrainingDataset.bucket" : bucket,
"ModelMetrics.TrainingRun.label" : "best",
'ModelMetrics.TrainingRun.subset': 'valid',
'ModelMetrics.TrainingRun.PredictionResults.r2_score': ['max', None],
'ModelMetadata.ModelParameters.model_type': model_type,
'ModelMetadata.ModelParameters.featurizer': featurizer,
'ModelMetadata.SplittingParameters.Splitting.splitter': split_type
}
for col_name in col_names:
model = list(trkr.get_full_metadata(model_filter, client_wrapper, collection_name=col_name))
if model:
model = model[0]
result_dir = '/usr/local/data/%s/%s' % (col_name, os.path.splitext(dset_key)[0])
result_df = mp.regenerate_results(result_dir, metadata_dict=model)
result_df['dset_key'] = dset_key
actual_col = [col for col in result_df.columns if 'actual' in col][0]
pred_col = [col for col in result_df.columns if 'pred' in col][0]
result_df['error'] = abs(result_df[actual_col] - result_df[pred_col])
result_df['cind'] = pd.Categorical(result_df['dset_key'])  # target API: pandas.Categorical
import os
import sys
import json
from datetime import datetime
from os import system, name
from time import sleep
import copy
import threading
import imp
import numpy as np
import altair as alt
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pandas as pd
import pandasql as psql
from libraries.utility import Utility
class import_export_data(Utility):
ALL_COUNTRIES_DATA_FRAME = None
ALL_COUNTRIES_BY_TYPE_DF = None
ALL_COUNTRIES_GDP_DATA = None
EXCHANGE_RATE_DATA = None
def __init__(self,load_data_from_url=False):
super().__init__()
global ALL_COUNTRIES_DATA_FRAME
global ALL_COUNTRIES_BY_TYPE_DF
global ALL_COUNTRIES_GDP_DATA
global EXCHANGE_RATE_DATA
if not load_data_from_url:
EXCHANGE_RATE_DATA = self.load_exchange_rate_data()
ALL_COUNTRIES_DATA_FRAME = self.load_and_clean_up_top_20_file()
ALL_COUNTRIES_BY_TYPE_DF = self.load_and_clean_up_WTO_file()
ALL_COUNTRIES_GDP_DATA = self.load_and_clean_up_GDP_file()
else:
ALL_COUNTRIES_DATA_FRAME = self.load_and_clean_up_top_20_file_fromurl()
ALL_COUNTRIES_BY_TYPE_DF = self.load_and_clean_up_WTO_file_fromurl()
def print_internal_directory(self):
for k,v in self.__dict__.items():
print("{} is \"{}\"".format(k,v))
def get_world_event_data(self) :
data_directory = "data"
trade_balance_sub_dir = "world_events"
file_name = "world_events.txt"
return_file_name = os.path.join(self.get_this_dir(),data_directory,trade_balance_sub_dir,file_name)
return pd.read_csv(return_file_name,sep='\t').replace(np.nan,'',regex=True)
def get_world_event_data_json(self):
return self.get_world_event_data().to_json(orient='records')
def get_top_20_full_file_name(self) :
data_directory = "data"
trade_balance_sub_dir = "trade_balance_datasets"
top_20_file_name = "top20_2014-2020_all.csv"
return_file_name = os.path.join(self.get_this_dir(),data_directory,trade_balance_sub_dir,top_20_file_name)
return return_file_name
def get_GDP_full_file_name(self):
data_directory = "data"
trade_balance_sub_dir = "trade_balance_datasets"
GDP_file_name = "wb_econind_gdp_data.csv"
return_file_name = os.path.join(self.get_this_dir(),data_directory,trade_balance_sub_dir,GDP_file_name)
return return_file_name
def get_WTO_individual_file_name(self) :
file_names = ['WtoData_services_imports.csv','WtoData_services_exports.csv','WtoData_merchandise_imports.csv','WtoData_merchandise_exports.csv']
data_directory = "data"
trade_balance_sub_dir = "trade_balance_datasets"
return_file_path_list = []
for file_name in file_names:
return_file_path_list.append(os.path.join(self.get_this_dir(),data_directory,trade_balance_sub_dir,file_name))
return return_file_path_list
def get_WTO_full_file_name(self) :
data_directory = "data"
trade_balance_sub_dir = "trade_balance_datasets"
WTO_file_name = "WtoData_all.csv"
return_file_name = os.path.join(self.get_this_dir(),data_directory,trade_balance_sub_dir,WTO_file_name)
return return_file_name
def get_world_countries_by_iso_label(self):
data_directory = "data"
file_name = "countries.tsv"
load_file_name = os.path.join(self.get_this_dir(),data_directory,file_name)
my_data = pd.read_csv(load_file_name,sep='\t')
return my_data
def get_exchange_rate_files(self) :
data_directory = "data"
exchange_rate_dir = "exchange_rates"
exchange_rate = "exchange_rates_from_oecd_website.csv"
country_codes = "wikipedia-iso-country-codes.csv"
exchange_rate_file = os.path.join(self.get_this_dir(),data_directory,exchange_rate_dir,exchange_rate)
country_code_file = os.path.join(self.get_this_dir(),data_directory,exchange_rate_dir,country_codes)
return exchange_rate_file, country_code_file
def load_exchange_rate_data(self):
exchange_rate_file, country_code_file = self.get_exchange_rate_files()
exchange_rates = pd.read_csv(exchange_rate_file)
country_codes = pd.read_csv(country_code_file)
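# pandasql (psql.sqldf) resolves the table names referenced in the query
# below -- exchange_rates and country_codes -- from the calling scope by
# loading those DataFrames into an in-memory SQLite database.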
mysql = '''
select
country_codes.[English short name lower case] as Country,
exchange_rates.TIME as year,
exchange_rates.Value as rate
from
exchange_rates
join country_codes
on
country_codes.[Alpha-3 code] = exchange_rates.LOCATION
'''
return psql.sqldf(mysql)
def load_and_clean_up_top_20_file_fromurl(self):
#url = "https://tuneman7.github.io/WtoData_all.csv"
url = "https://tuneman7.github.io/top20_2014-2020_all.csv"
my_data = pd.read_csv(url)
return my_data
def load_and_clean_up_top_20_file(self):
global EXCHANGE_RATE_DATA
file_to_load = self.get_top_20_full_file_name()
my_data = pd.read_csv(file_to_load)
sql = '''
select
my_data.*,
EXCHANGE_RATE_DATA_1.rate as country_exchange_rate,
EXCHANGE_RATE_DATA_2.rate as trading_partner_exchange_rate
from my_data
left join EXCHANGE_RATE_DATA as EXCHANGE_RATE_DATA_1
on
EXCHANGE_RATE_DATA_1.year = my_data.year
and
EXCHANGE_RATE_DATA_1.Country = my_data.Country
left join EXCHANGE_RATE_DATA as EXCHANGE_RATE_DATA_2
on
EXCHANGE_RATE_DATA_2.year = my_data.year
and
EXCHANGE_RATE_DATA_2.Country = my_data.[Trading Partner]
'''
my_data = psql.sqldf(sql)
return my_data
def load_and_clean_up_GDP_file(self):
file_to_load = self.get_GDP_full_file_name()
my_data = pd.read_csv(file_to_load)
global EXCHANGE_RATE_DATA
sql = '''
select
my_data.*,
EXCHANGE_RATE_DATA.rate as exchange_rate
from my_data
left join EXCHANGE_RATE_DATA
on
EXCHANGE_RATE_DATA.Country = my_data.Country
and
EXCHANGE_RATE_DATA.year = my_data.Year
'''
return psql.sqldf(sql)
def load_and_clean_up_EU_files(self):
#file_to_load = self.get_WTO_individual_file_name()
file_to_load = self.load_and_clean_up_top_20_file()
eu_countries=['Austria','Belgium','Croatia','Czech Republic','Denmark','Finland','France','Germany','Greece','Hungary',
'Italy','Netherlands','Poland','Portugal','Spain','Sweden','United Kingdom']
eu_df = file_to_load.loc[(file_to_load['Trading Partner'].isin(eu_countries)) & (file_to_load['country'].isin(eu_countries))]
eu_df_filtered = eu_df[['Trading Partner','country','Total Trade ($M)','year']].reset_index()
#df_concat = eu_df_filtered.pivot_table('Total Trade ($M)', ['Trading Partner','country'], 'year').reset_index()
return eu_df_filtered
def load_and_clean_up_WTO_file(self):
file_to_load = self.get_WTO_full_file_name()
my_data = pd.read_csv(file_to_load)
global EXCHANGE_RATE_DATA
# sql = '''
# select
# my_data.*,
# EXCHANGE_RATE_DATA_1.rate as reporting_economy_exchange_rate,
# EXCHANGE_RATE_DATA_2.rate as partner_economy_exchange_rate
# from my_data
# left join EXCHANGE_RATE_DATA as EXCHANGE_RATE_DATA_1
# on
# EXCHANGE_RATE_DATA_1.Country = my_data.[Reporting Economy]
# left join EXCHANGE_RATE_DATA as EXCHANGE_RATE_DATA_2
# on
# EXCHANGE_RATE_DATA_2.Country = my_data.[Partner Economy]
# '''
sql = '''
select
my_data.*,
EXCHANGE_RATE_DATA_1.rate as reporting_economy_exchange_rate
from my_data
left join EXCHANGE_RATE_DATA as EXCHANGE_RATE_DATA_1
on
EXCHANGE_RATE_DATA_1.Country = my_data.[Reporting Economy]
and EXCHANGE_RATE_DATA_1.year = my_data.Year
'''
return psql.sqldf(sql)
def load_and_clean_up_WTO_file_fromurl(self):
url = "https://tuneman7.github.io/WtoData_all.csv"
#url = "https://tuneman7.github.io/top20_2014-2020_all.csv"
my_data = pd.read_csv(url)
return my_data
def get_sql_for_world_or_region(self, source_country):
my_sql = '''
SELECT
'World' [Trading Partner],
sum([Total Trade ($M)]) [Total Trade ($M)],
avg([RtW (%)]) [RtW (%)],
sum([Exports ($M)] )[Exports ($M)],
avg([RtW (%).1]) [RtW (%).1],
sum([Imports ($M)]) [Imports ($M)],
avg([RtW (%).2]) [RtW (%).2],
sum([Net Exports ($M)]) [Net Exports ($M)],
'' [Exports Ticker],
'' [Imports Ticker],
country,
year
FROM
my_data_frame
WHERE
country = \'''' + source_country + '''\'
and
[Trading Partner] <> \'''' + source_country + '''\'
GROUP BY
country, year
'''
#print(my_sql)
return my_sql
def get_data_by_source_and_target_country(self,source_country,target_country):
global ALL_COUNTRIES_DATA_FRAME
my_data_frame = ALL_COUNTRIES_DATA_FRAME
if target_country.lower() == "world":
my_sql = self.get_sql_for_world_or_region(source_country)
else:
my_sql = "SELECT * FROM my_data_frame WHERE country = '" + source_country + "' and [Trading Partner] = '" + target_country + "' "
my_return_data = psql.sqldf(my_sql)
return my_return_data
def get_top5data_by_source_country(self,source_country):
global ALL_COUNTRIES_DATA_FRAME
my_data_frame = ALL_COUNTRIES_DATA_FRAME
if source_country.lower() == 'world':
my_sql = '''
SELECT *
FROM (
SELECT *,
RANK() OVER(PARTITION BY year ORDER BY [Total Trade ($M)] DESC) AS rnk
FROM my_data_frame
where country in (select distinct country from my_data_frame)
) t
WHERE rnk <= 5
'''
else:
my_sql = '''
SELECT *
FROM (
SELECT *,
RANK() OVER(PARTITION BY year ORDER BY [Total Trade ($M)] DESC) AS rnk
FROM my_data_frame
WHERE country = ''' + "'" + source_country + '''\'
) t
WHERE rnk <= 5
'''
my_return_data = psql.sqldf(my_sql)
return my_return_data
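# Note: the RANK() OVER (...) window functions used in these queries require
# the SQLite library behind pandasql to be version 3.25 or newer; on older
# builds the query fails with an OperationalError.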
def get_top5data_by_imports_exports(self,source_country, direction):
global ALL_COUNTRIES_BY_TYPE_DF
my_data_frame = ALL_COUNTRIES_BY_TYPE_DF
if source_country.lower() != "world":
my_sql = '''
SELECT *
FROM (
SELECT
Year, Value,
[Product/Sector-reformatted],
RANK() OVER(
PARTITION BY Year
ORDER BY Value DESC) AS rnk
FROM my_data_frame
WHERE
[Reporting Economy] = \'''' + source_country + '''\'
and
Direction = \'''' + direction + '''\'
and
[Product/Sector-reformatted] NOT LIKE '%Total%'
) t
WHERE rnk <= 5
'''
else:
my_sql = '''
SELECT *
FROM (
SELECT
Year, Value,
[Product/Sector-reformatted],
RANK() OVER(
PARTITION BY Year
ORDER BY Value DESC) AS rnk
FROM my_data_frame
WHERE
[Reporting Economy] in (select distinct [Reporting Economy] from my_data_frame)
and
Direction = \'''' + direction + '''\'
and
[Product/Sector-reformatted] NOT LIKE '%Total%'
) t
WHERE rnk <= 5
'''
my_return_data = psql.sqldf(my_sql)
return my_return_data
def imports_exports_by_sectors(self,source_country, target_country, direction):
global ALL_COUNTRIES_BY_TYPE_DF
my_data_frame = ALL_COUNTRIES_BY_TYPE_DF
if source_country.lower() != "world":
my_sql = '''
SELECT
Year, Value,
[Product/Sector-reformatted],
[Reporting Economy]
FROM my_data_frame
WHERE
([Reporting Economy] = \'''' + source_country + '''\'
or
[Reporting Economy] = \'''' + target_country + '''\'
)
and
Direction = \'''' + direction + '''\'
and
[Product/Sector-reformatted] NOT LIKE '%Total%'
'''
else:
my_sql = '''
SELECT
Year, Value,
[Product/Sector-reformatted],
[Reporting Economy]
FROM my_data_frame
WHERE
[Reporting Economy] in (select distinct [Reporting Economy] from my_data_frame)
and
Direction = \'''' + direction + '''\'
and
[Product/Sector-reformatted] NOT LIKE '%Total%'
'''
my_return_data = psql.sqldf(my_sql)
return my_return_data
def imports_exports_by_sectors_source(self):
global ALL_COUNTRIES_BY_TYPE_DF
my_data_frame = ALL_COUNTRIES_BY_TYPE_DF
my_sql = '''
SELECT
distinct
Year, Value,
[Product/Sector-reformatted],
[Direction],
[Type],
[Reporting Economy]
FROM my_data_frame
WHERE [Product/Sector-reformatted] NOT LIKE '%Total%'
'''
## and [Reporting Economy] = \'''' + source_country + '''\'
my_return_data = psql.sqldf(my_sql)
return my_return_data
def get_top_trading_and_net_value(self,source_country):
global ALL_COUNTRIES_DATA_FRAME
my_data_frame = ALL_COUNTRIES_DATA_FRAME
if source_country.lower() != "world":
my_sql = '''
SELECT
[Trading Partner],
[year],
[Total Trade ($M)],
[Exports ($M)]-[Imports ($M)] as net_trade,
'Net: ' || '$' || printf("%,d",cast([Exports ($M)]-[Imports ($M)] as text)) as net_trade_text,
[Exports ($M)],
[Imports ($M)],
''' + "'" + source_country + "'" + ''' as 'source_country'
FROM (
SELECT *,
RANK() OVER(PARTITION BY year ORDER BY [Total Trade ($M)] DESC) AS rnk
FROM my_data_frame
WHERE country = ''' + "'" + source_country + '''\'
) t
WHERE rnk <= 5
'''
else:
my_sql = '''
SELECT
distinct
[Trading Partner],
[year],
[Total Trade ($M)],
[Exports ($M)]-[Imports ($M)] as net_trade,
'Net: ' || '$' || printf("%,d",cast([Exports ($M)]-[Imports ($M)] as text)) as net_trade_text,
[Exports ($M)],
[Imports ($M)],
'World' as source_country
FROM (
SELECT
[Trading Partner],
sum([Total Trade ($M)]) as [Total Trade ($M)],
sum([Exports ($M)]) as [Exports ($M)],
sum([Imports ($M)]) as [Imports ($M)],
year,
RANK() OVER(PARTITION BY year ORDER BY sum([Total Trade ($M)]) DESC) AS rnk
FROM my_data_frame
WHERE country in (select distinct country from my_data_frame )
--and [Trading Partner] <> 'European Union'
group by [Trading Partner],year
) t
WHERE rnk <= 5
group by [Trading Partner], [year]
'''
my_return_data = psql.sqldf(my_sql)
return my_return_data
def get_eu_trade_data(self):
global ALL_COUNTRIES_DATA_FRAME
my_data_frame = ALL_COUNTRIES_DATA_FRAME
eu_countries=['Austria','Belgium','Croatia','Czech Republic','Denmark','Finland','France','Germany','Greece','Hungary',
'Italy','Netherlands','Poland','Portugal','Spain','Sweden']
eu_countries=pd.DataFrame(eu_countries)
eu_countries.columns=['Country']
eu_countries
my_sql = '''
SELECT t.*,
t.Exports*-1 ExportsN
FROM
(
select
[country],
[year],
case
WHEN [Trading Partner] in (select distinct Country from eu_countries) then 'EU'
WHEN [Trading Partner] not in (select distinct Country from eu_countries) then 'World'
ELSE 'World'
END [Trade Group],
sum([Imports ($M)]) Imports,
sum([Exports ($M)]) Exports,
sum([Total Trade ($M)]) TotalTrade,
sum([Net Exports ($M)]) NetTrade,
sum(case WHEN [Total Trade ($M)] > 0 then 1 else 0 end) as [Trade Partners]
from my_data_frame
group by [country],[year],[Trade Group]
) t
'''
my_return_data = psql.sqldf(my_sql)
return my_return_data
def get_eu_trade_data_pcts(self):
global ALL_COUNTRIES_DATA_FRAME
eu_countries=['Austria','Belgium','Croatia','Czech Republic','Denmark','Finland','France','Germany','Greece','Hungary',
'Italy','Netherlands','Poland','Portugal','Spain','Sweden']
eu_countries=pd.DataFrame(eu_countries)
eu_countries.columns=['Country']
eu_countries
my_data_frame = ALL_COUNTRIES_DATA_FRAME
eu_data_frame=self.get_eu_trade_data()
my_sql = '''
SELECT
t.*,
g.[Imports] TotalTop20Imports,
g.[Exports] TotalTop20Exports,
g.[TotalTrade] TotalTop20Trade,
g.[NetTrade] TotalTop20NetTrade,
t.[Imports]/g.[Imports] EUvWorld_Imports_tradepct,
t.[Exports]/g.[Exports] EUvWorld_Exports_tradepct,
t.[TotalTrade]/g.[TotalTrade] EUvWorld_TotalTrade_tradepct
FROM
(
select
case
WHEN [country] in ('United States') then 'US'
WHEN [country] in ('China') then [country]
WHEN [country] in (select distinct Country from eu_countries) then 'EU'
WHEN [country] not in (select distinct Country from eu_countries) then 'RoW'
ELSE 'RoW'
END [Top20group],
[year],
[Trade Group],
sum([Imports]) Imports,
sum([Exports]) Exports,
sum([TotalTrade]) TotalTrade,
sum([NetTrade]) NetTrade
from eu_data_frame
group by [Top20group],[year],[Trade Group]
) t
left join
(
SELECT
[year],
case
WHEN [country] in ('United States') then 'US'
WHEN [country] in ('China') then [country]
WHEN [country] in (select distinct Country from eu_countries) then 'EU'
WHEN [country] not in (select distinct Country from eu_countries) then 'RoW'
ELSE 'RoW'
END [Top20group],
sum([Imports]) Imports,
sum([Exports]) Exports,
sum([TotalTrade]) TotalTrade,
sum([NetTrade]) NetTrade
from eu_data_frame
group by [year],[Top20group]
) g
ON
t.[Top20group]=g.[Top20group]
and
t.[year]=g.[year]
'''
my_return_data = psql.sqldf(my_sql)
my_return_data['EUvsWorld Imports %']=(my_return_data['EUvWorld_Imports_tradepct']*100).round(2)
my_return_data['EUvsWorld Exports %']=(my_return_data['EUvWorld_Exports_tradepct']*100).round(2)
my_return_data['EUvsWorld Total Trade %']=(my_return_data['EUvWorld_TotalTrade_tradepct']*100).round(2)
return my_return_data
def get_data_nafta_trade_continent_tool(self):
nafta_return_top5=self.get_top20_trade_continental_cont_data()
my_return_data_top5_continent=nafta_return_top5[nafta_return_top5['Continent Trade Rank']<=5]
my_return_data_top5_continent
my_dataframe=self.get_top20_trade_nafta_continental_cont_data()
my_dataframe
my_sql = '''
SELECT
country as [Trade Group],
[Continent TP] as [Continent],
[year] as [Year],
[Exports ($M)],
[Imports ($M)],
[Net Exports ($M)],
[Total Trade ($M)],
[Continent Trade Rank]
FROM my_return_data_top5_continent
UNION
SELECT
[group] as [Trade Group],
[Continent TP] as [Continent],
[year] [Year],
[Exports ($M)],
[Imports ($M)],
[Net Exports ($M)],
[Total Trade ($M)],
[Continent Trade Rank]
from my_dataframe
'''
my_return_data = psql.sqldf(my_sql)
return my_return_data
def get_nafta_trade_data(self):
global ALL_COUNTRIES_DATA_FRAME
my_data_frame = ALL_COUNTRIES_DATA_FRAME
nafta_countries=['United States','Mexico','Canada']
nafta_countries = pd.DataFrame(nafta_countries)  # target API: pandas.DataFrame
# -*- coding: utf-8 -*-
"""
These test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
from warnings import catch_warnings, simplefilter
import collections
import re
from datetime import datetime, date, timedelta, time
from decimal import Decimal
from numbers import Number
from fractions import Fraction
import numpy as np
import pytz
import pytest
import pandas as pd
from pandas._libs import lib, iNaT, missing as libmissing
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical, isna, Interval,
DateOffset)
from pandas import compat
from pandas.compat import u, PY2, StringIO, lrange
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_number,
is_integer,
is_float,
is_bool,
is_scalar,
is_scipy_sparse,
ensure_int32,
ensure_categorical)
from pandas.util import testing as tm
import pandas.util._test_decorators as td
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
return request.param
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
ll_params = [
([1], True, 'list'), # noqa: E241
([], True, 'list-empty'), # noqa: E241
((1, ), True, 'tuple'), # noqa: E241
(tuple(), True, 'tuple-empty'), # noqa: E241
({'a': 1}, True, 'dict'), # noqa: E241
(dict(), True, 'dict-empty'), # noqa: E241
({'a', 1}, 'set', 'set'), # noqa: E241
(set(), 'set', 'set-empty'), # noqa: E241
(frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241
(frozenset(), 'set', 'frozenset-empty'), # noqa: E241
(iter([1, 2]), True, 'iterator'), # noqa: E241
(iter([]), True, 'iterator-empty'), # noqa: E241
((x for x in [1, 2]), True, 'generator'), # noqa: E241
((x for x in []), True, 'generator-empty'), # noqa: E241
(Series([1]), True, 'Series'), # noqa: E241
(Series([]), True, 'Series-empty'), # noqa: E241
(Series(['a']).str, True, 'StringMethods'), # noqa: E241
(Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241
(Index([1]), True, 'Index'), # noqa: E241
(Index([]), True, 'Index-empty'), # noqa: E241
(DataFrame([[1]]), True, 'DataFrame'), # noqa: E241
(DataFrame(), True, 'DataFrame-empty'), # noqa: E241
(np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241
(np.array([]), True, 'ndarray-1d-empty'), # noqa: E241
(np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241
(np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241
(np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241
(np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241
(np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241
(np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241
(np.array(2), False, 'ndarray-0d'), # noqa: E241
(1, False, 'int'), # noqa: E241
(b'123', False, 'bytes'), # noqa: E241
(b'', False, 'bytes-empty'), # noqa: E241
('123', False, 'string'), # noqa: E241
('', False, 'string-empty'), # noqa: E241
(str, False, 'string-type'), # noqa: E241
(object(), False, 'object'), # noqa: E241
(np.nan, False, 'NaN'), # noqa: E241
(None, False, 'None') # noqa: E241
]
objs, expected, ids = zip(*ll_params)
@pytest.fixture(params=zip(objs, expected), ids=ids)
def maybe_list_like(request):
return request.param
def test_is_list_like(maybe_list_like):
obj, expected = maybe_list_like
expected = True if expected == 'set' else expected
assert inference.is_list_like(obj) == expected
def test_is_list_like_disallow_sets(maybe_list_like):
obj, expected = maybe_list_like
expected = False if expected == 'set' else expected
assert inference.is_list_like(obj, allow_sets=False) == expected
def test_is_sequence():
is_seq = inference.is_sequence
assert (is_seq((1, 2)))
assert (is_seq([1, 2]))
assert (not is_seq("abcd"))
assert (not is_seq(u("abcd")))
assert (not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert (not is_seq(A()))
def test_is_array_like():
assert inference.is_array_like(Series([]))
assert inference.is_array_like(Series([1, 2]))
assert inference.is_array_like(np.array(["a", "b"]))
assert inference.is_array_like(Index(["2016-01-01"]))
class DtypeList(list):
dtype = "special"
assert inference.is_array_like(DtypeList())
assert not inference.is_array_like([1, 2, 3])
assert not inference.is_array_like(tuple())
assert not inference.is_array_like("foo")
assert not inference.is_array_like(123)
@pytest.mark.parametrize('inner', [
[], [1], (1, ), (1, 2), {'a': 1}, {1, 'a'}, Series([1]),
Series([]), Series(['a']).str, (x for x in range(5))
])
@pytest.mark.parametrize('outer', [
list, Series, np.array, tuple
])
def test_is_nested_list_like_passes(inner, outer):
result = outer([inner for _ in range(5)])
assert inference.is_list_like(result)
@pytest.mark.parametrize('obj', [
'abc', [], [1], (1,), ['a'], 'a', {'a'},
[1, 2, 3], Series([1]), DataFrame({"A": [1]}),
([1, 2] for _ in range(5)),
])
def test_is_nested_list_like_fails(obj):
assert not inference.is_nested_list_like(obj)
@pytest.mark.parametrize(
"ll", [{}, {'A': 1}, Series([1])])
def test_is_dict_like_passes(ll):
assert inference.is_dict_like(ll)
@pytest.mark.parametrize(
"ll", ['1', 1, [1, 2], (1, 2), range(2), Index([1])])
def test_is_dict_like_fails(ll):
assert not inference.is_dict_like(ll)
@pytest.mark.parametrize("has_keys", [True, False])
@pytest.mark.parametrize("has_getitem", [True, False])
@pytest.mark.parametrize("has_contains", [True, False])
def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):
class DictLike(object):
def __init__(self, d):
self.d = d
if has_keys:
def keys(self):
return self.d.keys()
if has_getitem:
def __getitem__(self, key):
return self.d.__getitem__(key)
if has_contains:
def __contains__(self, key):
return self.d.__contains__(key)
d = DictLike({1: 2})
result = inference.is_dict_like(d)
expected = has_keys and has_getitem and has_contains
assert result is expected
def test_is_file_like(mock):
class MockFile(object):
pass
is_file = inference.is_file_like
data = StringIO("data")
assert is_file(data)
# No read / write attributes
# No iterator attributes
m = MockFile()
assert not is_file(m)
MockFile.write = lambda self: 0
# Write attribute but not an iterator
m = MockFile()
assert not is_file(m)
# gh-16530: Valid iterator just means we have the
# __iter__ attribute for our purposes.
MockFile.__iter__ = lambda self: self
# Valid write-only file
m = MockFile()
assert is_file(m)
del MockFile.write
MockFile.read = lambda self: 0
# Valid read-only file
m = MockFile()
assert is_file(m)
# Iterator but no read / write attributes
data = [1, 2, 3]
assert not is_file(data)
assert not is_file(mock.Mock())
@pytest.mark.parametrize(
"ll", [collections.namedtuple('Test', list('abc'))(1, 2, 3)])
def test_is_names_tuple_passes(ll):
assert inference.is_named_tuple(ll)
@pytest.mark.parametrize(
"ll", [(1, 2, 3), 'a', Series({'pi': 3.14})])
def test_is_names_tuple_fails(ll):
assert not inference.is_named_tuple(ll)
def test_is_hashable():
# all new-style classes are hashable by default
class HashableClass(object):
pass
class UnhashableClass1(object):
__hash__ = None
class UnhashableClass2(object):
def __hash__(self):
raise TypeError("Not hashable")
hashable = (1,
3.14,
np.float64(3.14),
'a',
tuple(),
(1, ),
HashableClass(), )
not_hashable = ([], UnhashableClass1(), )
abc_hashable_not_really_hashable = (([], ), UnhashableClass2(), )
for i in hashable:
assert inference.is_hashable(i)
for i in not_hashable:
assert not inference.is_hashable(i)
for i in abc_hashable_not_really_hashable:
assert not inference.is_hashable(i)
# numpy.array is no longer collections.Hashable as of
# https://github.com/numpy/numpy/pull/5326, just test
# is_hashable()
assert not inference.is_hashable(np.array([]))
# old-style classes in Python 2 don't appear hashable to
# collections.Hashable but also seem to support hash() by default
if PY2:
class OldStyleClass():
pass
c = OldStyleClass()
assert not isinstance(c, compat.Hashable)
assert inference.is_hashable(c)
hash(c) # this will not raise
@pytest.mark.parametrize(
"ll", [re.compile('ad')])
def test_is_re_passes(ll):
assert inference.is_re(ll)
@pytest.mark.parametrize(
"ll", ['x', 2, 3, object()])
def test_is_re_fails(ll):
assert not inference.is_re(ll)
@pytest.mark.parametrize(
"ll", [r'a', u('x'),
r'asdf',
re.compile('adsf'),
u(r'\u2233\s*'),
re.compile(r'')])
def test_is_recompilable_passes(ll):
assert inference.is_re_compilable(ll)
@pytest.mark.parametrize(
"ll", [1, [], object()])
def test_is_recompilable_fails(ll):
assert not inference.is_re_compilable(ll)
class TestInference(object):
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
assert lib.infer_dtype(arr) == compare
# object array of bytes
arr = arr.astype(object)
assert lib.infer_dtype(arr) == compare
# object array of bytes with missing values
assert lib.infer_dtype([b'a', np.nan, b'c'], skipna=True) == compare
def test_isinf_scalar(self):
# GH 11352
assert libmissing.isposinf_scalar(float('inf'))
assert libmissing.isposinf_scalar(np.inf)
assert not libmissing.isposinf_scalar(-np.inf)
assert not libmissing.isposinf_scalar(1)
assert not libmissing.isposinf_scalar('a')
assert libmissing.isneginf_scalar(float('-inf'))
assert libmissing.isneginf_scalar(-np.inf)
assert not libmissing.isneginf_scalar(np.inf)
assert not libmissing.isneginf_scalar(1)
assert not libmissing.isneginf_scalar('a')
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = {'', 'NULL', 'nan'}
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, neg)
out = lib.maybe_convert_numeric(
np.array([u(infinity)], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['+' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
# too many characters
with pytest.raises(ValueError, match=msg):
lib.maybe_convert_numeric(
np.array(['foo_' + infinity], dtype=object),
na_values, maybe_int)
def test_maybe_convert_numeric_post_floatify_nan(self, coerce):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = {-999, -999.0}
out = lib.maybe_convert_numeric(data, nan_values, coerce)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
assert np.all(np.isnan(result))
def test_convert_non_hashable(self):
# GH13324
# make sure that we are handling non-hashables
arr = np.array([[10.0, 2], 1.0, 'apple'])
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
def test_convert_numeric_uint64(self):
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([str(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
@pytest.mark.parametrize("arr", [
np.array([2**63, np.nan], dtype=object),
np.array([str(2**63), np.nan], dtype=object),
np.array([np.nan, 2**63], dtype=object),
np.array([np.nan, str(2**63)], dtype=object)])
def test_convert_numeric_uint64_nan(self, coerce, arr):
expected = arr.astype(float) if coerce else arr.copy()
result = lib.maybe_convert_numeric(arr, set(),
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
def test_convert_numeric_uint64_nan_values(self, coerce):
arr = np.array([2**63, 2**63 + 1], dtype=object)
na_values = {2**63}
expected = (np.array([np.nan, 2**63 + 1], dtype=float)
if coerce else arr.copy())
result = lib.maybe_convert_numeric(arr, na_values,
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("case", [
np.array([2**63, -1], dtype=object),
np.array([str(2**63), -1], dtype=object),
np.array([str(2**63), str(-1)], dtype=object),
np.array([-1, 2**63], dtype=object),
np.array([-1, str(2**63)], dtype=object),
np.array([str(-1), str(2**63)], dtype=object)])
def test_convert_numeric_int64_uint64(self, case, coerce):
expected = case.astype(float) if coerce else case.copy()
result = lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("value", [-2**63 - 1, 2**64])
def test_convert_int_overflow(self, value):
# see gh-18584
arr = np.array([value], dtype=object)
result = lib.maybe_convert_objects(arr)
tm.assert_numpy_array_equal(arr, result)
def test_maybe_convert_objects_uint64(self):
# see gh-4471
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
# NumPy bug: can't compare uint64 to int64, as that
# results in both casting to float64, so we should
# make sure that this function is robust against it
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2, -1], dtype=object)
exp = np.array([2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2**63, -1], dtype=object)
exp = np.array([2**63, -1], dtype=object)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
def test_mixed_dtypes_remain_object_array(self):
# GH14956
array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1],
dtype=object)
result = lib.maybe_convert_objects(array, convert_datetime=1)
tm.assert_numpy_array_equal(result, array)
class TestTypeInference(object):
# Dummy class used for testing with Python objects
class Dummy():
pass
def test_inferred_dtype_fixture(self, any_skipna_inferred_dtype):
# see pandas/conftest.py
inferred_dtype, values = any_skipna_inferred_dtype
# make sure the inferred dtype of the fixture is as requested
assert inferred_dtype == lib.infer_dtype(values, skipna=True)
def test_length_zero(self):
result = lib.infer_dtype(np.array([], dtype='i4'))
assert result == 'integer'
result = lib.infer_dtype([])
assert result == 'empty'
# GH 18004
arr = np.array([np.array([], dtype=object),
np.array([], dtype=object)])
result = lib.infer_dtype(arr)
assert result == 'empty'
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'integer'
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr)
assert result == 'integer'
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, np.nan, False], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr)
assert result == 'floating'
def test_decimals(self):
# GH15690
arr = np.array([Decimal(1), Decimal(2), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([1.0, 2.0, Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([Decimal(1), Decimal('NaN'), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'decimal'
def test_string(self):
pass
def test_unicode(self):
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr, skipna=True)
expected = 'unicode' if PY2 else 'string'
assert result == expected
@pytest.mark.parametrize('dtype, missing, skipna, expected', [
(float, np.nan, False, 'floating'),
(float, np.nan, True, 'floating'),
(object, np.nan, False, 'floating'),
(object, np.nan, True, 'empty'),
(object, None, False, 'mixed'),
(object, None, True, 'empty')
])
@pytest.mark.parametrize('box', [pd.Series, np.array])
def test_object_empty(self, box, missing, dtype, skipna, expected):
# GH 23421
arr = box([missing, missing], dtype=dtype)
result = lib.infer_dtype(arr, skipna=skipna)
assert result == expected
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'datetime64'
def test_infer_dtype_datetime(self):
arr = np.array([Timestamp('2011-01-01'),
Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.datetime64('2011-01-01'),
np.datetime64('2011-01-01')], dtype=object)
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)])
assert lib.infer_dtype(arr) == 'datetime'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1)])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, pd.Timestamp('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1), n])
assert lib.infer_dtype(arr) == 'datetime'
# different type of nat
arr = np.array([np.timedelta64('nat'),
np.datetime64('2011-01-02')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.datetime64('2011-01-02'),
np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
# mixed datetime
arr = np.array([datetime(2011, 1, 1),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
# should be datetime?
arr = np.array([np.datetime64('2011-01-01'),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Timestamp('2011-01-02'),
np.datetime64('2011-01-01')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1])
assert lib.infer_dtype(arr) == 'mixed-integer'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1.1])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, '2011-01-01', pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_timedelta(self):
arr = np.array([pd.Timedelta('1 days'),
pd.Timedelta('2 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([np.timedelta64(1, 'D'),
np.timedelta64(2, 'D')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([timedelta(1), timedelta(2)])
assert lib.infer_dtype(arr) == 'timedelta'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, Timedelta('1 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1)])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, pd.Timedelta('1 days'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1), n])
assert lib.infer_dtype(arr) == 'timedelta'
# different type of nat
arr = np.array([np.datetime64('nat'), np.timedelta64(1, 'D')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64(1, 'D'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_period(self):
# GH 13664
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='M')])
assert lib.infer_dtype(arr) == 'period'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Period('2011-01', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([n, pd.Period('2011-01', freq='D'), n])
assert lib.infer_dtype(arr) == 'period'
# different type of nat
arr = np.array([np.datetime64('nat'), pd.Period('2011-01', freq='M')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Period('2011-01', freq='M'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
@pytest.mark.parametrize(
"data",
[
[datetime(2017, 6, 12, 19, 30), datetime(2017, 3, 11, 1, 15)],
[Timestamp("20170612"), Timestamp("20170311")],
[Timestamp("20170612", tz='US/Eastern'),
Timestamp("20170311", tz='US/Eastern')],
[date(2017, 6, 12),
Timestamp("20170311", tz='US/Eastern')],
[np.datetime64("2017-06-12"), np.datetime64("2017-03-11")],
[np.datetime64("2017-06-12"), datetime(2017, 3, 11, 1, 15)]
]
)
def test_infer_datetimelike_array_datetime(self, data):
assert lib.infer_datetimelike_array(data) == "datetime"
@pytest.mark.parametrize(
"data",
[
[timedelta(2017, 6, 12), timedelta(2017, 3, 11)],
[timedelta(2017, 6, 12), date(2017, 3, 11)],
[np.timedelta64(2017, "D"), np.timedelta64(6, "s")],
[np.timedelta64(2017, "D"), timedelta(2017, 3, 11)]
]
)
def test_infer_datetimelike_array_timedelta(self, data):
assert lib.infer_datetimelike_array(data) == "timedelta"
def test_infer_datetimelike_array_date(self):
arr = [date(2017, 6, 12), date(2017, 3, 11)]
assert lib.infer_datetimelike_array(arr) == "date"
@pytest.mark.parametrize(
"data",
[
["2017-06-12", "2017-03-11"],
[20170612, 20170311],
[20170612.5, 20170311.8],
[Dummy(), Dummy()],
[Timestamp("20170612"), Timestamp("20170311", tz='US/Eastern')],
[Timestamp("20170612"), 20170311],
[timedelta(2017, 6, 12), Timestamp("20170311", tz='US/Eastern')]
]
)
def test_infer_datetimelike_array_mixed(self, data):
assert lib.infer_datetimelike_array(data) == "mixed"
@pytest.mark.parametrize(
"first, expected",
[
[[None], "mixed"],
[[np.nan], "mixed"],
[[pd.NaT], "nat"],
[[datetime(2017, 6, 12, 19, 30), pd.NaT], "datetime"],
[[np.datetime64("2017-06-12"), pd.NaT], "datetime"],
[[date(2017, 6, 12), pd.NaT], "date"],
[[timedelta(2017, 6, 12), pd.NaT], "timedelta"],
[[np.timedelta64(2017, "D"), pd.NaT], "timedelta"]
]
)
@pytest.mark.parametrize("second", [None, np.nan])
def test_infer_datetimelike_array_nan_nat_like(self, first, second,
expected):
first.append(second)
assert lib.infer_datetimelike_array(first) == expected
def test_infer_dtype_all_nan_nat_like(self):
arr = np.array([np.nan, np.nan])
assert lib.infer_dtype(arr) == 'floating'
# a mix of nan and None results in 'mixed'
arr = np.array([np.nan, np.nan, None])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([None, np.nan, np.nan])
assert lib.infer_dtype(arr) == 'mixed'
# pd.NaT
arr = np.array([pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([None, pd.NaT, None])
assert lib.infer_dtype(arr) == 'datetime'
# np.datetime64(nat)
arr = np.array([np.datetime64('nat')])
assert lib.infer_dtype(arr) == 'datetime64'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([pd.NaT, n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([pd.NaT, n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
# datetime / timedelta mixed
arr = np.array([pd.NaT, np.datetime64('nat'),
np.timedelta64('nat'), np.nan])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64('nat'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_is_datetimelike_array_all_nan_nat_like(self):
arr = np.array([np.nan, pd.NaT, np.datetime64('nat')])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.datetime64('nat'),
np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, np.nan], dtype=object)
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
assert lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='US/Eastern')],
dtype=object))
assert not lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='CET')],
dtype=object))
@pytest.mark.parametrize(
"func",
[
'is_datetime_array',
'is_datetime64_array',
'is_bool_array',
'is_timedelta_or_timedelta64_array',
'is_date_array',
'is_time_array',
'is_interval_array',
'is_period_array'])
def test_other_dtypes_for_array(self, func):
func = getattr(lib, func)
arr = np.array(['foo', 'bar'])
assert not func(arr)
arr = np.array([1, 2])
assert not func(arr)
def test_date(self):
dates = [date(2012, 1, day) for day in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'date'
dates = [date(2012, 1, day) for day in range(1, 20)] + [np.nan]
result = lib.infer_dtype(dates)
assert result == 'mixed'
result = lib.infer_dtype(dates, skipna=True)
assert result == 'date'
def test_is_numeric_array(self):
assert lib.is_float_array(np.array([1, 2.0]))
assert lib.is_float_array(np.array([1, 2.0, np.nan]))
assert not lib.is_float_array(np.array([1, 2]))
assert lib.is_integer_array(np.array([1, 2]))
assert not lib.is_integer_array(np.array([1, 2.0]))
def test_is_string_array(self):
assert lib.is_string_array(np.array(['foo', 'bar']))
assert not lib.is_string_array(
np.array(['foo', 'bar', np.nan], dtype=object), skipna=False)
assert lib.is_string_array(
np.array(['foo', 'bar', np.nan], dtype=object), skipna=True)
assert not lib.is_string_array(np.array([1, 2]))
def test_to_object_array_tuples(self):
r = (5, 6)
values = [r]
result = lib.to_object_array_tuples(values)
try:
# make sure record array works
from collections import namedtuple
record = namedtuple('record', 'x y')
r = record(5, 6)
values = [r]
result = lib.to_object_array_tuples(values) # noqa
except ImportError:
pass
def test_object(self):
# GH 7431
# cannot infer more than this as only a single element
arr = np.array([None], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
def test_to_object_array_width(self):
# see gh-13320
rows = [[1, 2, 3], [4, 5, 6]]
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows)  # target API: pandas._libs.lib.to_object_array
import copy
from builtins import range
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from ..testing_utils import make_ecommerce_entityset
from featuretools import variable_types
from featuretools.entityset import EntitySet
@pytest.fixture()
def entityset():
return make_ecommerce_entityset()
@pytest.fixture
def entity(entityset):
return entityset['log']
class TestQueryFuncs(object):
def test_query_by_id(self, entityset):
df = entityset['log'].query_by_values(instance_vals=[0])
assert df['id'].values[0] == 0
def test_query_by_id_with_sort(self, entityset):
df = entityset['log'].query_by_values(
instance_vals=[2, 1, 3],
return_sorted=True)
assert df['id'].values.tolist() == [2, 1, 3]
def test_query_by_id_with_time(self, entityset):
df = entityset['log'].query_by_values(
instance_vals=[0, 1, 2, 3, 4],
time_last=datetime(2011, 4, 9, 10, 30, 2 * 6))
assert df['id'].get_values().tolist() == [0, 1, 2]
def test_query_by_variable_with_time(self, entityset):
df = entityset['log'].query_by_values(
instance_vals=[0, 1, 2], variable_id='session_id',
time_last=datetime(2011, 4, 9, 10, 50, 0))
true_values = [
i * 5 for i in range(5)] + [i * 1 for i in range(4)] + [0]
assert df['id'].get_values().tolist() == list(range(10))
assert df['value'].get_values().tolist() == true_values
def test_query_by_variable_with_training_window(self, entityset):
df = entityset['log'].query_by_values(
instance_vals=[0, 1, 2], variable_id='session_id',
time_last=datetime(2011, 4, 9, 10, 50, 0),
training_window='15m')
assert df['id'].get_values().tolist() == [9]
assert df['value'].get_values().tolist() == [0]
def test_query_by_indexed_variable(self, entityset):
df = entityset['log'].query_by_values(
instance_vals=['taco clock'],
variable_id='product_id')
assert df['id'].get_values().tolist() == [15, 16]
def test_query_by_non_unique_sort_raises(self, entityset):
with pytest.raises(ValueError):
entityset['log'].query_by_values(
instance_vals=[0, 2, 1],
variable_id='session_id', return_sorted=True)
class TestVariableHandling(object):
    # TODO: rewrite now that ds and entityset are separate
def test_check_variables_and_dataframe(self):
# matches
df = pd.DataFrame({'id': [0, 1, 2], 'category': ['a', 'b', 'a']})
vtypes = {'id': variable_types.Categorical,
'category': variable_types.Categorical}
entityset = EntitySet(id='test')
entityset.entity_from_dataframe('test_entity', df, index='id',
variable_types=vtypes)
assert entityset.entity_stores['test_entity'].variable_types['category'] == variable_types.Categorical
def test_make_index_variable_ordering(self):
df = pd.DataFrame({'id': [0, 1, 2], 'category': ['a', 'b', 'a']})
vtypes = {'id': variable_types.Categorical,
'category': variable_types.Categorical}
entityset = EntitySet(id='test')
entityset.entity_from_dataframe(entity_id='test_entity',
index='id1',
make_index=True,
variable_types=vtypes,
dataframe=df)
assert entityset.entity_stores['test_entity'].df.columns[0] == 'id1'
def test_extra_variable_type(self):
# more variables
df = pd.DataFrame({'id': [0, 1, 2], 'category': ['a', 'b', 'a']})
vtypes = {'id': variable_types.Categorical,
'category': variable_types.Categorical,
'category2': variable_types.Categorical}
with pytest.raises(LookupError):
entityset = EntitySet(id='test')
entityset.entity_from_dataframe(entity_id='test_entity',
index='id',
variable_types=vtypes, dataframe=df)
def test_unknown_index(self):
# more variables
df = pd.DataFrame({'category': ['a', 'b', 'a']})
vtypes = {'category': variable_types.Categorical}
entityset = EntitySet(id='test')
entityset.entity_from_dataframe(entity_id='test_entity',
index='id',
variable_types=vtypes, dataframe=df)
assert entityset['test_entity'].index == 'id'
assert entityset['test_entity'].df['id'].tolist() == list(range(3))
def test_bad_index_variables(self):
# more variables
df = pd.DataFrame({'id': [0, 1, 2], 'category': ['a', 'b', 'a']})
vtypes = {'id': variable_types.Categorical,
'category': variable_types.Categorical}
with pytest.raises(LookupError):
entityset = EntitySet(id='test')
entityset.entity_from_dataframe(entity_id='test_entity',
index='id',
variable_types=vtypes,
dataframe=df,
time_index='time')
def test_converts_variable_types_on_init(self):
df = pd.DataFrame({'id': [0, 1, 2],
'category': ['a', 'b', 'a'],
'category_int': [1, 2, 3],
'ints': ['1', '2', '3'],
'floats': ['1', '2', '3.0']})
df["category_int"] = df["category_int"].astype("category")
vtypes = {'id': variable_types.Categorical,
'ints': variable_types.Numeric,
'floats': variable_types.Numeric}
entityset = EntitySet(id='test')
entityset.entity_from_dataframe(entity_id='test_entity', index='id',
variable_types=vtypes, dataframe=df)
entity_df = entityset.get_dataframe('test_entity')
assert entity_df['ints'].dtype.name in variable_types.PandasTypes._pandas_numerics
assert entity_df['floats'].dtype.name in variable_types.PandasTypes._pandas_numerics
        # this is inferred from the pandas dtype
e = entityset["test_entity"]
assert isinstance(e['category_int'], variable_types.Categorical)
def test_converts_variable_type_after_init(self):
df = pd.DataFrame({'id': [0, 1, 2],
'category': ['a', 'b', 'a'],
'ints': ['1', '2', '1']})
df["category"] = df["category"].astype("category")
entityset = EntitySet(id='test')
entityset.entity_from_dataframe(entity_id='test_entity', index='id',
dataframe=df)
e = entityset['test_entity']
df = entityset.get_dataframe('test_entity')
e.convert_variable_type('ints', variable_types.Numeric)
assert isinstance(e['ints'], variable_types.Numeric)
assert df['ints'].dtype.name in variable_types.PandasTypes._pandas_numerics
e.convert_variable_type('ints', variable_types.Categorical)
assert isinstance(e['ints'], variable_types.Categorical)
e.convert_variable_type('ints', variable_types.Ordinal)
assert isinstance(e['ints'], variable_types.Ordinal)
e.convert_variable_type('ints', variable_types.Boolean,
true_val=1, false_val=2)
assert isinstance(e['ints'], variable_types.Boolean)
assert df['ints'].dtype.name == 'bool'
def test_converts_datetime(self):
        # strings convert to datetime correctly
        # This test fails without defining vtypes: the EntitySet would
        # otherwise infer the time column as a numeric type
times = pd.date_range('1/1/2011', periods=3, freq='H')
time_strs = times.strftime('%Y-%m-%d')
df = pd.DataFrame({'id': [0, 1, 2], 'time': time_strs})
vtypes = {'id': variable_types.Categorical,
'time': variable_types.Datetime}
entityset = EntitySet(id='test')
entityset._import_from_dataframe(entity_id='test_entity', index='id',
time_index="time", variable_types=vtypes,
dataframe=df)
pd_col = entityset.get_column_data('test_entity', 'time')
# assert type(es['test_entity']['time']) == variable_types.Datetime
assert type(pd_col[0]) == pd.Timestamp
def test_handles_datetime_format(self):
# check if we load according to the format string
        # pass in an ambiguous date
datetime_format = "%d-%m-%Y"
actual = pd.Timestamp('Jan 2, 2011')
time_strs = [actual.strftime(datetime_format)] * 3
df = pd.DataFrame(
{'id': [0, 1, 2], 'time_format': time_strs, 'time_no_format': time_strs})
vtypes = {'id': variable_types.Categorical,
'time_format': (variable_types.Datetime, {"format": datetime_format}),
'time_no_format': variable_types.Datetime}
entityset = EntitySet(id='test')
entityset._import_from_dataframe(entity_id='test_entity', index='id',
variable_types=vtypes, dataframe=df)
col_format = entityset.get_column_data('test_entity', 'time_format')
col_no_format = entityset.get_column_data(
'test_entity', 'time_no_format')
# without formatting pandas gets it wrong
assert (col_no_format != actual).all()
        # with formatting we correctly get Jan 2 (see the illustrative sketch after this test)
assert (col_format == actual).all()
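    # Illustrative sketch, not part of the original suite: plain pandas shows the
    # same ambiguity the comments above describe -- '02-01-2011' is parsed
    # month-first by default, and only the explicit format yields Jan 2.
    def test_pandas_datetime_format_sketch(self):
        ambiguous = '02-01-2011'
        assert pd.to_datetime(ambiguous) == pd.Timestamp('2011-02-01')
        assert pd.to_datetime(ambiguous, format='%d-%m-%Y') == pd.Timestamp('2011-01-02')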
def test_handles_datetime_mismatch(self):
# can't convert arbitrary strings
df = pd.DataFrame({'id': [0, 1, 2], 'time': ['a', 'b', 'tomorrow']})
vtypes = {'id': variable_types.Categorical,
'time': variable_types.Datetime}
with pytest.raises(ValueError):
entityset = EntitySet(id='test')
entityset.entity_from_dataframe('test_entity', df, 'id',
time_index='time', variable_types=vtypes)
def test_calculates_statistics_on_init(self):
df = pd.DataFrame({'id': [0, 1, 2],
'time': [datetime(2011, 4, 9, 10, 31, 3 * i)
for i in range(3)],
'category': ['a', 'b', 'a'],
'number': [4, 5, 6],
'boolean': [True, False, True],
'boolean_with_nan': [True, False, np.nan]})
vtypes = {'id': variable_types.Categorical,
'time': variable_types.Datetime,
'category': variable_types.Categorical,
'number': variable_types.Numeric,
'boolean': variable_types.Boolean,
'boolean_with_nan': variable_types.Boolean}
entityset = EntitySet(id='test')
entityset.entity_from_dataframe('stats_test_entity', df, 'id',
variable_types=vtypes)
e = entityset["stats_test_entity"]
# numerics don't have nunique or percent_unique defined
for v in ['time', 'category', 'number']:
assert e[v].count == 3
for v in ['time', 'number']:
with pytest.raises(AttributeError):
e[v].nunique
with pytest.raises(AttributeError):
e[v].percent_unique
# 'id' column automatically parsed as id
assert e['id'].count == 3
# categoricals have nunique and percent_unique defined
assert e['category'].nunique == 2
assert e['category'].percent_unique == 2. / 3
# booleans have count and number of true/false labels defined
assert e['boolean'].count == 3
# assert e['boolean'].num_true == 3
assert e['boolean'].num_true == 2
assert e['boolean'].num_false == 1
# TODO: the below fails, but shouldn't
# boolean with nan have count and number of true/false labels defined
# assert e['boolean_with_nan'].count == 2
# assert e['boolean_with_nan'].num_true == 1
# assert e['boolean_with_nan'].num_false == 1
def test_column_funcs(self, entityset):
# Note: to convert the time column directly either the variable type
        # or convert_date_columns must be specified
df = pd.DataFrame({'id': [0, 1, 2],
'time': [datetime(2011, 4, 9, 10, 31, 3 * i)
for i in range(3)],
'category': ['a', 'b', 'a'],
'number': [4, 5, 6]})
vtypes = {'time': variable_types.Datetime}
entityset.entity_from_dataframe('test_entity', df, index='id',
time_index='time', variable_types=vtypes)
assert entityset.get_dataframe('test_entity').shape == df.shape
assert entityset.get_index('test_entity') == 'id'
assert entityset.get_time_index('test_entity') == 'time'
assert set(entityset.get_column_names(
'test_entity')) == set(df.columns)
assert entityset.get_column_max('test_entity', 'number') == 6
assert entityset.get_column_min('test_entity', 'number') == 4
assert entityset.get_column_std('test_entity', 'number') == 1
assert entityset.get_column_count('test_entity', 'number') == 3
assert entityset.get_column_mean('test_entity', 'number') == 5
assert entityset.get_column_nunique('test_entity', 'number') == 3
assert entityset.get_column_type(
'test_entity', 'time') == df['time'].dtype
assert set(entityset.get_column_data(
'test_entity', 'id')) == set(df['id'])
def test_combine_variables(self, entityset):
# basic case
entityset.combine_variables('log', 'comment+product_id',
['comments', 'product_id'])
assert entityset['log']['comment+product_id'].dtype == 'categorical'
assert 'comment+product_id' in entityset['log'].df
# one variable to combine
entityset.combine_variables('log', 'comment+',
['comments'])
assert entityset['log']['comment+'].dtype == 'categorical'
assert 'comment+' in entityset['log'].df
# drop columns
entityset.combine_variables('log', 'new_priority_level',
['priority_level'],
drop=True)
assert entityset['log']['new_priority_level'].dtype == 'categorical'
assert 'new_priority_level' in entityset['log'].df
assert 'priority_level' not in entityset['log'].df
assert 'priority_level' not in entityset['log'].variables
# hashed
entityset.combine_variables('log', 'hashed_comment_product',
['comments', 'product_id'],
hashed=True)
assert entityset['log']['comment+product_id'].dtype == 'categorical'
assert entityset['log'].df['hashed_comment_product'].dtype == 'int64'
assert 'comment+product_id' in entityset['log'].df
def test_add_parent_time_index(self, entityset):
entityset = copy.deepcopy(entityset)
entityset.add_parent_time_index(entity_id='sessions',
parent_entity_id='customers',
parent_time_index_variable=None,
child_time_index_variable='session_date',
include_secondary_time_index=True,
secondary_time_index_variables=['cancel_reason'])
sessions = entityset['sessions']
assert sessions.time_index == 'session_date'
assert sessions.secondary_time_index == {
'cancel_date': ['cancel_reason']}
true_session_dates = ([datetime(2011, 4, 6)] +
[datetime(2011, 4, 8)] * 3 +
[datetime(2011, 4, 9)] * 2)
for t, x in zip(true_session_dates, sessions.df['session_date']):
assert t == x.to_pydatetime()
true_cancel_dates = ([datetime(2012, 1, 6)] +
[datetime(2011, 6, 8)] * 3 +
[datetime(2011, 10, 9)] * 2)
for t, x in zip(true_cancel_dates, sessions.df['cancel_date']):
assert t == x.to_pydatetime()
true_cancel_reasons = (['reason_1'] +
['reason_1'] * 3 +
['reason_2'] * 2)
for t, x in zip(true_cancel_reasons, sessions.df['cancel_reason']):
assert t == x
def test_sort_time_id(self):
transactions_df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
"transaction_time":
|
pd.date_range(start="10:00", periods=6, freq="10s")
|
pandas.date_range
|
import pandas as pd
import glob
class make_annotation_dataframe():
def __init__(self, fileName, direcrtoryPath):
self.fileName = fileName
self.direcrtoryPath = direcrtoryPath
def __getitem__(self, item):
return getattr(self, item)
def prepare_dataframe(self):
annotationsTrainDF = self.readCSVFileAsDataFrame()
dfTrain = self.readImagesFromDirectory()
train_df = dfTrain.merge(annotationsTrainDF, how='inner', left_on='imageName', right_on='Image_Name')
train_df = train_df.assign(image_path=self.direcrtoryPath)
from sklearn import preprocessing
# label_encoder object knows how to understand word labels.
label_encoder = preprocessing.LabelEncoder()
train_df['le_carName'] = label_encoder.fit_transform(train_df['carName'])
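        # e.g. (illustrative): LabelEncoder().fit_transform(['audi', 'bmw', 'audi'])
        # returns array([0, 1, 0]) -- each distinct carName maps to a stable integer code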
dfTrain_W_H =
|
pd.read_csv("D:\\GreatLearning\\Flask\\OBJECT_DETECTION_CAR\\files\\train_8144_images.csv")
|
pandas.read_csv
|
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime
import datetime as dt
import os
from typing import Union
import numpy as np
import pandas as pd
import pytest
import pytz
from gs_quant.target.common import XRef, PricingLocation, Currency as CurrEnum
from numpy.testing import assert_allclose
from pandas.testing import assert_series_equal
from pandas.tseries.offsets import CustomBusinessDay
from pytz import timezone
from testfixtures import Replacer
from testfixtures.mock import Mock
import gs_quant.timeseries.measures as tm
import gs_quant.timeseries.measures_rates as tm_rates
from gs_quant.api.gs.assets import GsTemporalXRef, GsAssetApi, GsIdType, IdList, GsAsset
from gs_quant.api.gs.data import GsDataApi, MarketDataResponseFrame
from gs_quant.api.gs.data import QueryType
from gs_quant.data.core import DataContext
from gs_quant.data.dataset import Dataset
from gs_quant.data.fields import Fields
from gs_quant.errors import MqError, MqValueError, MqTypeError
from gs_quant.markets.securities import AssetClass, Cross, Index, Currency, SecurityMaster, Stock, \
Swap, CommodityNaturalGasHub
from gs_quant.session import GsSession, Environment
from gs_quant.test.timeseries.utils import mock_request
from gs_quant.timeseries import Returns
from gs_quant.timeseries.measures import BenchmarkType
_index = [pd.Timestamp('2019-01-01')]
_test_datasets = ('TEST_DATASET',)
def mock_empty_market_data_response():
df = MarketDataResponseFrame()
df.dataset_ids = ()
return df
def map_identifiers_default_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-LIBOR-BBA" in ids:
return {"USD-LIBOR-BBA": "MAPDB7QNB2TZVQ0E"}
elif "EUR-EURIBOR-TELERATE" in ids:
return {"EUR-EURIBOR-TELERATE": "MAJNQPFGN1EBDHAE"}
elif "GBP-LIBOR-BBA" in ids:
return {"GBP-LIBOR-BBA": "MAFYB8Z4R1377A19"}
elif "JPY-LIBOR-BBA" in ids:
return {"JPY-LIBOR-BBA": "MABMVE27EM8YZK33"}
elif "EUR OIS" in ids:
return {"EUR OIS": "MARFAGXDQRWM07Y2"}
def map_identifiers_swap_rate_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m" in ids:
return {"USD-3m": "MAAXGV0GZTW4GFNC"}
elif "EUR-6m" in ids:
return {"EUR-6m": "MA5WM2QWRVMYKDK0"}
elif "KRW" in ids:
return {"KRW": 'MAJ6SEQH3GT0GA2Z'}
def map_identifiers_inflation_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "CPI-UKRPI" in ids:
return {"CPI-UKRPI": "MAQ7ND0MBP2AVVQW"}
elif "CPI-CPXTEMU" in ids:
return {"CPI-CPXTEMU": "MAK1FHKH5P5GJSHH"}
def map_identifiers_cross_basis_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m/JPY-3m" in ids:
return {"USD-3m/JPY-3m": "MA99N6C1KF9078NM"}
elif "EUR-3m/USD-3m" in ids:
return {"EUR-3m/USD-3m": "MAXPKTXW2D4X6MFQ"}
elif "GBP-3m/USD-3m" in ids:
return {"GBP-3m/USD-3m": "MA8BZHQV3W32V63B"}
def get_data_policy_rate_expectation_mocker(
start: Union[dt.date, dt.datetime] = None,
end: Union[dt.date, dt.datetime] = None,
as_of: dt.datetime = None,
since: dt.datetime = None,
fields: Union[str, Fields] = None,
asset_id_type: str = None,
**kwargs) -> pd.DataFrame:
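    # descriptive note (added): serve the spot meeting when meeting number 0 (or its
    # known meeting date) is requested, otherwise fall back to the forward expectation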
if 'meetingNumber' in kwargs:
if kwargs['meetingNumber'] == 0:
return mock_meeting_spot()
elif 'meeting_date' in kwargs:
if kwargs['meeting_date'] == dt.date(2019, 10, 24):
return mock_meeting_spot()
return mock_meeting_expectation()
def test_parse_meeting_date():
assert tm.parse_meeting_date(5) == ''
assert tm.parse_meeting_date('') == ''
assert tm.parse_meeting_date('test') == ''
assert tm.parse_meeting_date('2019-09-01') == dt.date(2019, 9, 1)
def test_currency_to_default_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_default_mocker)
asset_id_list = ["MAZ7RWC904JYHYPS", "MAJNQPFGN1EBDHAE", "MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8",
"MA4B66MW5E27U8P32SB"]
correct_mapping = ["MAPDB7QNB2TZVQ0E", "MAJNQPFGN1EBDHAE", "MAFYB8Z4R1377A19", "MABMVE27EM8YZK33",
"MA4J1YB8XZP2BPT8", "MA4B66MW5E27U8P32SB"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_default_swap_rate_asset(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_swap_rate_mocker)
asset_id_list = ['MAZ7RWC904JYHYPS', 'MAJNQPFGN1EBDHAE', 'MAJ6SEQH3GT0GA2Z']
correct_mapping = ['MAAXGV0GZTW4GFNC', 'MA5WM2QWRVMYKDK0', 'MAJ6SEQH3GT0GA2Z']
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_swap_rate_asset(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_inflation_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_inflation_mocker)
asset_id_list = ["MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
correct_mapping = ["MAQ7ND0MBP2AVVQW", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_inflation_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.currency_to_inflation_benchmark_rate('MA66CZBQJST05XKG') == 'MA66CZBQJST05XKG'
def test_cross_to_basis(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_cross_basis_mocker)
asset_id_list = ["MAYJPCVVF2RWXCES", "MA4B66MW5E27U8P32SB", "nobbid"]
correct_mapping = ["MA99N6C1KF9078NM", "MA4B66MW5E27U8P32SB", "nobbid"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_basis(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.cross_to_basis('MAYJPCVVF2RWXCES') == 'MAYJPCVVF2RWXCES'
def test_currency_to_tdapi_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA25DW5ZGC1BSC8Y', 'NOK')
bbid_mock.return_value = 'NOK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAFRSWPAF5QPNTP2' == correct_id
bbid_mock.return_value = 'CHF'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAW25BGQJH9P6DPT' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAA9MVX15AJNQCVG' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA6QCAP9B7ABS9HA' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAEE219J5ZP0ZKRK' == correct_id
bbid_mock.return_value = 'SEK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAETMVTPNP3199A5' == correct_id
bbid_mock.return_value = 'HKD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MABRNGY8XRFVC36N' == correct_id
bbid_mock.return_value = 'NZD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAH16NHE1HBN0FBZ' == correct_id
bbid_mock.return_value = 'AUD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAY8147CRK0ZP53B' == correct_id
bbid_mock.return_value = 'CAD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MANJ8SS88WJ6N28Q' == correct_id
bbid_mock.return_value = 'KRW'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAP55AXG5SQVS6C5' == correct_id
bbid_mock.return_value = 'INR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA20JHJXN1PD5HGE' == correct_id
bbid_mock.return_value = 'CNY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA4K1D8HH2R0RQY5' == correct_id
bbid_mock.return_value = 'SGD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA5CQFHYBPH9E5BS' == correct_id
bbid_mock.return_value = 'DKK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAF131NKWVRESFYA' == correct_id
asset = Currency('MA890', 'PLN')
bbid_mock.return_value = 'PLN'
assert 'MA890' == tm_rates._currency_to_tdapi_swap_rate_asset(asset)
replace.restore()
def test_currency_to_tdapi_basis_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA890', 'NOK')
bbid_mock.return_value = 'NOK'
assert 'MA890' == tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAQB1PGEJFCET3GG' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAGRG2VT11GQ2RQ9' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAHCYNB3V75JC5Q8' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAXVRBEZCJVH0C4V' == correct_id
replace.restore()
def test_check_clearing_house():
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house('lch')
assert tm_rates._ClearingHouse.CME == tm_rates._check_clearing_house(tm_rates._ClearingHouse.CME)
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house(None)
invalid_ch = ['NYSE']
for ch in invalid_ch:
with pytest.raises(MqError):
tm_rates._check_clearing_house(ch)
def test_get_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
assert dict(csaTerms='USD-1') == tm_rates._get_swap_csa_terms('USD', fed_funds_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_swap_csa_terms('EUR', estr_index)
assert {} == tm_rates._get_swap_csa_terms('EUR', euribor_index)
assert {} == tm_rates._get_swap_csa_terms('USD', usd_libor_index)
def test_get_basis_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
sofr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.SOFR.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
eonia_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EONIA.value]
assert dict(csaTerms='USD-1') == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, sofr_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_basis_swap_csa_terms('EUR', estr_index, eonia_index)
assert {} == tm_rates._get_basis_swap_csa_terms('EUR', eonia_index, euribor_index)
assert {} == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, usd_libor_index)
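# Descriptive note (added), as exercised in the test below: the designated maturity of
# the overnight-index leg (SOFR/SONIA) is aligned to the LIBOR leg's tenor, and the
# arguments are left untouched when both legs use the same rate option.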
def test_match_floating_tenors():
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='6m')
assert swap_args == tm_rates._match_floating_tenors(swap_args)
def test_get_term_struct_date(mocker):
today = datetime.datetime.today()
biz_day = CustomBusinessDay()
assert today == tm_rates._get_term_struct_date(tenor=today, index=today, business_day=biz_day)
date_index = datetime.datetime(2020, 7, 31, 0, 0)
assert date_index == tm_rates._get_term_struct_date(tenor='2020-07-31', index=date_index, business_day=biz_day)
assert date_index == tm_rates._get_term_struct_date(tenor='0b', index=date_index, business_day=biz_day)
assert datetime.datetime(2021, 7, 30, 0, 0) == tm_rates._get_term_struct_date(tenor='1y', index=date_index,
business_day=biz_day)
def test_cross_stored_direction_for_fx_vol(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_stored_direction_for_fx_vol(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_usd_based_cross_for_fx_forecast(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_usd_based_cross(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_used_based_cross(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_to_usd_based_cross(Cross('FUN', 'EURUSD'))
replace.restore()
def test_cross_stored_direction(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_stored_direction_for_fx_vol(Cross('FUN', 'EURUSD'))
replace.restore()
def test_get_tdapi_rates_assets(mocker):
mock_asset_1 = GsAsset(asset_class='Rate', id='MAW25BGQJH9P6DPT', type_='Swap', name='Test_asset')
mock_asset_2 = GsAsset(asset_class='Rate', id='MAA9MVX15AJNQCVG', type_='Swap', name='Test_asset')
mock_asset_3 = GsAsset(asset_class='Rate', id='MANQHVYC30AZFT7R', type_='BasisSwap', name='Test_asset')
replace = Replacer()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1]
assert 'MAW25BGQJH9P6DPT' == tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict(asset_parameters_termination_date='10y', asset_parameters_effective_date='0b')
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = []
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict()
assert ['MAW25BGQJH9P6DPT', 'MAA9MVX15AJNQCVG'] == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
    # this test case checks matching the SOFR maturity to the LIBOR leg and flipping legs to get the right asset
kwargs = dict(type='BasisSwap', asset_parameters_termination_date='10y',
asset_parameters_payer_rate_option=BenchmarkType.LIBOR,
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=BenchmarkType.SOFR,
asset_parameters_receiver_designated_maturity='1y',
asset_parameters_clearing_house='lch', asset_parameters_effective_date='Spot',
asset_parameters_notional_currency='USD',
pricing_location='NYC')
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_3]
assert 'MANQHVYC30AZFT7R' == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
def test_get_swap_leg_defaults():
result_dict = dict(currency=CurrEnum.JPY, benchmark_type='JPY-LIBOR-BBA', floating_rate_tenor='6m',
pricing_location=PricingLocation.TKO)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.JPY)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.USD, benchmark_type='USD-LIBOR-BBA', floating_rate_tenor='3m',
pricing_location=PricingLocation.NYC)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.USD)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.EUR, benchmark_type='EUR-EURIBOR-TELERATE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.EUR)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.SEK, benchmark_type='SEK-STIBOR-SIDE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.SEK)
assert result_dict == defaults
def test_check_forward_tenor():
valid_tenors = [datetime.date(2020, 1, 1), '1y', 'imm2', 'frb2', '1m', '0b']
for tenor in valid_tenors:
assert tenor == tm_rates._check_forward_tenor(tenor)
invalid_tenors = ['5yr', 'imm5', 'frb0']
for tenor in invalid_tenors:
with pytest.raises(MqError):
tm_rates._check_forward_tenor(tenor)
def mock_commod(_cls, _q):
d = {
'price': [30, 30, 30, 30, 35.929686, 35.636039, 27.307498, 23.23177, 19.020833, 18.827291, 17.823749, 17.393958,
17.824999, 20.307603, 24.311249, 25.160103, 25.245728, 25.736873, 28.425206, 28.779789, 30.519996,
34.896348, 33.966973, 33.95489, 33.686348, 34.840307, 32.674163, 30.261665, 30, 30, 30]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-05-01', periods=31, freq='H', tz=timezone('UTC')))
df.dataset_ids = _test_datasets
return df
def mock_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
11.6311,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 9))
df.dataset_ids = _test_datasets
return df
def mock_fair_price(_cls, _q):
d = {
'fairPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_natgas_forward_price(_cls, _q):
d = {
'forwardPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_fair_price_swap(_cls, _q):
d = {'fairPrice': [2.880]}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)]))
df.dataset_ids = _test_datasets
return df
def mock_implied_volatility(_cls, _q):
d = {
'impliedVolatility': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_missing_bucket_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"J20",
"K20",
"M20",
]
}
return pd.DataFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 8))
def mock_fx_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=[pd.Timestamp('2019-01-04T12:00:00Z')])
d = {
'strikeReference': ['delta', 'spot', 'forward'],
'relativeStrike': [25, 100, 100],
'impliedVolatility': [5, 1, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-01-01', periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_fx_forecast(_cls, _q):
d = {
'fxForecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_delta(_cls, _q):
d = {
'relativeStrike': [25, -25, 0],
'impliedVolatility': [1, 5, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_empty(_cls, _q):
d = {
'strikeReference': [],
'relativeStrike': [],
'impliedVolatility': []
}
df = MarketDataResponseFrame(data=d, index=[])
df.dataset_ids = _test_datasets
return df
def mock_fx_switch(_cls, _q, _n):
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_empty)
replace.restore()
return Cross('MA1889', 'ABC/XYZ')
def mock_curr(_cls, _q):
d = {
'swapAnnuity': [1, 2, 3],
'swapRate': [1, 2, 3],
'basisSwapRate': [1, 2, 3],
'swaptionVol': [1, 2, 3],
'atmFwdRate': [1, 2, 3],
'midcurveVol': [1, 2, 3],
'capFloorVol': [1, 2, 3],
'spreadOptionVol': [1, 2, 3],
'inflationSwapRate': [1, 2, 3],
'midcurveAtmFwdRate': [1, 2, 3],
'capFloorAtmFwdRate': [1, 2, 3],
'spreadOptionAtmFwdRate': [1, 2, 3],
'strike': [0.25, 0.5, 0.75]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_cross(_cls, _q):
d = {
'basis': [1, 2, 3],
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq(_cls, _q):
d = {
'relativeStrike': [0.75, 0.25, 0.5],
'impliedVolatility': [5, 1, 2],
'impliedCorrelation': [5, 1, 2],
'realizedCorrelation': [3.14, 2.71828, 1.44],
'averageImpliedVolatility': [5, 1, 2],
'averageImpliedVariance': [5, 1, 2],
'averageRealizedVolatility': [5, 1, 2],
'impliedVolatilityByDeltaStrike': [5, 1, 2],
'fundamentalMetric': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
idx = [pd.Timestamp(datetime.datetime.now(pytz.UTC))]
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=idx)
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.datetime.now(pytz.UTC).date() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_err(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
raise MqValueError('error while getting last')
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_empty(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame()
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_norm(_cls, _q):
d = {
'relativeStrike': [-4.0, 4.0, 0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_spot(_cls, _q):
d = {
'relativeStrike': [0.75, 1.25, 1.0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_inc(_cls, _q):
d = {
'relativeStrike': [0.25, 0.75],
'impliedVolatility': [5, 1]
}
df = MarketDataResponseFrame(data=d, index=_index * 2)
df.dataset_ids = _test_datasets
return df
def mock_meeting_expectation():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2020, 1, 29)],
'endingDate': [dt.date(2020, 1, 29)],
'meetingNumber': [2],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2020, 1, 23)],
'value': [-0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_spot():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2019, 10, 30)],
'endingDate': [dt.date(2019, 12, 18)],
'meetingNumber': [0],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2019, 10, 24)],
'value': [-0.004522570525]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_absolute():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2', 'MARFAGXDQRWM07Y2'],
'location': ['NYC', 'NYC'],
'rateType': ['Meeting Forward', 'Meeting Forward'],
'startingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'endingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'meetingNumber': [0, 2],
'valuationDate': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 10, 24), datetime.date(2020, 1, 23)],
'value': [-0.004522570525, -0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_ois_spot():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Spot'],
'startingDate': [datetime.date(2019, 12, 6)],
'endingDate': [datetime.date(2019, 12, 7)],
'meetingNumber': [-1],
'valuationDate': [datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 12, 6)],
'value': [-0.00455]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_esg(_cls, _q):
d = {
"esNumericScore": [2, 4, 6],
"esNumericPercentile": [81.2, 75.4, 65.7],
"esPolicyScore": [2, 4, 6],
"esPolicyPercentile": [81.2, 75.4, 65.7],
"esScore": [2, 4, 6],
"esPercentile": [81.2, 75.4, 65.7],
"esProductImpactScore": [2, 4, 6],
"esProductImpactPercentile": [81.2, 75.4, 65.7],
"gScore": [2, 4, 6],
"gPercentile": [81.2, 75.4, 65.7],
"esMomentumScore": [2, 4, 6],
"esMomentumPercentile": [81.2, 75.4, 65.7],
"gRegionalScore": [2, 4, 6],
"gRegionalPercentile": [81.2, 75.4, 65.7],
"controversyScore": [2, 4, 6],
"controversyPercentile": [81.2, 75.4, 65.7],
"esDisclosurePercentage": [49.2, 55.7, 98.4]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_index_positions_data(
asset_id,
start_date,
end_date,
fields=None,
position_type=None
):
return [
{'underlyingAssetId': 'MA3',
'netWeight': 0.1,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA1',
'netWeight': 0.6,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA2',
'netWeight': 0.3,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
}
]
def mock_rating(_cls, _q):
d = {
'rating': ['Buy', 'Sell', 'Buy', 'Neutral'],
'convictionList': [1, 0, 0, 0]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_gsdeer_gsfeer(_cls, assetId, start_date):
d = {
'gsdeer': [1, 1.2, 1.1],
'gsfeer': [2, 1.8, 1.9],
'year': [2000, 2010, 2020],
'quarter': ['Q1', 'Q2', 'Q3']
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
return df
def mock_factor_profile(_cls, _q):
d = {
'growthScore': [0.238, 0.234, 0.234, 0.230],
'financialReturnsScore': [0.982, 0.982, 0.982, 0.982],
'multipleScore': [0.204, 0.192, 0.190, 0.190],
'integratedScore': [0.672, 0.676, 0.676, 0.674]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_commodity_forecast(_cls, _q):
d = {
'forecastPeriod': ['3m', '3m', '3m', '3m'],
'forecastType': ['spotReturn', 'spotReturn', 'spotReturn', 'spotReturn'],
'commodityForecast': [1700, 1400, 1500, 1600]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def test_skew():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_norm)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.NORMALIZED, 4)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_spot)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock.return_value = mock_empty_market_data_response()
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert actual.empty
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_inc)
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
replace.restore()
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', None, 25)
def test_skew_fx():
replace = Replacer()
cross = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = cross
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_delta)
mock = cross
actual = tm.skew(mock, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.DELTA, 25, real_time=True)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.SPOT, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.FORWARD, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', None, 25)
replace.restore()
def test_implied_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol)
idx = pd.date_range(end=datetime.datetime.now(pytz.UTC).date(), periods=4, freq='D')
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_NEUTRAL)
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL)
replace.restore()
def test_implied_vol_no_last():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
idx = pd.date_range(end=datetime.date.today() - datetime.timedelta(days=1), periods=3, freq='D')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_err)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_empty)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace.restore()
def test_implied_vol_fx():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
# for different delta strikes
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_vol)
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL, 25)
expected = pd.Series([5, 1, 2, 3], index=pd.date_range('2019-01-01', periods=4, freq='D'), name='impliedVolatility')
assert_series_equal(expected,
|
pd.Series(actual)
|
pandas.Series
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import pandas as pd
import pandas_should # noqa
class TestEqualAccessorMixin(object):
def test_equal_true(self):
s1 = pd.Series([1, 2, 3])
s2 = pd.Series([1, 2, 3])
assert s1.should.equal(s2)
def test_equal_false(self):
s1 =
|
pd.Series([1, 2, 3])
|
pandas.Series
|
import pandas as pd
c1 = pd.read_csv('machine/Calling/Sensors_1.csv')
c2 = pd.read_csv('machine/Calling/Sensors_2.csv')
c3 = pd.read_csv('machine/Calling/Sensors_3.csv')
c4 = pd.read_csv('machine/Calling/Sensors_4.csv')
c5 = pd.read_csv('machine/Calling/Sensors_5.csv')
c6 = pd.read_csv('machine/Calling/Sensors_6.csv')
c7 = pd.read_csv('machine/Calling/Sensors_7.csv')
c8 = pd.read_csv('machine/Calling/Sensors_8.csv')
c9 = pd.read_csv('machine/Calling/Sensors_9.csv')
c10 = pd.read_csv('machine/Calling/Sensors_10.csv')
calling = pd.concat([c1,c2,c3,c4,c5,c6,c7,c8,c9,c10], axis = 0)
t1 = pd.read_csv('machine/Texting/Sensors_1.csv')
t2 = pd.read_csv('machine/Texting/Sensors_2.csv')
t3 = pd.read_csv('machine/Texting/Sensors_3.csv')
t4 = pd.read_csv('machine/Texting/Sensors_4.csv')
t5 = pd.read_csv('machine/Texting/Sensors_5.csv')
t6 = pd.read_csv('machine/Texting/Sensors_6.csv')
t7 = pd.read_csv('machine/Texting/Sensors_7.csv')
t8 = pd.read_csv('machine/Texting/Sensors_8.csv')
t9 = pd.read_csv('machine/Texting/Sensors_9.csv')
t10 = pd.read_csv('machine/Texting/Sensors_10.csv')
texting = pd.concat([t1,t2,t3,t4,t5,t6,t7,t8,t9,t10], axis = 0)
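# Illustrative sketch (hypothetical helper, not in the original script): the repeated
# pd.read_csv calls above and below could be collapsed into a loop, assuming the same
# machine/<Activity>/Sensors_<n>.csv layout.
def load_activity(activity, n_files=10):
    # read Sensors_1.csv .. Sensors_<n_files>.csv for one activity and stack them row-wise
    frames = [pd.read_csv('machine/{}/Sensors_{}.csv'.format(activity, i))
              for i in range(1, n_files + 1)]
    return pd.concat(frames, axis=0)
# e.g. calling = load_activity('Calling'); swinging = load_activity('Swinging')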
s1 = pd.read_csv('machine/Swinging/Sensors_1.csv')
s2 = pd.read_csv('machine/Swinging/Sensors_2.csv')
s3 = pd.read_csv('machine/Swinging/Sensors_3.csv')
s4 = pd.read_csv('machine/Swinging/Sensors_4.csv')
s5 = pd.read_csv('machine/Swinging/Sensors_5.csv')
s6 = pd.read_csv('machine/Swinging/Sensors_6.csv')
s7 = pd.read_csv('machine/Swinging/Sensors_7.csv')
s8 = pd.read_csv('machine/Swinging/Sensors_8.csv')
s9 =
|
pd.read_csv('machine/Swinging/Sensors_9.csv')
|
pandas.read_csv
|
import streamlit as st
import pandas as pd
import numpy as np
import pickle
from sklearn.ensemble import RandomForestClassifier
st.write("""
# Penguin Prediction App
This app predicts the **Palmer Penguin** species!
Data obtained from the [palmerpenguins library](https://github.com/allisonhorst/palmerpenguins) in R by <NAME>.
""")
st.sidebar.header('User Input Features')
st.sidebar.markdown("""
[Example CSV input file](https://raw.githubusercontent.com/dataprofessor/data/master/penguins_example.csv)
""")
# Collects user input features into dataframe
uploaded_file = st.sidebar.file_uploader(
"Upload your input CSV file", type=["csv"])
if uploaded_file is not None:
input_df = pd.read_csv(uploaded_file)
else:
def user_input_features():
island = st.sidebar.selectbox(
'Island', ('Biscoe', 'Dream', 'Torgersen'))
sex = st.sidebar.selectbox('Sex', ('male', 'female'))
bill_length_mm = st.sidebar.slider(
'Bill length (mm)', 32.1, 59.6, 43.9)
bill_depth_mm = st.sidebar.slider('Bill depth (mm)', 13.1, 21.5, 17.2)
flipper_length_mm = st.sidebar.slider(
'Flipper length (mm)', 172.0, 231.0, 201.0)
body_mass_g = st.sidebar.slider(
'Body mass (g)', 2700.0, 6300.0, 4207.0)
data = {'island': island,
'bill_length_mm': bill_length_mm,
'bill_depth_mm': bill_depth_mm,
'flipper_length_mm': flipper_length_mm,
'body_mass_g': body_mass_g,
'sex': sex}
features = pd.DataFrame(data, index=[0])
return features
input_df = user_input_features()
# Combines user input features with entire penguins dataset
# This will be useful for the encoding phase
penguins_raw = pd.read_csv('penguins_cleaned.csv')
penguins = penguins_raw.drop(columns=['species'])
df = pd.concat([input_df, penguins], axis=0)
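# Why the concat matters (illustrative note): pd.get_dummies only creates columns for
# the categories it actually sees, so encoding the single-row user input on its own
# would drop the other islands/sexes and break the column layout the model expects.
# e.g. pd.get_dummies(pd.DataFrame({'island': ['Biscoe']})) has only 'island_Biscoe',
# while including all three islands yields island_Biscoe/island_Dream/island_Torgersen.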
# Encoding of ordinal features
# https://www.kaggle.com/pratik1120/penguin-dataset-eda-classification-and-clustering
encode = ['sex', 'island']
for col in encode:
dummy =
|
pd.get_dummies(df[col], prefix=col)
|
pandas.get_dummies
|
"""
Input: HSK data files.
Output: HSK infos merged file.
"""
import argparse
import os
import pandas
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
MERGE_FILEPATH = os.path.join(
SCRIPT_PATH,
'revised/merged_hanzi_data.csv',
)
HEADERS = [
'simplified', 'traditional', 'pinyin', 'translation',
'hsk2.0', 'hsk3.0',
]
HSK3_0_FILENAME = 'hsk_words_list.csv'
def merge_to_csv(csv_cedict_file, csv_cfdict_file, chinese_charfreq_file,
hsk2_0_wordlist_file, hsk3_0_wordlist_file,
radical_meaning):
cedict_df = pandas.read_csv(csv_cedict_file, sep='\t')
cfdict_df = pandas.read_csv(csv_cfdict_file, sep='\t')
radical_meaning = pandas.read_csv(radical_meaning, sep=',')
charfreq_df =
|
pandas.read_csv(chinese_charfreq_file, sep=',')
|
pandas.read_csv
|
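# A hedged sketch of a likely next step: joining the loaded frames on a shared
# key before writing MERGE_FILEPATH. The key 'simplified' comes from HEADERS
# above; the join itself is illustrative, not the project's actual logic.
import pandas

def merge_on_key(left, right, key='simplified'):
    """Left-join two hanzi data frames on a shared key column."""
    return pandas.merge(left, right, on=key, how='left')

# e.g. merged = merge_on_key(cedict_df, charfreq_df)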
import pathlib
import re
import pandas as pd
"""
Ok, so it seems there are actually three different file types:
msds
msms 2019-04 to 2019-08
msms < 2019-04
For now, just focus on the last one, it's got the most data.
"""
data_path = pathlib.Path(__file__).parents[1] / 'data'
def pull_csvs(glob_mask: str = '*.csv'):
"""
This convenience function loads the maternity csvs. I've made it add the file name as some of the files have
columns that are all over the place. It combines the csvs into a dataframe end on end.
:param glob_mask: str
this should be a glob expression to pull the required csvs
:return concatenated_dataframe : pd.Dataframe
"""
file_paths = data_path.glob(glob_mask)
df_list = []
for path in file_paths:
df =
|
pd.read_csv(path)
|
pandas.read_csv
|
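# A minimal sketch of how pull_csvs could finish, following its docstring:
# read each csv, tag rows with their source file name (the column name
# 'source_file' is an assumption), and concatenate the frames end on end.
import pathlib
import pandas as pd

def pull_csvs_sketch(folder: pathlib.Path, glob_mask: str = '*.csv') -> pd.DataFrame:
    frames = []
    for path in folder.glob(glob_mask):
        df = pd.read_csv(path)
        df['source_file'] = path.name   # assumed column name, per the docstring's intent
        frames.append(df)
    return pd.concat(frames, axis=0, ignore_index=True)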
#%%
import numpy as np
import pandas as pd
import diaux.io
import tqdm
from io import StringIO
import glob
# Load the list of turnover files
growth_samples = glob.glob('../../../data/2021-02-15_NCM3722_glucose_acetate_diauxic_shift/raw/*HPLC*diauxic*.txt')
cal_samples = glob.glob('../../../data/2021-02-15_NCM3722_glucose_acetate_diauxic_shift/raw/*HPLC*calibration*.txt')
# Instantiate storage lists for the peak tables and chromatogram
outputs = [[], []]
# Parse identifying information about the sample
for s in tqdm.tqdm(growth_samples, desc='Processing diauxic shift measurements'):
fname = s.split('/')[-1]
date, _, strain, _, _, _, sample_id = fname.split('_')
    sample_id = int(sample_id.split('.')[0])
if strain == 'NCM':
strain = 'NCM3722'
# Isolate the peak table and chromatogram
out = diaux.io.parse_HPLC_output(s)
# Add identifying information to each
for i, df in enumerate(out):
df['strain'] = strain
df['date'] = date
df['sample_id'] = sample_id
outputs[i].append(df)
# Concatenate the dataframes
peaks = pd.concat(outputs[0])
chroms = pd.concat(outputs[1])
# Save the files to disk
peaks.to_csv('../../../data/2021-02-15_NCM3722_glucose_acetate_diauxic_shift/processed/2021-02-15_diauxic_shift_peaks.csv', index=False)
chroms.to_csv('../../../data/2021-02-15_NCM3722_glucose_acetate_diauxic_shift/processed/2021-02-15_diauxic_shift_chromatograms.csv', index=False)
#%%
# Generate the files for the calibration
# Instantiate storage lists for the peak tables and chromatogram
outputs = [[], []]
# Parse identifying information about the sample
for s in tqdm.tqdm(cal_samples, desc='Processing calibration measurements'):
fname = s.split('/')[-1]
date, _, _, carbon, conc = fname.split('_')
conc = conc.split('mM')[0]
# Isolate the peak table and chromatogram
out = diaux.io.parse_HPLC_output(s)
# Add identifying information to each
for i, df in enumerate(out):
df['date'] = date
df['carbon_source'] = carbon
df['concentration_mM'] = conc
outputs[i].append(df)
# Concatenate the dataframes
peaks =
|
pd.concat(outputs[0])
|
pandas.concat
|
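# By analogy with the diauxic-shift block above, the calibration peak tables and
# chromatograms would be concatenated and written out the same way; the
# commented output paths are assumptions patterned on the earlier ones.
cal_peaks = pd.concat(outputs[0])
cal_chroms = pd.concat(outputs[1])
# cal_peaks.to_csv('.../processed/2021-02-15_calibration_peaks.csv', index=False)
# cal_chroms.to_csv('.../processed/2021-02-15_calibration_chromatograms.csv', index=False)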
# Author : <NAME>
# Date : 01/03/2018
# Version : 1.2
import pandas as pd
import sys
import os
import sklearn.metrics
import pprint as pprint
import annotate
import debug
AMBIGUOUS_OK = True
SKIPPED_OK = False
UNKNOWN_OK = False
def print_divergent(div):
print("\n\n[ANNOTATION DIVERGENCE DETAILS]")
pd.set_option('display.max_colwidth', int(div.convID.map(len).max())+1)
pd.set_option('display.width', 1000)
|
pd.set_option('display.max_rows',500)
|
pandas.set_option
|
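# An aside: pd.set_option changes display options globally. If the wider columns
# are only needed while printing the divergence table, pandas.option_context
# scopes them to a block. This is an illustrative alternative, not the author's code.
import pandas as pd

def print_divergent_scoped(div):
    width = int(div.convID.map(len).max()) + 1
    with pd.option_context('display.max_colwidth', width,
                           'display.width', 1000,
                           'display.max_rows', 500):
        print(div)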
import pytest
from pandas import Interval, DataFrame
from pandas.testing import assert_frame_equal
from datar.base.funs import *
from datar.base import table, pi, paste0
from datar.stats import rnorm
from .conftest import assert_iterable_equal
def test_cut():
z = rnorm(10000)
tab = table(cut(z, breaks=range(-6, 7)))
assert tab.shape == (1, 12)
assert tab.columns.tolist() == [
Interval(-6, -5, closed='right'),
Interval(-5, -4, closed='right'),
Interval(-4, -3, closed='right'),
Interval(-3, -2, closed='right'),
Interval(-2, -1, closed='right'),
Interval(-1, 0, closed='right'),
Interval(0, 1, closed='right'),
Interval(1, 2, closed='right'),
Interval(2, 3, closed='right'),
Interval(3, 4, closed='right'),
Interval(4, 5, closed='right'),
Interval(5, 6, closed='right'),
]
assert sum(tab.values.flatten()) == 10000
z = cut([1] * 5, 4)
assert_iterable_equal(z.to_numpy(), [Interval(0.9995, 1.0, closed='right')] * 5)
assert_iterable_equal(z.categories.to_list(), [
Interval(0.999, 0.9995, closed='right'),
|
Interval(0.9995, 1.0, closed='right')
|
pandas.Interval
|
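# An aside: the test above exercises datar's cut(), which mirrors R's cut on top
# of pandas. A pandas-only illustration of the same binning (invented data):
import numpy as np
import pandas as pd

z = np.random.default_rng(0).normal(size=1000)
binned = pd.cut(z, bins=range(-6, 7))          # categories are (i, i+1] Interval objects
print(binned.value_counts().sort_index())      # counts per one-unit bin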
# Hide deprecation warnings
import warnings
warnings.filterwarnings('ignore')
# Common imports
import numpy as np
import pandas as pd
import seaborn as sns
import squarify
import missingno as msno
from statsmodels.graphics.mosaicplot import mosaic
# To plot pretty figures
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# To format floats
from IPython.display import display
|
pd.set_option('display.float_format', lambda x: '%.5f' % x)
|
pandas.set_option
|
from datetime import datetime, timedelta
import operator
from typing import Any, Sequence, Type, Union, cast
import warnings
import numpy as np
from pandas._libs import NaT, NaTType, Timestamp, algos, iNaT, lib
from pandas._libs.tslibs.c_timestamp import integer_op_not_supported
from pandas._libs.tslibs.period import DIFFERENT_FREQ, IncompatibleFrequency, Period
from pandas._libs.tslibs.timedeltas import Timedelta, delta_to_nanoseconds
from pandas._libs.tslibs.timestamps import RoundTo, round_nsint64
from pandas._typing import DatetimeLikeScalar
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError, NullFrequencyError, PerformanceWarning
from pandas.util._decorators import Appender, Substitution
from pandas.util._validators import validate_fillna_kwargs
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_float_dtype,
is_integer_dtype,
is_list_like,
is_object_dtype,
is_period_dtype,
is_string_dtype,
is_timedelta64_dtype,
is_unsigned_integer_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.inference import is_array_like
from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna
from pandas.core import missing, nanops, ops
from pandas.core.algorithms import checked_add_with_arr, take, unique1d, value_counts
from pandas.core.arrays.base import ExtensionArray, ExtensionOpsMixin
import pandas.core.common as com
from pandas.core.indexers import check_bool_array_indexer
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.ops.invalid import invalid_comparison, make_invalid_op
from pandas.tseries import frequencies
from pandas.tseries.offsets import DateOffset, Tick
def _datetimelike_array_cmp(cls, op):
"""
Wrap comparison operations to convert Timestamp/Timedelta/Period-like to
boxed scalars/arrays.
"""
opname = f"__{op.__name__}__"
nat_result = opname == "__ne__"
@unpack_zerodim_and_defer(opname)
def wrapper(self, other):
if isinstance(other, str):
try:
# GH#18435 strings get a pass from tzawareness compat
other = self._scalar_from_string(other)
except ValueError:
# failed to parse as Timestamp/Timedelta/Period
return invalid_comparison(self, other, op)
if isinstance(other, self._recognized_scalars) or other is NaT:
other = self._scalar_type(other)
self._check_compatible_with(other)
other_i8 = self._unbox_scalar(other)
result = op(self.view("i8"), other_i8)
if isna(other):
result.fill(nat_result)
elif not is_list_like(other):
return invalid_comparison(self, other, op)
elif len(other) != len(self):
raise ValueError("Lengths must match")
else:
if isinstance(other, list):
# TODO: could use pd.Index to do inference?
other = np.array(other)
if not isinstance(other, (np.ndarray, type(self))):
return invalid_comparison(self, other, op)
if is_object_dtype(other):
# We have to use comp_method_OBJECT_ARRAY instead of numpy
# comparison otherwise it would fail to raise when
# comparing tz-aware and tz-naive
with np.errstate(all="ignore"):
result = ops.comp_method_OBJECT_ARRAY(
op, self.astype(object), other
)
o_mask = isna(other)
elif not type(self)._is_recognized_dtype(other.dtype):
return invalid_comparison(self, other, op)
else:
# For PeriodDType this casting is unnecessary
other = type(self)._from_sequence(other)
self._check_compatible_with(other)
result = op(self.view("i8"), other.view("i8"))
o_mask = other._isnan
if o_mask.any():
result[o_mask] = nat_result
if self._hasnans:
result[self._isnan] = nat_result
return result
return set_function_name(wrapper, opname, cls)
class AttributesMixin:
_data: np.ndarray
@classmethod
def _simple_new(cls, values, **kwargs):
raise AbstractMethodError(cls)
@property
def _scalar_type(self) -> Type[DatetimeLikeScalar]:
"""The scalar associated with this datelike
* PeriodArray : Period
* DatetimeArray : Timestamp
* TimedeltaArray : Timedelta
"""
raise AbstractMethodError(self)
def _scalar_from_string(
self, value: str
) -> Union[Period, Timestamp, Timedelta, NaTType]:
"""
Construct a scalar type from a string.
Parameters
----------
value : str
Returns
-------
Period, Timestamp, or Timedelta, or NaT
Whatever the type of ``self._scalar_type`` is.
Notes
-----
This should call ``self._check_compatible_with`` before
unboxing the result.
"""
raise AbstractMethodError(self)
def _unbox_scalar(self, value: Union[Period, Timestamp, Timedelta, NaTType]) -> int:
"""
Unbox the integer value of a scalar `value`.
Parameters
----------
value : Union[Period, Timestamp, Timedelta]
Returns
-------
int
Examples
--------
>>> self._unbox_scalar(Timedelta('10s')) # DOCTEST: +SKIP
10000000000
"""
raise AbstractMethodError(self)
def _check_compatible_with(
self, other: Union[Period, Timestamp, Timedelta, NaTType], setitem: bool = False
) -> None:
"""
Verify that `self` and `other` are compatible.
* DatetimeArray verifies that the timezones (if any) match
* PeriodArray verifies that the freq matches
* Timedelta has no verification
In each case, NaT is considered compatible.
Parameters
----------
other
setitem : bool, default False
            For __setitem__ we may have stricter compatibility restrictions than
            for comparisons.
Raises
------
Exception
"""
raise AbstractMethodError(self)
class DatelikeOps:
"""
Common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex.
"""
@Substitution(
URL="https://docs.python.org/3/library/datetime.html"
"#strftime-and-strptime-behavior"
)
def strftime(self, date_format):
"""
Convert to Index using specified date_format.
Return an Index of formatted strings specified by date_format, which
supports the same string format as the python standard library. Details
of the string format can be found in `python string format
doc <%(URL)s>`__.
Parameters
----------
date_format : str
Date format string (e.g. "%%Y-%%m-%%d").
Returns
-------
ndarray
NumPy ndarray of formatted strings.
See Also
--------
to_datetime : Convert the given argument to datetime.
DatetimeIndex.normalize : Return DatetimeIndex with times to midnight.
DatetimeIndex.round : Round the DatetimeIndex to the specified freq.
DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq.
Examples
--------
>>> rng = pd.date_range(pd.Timestamp("2018-03-10 09:00"),
... periods=3, freq='s')
>>> rng.strftime('%%B %%d, %%Y, %%r')
Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM',
'March 10, 2018, 09:00:02 AM'],
dtype='object')
"""
result = self._format_native_types(date_format=date_format, na_rep=np.nan)
return result.astype(object)
class TimelikeOps:
"""
Common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex.
"""
_round_doc = """
Perform {op} operation on the data to the specified `freq`.
Parameters
----------
freq : str or Offset
The frequency level to {op} the index to. Must be a fixed
frequency like 'S' (second) not 'ME' (month end). See
:ref:`frequency aliases <timeseries.offset_aliases>` for
a list of possible `freq` values.
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
Only relevant for DatetimeIndex:
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False designates
a non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times.
.. versionadded:: 0.24.0
nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, \
default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise an NonExistentTimeError if there are
nonexistent times.
.. versionadded:: 0.24.0
Returns
-------
DatetimeIndex, TimedeltaIndex, or Series
Index of the same type for a DatetimeIndex or TimedeltaIndex,
or a Series with the same index for a Series.
Raises
------
ValueError if the `freq` cannot be converted.
Examples
--------
**DatetimeIndex**
>>> rng = pd.date_range('1/1/2018 11:59:00', periods=3, freq='min')
>>> rng
DatetimeIndex(['2018-01-01 11:59:00', '2018-01-01 12:00:00',
'2018-01-01 12:01:00'],
dtype='datetime64[ns]', freq='T')
"""
_round_example = """>>> rng.round('H')
DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
'2018-01-01 12:00:00'],
dtype='datetime64[ns]', freq=None)
**Series**
>>> pd.Series(rng).dt.round("H")
0 2018-01-01 12:00:00
1 2018-01-01 12:00:00
2 2018-01-01 12:00:00
dtype: datetime64[ns]
"""
_floor_example = """>>> rng.floor('H')
DatetimeIndex(['2018-01-01 11:00:00', '2018-01-01 12:00:00',
'2018-01-01 12:00:00'],
dtype='datetime64[ns]', freq=None)
**Series**
>>> pd.Series(rng).dt.floor("H")
0 2018-01-01 11:00:00
1 2018-01-01 12:00:00
2 2018-01-01 12:00:00
dtype: datetime64[ns]
"""
_ceil_example = """>>> rng.ceil('H')
DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
'2018-01-01 13:00:00'],
dtype='datetime64[ns]', freq=None)
**Series**
>>> pd.Series(rng).dt.ceil("H")
0 2018-01-01 12:00:00
1 2018-01-01 12:00:00
2 2018-01-01 13:00:00
dtype: datetime64[ns]
"""
def _round(self, freq, mode, ambiguous, nonexistent):
# round the local times
if is_datetime64tz_dtype(self):
# operate on naive timestamps, then convert back to aware
naive = self.tz_localize(None)
result = naive._round(freq, mode, ambiguous, nonexistent)
aware = result.tz_localize(
self.tz, ambiguous=ambiguous, nonexistent=nonexistent
)
return aware
values = self.view("i8")
result = round_nsint64(values, mode, freq)
result = self._maybe_mask_results(result, fill_value=NaT)
return self._simple_new(result, dtype=self.dtype)
@Appender((_round_doc + _round_example).format(op="round"))
def round(self, freq, ambiguous="raise", nonexistent="raise"):
return self._round(freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent)
@Appender((_round_doc + _floor_example).format(op="floor"))
def floor(self, freq, ambiguous="raise", nonexistent="raise"):
return self._round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent)
@Appender((_round_doc + _ceil_example).format(op="ceil"))
def ceil(self, freq, ambiguous="raise", nonexistent="raise"):
return self._round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent)
class DatetimeLikeArrayMixin(ExtensionOpsMixin, AttributesMixin, ExtensionArray):
"""
Shared Base/Mixin class for DatetimeArray, TimedeltaArray, PeriodArray
Assumes that __new__/__init__ defines:
_data
_freq
and that the inheriting class has methods:
_generate_range
"""
@property
def ndim(self) -> int:
return self._data.ndim
@property
def shape(self):
return self._data.shape
def reshape(self, *args, **kwargs):
# Note: we drop any freq
data = self._data.reshape(*args, **kwargs)
return type(self)(data, dtype=self.dtype)
def ravel(self, *args, **kwargs):
# Note: we drop any freq
data = self._data.ravel(*args, **kwargs)
return type(self)(data, dtype=self.dtype)
@property
def _box_func(self):
"""
box function to get object from internal representation
"""
raise AbstractMethodError(self)
def _box_values(self, values):
"""
apply box func to passed values
"""
return lib.map_infer(values, self._box_func)
def __iter__(self):
return (self._box_func(v) for v in self.asi8)
@property
def asi8(self) -> np.ndarray:
"""
Integer representation of the values.
Returns
-------
ndarray
An ndarray with int64 dtype.
"""
# do not cache or you'll create a memory leak
return self._data.view("i8")
@property
def _ndarray_values(self):
return self._data
# ----------------------------------------------------------------
# Rendering Methods
def _format_native_types(self, na_rep="NaT", date_format=None):
"""
Helper method for astype when converting to strings.
Returns
-------
ndarray[str]
"""
raise AbstractMethodError(self)
def _formatter(self, boxed=False):
# TODO: Remove Datetime & DatetimeTZ formatters.
return "'{}'".format
# ----------------------------------------------------------------
# Array-Like / EA-Interface Methods
@property
def nbytes(self):
return self._data.nbytes
def __array__(self, dtype=None) -> np.ndarray:
# used for Timedelta/DatetimeArray, overwritten by PeriodArray
if is_object_dtype(dtype):
return np.array(list(self), dtype=object)
return self._data
@property
def size(self) -> int:
"""The number of elements in this array."""
return np.prod(self.shape)
def __len__(self) -> int:
return len(self._data)
def __getitem__(self, key):
"""
This getitem defers to the underlying array, which by-definition can
only handle list-likes, slices, and integer scalars
"""
is_int = lib.is_integer(key)
if lib.is_scalar(key) and not is_int:
raise IndexError(
"only integers, slices (`:`), ellipsis (`...`), "
"numpy.newaxis (`None`) and integer or boolean "
"arrays are valid indices"
)
getitem = self._data.__getitem__
if is_int:
val = getitem(key)
if lib.is_scalar(val):
# i.e. self.ndim == 1
return self._box_func(val)
return type(self)(val, dtype=self.dtype)
if com.is_bool_indexer(key):
key = check_bool_array_indexer(self, key)
if key.all():
key = slice(0, None, None)
else:
key = lib.maybe_booleans_to_slice(key.view(np.uint8))
is_period = is_period_dtype(self)
if is_period:
freq = self.freq
else:
freq = None
if isinstance(key, slice):
if self.freq is not None and key.step is not None:
freq = key.step * self.freq
else:
freq = self.freq
elif key is Ellipsis:
# GH#21282 indexing with Ellipsis is similar to a full slice,
# should preserve `freq` attribute
freq = self.freq
result = getitem(key)
if result.ndim > 1:
# To support MPL which performs slicing with 2 dim
# even though it only has 1 dim by definition
if is_period:
return self._simple_new(result, dtype=self.dtype, freq=freq)
return result
return self._simple_new(result, dtype=self.dtype, freq=freq)
def __setitem__(
self,
key: Union[int, Sequence[int], Sequence[bool], slice],
value: Union[NaTType, Any, Sequence[Any]],
) -> None:
# I'm fudging the types a bit here. "Any" above really depends
# on type(self). For PeriodArray, it's Period (or stuff coercible
# to a period in from_sequence). For DatetimeArray, it's Timestamp...
# I don't know if mypy can do that, possibly with Generics.
# https://mypy.readthedocs.io/en/latest/generics.html
if lib.is_scalar(value) and not isna(value):
value = com.maybe_box_datetimelike(value)
if is_list_like(value):
is_slice = isinstance(key, slice)
if lib.is_scalar(key):
raise ValueError("setting an array element with a sequence.")
if not is_slice:
key = cast(Sequence, key)
if len(key) != len(value) and not com.is_bool_indexer(key):
msg = (
f"shape mismatch: value array of length '{len(key)}' "
"does not match indexing result of length "
f"'{len(value)}'."
)
raise ValueError(msg)
elif not len(key):
return
value = type(self)._from_sequence(value, dtype=self.dtype)
self._check_compatible_with(value, setitem=True)
value = value.asi8
elif isinstance(value, self._scalar_type):
self._check_compatible_with(value, setitem=True)
value = self._unbox_scalar(value)
elif is_valid_nat_for_dtype(value, self.dtype):
value = iNaT
else:
msg = (
f"'value' should be a '{self._scalar_type.__name__}', 'NaT', "
f"or array of those. Got '{type(value).__name__}' instead."
)
raise TypeError(msg)
self._data[key] = value
self._maybe_clear_freq()
def _maybe_clear_freq(self):
# inplace operations like __setitem__ may invalidate the freq of
# DatetimeArray and TimedeltaArray
pass
def astype(self, dtype, copy=True):
# Some notes on cases we don't have to handle here in the base class:
# 1. PeriodArray.astype handles period -> period
# 2. DatetimeArray.astype handles conversion between tz.
# 3. DatetimeArray.astype handles datetime -> period
from pandas import Categorical
dtype = pandas_dtype(dtype)
if is_object_dtype(dtype):
return self._box_values(self.asi8)
elif is_string_dtype(dtype) and not is_categorical_dtype(dtype):
return self._format_native_types()
elif is_integer_dtype(dtype):
# we deliberately ignore int32 vs. int64 here.
# See https://github.com/pandas-dev/pandas/issues/24381 for more.
values = self.asi8
if is_unsigned_integer_dtype(dtype):
# Again, we ignore int32 vs. int64
values = values.view("uint64")
if copy:
values = values.copy()
return values
elif (
is_datetime_or_timedelta_dtype(dtype)
and not is_dtype_equal(self.dtype, dtype)
) or is_float_dtype(dtype):
# disallow conversion between datetime/timedelta,
# and conversions for any datetimelike to float
msg = f"Cannot cast {type(self).__name__} to dtype {dtype}"
raise TypeError(msg)
elif is_categorical_dtype(dtype):
return Categorical(self, dtype=dtype)
else:
return np.asarray(self, dtype=dtype)
def view(self, dtype=None):
if dtype is None or dtype is self.dtype:
return type(self)(self._data, dtype=self.dtype)
return self._data.view(dtype=dtype)
# ------------------------------------------------------------------
# ExtensionArray Interface
def unique(self):
result = unique1d(self.asi8)
return type(self)(result, dtype=self.dtype)
def _validate_fill_value(self, fill_value):
"""
If a fill_value is passed to `take` convert it to an i8 representation,
raising ValueError if this is not possible.
Parameters
----------
fill_value : object
Returns
-------
fill_value : np.int64
Raises
------
ValueError
"""
if isna(fill_value):
fill_value = iNaT
elif isinstance(fill_value, self._recognized_scalars):
self._check_compatible_with(fill_value)
fill_value = self._scalar_type(fill_value)
fill_value = self._unbox_scalar(fill_value)
else:
raise ValueError(
f"'fill_value' should be a {self._scalar_type}. Got '{fill_value}'."
)
return fill_value
def take(self, indices, allow_fill=False, fill_value=None):
if allow_fill:
fill_value = self._validate_fill_value(fill_value)
new_values = take(
self.asi8, indices, allow_fill=allow_fill, fill_value=fill_value
)
return type(self)(new_values, dtype=self.dtype)
@classmethod
def _concat_same_type(cls, to_concat):
dtypes = {x.dtype for x in to_concat}
assert len(dtypes) == 1
dtype = list(dtypes)[0]
values = np.concatenate([x.asi8 for x in to_concat])
return cls(values, dtype=dtype)
def copy(self):
values = self.asi8.copy()
return type(self)._simple_new(values, dtype=self.dtype, freq=self.freq)
def _values_for_factorize(self):
return self.asi8, iNaT
@classmethod
def _from_factorized(cls, values, original):
return cls(values, dtype=original.dtype)
def _values_for_argsort(self):
return self._data
# ------------------------------------------------------------------
# Additional array methods
# These are not part of the EA API, but we implement them because
# pandas assumes they're there.
def searchsorted(self, value, side="left", sorter=None):
"""
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted array `self` such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `self` would be preserved.
Parameters
----------
value : array_like
Values to insert into `self`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort `self` into ascending
order. They are typically the result of ``np.argsort``.
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `value`.
"""
if isinstance(value, str):
value = self._scalar_from_string(value)
if not (isinstance(value, (self._scalar_type, type(self))) or isna(value)):
raise ValueError(f"Unexpected type for 'value': {type(value)}")
self._check_compatible_with(value)
if isinstance(value, type(self)):
value = value.asi8
else:
value = self._unbox_scalar(value)
return self.asi8.searchsorted(value, side=side, sorter=sorter)
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of an array.
See Also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
values = self._data.repeat(repeats)
return type(self)(values.view("i8"), dtype=self.dtype)
def value_counts(self, dropna=False):
"""
Return a Series containing counts of unique values.
Parameters
----------
        dropna : bool, default False
Don't include counts of NaT values.
Returns
-------
Series
"""
from pandas import Series, Index
if dropna:
values = self[~self.isna()]._data
else:
values = self._data
cls = type(self)
result = value_counts(values, sort=False, dropna=dropna)
index = Index(
cls(result.index.view("i8"), dtype=self.dtype), name=result.index.name
)
return Series(result.values, index=index, name=result.name)
def map(self, mapper):
# TODO(GH-23179): Add ExtensionArray.map
# Need to figure out if we want ExtensionArray.map first.
# If so, then we can refactor IndexOpsMixin._map_values to
# a standalone function and call from here..
# Else, just rewrite _map_infer_values to do the right thing.
from pandas import Index
return Index(self).map(mapper).array
# ------------------------------------------------------------------
# Null Handling
def isna(self):
return self._isnan
@property # NB: override with cache_readonly in immutable subclasses
def _isnan(self):
"""
return if each value is nan
"""
return self.asi8 == iNaT
@property # NB: override with cache_readonly in immutable subclasses
def _hasnans(self):
"""
return if I have any nans; enables various perf speedups
"""
return bool(self._isnan.any())
def _maybe_mask_results(self, result, fill_value=iNaT, convert=None):
"""
Parameters
----------
result : a ndarray
fill_value : object, default iNaT
convert : str, dtype or None
Returns
-------
        result : ndarray with values replaced by the fill_value
        mask the result if needed, convert to the provided dtype if it is not
        None
This is an internal routine.
"""
if self._hasnans:
if convert:
result = result.astype(convert)
if fill_value is None:
fill_value = np.nan
result[self._isnan] = fill_value
return result
def fillna(self, value=None, method=None, limit=None):
# TODO(GH-20300): remove this
# Just overriding to ensure that we avoid an astype(object).
# Either 20300 or a `_values_for_fillna` would avoid this duplication.
if isinstance(value, ABCSeries):
value = value.array
value, method = validate_fillna_kwargs(value, method)
mask = self.isna()
if is_array_like(value):
if len(value) != len(self):
raise ValueError(
f"Length of 'value' does not match. Got ({len(value)}) "
f" expected {len(self)}"
)
value = value[mask]
if mask.any():
if method is not None:
if method == "pad":
func = missing.pad_1d
else:
func = missing.backfill_1d
values = self._data
if not is_period_dtype(self):
# For PeriodArray self._data is i8, which gets copied
# by `func`. Otherwise we need to make a copy manually
# to avoid modifying `self` in-place.
values = values.copy()
new_values = func(values, limit=limit, mask=mask)
if is_datetime64tz_dtype(self):
# we need to pass int64 values to the constructor to avoid
# re-localizing incorrectly
new_values = new_values.view("i8")
new_values = type(self)(new_values, dtype=self.dtype)
else:
# fill with value
new_values = self.copy()
new_values[mask] = value
else:
new_values = self.copy()
return new_values
# ------------------------------------------------------------------
# Frequency Properties/Methods
@property
def freq(self):
"""
Return the frequency object if it is set, otherwise None.
"""
return self._freq
@freq.setter
def freq(self, value):
if value is not None:
value = frequencies.to_offset(value)
self._validate_frequency(self, value)
self._freq = value
@property
def freqstr(self):
"""
        Return the frequency object as a string if it is set, otherwise None.
"""
if self.freq is None:
return None
return self.freq.freqstr
@property # NB: override with cache_readonly in immutable subclasses
def inferred_freq(self):
"""
        Tries to return a string representing a frequency guess,
generated by infer_freq. Returns None if it can't autodetect the
frequency.
"""
if self.ndim != 1:
return None
try:
return frequencies.infer_freq(self)
except ValueError:
return None
@property # NB: override with cache_readonly in immutable subclasses
def _resolution(self):
return frequencies.Resolution.get_reso_from_freq(self.freqstr)
@property # NB: override with cache_readonly in immutable subclasses
def resolution(self):
"""
Returns day, hour, minute, second, millisecond or microsecond
"""
return frequencies.Resolution.get_str(self._resolution)
@classmethod
def _validate_frequency(cls, index, freq, **kwargs):
"""
Validate that a frequency is compatible with the values of a given
Datetime Array/Index or Timedelta Array/Index
Parameters
----------
index : DatetimeIndex or TimedeltaIndex
The index on which to determine if the given frequency is valid
freq : DateOffset
The frequency to validate
"""
if is_period_dtype(cls):
# Frequency validation is not meaningful for Period Array/Index
return None
inferred = index.inferred_freq
if index.size == 0 or inferred == freq.freqstr:
return None
try:
on_freq = cls._generate_range(
start=index[0], end=None, periods=len(index), freq=freq, **kwargs
)
if not np.array_equal(index.asi8, on_freq.asi8):
raise ValueError
except ValueError as e:
if "non-fixed" in str(e):
# non-fixed frequencies are not meaningful for timedelta64;
# we retain that error message
raise e
# GH#11587 the main way this is reached is if the `np.array_equal`
# check above is False. This can also be reached if index[0]
# is `NaT`, in which case the call to `cls._generate_range` will
# raise a ValueError, which we re-raise with a more targeted
# message.
raise ValueError(
f"Inferred frequency {inferred} from passed values "
f"does not conform to passed frequency {freq.freqstr}"
)
# monotonicity/uniqueness properties are called via frequencies.infer_freq,
# see GH#23789
@property
def _is_monotonic_increasing(self):
return algos.is_monotonic(self.asi8, timelike=True)[0]
@property
def _is_monotonic_decreasing(self):
return algos.is_monotonic(self.asi8, timelike=True)[1]
@property
def _is_unique(self):
return len(unique1d(self.asi8)) == len(self)
# ------------------------------------------------------------------
# Arithmetic Methods
_create_comparison_method = classmethod(_datetimelike_array_cmp)
# pow is invalid for all three subclasses; TimedeltaArray will override
# the multiplication and division ops
__pow__ = make_invalid_op("__pow__")
__rpow__ = make_invalid_op("__rpow__")
__mul__ = make_invalid_op("__mul__")
__rmul__ = make_invalid_op("__rmul__")
__truediv__ = make_invalid_op("__truediv__")
__rtruediv__ = make_invalid_op("__rtruediv__")
__floordiv__ = make_invalid_op("__floordiv__")
__rfloordiv__ = make_invalid_op("__rfloordiv__")
__mod__ = make_invalid_op("__mod__")
__rmod__ = make_invalid_op("__rmod__")
__divmod__ = make_invalid_op("__divmod__")
__rdivmod__ = make_invalid_op("__rdivmod__")
def _add_datetimelike_scalar(self, other):
# Overridden by TimedeltaArray
raise TypeError(f"cannot add {type(self).__name__} and {type(other).__name__}")
_add_datetime_arraylike = _add_datetimelike_scalar
def _sub_datetimelike_scalar(self, other):
# Overridden by DatetimeArray
assert other is not NaT
raise TypeError(f"cannot subtract a datelike from a {type(self).__name__}")
_sub_datetime_arraylike = _sub_datetimelike_scalar
def _sub_period(self, other):
# Overridden by PeriodArray
raise TypeError(f"cannot subtract Period from a {type(self).__name__}")
def _add_offset(self, offset):
raise AbstractMethodError(self)
def _add_delta(self, other):
"""
Add a timedelta-like, Tick or TimedeltaIndex-like object
to self, yielding an int64 numpy array
Parameters
----------
        other : {timedelta, np.timedelta64, Tick,
TimedeltaIndex, ndarray[timedelta64]}
Returns
-------
result : ndarray[int64]
Notes
-----
The result's name is set outside of _add_delta by the calling
method (__add__ or __sub__), if necessary (i.e. for Indexes).
"""
if isinstance(other, (Tick, timedelta, np.timedelta64)):
new_values = self._add_timedeltalike_scalar(other)
elif is_timedelta64_dtype(other):
# ndarray[timedelta64] or TimedeltaArray/index
new_values = self._add_delta_tdi(other)
return new_values
def _add_timedeltalike_scalar(self, other):
"""
Add a delta of a timedeltalike
return the i8 result view
"""
if isna(other):
# i.e np.timedelta64("NaT"), not recognized by delta_to_nanoseconds
new_values = np.empty(self.shape, dtype="i8")
new_values[:] = iNaT
return new_values
inc = delta_to_nanoseconds(other)
new_values = checked_add_with_arr(self.asi8, inc, arr_mask=self._isnan).view(
"i8"
)
new_values = self._maybe_mask_results(new_values)
return new_values.view("i8")
def _add_delta_tdi(self, other):
"""
Add a delta of a TimedeltaIndex
return the i8 result view
"""
if len(self) != len(other):
raise ValueError("cannot add indices of unequal length")
if isinstance(other, np.ndarray):
# ndarray[timedelta64]; wrap in TimedeltaIndex for op
from pandas.core.arrays import TimedeltaArray
other = TimedeltaArray._from_sequence(other)
self_i8 = self.asi8
other_i8 = other.asi8
new_values = checked_add_with_arr(
self_i8, other_i8, arr_mask=self._isnan, b_mask=other._isnan
)
if self._hasnans or other._hasnans:
mask = (self._isnan) | (other._isnan)
new_values[mask] = iNaT
return new_values.view("i8")
def _add_nat(self):
"""
Add pd.NaT to self
"""
if is_period_dtype(self):
raise TypeError(
f"Cannot add {type(self).__name__} and {type(NaT).__name__}"
)
# GH#19124 pd.NaT is treated like a timedelta for both timedelta
# and datetime dtypes
result = np.zeros(self.shape, dtype=np.int64)
result.fill(iNaT)
return type(self)(result, dtype=self.dtype, freq=None)
def _sub_nat(self):
"""
Subtract pd.NaT from self
"""
# GH#19124 Timedelta - datetime is not in general well-defined.
# We make an exception for pd.NaT, which in this case quacks
# like a timedelta.
# For datetime64 dtypes by convention we treat NaT as a datetime, so
# this subtraction returns a timedelta64 dtype.
# For period dtype, timedelta64 is a close-enough return dtype.
result = np.zeros(self.shape, dtype=np.int64)
result.fill(iNaT)
return result.view("timedelta64[ns]")
def _sub_period_array(self, other):
"""
Subtract a Period Array/Index from self. This is only valid if self
is itself a Period Array/Index, raises otherwise. Both objects must
have the same frequency.
Parameters
----------
other : PeriodIndex or PeriodArray
Returns
-------
result : np.ndarray[object]
Array of DateOffset objects; nulls represented by NaT.
"""
if not is_period_dtype(self):
raise TypeError(
f"cannot subtract {other.dtype}-dtype from {type(self).__name__}"
)
if self.freq != other.freq:
msg = DIFFERENT_FREQ.format(
cls=type(self).__name__, own_freq=self.freqstr, other_freq=other.freqstr
)
raise IncompatibleFrequency(msg)
new_values = checked_add_with_arr(
self.asi8, -other.asi8, arr_mask=self._isnan, b_mask=other._isnan
)
new_values = np.array([self.freq.base * x for x in new_values])
if self._hasnans or other._hasnans:
mask = (self._isnan) | (other._isnan)
new_values[mask] = NaT
return new_values
def _addsub_object_array(self, other: np.ndarray, op):
"""
Add or subtract array-like of DateOffset objects
Parameters
----------
other : np.ndarray[object]
op : {operator.add, operator.sub}
Returns
-------
result : same class as self
"""
assert op in [operator.add, operator.sub]
if len(other) == 1:
return op(self, other[0])
warnings.warn(
"Adding/subtracting array of DateOffsets to "
f"{type(self).__name__} not vectorized",
PerformanceWarning,
)
# For EA self.astype('O') returns a numpy array, not an Index
left = self.astype("O")
res_values = op(left, np.array(other))
kwargs = {}
if not is_period_dtype(self):
kwargs["freq"] = "infer"
try:
res = type(self)._from_sequence(res_values, **kwargs)
except ValueError:
# e.g. we've passed a Timestamp to TimedeltaArray
res = res_values
return res
def _time_shift(self, periods, freq=None):
"""
Shift each value by `periods`.
Note this is different from ExtensionArray.shift, which
shifts the *position* of each element, padding the end with
missing values.
Parameters
----------
periods : int
Number of periods to shift by.
freq : pandas.DateOffset, pandas.Timedelta, or str
Frequency increment to shift by.
"""
if freq is not None and freq != self.freq:
if isinstance(freq, str):
freq = frequencies.to_offset(freq)
offset = periods * freq
result = self + offset
return result
if periods == 0:
# immutable so OK
return self.copy()
if self.freq is None:
raise NullFrequencyError("Cannot shift with no freq")
start = self[0] + periods * self.freq
end = self[-1] + periods * self.freq
# Note: in the DatetimeTZ case, _generate_range will infer the
# appropriate timezone from `start` and `end`, so tz does not need
# to be passed explicitly.
return self._generate_range(start=start, end=end, periods=None, freq=self.freq)
@unpack_zerodim_and_defer("__add__")
def __add__(self, other):
# scalar others
if other is NaT:
result = self._add_nat()
elif isinstance(other, (Tick, timedelta, np.timedelta64)):
result = self._add_delta(other)
elif isinstance(other, DateOffset):
# specifically _not_ a Tick
result = self._add_offset(other)
elif isinstance(other, (datetime, np.datetime64)):
result = self._add_datetimelike_scalar(other)
elif lib.is_integer(other):
# This check must come after the check for np.timedelta64
# as is_integer returns True for these
if not is_period_dtype(self):
raise integer_op_not_supported(self)
result = self._time_shift(other)
# array-like others
elif is_timedelta64_dtype(other):
# TimedeltaIndex, ndarray[timedelta64]
result = self._add_delta(other)
elif is_object_dtype(other):
# e.g. Array/Index of DateOffset objects
result = self._addsub_object_array(other, operator.add)
elif is_datetime64_dtype(other) or is_datetime64tz_dtype(other):
# DatetimeIndex, ndarray[datetime64]
return self._add_datetime_arraylike(other)
elif is_integer_dtype(other):
if not is_period_dtype(self):
raise integer_op_not_supported(self)
result = self._addsub_int_array(other, operator.add)
else:
# Includes Categorical, other ExtensionArrays
# For PeriodDtype, if self is a TimedeltaArray and other is a
# PeriodArray with a timedelta-like (i.e. Tick) freq, this
# operation is valid. Defer to the PeriodArray implementation.
# In remaining cases, this will end up raising TypeError.
return NotImplemented
if is_timedelta64_dtype(result) and isinstance(result, np.ndarray):
from pandas.core.arrays import TimedeltaArray
return TimedeltaArray(result)
return result
def __radd__(self, other):
# alias for __add__
return self.__add__(other)
@unpack_zerodim_and_defer("__sub__")
def __sub__(self, other):
# scalar others
if other is NaT:
result = self._sub_nat()
elif isinstance(other, (Tick, timedelta, np.timedelta64)):
result = self._add_delta(-other)
elif isinstance(other, DateOffset):
# specifically _not_ a Tick
result = self._add_offset(-other)
elif isinstance(other, (datetime, np.datetime64)):
result = self._sub_datetimelike_scalar(other)
elif lib.is_integer(other):
# This check must come after the check for np.timedelta64
# as is_integer returns True for these
if not is_period_dtype(self):
raise integer_op_not_supported(self)
result = self._time_shift(-other)
elif isinstance(other, Period):
result = self._sub_period(other)
# array-like others
elif is_timedelta64_dtype(other):
# TimedeltaIndex, ndarray[timedelta64]
result = self._add_delta(-other)
elif is_object_dtype(other):
# e.g. Array/Index of DateOffset objects
result = self._addsub_object_array(other, operator.sub)
elif is_datetime64_dtype(other) or is_datetime64tz_dtype(other):
# DatetimeIndex, ndarray[datetime64]
result = self._sub_datetime_arraylike(other)
elif is_period_dtype(other):
# PeriodIndex
result = self._sub_period_array(other)
elif is_integer_dtype(other):
if not is_period_dtype(self):
raise
|
integer_op_not_supported(self)
|
pandas._libs.tslibs.c_timestamp.integer_op_not_supported
|
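# An aside: the mixin above backs the arithmetic users see on DatetimeIndex and
# TimedeltaIndex. A small public-API illustration of the code paths that
# __add__/__sub__ dispatch to:
import pandas as pd

idx = pd.date_range('2018-01-01', periods=3, freq='D')
print(idx + pd.Timedelta('1H'))   # timedelta-like scalar: handled by _add_delta
print(idx - idx[0])               # datetime-like other: yields a timedelta64 result
try:
    idx + 1                       # integer ops are rejected for non-Period dtypes
except TypeError as err:
    print(type(err).__name__, err)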
import pandas as pd
import numpy as np
data=
|
pd.read_csv('england-premier-league-players-2018-to-2019-stats.csv')
|
pandas.read_csv
|
import boto3
import pandas as pd
import matplotlib.pyplot as plt
def getIexKey():
return '<KEY>'
def getTickerList():
return [
'JPM',
'BAC',
'C',
'WFC',
'GS',
]
def getTickerListFlatten(tickerList: list):
    # Build an empty string `s` that we'll add the tickers and commas to
    s = ''
    # Loop through every element of `tickerList` and append it plus a comma to `s`
    for tickerElement in tickerList:
        s += tickerElement
        s += ','
    # Drop the trailing comma
    s = s[:-1]
    return s
def getHttpRequestString(ticker_string, endpoints, years, IEX_API_Key):
# Interpolate the endpoint strings into the HTTP_request string
return f'https://cloud.iexapis.com/stable/stock/market/batch?symbols={ticker_string}&types={endpoints}&range={years}y&cache=true&token={IEX_API_Key}'
def getSeriesList(bank_data, tickers: list):
# Create an empty list that we will append pandas Series of stock price data into
seriesList = []
# Loop through each of our tickerList and parse a pandas Series of their closing prices over the last 5 years
for tickerElement in tickers:
seriesList.append(pd.DataFrame(bank_data[tickerElement]['chart'])['close'])
# Add in a column of dates
seriesList.append(
|
pd.DataFrame(bank_data['JPM']['chart'])
|
pandas.DataFrame
|
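# Two small idiomatic shortcuts for the helpers above (illustrative only):
# str.join for the comma-separated ticker string, and pd.concat for turning the
# list of Series into one price DataFrame; the column labels are assumptions.
def getTickerListFlatten_short(tickerList: list) -> str:
    return ','.join(tickerList)

# Once seriesList holds one 'close' Series per ticker plus the date column:
# bank_prices = pd.concat(seriesList, axis=1)
# bank_prices.columns = getTickerList() + ['Date']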
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns
from sklearn.neighbors import DistanceMetric
import networkx as nx
from itertools import combinations
def undo_PCA(x, pca, pca_comp):
mu = np.mean(x, axis=0)
xhat = np.dot(pca.transform(x), pca_comp)
xhat += mu
print(xhat.shape)
return xhat.T
def emb2exp(semb, gemb, semb_bias, gemb_bias):
x = np.dot(semb, gemb.T)
x += semb_bias
x += gemb_bias.T
print(x.shape)
return x
def plot_samp_3dPCA(samp_pca, cats, cat2col,
subset_idxs=[], showcat=False, showlegend=True,
alpha=0.1, s=25, fs=(20,20)):
if len(subset_idxs)==0:
X = samp_pca[0]
Y = samp_pca[1]
Z = samp_pca[2]
else:
X = samp_pca[0][subset_idxs]
Y = samp_pca[1][subset_idxs]
Z = samp_pca[2][subset_idxs]
colors = [cat2col[c] for c in cats]
fig = plt.figure(figsize=fs)
    ax = fig.add_subplot(projection='3d')
ax.scatter(X, Y, Z, c=colors, s=s, alpha=alpha)
if showcat:
for x, y, z, c in zip(X, Y, Z, cats): ax.text(x, y, z, c, fontsize=8)
if showlegend:
proxies = []
for c in cat2col: proxies.append(plt.Rectangle((0, 0), 1, 1, fc=cat2col[c]))
ax.legend(proxies, list(set(list(cat2col))), numpoints = 1)
ax.set_xlabel('PC1')
ax.set_ylabel('PC2')
ax.set_zlabel('PC3')
plt.show()
def plot_gene_3dPCA(gene_pca, genelist,
hl_idxs=[], hl_cols=['r', 'g', 'b'],
showhlgene=True, showbg=True,
bgcol=(0.5,0.5,0.5), bgs=30, bgalpha=0.1,
hlfs=10, hls=30, hlalpha=0.8, fs=(20,20)):
X = gene_pca[0]
Y = gene_pca[1]
Z = gene_pca[2]
fig = plt.figure(figsize=fs)
    ax = fig.add_subplot(projection='3d')
    if showbg: ax.scatter(X, Y, Z, c=bgcol, s=bgs, alpha=bgalpha)
for i, hl in enumerate(hl_idxs):
for idx in hl:
if showhlgene: ax.text(X[idx],Y[idx],Z[idx],genelist[idx], color=hl_cols[i], fontsize=hlfs)
ax.scatter(X[idx],Y[idx],Z[idx], c=hl_cols[i], s=hls, alpha=hlalpha)
ax.set_xlabel('PC1')
ax.set_ylabel('PC2')
ax.set_zlabel('PC3')
plt.show()
def find_centroid(emb, sid2ca, sids, cancer):
idxs = [i for i,s in enumerate(sids) if sid2ca[s]==cancer]
arr = np.array([emb[i] for i in idxs])
return np.mean(arr, axis=0)
def print_gdist(g1, g2, dist, gene2idx):
print(dist[gene2idx[g1]][gene2idx[g2]])
def get_emb_dist(emb, return_pd=True, index=[], distmetric='euclidean', masked=False, maskval=10):
dist = DistanceMetric.get_metric(distmetric)
emb_dist = np.absolute(dist.pairwise(emb))
if masked:
utri = np.triu_indices(len(emb_dist))
emb_dist_masked = emb_dist
emb_dist_masked[utri] = maskval
res = emb_dist_masked
else:
res = emb_dist
if return_pd: res = pd.DataFrame(res, index=index, columns=index)
print('shape: %s; mean: %.3f; std: %.3f' % (str(emb_dist.shape), emb_dist.mean(), emb_dist.std()))
mean = np.mean(emb_dist, axis=0)
std = np.std(emb_dist, axis=0)
return res, mean, std
def n_closest_nbs(dist, gene, n=10):
n += 1
arr = np.array(dist[gene])
nb_idx = np.argpartition(arr, n)[:n]
tdf = pd.DataFrame(dist[gene][nb_idx]).T.assign(parent=gene)
mdf =
|
pd.melt(tdf, id_vars='parent', var_name='child', value_name='l2dist')
|
pandas.melt
|
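# A self-contained toy run of the pattern used by get_emb_dist / n_closest_nbs
# above: pairwise distances, argpartition to pick the nearest neighbours, then
# melt to a long table. Gene names and values are invented for the example.
import numpy as np
import pandas as pd
from sklearn.neighbors import DistanceMetric  # same import as the snippet above

genes = ['g1', 'g2', 'g3', 'g4']
emb = np.random.default_rng(0).normal(size=(4, 8))   # 4 genes, 8-dim embedding
dist = pd.DataFrame(
    DistanceMetric.get_metric('euclidean').pairwise(emb),
    index=genes, columns=genes,
)
arr = np.array(dist['g1'])
nb_idx = np.argpartition(arr, 2)[:2]                 # two smallest distances (includes g1 itself)
tdf = pd.DataFrame(dist['g1'].iloc[nb_idx]).T.assign(parent='g1')
long_df = pd.melt(tdf, id_vars='parent', var_name='child', value_name='l2dist')
print(long_df)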
import pandas as pd
import numpy as np
import glob
import os
import math
import re
from datetime import datetime
def _date_parser(pattern):
def f(date):
try:
            parsed = datetime.strptime(date, pattern)
except Exception as e:
# the first 4 or 8 lines (header) and the last line won't be parsed
parsed = ''
return parsed
return f
def _caption_date_parser(col):
return np.vectorize(_date_parser('%Y%m%d%H%M%S.%f'))(col)
def _p_boundary_date_parser(date_A, date_B):
date = '{} {}'.format(date_A, date_B)
return _date_parser('%m/%d/%y %H:%M:%S')(date)
def _load_csv_file(path, sep, names, parse_dates, date_parser, dtype=None):
try:
df = pd.read_csv(path, sep=sep, header=None, names=names,
parse_dates=parse_dates, date_parser=date_parser,
dtype=dtype)
return df
except Exception as e:
# file may not exist
# file may exist but may be empty
print(e)
return pd.DataFrame()
def _load_single_caption_file(path, keep_story_boundary=False):
col_names = ['t_start', 't_end', 'marker', 'caption']
df = _load_csv_file(path, sep='|', names=col_names,
parse_dates=['t_start', 't_end'],
date_parser=_caption_date_parser,
dtype={'t_start':str, 't_end':str})
metadata = annotate_file(path)
if not df.empty:
# cleanse column data here
df.drop(['marker', 't_end'], axis=1, inplace=True)
df = df.dropna()
df = df.reset_index(drop=True)
if not keep_story_boundary and not df['caption'].empty:
df = _remove_story_boundary_annotation(df)
# story boundaries removed as ASR-generated caption files won't have these
return (df, metadata)
def _load_single_program_boundary_file(path):
col_names = ['vcr_i', 'recording_date', 'vcr_i_2', 'recording_date_2',
't_start', 't_end', 't_schedule', 'program_name']
df = _load_csv_file(path, sep=r'\s+', names=col_names,
parse_dates={'t_program_boundary': ['recording_date', 't_start']},
date_parser=_p_boundary_date_parser)
# cleanse column data here
cols_to_drop = ['vcr_i', 'vcr_i_2', 'recording_date_2',
't_end', 't_schedule', 'program_name']
df.drop(cols_to_drop, axis=1, inplace=True)
    df = df.dropna()
df = df.reset_index(drop=True)
metadata = annotate_file(path)
return (df, metadata)
def annotate_file(path):
base = os.path.basename(path)
filename, ext = os.path.splitext(base)
filename_components = re.split('_|-', filename)
# filename_components = filename.split('_')
if len(filename_components) == 11:
metadata = {
'filename': filename,
'filetype': ext,
'recording_end_date': '_'.join(filename_components[0:3]),
'vcr_index': filename_components[6]
}
else:
print('corrupted filename: {}'.format(filename))
# corrupted file name
metadata = {
'filename': filename,
'filetype': ext,
'recording_end_date': '',
'vcr_index': ''
}
return metadata
def load_files(root_path, file_extension='txt3', recursive_search=True, keep_story_boundary=False):
    if file_extension not in ['txt3', 'cuts']:
raise Exception('UnsupportedDataType')
if root_path.endswith(file_extension):
if file_extension == 'txt3':
yield _load_single_caption_file(root_path, keep_story_boundary=keep_story_boundary)
else:
            # TODO: caption file
yield _load_single_program_boundary_file(root_path)
else:
if not root_path.endswith('/'):
root_path += '/'
root_path += '**/*.{}'.format(file_extension)
filepaths = glob.iglob(root_path, recursive=recursive_search)
if file_extension == 'txt3':
for path in filepaths:
yield _load_single_caption_file(path, keep_story_boundary=keep_story_boundary)
else:
for path in filepaths:
yield _load_single_program_boundary_file(path)
def load_caption_files(root_path, recursive_search=True, keep_story_boundary=False):
    '''Load caption (.txt3) files under root_path.
    Thin wrapper around load_files with file_extension='txt3'.
    Returns:
        generator of (caption DataFrame, metadata dict) tuples,
        one per caption file found.
    '''
return load_files(root_path, file_extension='txt3', recursive_search=recursive_search, keep_story_boundary=keep_story_boundary)
def load_program_boundary_files(root_path, recursive_search=True):
    '''Load program boundary (.cuts) files under root_path.
    Thin wrapper around load_files with file_extension='cuts'.
    Returns:
        generator of (program boundary DataFrame, metadata dict) tuples,
        one per boundary file found.
    '''
return load_files(root_path, file_extension='cuts', recursive_search=recursive_search)
def _remove_story_boundary_annotation(caption):
is_story_boundary = caption['caption'].str.contains('type=', flags=re.IGNORECASE)
return caption[~is_story_boundary]
def find_y_path_from_X_filename(X_filename, search_root_path):
if search_root_path.endswith('/'):
search_root_path = search_root_path[:-1]
pattern = '{}/**/{}.{}'.format(search_root_path, X_filename, 'cuts')
paths = glob.glob(pattern, recursive=True)
if len(paths) > 1:
print('duplicate files with matching caption filename')
return paths[0]
def split_video_to_clips(start_time, end_time, interval):
pass
def split_audio_to_clips(start_time, end_time, interval):
pass
def split_caption_to_X(caption, interval=10):
    '''Split a caption DataFrame into fixed-length time windows.
    Args:
        caption: caption DataFrame with a 't_start' timestamp column, one row per line
        interval: window length in seconds (default: {10})
    Returns:
        caption lines grouped into `interval`-second windows
    '''
freq = '{}s'.format(interval)
grouper =
|
pd.Grouper(key='t_start',freq=freq)
|
pandas.Grouper
|
#!/usr/bin/env python
# coding: utf-8
# 1 Import libraries and Set path
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
import scipy.stats as scs
from scipy.stats.mstats import winsorize
from scipy.stats.mstats import gmean
from tabulate import tabulate
# 2 Set path of my sub-directory
from pathlib import Path
# key in your own file path below
myfolder = Path('key in your own file path here')
# 3 Set up files to write output and charts
from matplotlib.backends.backend_pdf import PdfPages
outfile = open('output.txt', 'w')
chartfile = PdfPages('chart-retreg.pdf')
# Stock returns data
# 4 Read Compustat monthly stock returns data
df1 = pd.read_csv(myfolder / 'stock-returns.csv', parse_dates = ['datadate'])
df1 = df1.sort_values(by=['gvkey','datadate'])
df1 = df1.dropna()
# 5 Create portfolio formation year (pfy) variable, where
# pfy = current year for Jul-Dec dates and previous year for Jan-Jun dates.
# This is to facilitate compounding returns over Jul-Jun by pfy later below.
df1['year'], df1['month'] = df1['datadate'].dt.year, df1['datadate'].dt.month
df1['pfy'] = np.where(df1.month > 6, df1.year, df1.year - 1)
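# Illustration of the rule above: a return dated 2005-08 gets pfy 2005, and one
# dated 2006-03 also gets pfy 2005, so the twelve months Jul 2005 - Jun 2006
# share a single portfolio formation year.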
# 6 Compute monthly return compounding factor (1+monthly return)
# trt1m is the monthly return expressed as a percentage, so divide by 100 to convert it to a decimal
df1['mretfactor'] = 1 + df1.trt1m/100
df1 = df1.sort_values(by=['gvkey','pfy'])
df2 = df1[['gvkey', 'conm', 'datadate', 'pfy', 'mretfactor']]
# 7 Compound monthly returns to get annual returns at end-June of each pfy,
# ensuring only firm-years with 12 months of return data from Jul-Jun are selected.
df2['yret'] = df2.groupby(['gvkey', 'pfy'])['mretfactor'].cumprod() - 1
df3 = df2.groupby(['gvkey', 'pfy']).nth(11)
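# Note: .nth(11) keeps only the 12th monthly row of each (gvkey, pfy) group, so
# the cumulative product above becomes a full Jul-Jun annual return and
# firm-years with fewer than 12 months of data drop out.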
df3['yret'] = winsorize(df3['yret'], limits=[0.025,0.025])
df3 = df3.drop(['mretfactor'], axis=1) # "axis=1" means to drop column
# Accounting data
# 8 Read Compustat accounting data
df4 = pd.read_csv(myfolder / 'accounting-data2.csv', parse_dates = ['datadate'])
df4 = df4.sort_values(by=['gvkey','datadate'])
# 9 Create portfolio formation year (pfy) variable, portfolio formation in April where
# pfy = current year for Jan-Mar year-end dates and next year for Apr-Dec year-end dates.
# This is to facilitate compounding returns over July-June by pfy below.
# dt.year is pandas method to extract year from 'datadate' variable
# dt.month is pandas method to extract month from 'datadate' variable
df4['year'], df4['month'] = df4['datadate'].dt.year, df4['datadate'].dt.month
df4['pfy'] = np.where(df4.month < 4, df4.year, df4.year + 1)
# 10 Compute accounting variables from Compustat data, keep relevant variables, delete missing values
# Profitability
df4['ROA'] = df4['ni'] / df4['at']
df4['ROA_prev'] = df4.groupby('gvkey')['ROA'].shift(1)
# Leverage
df4['Leverage_ratio'] = df4['dltt'] / df4['seq']
df4['Leverage_ratio_prev'] = df4.groupby('gvkey')['Leverage_ratio'].shift(1)
df4['Current_ratio'] = df4['act'] / df4['lct']
df4['Current_ratio_prev'] = df4.groupby('gvkey')['Current_ratio'].shift(1)
df4['csho_prev'] = df4.groupby('gvkey')['csho'].shift(1)
df4['Shares_issued'] = df4['csho'] - df4['csho_prev']
# Operating
df4['GP_margin'] = df4['gp'] / df4['revt']
df4['GP_margin_prev'] = df4.groupby('gvkey')['GP_margin'].shift(1)
df4['at_prev'] = df4.groupby('gvkey')['at'].shift(1)
df4['at_average']= (df4['at'] + df4['at_prev'])/2
df4['Asset_TO'] = df4['revt'] / df4['at_average']
df4['Asset_TO_prev'] = df4.groupby('gvkey')['Asset_TO'].shift(1)
df4['GP_profitability'] = df4['gp']/df4['at']
df4 = df4[['ib', 'gvkey', 'pfy', 'ni', 'oancf', 'mkvalt', 'gsector', 'ROA', 'ROA_prev', 'Leverage_ratio', 'Leverage_ratio_prev', 'Current_ratio',
'Current_ratio_prev', 'csho_prev', 'Shares_issued', 'GP_margin', 'GP_margin_prev', 'at_prev',
'at_average', 'Asset_TO', 'Asset_TO_prev', 'GP_profitability' ]]
df4 = df4[np.isfinite(df4)]
df4 = df4.dropna()
# 11 EDA before winsorize
dfeda = df4[['ROA', 'ROA_prev', 'oancf', 'ib', 'Leverage_ratio', 'Current_ratio', 'Shares_issued',
'GP_margin', 'Asset_TO', 'mkvalt', 'ni']]
dfeda['PE'] = dfeda['mkvalt'] / dfeda['ni']
dfeda['CROA'] = dfeda['ROA'] - dfeda['ROA_prev']
dfeda['Cquality'] = np.where(dfeda['oancf']> dfeda['ib'], 1, 0)
dfeda2 = dfeda[['ROA', 'oancf', 'CROA', 'Cquality', 'Leverage_ratio', 'Current_ratio', 'Shares_issued',
'GP_margin', 'Asset_TO', 'PE']]
print('EDA before winsorize \n\n', dfeda2.describe(), '\n'*5, file=outfile)
# 12 Winsorize variables at 2.5% of left and right tails
for var in ['ib', 'ni', 'oancf', 'mkvalt', 'ROA', 'ROA_prev', 'Leverage_ratio', 'Leverage_ratio_prev', 'Current_ratio',
'Current_ratio_prev', 'csho_prev', 'Shares_issued', 'GP_margin', 'GP_margin_prev', 'at_prev',
'at_average', 'Asset_TO', 'Asset_TO_prev', 'GP_profitability']:
df4[var] = winsorize(df4[var], limits=[0.025,0.025])
# 13 EDA after winsorize
dfeda3 = df4[['ROA', 'ROA_prev', 'oancf', 'ib', 'Leverage_ratio', 'Current_ratio', 'Shares_issued',
'GP_margin', 'Asset_TO', 'mkvalt', 'ni']]
dfeda3['PE'] = dfeda3['mkvalt'] / dfeda3['ni']
dfeda3['CROA'] = dfeda3['ROA'] - dfeda3['ROA_prev']
dfeda3['Cquality'] = np.where(dfeda3['oancf']> dfeda3['ib'], 1, 0)
dfeda4 = dfeda3[['ROA', 'oancf', 'CROA', 'Cquality', 'Leverage_ratio', 'Current_ratio', 'Shares_issued',
'GP_margin', 'Asset_TO', 'PE']]
print('EDA after winsorize \n\n', dfeda4.describe(), '\n'*5, file=outfile)
# Merge Stock returns data with Accounting data
# 14 Merge accounting dataset (df4) with returns dataset (df3)
# "inner" means to merge only observations that have data in BOTH datasets
df5 = pd.merge(df3, df4, how='inner', on=['gvkey', 'pfy'])
df5 = df5[['ib', 'gvkey', 'conm', 'pfy', 'yret', 'ni', 'mkvalt', 'oancf', 'gsector', 'ROA', 'ROA_prev', 'Leverage_ratio', 'Leverage_ratio_prev', 'Current_ratio',
'Current_ratio_prev', 'csho_prev', 'Shares_issued', 'GP_margin', 'GP_margin_prev', 'at_prev',
'at_average', 'Asset_TO', 'Asset_TO_prev', 'GP_profitability']]
# Compute F-score
# 15 Compute 9 F-score ratios
# Profitability
df5['F_income'] = np.where(df5['ROA']> 0, 1, 0)
df5['F_opcash'] = np.where(df5['oancf']> 0, 1, 0)
df5['F_ROA'] = np.where(df5['ROA']>df5['ROA_prev'], 1, 0)
df5['F_quality'] = np.where(df5['oancf']> df5['ib'], 1, 0)
# Leverage
df5['F_leverage'] = np.where(df5['Leverage_ratio']< df5['Leverage_ratio_prev'], 1, 0)
df5['F_currentratio'] = np.where(df5['Current_ratio']> df5['Current_ratio_prev'], 1, 0)
df5['F_dilute'] = np.where(df5['Shares_issued']< 0 , 1, 0)
# Operating
df5['F_GPM'] = np.where(df5['GP_margin']< df5['GP_margin_prev'], 1, 0)
df5['F_ATO'] = np.where(df5['Asset_TO']< df5['Asset_TO_prev'], 1, 0)
# 16 Group F-score based on categories
df5['F-profitability'] = df5['F_income'] + df5['F_opcash'] + df5['F_ROA'] + df5['F_quality']
df5['F_leverage_liquidity'] = df5['F_leverage'] + df5['F_currentratio'] + df5['F_dilute']
df5['F_operating'] = df5['F_GPM'] + df5['F_ATO']
df5['F_score'] = df5['F-profitability'] + df5['F_leverage_liquidity'] + df5['F_operating']
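# For example, a firm passing all four profitability tests, two of the three
# leverage/liquidity tests and both operating tests scores F_score = 4 + 2 + 2 = 8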
# Long Portfolio
# 17 Keep firms with F_score greater than 7
df6 = df5[df5.F_score > 7].copy()
# 18 Average PE per pfy per gsector
df6['PE'] = df6['mkvalt'] / df6['ni']
df7 = df6.groupby(['pfy','gsector'], as_index=False)['PE'].mean()
# 19 Filter for stocks with PE lower than gsector average
df8 = df6.merge(df7, on = ['pfy','gsector'], how='left')
df8['y_x'] = df8['PE_y'] - df8['PE_x']
df11 = df8[df8['y_x'] > 0]
# 20 Finding the number of unique company/gvkey in our long portfolio
df12 = df11['gvkey'].unique()
# 21 Mean yret of each pfy
df23 = pd.DataFrame(df11.groupby(['pfy'], as_index=False)['yret'].mean())
df23.rename(columns={'yret':'pyret'}, inplace = True)
# 22 Add the number of stocks in each pfy
df24 = df11.groupby(['pfy'], as_index=False)['yret'].count()
df25 = pd.merge(df23, df24, how='inner', on=['pfy'])
df25.rename(columns={'yret':'count'}, inplace = True)
# 23 Compute yearly return compounding factor (1+yearly return)
df25['ppyret'] = df25['pyret'] + 1
# Risk free rate
# 24 Calculate the risk-free rate from the US Treasury 1-month yield (via Quandl)
import quandl
from datetime import datetime
# Key in your quandl api key below
QUANDL_API_KEY = 'key in your quandl api key here'
quandl.ApiConfig.api_key = QUANDL_API_KEY
start = datetime(2002, 1, 1)
end = datetime(2020, 12, 31)
rf = quandl.get('USTREASURY/YIELD.1',start_date=start, end_date=end)
risk_free = rf['1 MO']
rfr = risk_free.mean()/100
# 25 Annualised return: geometric mean of the yearly compounding factors, minus 1
Lportfolio_annualised_return_rut = scs.gmean(df25.loc[:,"ppyret"])-1
# 26 Calculate annualized volatility from the standard deviation
Lportfolio_vola_rut = np.std(df25['pyret'], ddof=1)
# 27 Calculate the Sharpe ratio
Lportfolio_sharpe_rut = ((Lportfolio_annualised_return_rut - rfr)/ Lportfolio_vola_rut)
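# For illustration: an annualised return of 12%, a risk-free rate of 1% and a
# volatility of 20% give a Sharpe ratio of (0.12 - 0.01) / 0.20 = 0.55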
# 28 Define negative returns and compute standard deviation
Lportfolio_negative_ret_rut = df25.loc[df25['pyret'] < 0]
Lportfolio_expected_ret_rut = np.mean(df25['pyret'])
Lportfolio_downside_std_rut = Lportfolio_negative_ret_rut['pyret'].std()
# 29 Compute Sortino Ratio
Lportfolio_sortino_rut = (Lportfolio_expected_ret_rut - rfr)/Lportfolio_downside_std_rut
# 30 Compute Worst and Best pfy return
Lpcolumn = df25["pyret"]
Lpmax_value = Lpcolumn.max()
Lpmin_value = Lpcolumn.min()
# 31 Compute % of profitable pfy
Lpprofitable_pfy = len(df25[df25['pyret']>0]['pyret'])/len(df25['pyret'])
# 32 Compute long portfolio monthly price
#Merge long portfolio df11 with stock returns data to get monthly close price
col = ['pfy','gvkey']
df21 = df11[col]
df26 = pd.merge(df1, df21, how='inner', on=['gvkey', 'pfy'])
# Calculate long portfolio monthly price
df27 = df26.groupby(['pfy','month'], as_index=False)['prccm'].mean()
# 33 Compute max drawdown and duration
# Initialize variables: hwm (high watermark), drawdown, duration
lphwm = np.zeros(len(df27))
lpdrawdown = np.zeros(len(df27))
lpduration = 0
# 34 Determine maximum drawdown (maxDD)
for t in range(len(df27)):
lphwm[t] = max(lphwm[t-1], df27['prccm'][t])
lpdrawdown[t] = ((lphwm[t] - df27.prccm[t]) / lphwm[t]) * 100
lpmaxDD = lpdrawdown.max()
# 35 Determine maximum drawdown duration
# numpy.allclose compares whether two floating values are equal to the absolute
# tolerance (atol) precision (1e-8 is 1x10^-8)
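# e.g. np.allclose(0.1 + 0.2, 0.3, atol=1e-8) is True even though (0.1 + 0.2) == 0.3 is False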
for j in range(len(df27)):
if np.allclose(lpdrawdown[j], lpmaxDD, atol=1e-8):
for k in range(j):
if np.allclose(df27.prccm[k], lphwm[j], atol=1e-8):
lpduration = j - k
else:
continue
else:
continue
# Short portfolio
# 36 Keep firms with F_score less than 2
df28 = df5[df5.F_score < 2].copy()
# 37 Average PE per pfy per gsector
df28['PE'] = df28['mkvalt'] / df28['ni']
df29 = df28.groupby(['pfy','gsector'], as_index=False)['PE'].mean()
# 38 Filter for stocks with PE lower than gsector average
df30 = df28.merge(df29, on = ['pfy','gsector'], how='left')
df30['y_x'] = df30['PE_y'] - df30['PE_x']
df33 = df30[df30['y_x'] > 0]
# 39 Finding the number of unique company/gvkey in our short portfolio
df34 = df33['gvkey'].unique()
# 40 Mean yret of each pfy
df37 = pd.DataFrame(df33.groupby(['pfy'], as_index=False)['yret'].mean())
df37.rename(columns={'yret':'pyret'}, inplace = True)
# 41 Add the number of stocks in each pfy
df38 = df33.groupby(['pfy'], as_index=False)['yret'].count()
df39 = pd.merge(df37, df38, how='inner', on=['pfy'])
df39.rename(columns={'yret':'count'}, inplace = True)
# 42 Reverse return sign due to short portfolio
df39['spyret'] = df39['pyret'] * -1
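# e.g. a pfy in which the shorted stocks fall 15% on average has pyret = -0.15,
# which becomes spyret = +0.15 for the short portfolio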
# 43 Compute yearly return compounding factor (1+yearly return)
df39['sppyret'] = df39['spyret'] + 1
# 44 Annualised return: geometric mean of the yearly compounding factors, minus 1
Sportfolio_annualised_return_rut = scs.gmean(df39.loc[:,"sppyret"])-1
# 45 Calculate annualized volatility from the standard deviation
Sportfolio_vola_rut = np.std(df39['spyret'], ddof=1)
# 46 Calculate the Sharpe ratio
Sportfolio_sharpe_rut = ((Sportfolio_annualised_return_rut - rfr)/ Sportfolio_vola_rut)
# 47 Define negative returns and compute standard deviation
Sportfolio_negative_ret_rut = df39.loc[df39['spyret'] < 0]
Sportfolio_expected_ret_rut = np.mean(df39['spyret'])
Sportfolio_downside_std_rut = Sportfolio_negative_ret_rut['spyret'].std()
# 48 Compute Sortino Ratio
Sportfolio_sortino_rut = (Sportfolio_expected_ret_rut - rfr)/Sportfolio_downside_std_rut
# 49 Compute Worst and Best pfy return
Spcolumn = df39["spyret"]
Spmax_value = Spcolumn.max()
Spmin_value = Spcolumn.min()
# 50 Compute % of profitable pfy
Spprofitable_pfy = len(df39[df39['spyret']>0]['spyret'])/len(df39['spyret'])
# 51 Compute short portfolio monthly price
# Prepare the short portfolio df33 to merge with the stock returns data
col = ['pfy','gvkey']
df40 = df33[col]
# Merge short portfolio df33 with stock returns data to get monthly close price
df41 = pd.merge(df1, df40, how='inner', on=['gvkey', 'pfy'])
# Calculate short portfolio monthly price
df42 = df41.groupby(['pfy','month'], as_index=False)['prccm'].mean()
# 52 Compute max drawdown and duration
# Initialize variables: hwm (high watermark), drawdown, duration
sphwm = np.zeros(len(df42))
spdrawdown = np.zeros(len(df42))
spduration = 0
# 53 Determine maximum drawdown (maxDD)
for t in range(len(df42)):
sphwm[t] = max(sphwm[t-1], df42['prccm'][t])
spdrawdown[t] = ((sphwm[t] - df42.prccm[t]) / sphwm[t]) * 100
spmaxDD = spdrawdown.max()
# 54 Determine maximum drawdown duration
# numpy.allclose compares whether two floating values are equal to the absolute
# tolerance (atol) precision (1e-8 is 1x10^-8)
for j in range(len(df42)):
if np.allclose(spdrawdown[j], spmaxDD, atol=1e-8):
for k in range(j):
if np.allclose(df42.prccm[k], sphwm[j], atol=1e-8):
spduration = j - k
else:
continue
else:
continue
# Long & Short Portfolio
# 55 Merge long and short portfolio returns
df43 = df25[['pfy','pyret']]
df44 = df39[['pfy','spyret']]
df45 = pd.merge(df43, df44, how='inner', on=['pfy'])
# 56 Compute the long-short return (equal weight in each leg)
df45['lspyret'] = df45['pyret']/2 + df45['spyret']/2
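# e.g. a pfy where the long leg returns 10% and the short leg returns 4% gives
# lspyret = 0.10/2 + 0.04/2 = 0.07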
# Compute yearly return compounding factor (1+yearly return)
df45['lsppyret'] = df45['lspyret'] + 1
# 57 Annualised return: geometric mean of the yearly compounding factors, minus 1
LSportfolio_annualised_return_rut = scs.gmean(df45.loc[:,"lsppyret"])-1
# 58 Calculate annualized volatility from the standard deviation
LSportfolio_vola_rut = np.std(df45['lspyret'], ddof=1)
# 59 Calculate the Sharpe ratio
LSportfolio_sharpe_rut = ((LSportfolio_annualised_return_rut - rfr)/ LSportfolio_vola_rut)
# 60 Define negative returns and compute standard deviation
LSportfolio_negative_ret_rut = df45.loc[df45['lspyret'] < 0]
LSportfolio_expected_ret_rut = np.mean(df45['lspyret'])
LSportfolio_downside_std_rut = LSportfolio_negative_ret_rut['lspyret'].std()
# 61 Compute Sortino Ratio
LSportfolio_sortino_rut = (LSportfolio_expected_ret_rut - rfr)/LSportfolio_downside_std_rut
# 62 Compute Worst and Best pfy return
LSpcolumn = df45["lspyret"]
LSpmax_value = LSpcolumn.max()
LSpmin_value = LSpcolumn.min()
# 63 Compute % of profitable pfy
LSpprofitable_pfy = len(df45[df45['lspyret']>0]['lspyret'])/len(df45['lspyret'])
# 64 Merge long and short portfolio monthly prices
df46 = pd.merge(df27, df42, how='inner', on=['pfy', 'month'])
df46['lsprccm'] = df46['prccm_x']/2 + df46['prccm_y']/2
# 65 Compute max drawdown and duration
# Initialize variables: hwm (high watermark), drawdown, duration
lsphwm = np.zeros(len(df46))
lspdrawdown = np.zeros(len(df46))
lspduration = 0
# 66 Determine maximum drawdown (maxDD)
for t in range(len(df46)):
lsphwm[t] = max(lsphwm[t-1], df46['lsprccm'][t])
lspdrawdown[t] = ((lsphwm[t] - df46.lsprccm[t]) / lsphwm[t]) * 100
lspmaxDD = lspdrawdown.max()
# 67 Determine maximum drawdown duration
# numpy.allclose compares whether two floating values are equal to the absolute
# tolerance (atol) precision (1e-8 is 1x10^-8)
for j in range(len(df46)):
if np.allclose(lspdrawdown[j], lspmaxDD, atol=1e-8):
for k in range(j):
if np.allclose(df46.lsprccm[k], lsphwm[j], atol=1e-8):
lspduration = j - k
else:
continue
else:
continue
# Market return
# 68 Monthly return of Russell 3000
rut = pd.read_csv(myfolder / '^RUA.csv', parse_dates=['Date'])
rut['rutret'] = rut.sort_values(by='Date')['Adj Close'].pct_change()
# 69 Create portfolio formation year (pfy) variable, where
# pfy = current year for Jul-Dec dates and previous year for Jan-Jun dates.
# This is to facilitate compounding returns over Jul-Jun by pfy later below.
rut['year'], rut['month'] = rut['Date'].dt.year, rut['Date'].dt.month
rut['pfy'] = np.where(rut.month > 6, rut.year, rut.year - 1)
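# e.g. a Date in 2015-08 maps to pfy 2015, and a Date in 2016-03 also maps to pfy 2015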
rut
# 70 Compute monthly return compounding factor (1+monthly return)
rut['mretfactor'] = 1 + rut.rutret
rut2 = rut[['Date','Adj Close','rutret', 'pfy', 'mretfactor']].copy()
# 71 Compound monthly returns to get annual returns at end-June of each pfy,
# ensuring only pfys with 12 months of return data from Jul-Jun are used.
rut2['rutyret'] = rut2.groupby(['pfy'])['mretfactor'].cumprod() - 1
rut3 = rut2.groupby(['pfy']).nth(11)
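# nth(11) keeps the 12th monthly observation of each pfy (June), for which the
# cumulative product above equals the full Jul-Jun compounded return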
# 72 Compute yearly return compounding factor (1+yearly return)
rut3['rrutyret'] = rut3['rutyret'] + 1
# 73 Compute Returns, Sharpe and Sortino ratio
# 74 Keep relevant market index columns and rename 'Adj Close' to 'price'
rut4 = rut3[['Date', 'Adj Close','rutyret']]
rut4 = rut3.rename(columns = {'Adj Close': 'price'})
# 75 Annualised return: geometric mean of the yearly compounding factors, minus 1
annualised_return_rut = scs.gmean(rut3.loc[:,"rrutyret"])-1
# 76 Calculate annualized volatility from the standard deviation
vola_rut = np.std(rut4['rutyret'], ddof=1)
# 77 Calculate the Sharpe ratio
sharpe_rut = ((annualised_return_rut - rfr)/ vola_rut)
# 78 Define negative returns and compute standard deviation
negative_ret_rut = rut4.loc[rut4['rutyret'] < 0]
expected_ret_rut = np.mean(rut4['rutyret'])
downside_std_rut = negative_ret_rut['rutyret'].std()
# 79 Compute Sortino Ratio
sortino_rut = (expected_ret_rut - rfr)/downside_std_rut
# 80 Compute Worst and Best pfy return
rcolumn = rut4["rutyret"]
rmax_value = rcolumn.max()
rmin_value = rcolumn.min()
# 81 Compute % of profitable pfy
rprofitable_pfy = len(rut4[rut4['rutyret']>0]['rutyret'])/len(rut4['rutyret'])
# Compute Max drawdown and duration
# 82 Rename to price
rut5 = rut2.rename(columns = {'Adj Close': 'price'})
# 83 Initialize variables: hwm (high watermark), drawdown, duration
rhwm = np.zeros(len(rut5))
rdrawdown = np.zeros(len(rut5))
rduration = 0
# 84 Determine maximum drawdown (maxDD)
for t in range(len(rut5)):
rhwm[t] = max(rhwm[t-1], rut5['price'][t])
rdrawdown[t] = ((rhwm[t] - rut5.price[t]) / rhwm[t]) * 100
rmaxDD = rdrawdown.max()
# 85 Determine maximum drawdown duration
# numpy.allclose compares whether two floating values are equal to the absolute
# tolerance (atol) precision (1e-8 is 1x10^-8)
for j in range(len(rut5)):
if np.allclose(rdrawdown[j], rmaxDD, atol=1e-8):
for k in range(j):
if np.allclose(rut5.price[k], rhwm[j], atol=1e-8):
rduration = j - k
else:
continue
else:
continue
# Investment performance
# 86 Plot Portfolio and Russell 3000 Returns
rut6 = rut4.drop(['Date', 'price', 'rutret', 'mretfactor', 'rrutyret'], axis=1) # "axis=1" means to drop column
df47 = df45.iloc[: , :-1]
df48 = pd.merge(df47, rut6, how='inner', on=['pfy'])
df48.rename(columns={'pyret':'Long Portfolio', 'spyret':'Short Portfolio', 'lspyret':'Long Short Portfolio','rutyret':'Market Index'}, inplace = True)
df48_plot = pd.melt(df48,id_vars='pfy', var_name='Returns',value_name='returns')
fig, ax = plt.subplots(figsize=(8,6))
ax = sns.lineplot(data=df48_plot, x='pfy', y='returns', hue='Returns')
ax.set(xlabel = 'pfy', ylabel = 'Returns')
ax.set_title('Plot of Portfolio and Russell 3000 Returns')
plt.show()
chartfile.savefig(fig)
# 87 Calculate market Wealth Index
#rut7 = rut.drop(['Date', 'Open', 'High', 'Low', 'Close', 'Volume', 'rutret', 'year', 'Adj Close'], axis=1)
rut3['RUT_WI'] = (rut3['rrutyret']).cumprod()
rut3 = rut3.reset_index()
rut8 = rut3.drop(['Date', 'Adj Close', 'rutret', 'mretfactor', 'rutyret', 'rrutyret'], axis=1)
# 88 Calculate long portfolio Wealth Index
df25['P_WI'] = (df25['ppyret']).cumprod()
df49 = df25.drop(['pyret', 'count', 'ppyret'], axis=1)
# 89 Calculate short portfolio Wealth Index
df39['S_WI'] = (df39['sppyret']).cumprod()
df50 = df39.drop(['pyret', 'count', 'spyret', 'sppyret'], axis=1)
# 90 Calculate long short portfolio Wealth Index
df45['LS_WI'] = (df45['lsppyret']).cumprod()
df52 = df45.drop(['pyret', 'spyret', 'lspyret', 'lsppyret'], axis=1)
# 91 Plot Portfolio and Russell 3000 Wealth Index Line plot
df53 = pd.merge(df49, df50, how='right', on=['pfy'])
df54 = pd.merge(df53, df52, how='left', on=['pfy'])
df55 = pd.merge(df54, rut8, how='left', on=['pfy'])
df55.rename(columns={'P_WI':'Long Portfolio WI', 'S_WI':'Short Portfolio WI', 'LS_WI':'Long Short Portfolio WI','RUT_WI':'Market Index WI'}, inplace = True)
df55_plot =
|
pd.melt(df55,id_vars='pfy', var_name='Wealth Index',value_name='wealth index')
|
pandas.melt
|
import re
import pandas as pd
import numpy as np
from ._core import CoreReader
from typing import Union
from softnanotools.logger import Logger
logger = Logger(__name__)
class LAMMPSDataReader(CoreReader):
def __init__(
self,
fname: str,
names: Union[dict, list, tuple] = None,
species: dict = None,
classes: Union[dict, list, tuple] = None,
style: str ='bond',
configure: bool = True,
**kwargs
):
super().__init__()
self.style = style
self.fname = fname
        if names is None:
            self.names = None
        else:
            self.names = names
        self.species = species
        if classes is None:
            self.classes = [None]
        else:
            self.classes = classes
self._read()
if configure:
self.configure()
def _read(self):
"""Reads a LAMMPS Data File containing configuration and
topology information"""
box = {}
with open(self.fname, 'r') as f:
for i, line in enumerate(f.readlines()):
if re.findall("atoms", line):
n_atoms = int(line.split()[0])
elif re.findall("bonds", line):
n_bonds = int(line.split()[0])
elif re.findall("xlo xhi", line):
box['x'] = [float(j) for j in line.split()[:2]]
elif re.findall("ylo yhi", line):
box['y'] = [float(j) for j in line.split()[:2]]
elif re.findall("zlo zhi", line):
box['z'] = [float(j) for j in line.split()[:2]]
elif re.findall("Atoms", line):
skip_atoms = i + 1
elif re.findall("Bonds", line):
skip_bonds = i + 1
break
self.metadata['box'] = np.array([
([float(i) for i in box['x']][1] - [float(i) for i in box['x']][0]),
([float(i) for i in box['y']][1] - [float(i) for i in box['y']][0]),
([float(i) for i in box['z']][1] - [float(i) for i in box['z']][0]),
])
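        # e.g. xlo = -10.0 and xhi = 10.0 give a box length of 10.0 - (-10.0) = 20.0 along x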
logger.debug(f'Box: {self.metadata["box"]}')
# manage styles for columns
columns = {
0: 'id',
1: 'mol',
2: 'type',
3: 'x',
4: 'y',
5: 'z',
}
if self.style == 'full':
columns = {
0: 'id',
1: 'mol',
2: 'type',
3: 'q',
4: 'x',
5: 'y',
6: 'z',
}
self.atoms = pd.read_csv(
self.fname,
delim_whitespace=True,
header=None,
nrows=n_atoms,
skiprows=skip_atoms,
).rename(columns=columns).sort_values('id').reset_index(drop=True)
logger.debug(f'ATOMS:\n{self.atoms}')
try:
assert len(self.atoms) == n_atoms
assert self.atoms['id'].iloc[0] == 1
assert self.atoms['id'].iloc[-1] == n_atoms
        except AssertionError:
logger.error('Assertion Error when importing Atoms')
self.bonds = pd.read_csv(
self.fname,
delim_whitespace=True,
header=None,
nrows=n_bonds,
skiprows=skip_bonds,
).rename(columns={
0: 'id',
1: 'type',
2: 'atom_1',
3: 'atom_2',
}).sort_values('id').reset_index(drop=True)
logger.debug(f'BONDS:\n{self.bonds}')
try:
assert len(self.bonds) == n_bonds
assert self.bonds['id'].iloc[0] == 1
assert self.bonds['id'].iloc[-1] == n_bonds
        except AssertionError:
logger.error('Assertion Error when importing Bonds')
def configure(self, simulation):
"""Adds positions and topologies to a ReaDDy simulation instance"""
mols = set(list(self.atoms['mol']))
for idx, i in enumerate(mols):
if isinstance(self.names, dict):
name = self.names[i]
cls = self.classes[i]
elif isinstance(self.names, (list, tuple)):
name = self.names[idx]
cls = self.classes[idx]
else:
name = i
cls = None
mol = self.atoms[self.atoms['mol']==i]
logger.debug(f"For molecule[{idx+1}] {name}:\n\nAtoms:\n{mol}")
sequence = mol['type'].apply(
lambda x: self.species[x] if self.species != None else x
)
positions = mol[['x', 'y', 'z']]
edges = []
if cls != None:
for j, row in self.bonds.iterrows():
if row['atom_1'] in mol['id']:
edges.append((row['atom_1']-1, row['atom_2']-1))
elif row['atom_2'] in mol['id']:
edges.append((row['atom_1']-1, row['atom_2']-1))
logger.debug(f"Edges:\n{
|
pd.DataFrame(edges)
|
pandas.DataFrame
|
import pytest
import pandas as pd
import numpy as np
from pandas.util.testing import assert_frame_equal, assert_index_equal
from pdblp import pdblp
import blpapi
import os
@pytest.fixture(scope="module")
def port(request):
return request.config.getoption("--port")
@pytest.fixture(scope="module")
def host(request):
return request.config.getoption("--host")
@pytest.fixture(scope="module")
def timeout(request):
return request.config.getoption("--timeout")
@pytest.fixture(scope="module")
def con(host, port, timeout):
return pdblp.BCon(host=host, port=port, timeout=timeout).start()
@pytest.fixture(scope="module")
def data_path():
return os.path.join(os.path.dirname(__file__), "data/")
def pivot_and_assert(df, df_exp, with_date=False):
# as shown below, since the raw data returned from bbg is an array
    # with unknown ordering, there is no guarantee that the `position` will
# always be the same so pivoting prior to comparison is necessary
#
# fieldData = {
# INDX_MWEIGHT[] = {
# INDX_MWEIGHT = {
# Member Ticker and Exchange Code = "BON8"
# Percentage Weight = 2.410000
# }
# INDX_MWEIGHT = {
# Member Ticker and Exchange Code = "C N8"
# Percentage Weight = 6.560000
# }
# INDX_MWEIGHT = {
# Member Ticker and Exchange Code = "CLN8"
# Percentage Weight = 7.620000
# }
# }
# }
name_cols = list(df_exp.name.unique())
sort_cols = list(df_exp.name.unique())
index_cols = ["name", "position", "field", "ticker"]
if with_date:
sort_cols.append("date")
index_cols.append("date")
df = (df.set_index(index_cols).loc[:, "value"]
.unstack(level=0).reset_index().drop(columns="position")
.sort_values(by=sort_cols, axis=0))
df_exp = (df_exp.set_index(index_cols).loc[:, "value"]
.unstack(level=0).reset_index().drop(columns="position")
.sort_values(by=sort_cols, axis=0))
# deal with mixed types resulting in str from csv read
for name in name_cols:
try:
df_exp.loc[:, name] = df_exp.loc[:, name].astype(float)
except ValueError:
pass
for name in name_cols:
try:
df.loc[:, name] = df.loc[:, name].astype(float)
except ValueError:
pass
if with_date:
df.loc[:, "date"] = pd.to_datetime(df.loc[:, "date"],
format="%Y%m%d")
df_exp.loc[:, "date"] = pd.to_datetime(df_exp.loc[:, "date"],
format="%Y%m%d")
assert_frame_equal(df, df_exp)
ifbbg = pytest.mark.skipif(pytest.config.cache.get('offline', False),
reason="No BBG connection, skipping tests")
@ifbbg
def test_bdh_empty_data_only(con):
df = con.bdh(
tickers=['1437355D US Equity'],
flds=['PX_LAST', 'VOLUME'],
start_date='20180510',
end_date='20180511',
longdata=False
)
df_exp = pd.DataFrame(
[], index=pd.DatetimeIndex([], name='date'),
columns=pd.MultiIndex.from_product([[], []],
names=('ticker', 'field'))
)
assert_frame_equal(df, df_exp)
@ifbbg
def test_bdh_empty_data_with_non_empty_data(con):
df = con.bdh(
tickers=['AAPL US Equity', '1437355D US Equity'],
flds=['PX_LAST', 'VOLUME'],
start_date='20180510',
end_date='20180511',
longdata=False
)
df_exp = pd.DataFrame(
[[190.04, 27989289.0], [188.59, 26212221.0]],
index=pd.DatetimeIndex(["20180510", "20180511"], name="date"),
columns=pd.MultiIndex.from_product([["AAPL US Equity"],
["PX_LAST", "VOLUME"]],
names=["ticker", "field"])
)
assert_frame_equal(df, df_exp)
@ifbbg
def test_bdh_partially_empty_data(con):
df = con.bdh(
tickers=['XIV US Equity', 'AAPL US Equity'],
flds=['PX_LAST'],
start_date='20180215',
end_date='20180216',
longdata=False
)
df_exp = pd.DataFrame(
[[6.04, 172.99], [np.NaN, 172.43]],
index=pd.DatetimeIndex(["20180215", "20180216"], name="date"),
columns=pd.MultiIndex.from_product(
[["XIV US Equity", "AAPL US Equity"], ["PX_LAST"]],
names=["ticker", "field"]
)
)
assert_frame_equal(df, df_exp)
@ifbbg
def test_bdh_one_ticker_one_field_pivoted(con):
df = con.bdh('SPY US Equity', 'PX_LAST', '20150629', '20150630')
midx = pd.MultiIndex(levels=[["SPY US Equity"], ["PX_LAST"]],
labels=[[0], [0]], names=["ticker", "field"])
df_expect = pd.DataFrame(
index=pd.date_range("2015-06-29", "2015-06-30"),
columns=midx,
data=[205.42, 205.85]
)
df_expect.index.names = ["date"]
assert_frame_equal(df, df_expect)
@ifbbg
def test_bdh_one_ticker_one_field_longdata(con):
df = con.bdh('SPY US Equity', 'PX_LAST', '20150629', '20150630',
longdata=True)
idx = pd.Index(["date", "ticker", "field", "value"])
data = [["2015-06-29", "2015-06-30"],
["SPY US Equity", "SPY US Equity"], ["PX_LAST", "PX_LAST"],
[205.42, 205.85]]
df_expect = pd.DataFrame(data=data, index=idx).transpose()
df_expect.loc[:, "date"] = pd.to_datetime(df_expect.loc[:, "date"])
df_expect.loc[:, "value"] = np.float64(df_expect.loc[:, "value"])
assert_frame_equal(df, df_expect)
@ifbbg
def test_bdh_one_ticker_two_field_pivoted(con):
cols = ['PX_LAST', 'VOLUME']
df = con.bdh('SPY US Equity', cols, '20150629', '20150630')
midx = pd.MultiIndex(
levels=[["SPY US Equity"], cols],
labels=[[0, 0], [0, 1]], names=["ticker", "field"]
)
df_expect = pd.DataFrame(
index=pd.date_range("2015-06-29", "2015-06-30"),
columns=midx,
data=[[205.42, 202621332], [205.85, 182925106]]
)
df_expect = df_expect.astype(np.float64)
df_expect.index.names = ["date"]
assert_frame_equal(df, df_expect)
@ifbbg
def test_bdh_one_ticker_two_field_longdata(con):
cols = ['PX_LAST', 'VOLUME']
df = con.bdh('SPY US Equity', cols, '20150629', '20150630',
longdata=True)
idx = pd.Index(["date", "ticker", "field", "value"])
data = [["2015-06-29", "2015-06-29", "2015-06-30", "2015-06-30"],
["SPY US Equity", "SPY US Equity", "SPY US Equity", "SPY US Equity"], # NOQA
["PX_LAST", "VOLUME", "PX_LAST", "VOLUME"],
[205.42, 202621332, 205.85, 182925106]]
df_expect = pd.DataFrame(data=data, index=idx).transpose()
df_expect.loc[:, "date"] = pd.to_datetime(df_expect.loc[:, "date"])
df_expect.loc[:, "value"] = np.float64(df_expect.loc[:, "value"])
assert_frame_equal(df, df_expect)
@ifbbg
def test_bdh_value_errors(con):
bad_col = "not_a_fld"
with pytest.raises(ValueError):
con.bdh("SPY US Equity", bad_col, "20150630", "20150630")
bad_ticker = "not_a_ticker"
with pytest.raises(ValueError):
con.bdh(bad_ticker, "PX_LAST", "20150630", "20150630")
@ifbbg
def test_bdib(con):
# BBG has limited history for the IntradayBarRequest service so request
# recent data
prev_busday = pd.Timestamp(
        np.busday_offset(pd.Timestamp.today().date(), -1)
)
ts1 = prev_busday.strftime("%Y-%m-%d") + "T10:00:00"
ts2 = prev_busday.strftime("%Y-%m-%d") + "T10:20:01"
df = con.bdib('SPY US Equity', ts1, ts2, event_type="BID", interval=10)
ts2e = prev_busday.strftime("%Y-%m-%d") + "T10:20:00"
idx_exp = pd.date_range(ts1, ts2e, periods=3, name="time")
col_exp = pd.Index(["open", "high", "low", "close", "volume", "numEvents"])
assert_index_equal(df.index, idx_exp)
assert_index_equal(df.columns, col_exp)
# REF TESTS
@ifbbg
def test_ref_one_ticker_one_field(con):
df = con.ref('AUD Curncy', 'NAME')
df_expect = pd.DataFrame(
columns=["ticker", "field", "value"],
data=[["AUD Curncy", "NAME", "Australian Dollar Spot"]]
)
assert_frame_equal(df, df_expect)
@ifbbg
def test_ref_one_ticker_one_field_override(con):
df = con.ref('AUD Curncy', 'SETTLE_DT',
[("REFERENCE_DATE", "20161010")])
df_expect = pd.DataFrame(
columns=["ticker", "field", "value"],
data=[["AUD Curncy", "SETTLE_DT",
pd.datetime(2016, 10, 12).date()]]
)
assert_frame_equal(df, df_expect)
@ifbbg
def test_ref_invalid_field(con):
with pytest.raises(ValueError):
con.ref("EI862261 Corp", "not_a_field")
@ifbbg
def test_ref_not_applicable_field(con):
# test both cases described in
# https://github.com/matthewgilbert/pdblp/issues/6
df = con.ref("BCOM Index", ["INDX_GWEIGHT"])
df_expect = pd.DataFrame(
[["BCOM Index", "INDX_GWEIGHT", np.NaN]],
columns=['ticker', 'field', 'value']
)
assert_frame_equal(df, df_expect)
df = con.ref("BCOM Index", ["INDX_MWEIGHT_PX2"])
df_expect = pd.DataFrame(
[["BCOM Index", "INDX_MWEIGHT_PX2", np.NaN]],
columns=['ticker', 'field', 'value']
)
assert_frame_equal(df, df_expect)
@ifbbg
def test_ref_invalid_security(con):
with pytest.raises(ValueError):
con.ref("NOT_A_TICKER", "MATURITY")
@ifbbg
def test_ref_applicable_with_not_applicable_field(con):
df = con.ref("BVIS0587 Index", ["MATURITY", "NAME"])
df_exp = pd.DataFrame(
[["BVIS0587 Index", "MATURITY", np.NaN],
["BVIS0587 Index", "NAME", "CAD Canada Govt BVAL Curve"]],
columns=["ticker", "field", "value"])
assert_frame_equal(df, df_exp)
@ifbbg
def test_ref_mixed_data_error(con):
# calling ref which returns singleton and array data throws error
with pytest.raises(ValueError):
con.ref('CL1 Comdty', 'FUT_CHAIN')
# BULKREF TESTS
@ifbbg
def test_bulkref_one_ticker_one_field(con, data_path):
df = con.bulkref('BCOM Index', 'INDX_MWEIGHT',
ovrds=[("END_DATE_OVERRIDE", "20150530")])
df_expected = pd.read_csv(
os.path.join(data_path, "bulkref_20150530.csv")
)
pivot_and_assert(df, df_expected)
@ifbbg
def test_bulkref_two_ticker_one_field(con, data_path):
df = con.bulkref(['BCOM Index', 'OEX Index'], 'INDX_MWEIGHT',
ovrds=[("END_DATE_OVERRIDE", "20150530")])
df_expected = pd.read_csv(
os.path.join(data_path, "bulkref_two_fields_20150530.csv")
)
pivot_and_assert(df, df_expected)
@ifbbg
def test_bulkref_singleton_error(con):
# calling bulkref which returns singleton throws error
with pytest.raises(ValueError):
con.bulkref('CL1 Comdty', 'FUT_CUR_GEN_TICKER')
@ifbbg
def test_bulkref_null_scalar_sub_element(con):
# related to https://github.com/matthewgilbert/pdblp/issues/32#issuecomment-385555289 # NOQA
# smoke test to check parse correctly
ovrds = [("DVD_START_DT", "19860101"), ("DVD_END_DT", "19870101")]
con.bulkref("101 HK EQUITY", "DVD_HIST", ovrds=ovrds)
@ifbbg
def test_bulkref_empty_field(con):
df = con.bulkref(["88428LAA0 Corp"], ["INDEX_LIST"])
df_exp = pd.DataFrame(
[["88428LAA0 Corp", "INDEX_LIST", np.NaN, np.NaN, np.NaN]],
columns=["ticker", "field", "name", "value", "position"]
)
assert_frame_equal(df, df_exp)
@ifbbg
def test_bulkref_empty_with_nonempty_field_smoketest(con):
con.bulkref(['88428LAA0 Corp'], ['INDEX_LIST', 'USE_OF_PROCEEDS'])
@ifbbg
def test_bulkref_not_applicable_field(con):
df = con.bulkref("CL1 Comdty", ["FUT_DLVRBLE_BNDS_ISINS"])
df_exp = pd.DataFrame(
[["CL1 Comdty", "FUT_DLVRBLE_BNDS_ISINS", np.NaN, np.NaN, np.NaN]],
columns=["ticker", "field", "name", "value", "position"]
)
assert_frame_equal(df, df_exp)
@ifbbg
def test_bulkref_not_applicable_with_applicable_field_smoketest(con):
con.bulkref('CL1 Comdty', ['OPT_CHAIN', 'FUT_DLVRBLE_BNDS_ISINS'])
# REF_HIST TESTS
@ifbbg
def test_hist_ref_one_ticker_one_field_numeric(con):
dates = ["20160104", "20160105"]
df = con.ref_hist("AUD1M CMPN Curncy", "DAYS_TO_MTY", dates)
df_expect = pd.DataFrame(
{"date": dates,
"ticker": ["AUD1M CMPN Curncy", "AUD1M CMPN Curncy"],
"field": ["DAYS_TO_MTY", "DAYS_TO_MTY"],
"value": [33, 32]}
)
assert_frame_equal(df, df_expect)
@ifbbg
def test_hist_ref_one_ticker_one_field_non_numeric(con):
dates = ["20160104", "20160105"]
df = con.ref_hist("AUD1M CMPN Curncy", "SETTLE_DT", dates)
df_expect = pd.DataFrame(
{"date": dates,
"ticker": ["AUD1M CMPN Curncy", "AUD1M CMPN Curncy"],
"field": ["SETTLE_DT", "SETTLE_DT"],
"value": 2 * [pd.datetime(2016, 2, 8).date()]}
)
assert_frame_equal(df, df_expect)
# BULKREF_HIST TESTS
@ifbbg
def test_bulkref_hist_one_field(con, data_path):
dates = ["20150530", "20160530"]
df = con.bulkref_hist('BCOM Index', 'INDX_MWEIGHT', dates=dates,
date_field='END_DATE_OVERRIDE')
df_expected = pd.read_csv(
os.path.join(data_path, "bulkref_20150530_20160530.csv")
)
pivot_and_assert(df, df_expected, with_date=True)
@ifbbg
def test_bulkhist_ref_with_alternative_reference_field(con):
# smoke test to check that the response was sent off and correctly
# received
dates = ["20160625"]
con.bulkref_hist("BVIS0587 Index", "CURVE_TENOR_RATES", dates,
date_field="CURVE_DATE")
@ifbbg
def test_context_manager(port, host):
with pdblp.bopen(host=host, port=port) as bb:
df = bb.bdh('SPY US Equity', 'PX_LAST', '20150629', '20150630')
midx = pd.MultiIndex(levels=[["SPY US Equity"], ["PX_LAST"]],
labels=[[0], [0]], names=["ticker", "field"])
df_expect = pd.DataFrame(
index=
|
pd.date_range("2015-06-29", "2015-06-30")
|
pandas.date_range
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
import os
import pandas as pd
import optuna
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import SGDClassifier
from xgboost import XGBClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
import sklearn.metrics as metrics
from sklearn.model_selection import train_test_split, StratifiedShuffleSplit
import plot
import preprocess
import utils
def tune_logreg(trial, X_train, X_test, y_train, y_test):
alpha = trial.suggest_loguniform('alpha', 1e-4, 1.)
classifier = SGDClassifier(loss='log', penalty='l2', max_iter=300, alpha=alpha, random_state=42)
classifier.fit(X_train, y_train)
y_pred = classifier.predict_proba(X_test)[:, 1]
score = metrics.average_precision_score(y_test, y_pred)
return score
def tune_rf(trial, X_train, X_test, y_train, y_test):
estimator = trial.suggest_int('n_estimators', 10, 50)
classifier = RandomForestClassifier(n_estimators=estimator)
classifier.fit(X_train, y_train)
y_pred = classifier.predict_proba(X_test)[:, 1]
score = metrics.average_precision_score(y_test, y_pred)
return score
def tune_xgboost(trial, X_train, X_test, y_train, y_test):
n_estimators = trial.suggest_int('n_estimators', 10, 150)
max_depth = trial.suggest_int('max_depth', 1, 5)
learning_rate = trial.suggest_loguniform('learning_rate', 1e-4, 1.)
classifier = XGBClassifier(n_estimators=n_estimators, max_depth=max_depth, learning_rate=learning_rate)
classifier.fit(X_train, y_train)
y_pred = classifier.predict_proba(X_test)[:, 1]
score = metrics.average_precision_score(y_test, y_pred)
return score
def tune_GP(trial, X_train, X_test, y_train, y_test):
length_scale = trial.suggest_loguniform('length_scale', 1e-4, 1.)
classifier = GaussianProcessClassifier(1.0 * RBF(length_scale))
classifier.fit(X_train, y_train)
y_pred = classifier.predict_proba(X_test)[:, 1]
score = metrics.average_precision_score(y_test, y_pred)
return score
def select_impute(method, X_split, y_split, X_eval, y_eval):
if IMPUTE == 'mean':
X_split, X_eval = preprocess.impute(X_split, X_eval, 'mean')
X_split, X_eval = preprocess.scale(X_split, X_eval, 'z-score')
elif IMPUTE == 'median':
X_split, X_eval = preprocess.impute(X_split, X_eval, 'median')
X_split, X_eval = preprocess.scale(X_split, X_eval, 'z-score')
elif IMPUTE == 'mice':
X_split, X_eval = preprocess.impute(X_split, X_eval, 'mice')
X_split, X_eval = preprocess.scale(X_split, X_eval, 'z-score')
elif IMPUTE == 'mice+smote':
X_split, X_eval = preprocess.impute(X_split, X_eval, 'mice+smote')
X_split, X_eval = preprocess.scale(X_split, X_eval, 'z-score')
X_split, y_split = preprocess.oversample(X_split, y_split, strategy='ADASYN', param=0.5)
else:
raise NameError('Imputation method not found.')
return X_split, y_split, X_eval, y_eval
def select_classifier(choice, X_split, y_split, X_eval, y_eval):
study = optuna.create_study(direction='maximize')
if MODEL == 'logreg':
study.optimize(lambda trial: tune_logreg(trial, X_split, X_eval, y_split, y_eval),
n_trials=N_TRIALS)
classifier = SGDClassifier(loss='log',
penalty='l2',
max_iter=300,
random_state=RANDOM_STATE,
alpha=study.best_params['alpha'])
elif MODEL == 'rf':
study.optimize(lambda trial: tune_rf(trial, X_split, X_eval, y_split, y_eval),
n_trials=N_TRIALS)
classifier = RandomForestClassifier(**study.best_params)
elif MODEL == 'xgboost':
study.optimize(lambda trial: tune_xgboost(trial, X_split, X_eval, y_split, y_eval),
n_trials=N_TRIALS)
classifier = XGBClassifier(**study.best_params)
elif MODEL == 'gp':
study.optimize(lambda trial: tune_GP(trial, X_split, X_eval, y_split, y_eval), n_trials=50)
classifier = GaussianProcessClassifier(1.0 * RBF(**study.best_params))
return classifier
if __name__ == '__main__':
SAVE_DIR = './saved_models/'
# DATA_VER = 4.1
DATA_VER = 4.0
# OUTCOME = 'death'
OUTCOME = 'vent'
N_SPLITS = 3
RANDOM_STATE = 10
TEST_SIZE = 0.3
N_TRIALS = 200
IMPUTE = 'mean'
# IMPUTE = 'median'
# IMPUTE = 'mice'
# MODEL = 'logreg'
# MODEL = 'rf'
MODEL = 'xgboost'
    # MODEL = 'gp'
# setup
model_dir = os.path.join(SAVE_DIR,
'{}-{}'.format(DATA_VER, OUTCOME),
'{}_impute{}_nsplit{}_testsize{}_rs{}'.format(MODEL,
IMPUTE,
N_SPLITS,
TEST_SIZE,
RANDOM_STATE))
os.makedirs(model_dir, exist_ok=True)
# data loading
col2remove = ["WEIGHT/SCALE", "HEIGHT", '% O2 Sat', 'Insp. O2 Conc.', 'PCO2', 'SPO2', 'PO2', 'R FIO2',
'diag_A', 'diag_B', 'diag_C', 'diag_D', 'diag_E', 'diag_F', 'diag_G', 'diag_H',
'diag_I', 'diag_J', 'diag_K', 'diag_L', 'diag_M', 'diag_N', 'diag_O', 'diag_P',
'diag_Q', 'diag_R', 'diag_S', 'diag_T', 'diag_U', 'diag_V', 'diag_W', 'diag_X',
'diag_Y', 'diag_Z', 'URINE OUTPUT',
# 'gender', 'race', 'Procalcitonin', 'D-dimer',
]
X, y, df = utils.load_data(DATA_VER, OUTCOME, col2remove)
# train test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=TEST_SIZE, random_state=RANDOM_STATE)
sss = StratifiedShuffleSplit(n_splits=N_SPLITS, random_state=RANDOM_STATE, test_size=TEST_SIZE)
# cross validation
classifiers, scores = [], []
data_split, data_eval = [], []
labels_split, labels_eval = [], []
for idx_split, idx_eval in sss.split(X_train, y_train):
# data
X_split, y_split = X_train[idx_split], y_train[idx_split]
X_eval, y_eval = X_train[idx_eval], y_train[idx_eval]
X_split, y_split, X_eval, y_eval = select_impute(IMPUTE, X_split, y_split, X_eval, y_eval)
# train
classifier = select_classifier(MODEL, X_split, y_split, X_eval, y_eval)
classifier.fit(X_split, y_split)
pred, score = utils.get_results(classifier, X_eval, y_eval)
# save for post-process
data_split.append(X_split)
data_eval.append(X_eval)
labels_split.append(y_split)
labels_eval.append(y_eval)
classifiers.append(classifier)
scores.append(score)
# evaluation
scores = pd.concat(pd.DataFrame(score, index=[i]) for i, score in enumerate(scores))
scores.to_csv(os.path.join(model_dir, 'scores_cv.csv'))
scores_mean, scores_std = scores.mean(0), scores.std(0)
|
pd.DataFrame(scores_mean)
|
pandas.DataFrame
|
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import os
import operator
import unittest
import cStringIO as StringIO
import nose
from numpy import nan
import numpy as np
import numpy.ma as ma
from pandas import Index, Series, TimeSeries, DataFrame, isnull, notnull
from pandas.core.index import MultiIndex
import pandas.core.datetools as datetools
from pandas.util import py3compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
#-------------------------------------------------------------------------------
# Series test cases
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class CheckNameIntegration(object):
def test_scalarop_preserve_name(self):
result = self.ts * 2
self.assertEquals(result.name, self.ts.name)
def test_copy_name(self):
result = self.ts.copy()
self.assertEquals(result.name, self.ts.name)
# def test_copy_index_name_checking(self):
# # don't want to be able to modify the index stored elsewhere after
# # making a copy
# self.ts.index.name = None
# cp = self.ts.copy()
# cp.index.name = 'foo'
# self.assert_(self.ts.index.name is None)
def test_append_preserve_name(self):
result = self.ts[:5].append(self.ts[5:])
self.assertEquals(result.name, self.ts.name)
def test_binop_maybe_preserve_name(self):
# names match, preserve
result = self.ts * self.ts
self.assertEquals(result.name, self.ts.name)
result = self.ts * self.ts[:-2]
self.assertEquals(result.name, self.ts.name)
# names don't match, don't preserve
cp = self.ts.copy()
cp.name = 'something else'
result = self.ts + cp
self.assert_(result.name is None)
def test_combine_first_name(self):
result = self.ts.combine_first(self.ts[:5])
self.assertEquals(result.name, self.ts.name)
def test_getitem_preserve_name(self):
result = self.ts[self.ts > 0]
self.assertEquals(result.name, self.ts.name)
result = self.ts[[0, 2, 4]]
self.assertEquals(result.name, self.ts.name)
result = self.ts[5:10]
self.assertEquals(result.name, self.ts.name)
def test_multilevel_name_print(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(range(0,len(index)), index=index, name='sth')
expected = ["first second",
"foo one 0",
" two 1",
" three 2",
"bar one 3",
" two 4",
"baz two 5",
" three 6",
"qux one 7",
" two 8",
" three 9",
"Name: sth"]
expected = "\n".join(expected)
self.assertEquals(repr(s), expected)
def test_multilevel_preserve_name(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(np.random.randn(len(index)), index=index, name='sth')
result = s['foo']
result2 = s.ix['foo']
self.assertEquals(result.name, s.name)
self.assertEquals(result2.name, s.name)
def test_name_printing(self):
# test small series
s = Series([0, 1, 2])
s.name = "test"
self.assert_("Name: test" in repr(s))
s.name = None
self.assert_(not "Name:" in repr(s))
# test big series (diff code path)
s = Series(range(0,1000))
s.name = "test"
self.assert_("Name: test" in repr(s))
s.name = None
self.assert_(not "Name:" in repr(s))
def test_pickle_preserve_name(self):
unpickled = self._pickle_roundtrip(self.ts)
self.assertEquals(unpickled.name, self.ts.name)
def _pickle_roundtrip(self, obj):
obj.save('__tmp__')
unpickled = Series.load('__tmp__')
os.remove('__tmp__')
return unpickled
def test_argsort_preserve_name(self):
result = self.ts.argsort()
self.assertEquals(result.name, self.ts.name)
def test_sort_index_name(self):
result = self.ts.sort_index(ascending=False)
self.assertEquals(result.name, self.ts.name)
def test_to_sparse_pass_name(self):
result = self.ts.to_sparse()
self.assertEquals(result.name, self.ts.name)
class SafeForSparse(object):
pass
class TestSeries(unittest.TestCase, CheckNameIntegration):
def setUp(self):
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.objSeries = tm.makeObjectSeries()
self.objSeries.name = 'objects'
self.empty = Series([], index=[])
def test_constructor(self):
# Recognize TimeSeries
self.assert_(isinstance(self.ts, TimeSeries))
# Pass in Series
derived = Series(self.ts)
self.assert_(isinstance(derived, TimeSeries))
self.assert_(tm.equalContents(derived.index, self.ts.index))
# Ensure new index is not created
self.assertEquals(id(self.ts.index), id(derived.index))
# Pass in scalar
scalar = Series(0.5)
self.assert_(isinstance(scalar, float))
# Mixed type Series
mixed = Series(['hello', np.NaN], index=[0, 1])
self.assert_(mixed.dtype == np.object_)
self.assert_(mixed[1] is np.NaN)
self.assert_(not isinstance(self.empty, TimeSeries))
self.assert_(not isinstance(Series({}), TimeSeries))
self.assertRaises(Exception, Series, np.random.randn(3, 3),
index=np.arange(3))
def test_constructor_empty(self):
empty = Series()
empty2 = Series([])
assert_series_equal(empty, empty2)
empty = Series(index=range(10))
empty2 = Series(np.nan, index=range(10))
assert_series_equal(empty, empty2)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([nan, nan, nan])
assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([0.0, nan, 2.0], index=index)
assert_series_equal(result, expected)
def test_constructor_default_index(self):
s = Series([0, 1, 2])
assert_almost_equal(s.index, np.arange(3))
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
self.assert_(isinstance(s, Series))
def test_constructor_cast(self):
self.assertRaises(ValueError, Series, ['a', 'b', 'c'], dtype=float)
def test_constructor_dict(self):
d = {'a' : 0., 'b' : 1., 'c' : 2.}
result = Series(d, index=['b', 'c', 'd', 'a'])
expected = Series([1, 2, nan, 0], index=['b', 'c', 'd', 'a'])
assert_series_equal(result, expected)
def test_constructor_list_of_tuples(self):
data = [(1, 1), (2, 2), (2, 3)]
s = Series(data)
self.assertEqual(list(s), data)
def test_constructor_tuple_of_tuples(self):
data = ((1, 1), (2, 2), (2, 3))
s = Series(data)
self.assertEqual(tuple(s), data)
def test_fromDict(self):
data = {'a' : 0, 'b' : 1, 'c' : 2, 'd' : 3}
series = Series(data)
self.assert_(tm.is_sorted(series.index))
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : datetime.now()}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : '3'}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : '0', 'b' : '1'}
series = Series(data, dtype=float)
self.assert_(series.dtype == np.float64)
def test_setindex(self):
# wrong type
series = self.series.copy()
self.assertRaises(TypeError, setattr, series, 'index', None)
# wrong length
series = self.series.copy()
self.assertRaises(AssertionError, setattr, series, 'index',
np.arange(len(series) - 1))
# works
series = self.series.copy()
series.index = np.arange(len(series))
self.assert_(isinstance(series.index, Index))
def test_array_finalize(self):
pass
def test_fromValue(self):
nans = Series(np.NaN, index=self.ts.index)
self.assert_(nans.dtype == np.float_)
self.assertEqual(len(nans), len(self.ts))
strings = Series('foo', index=self.ts.index)
self.assert_(strings.dtype == np.object_)
self.assertEqual(len(strings), len(self.ts))
d = datetime.now()
dates = Series(d, index=self.ts.index)
self.assert_(dates.dtype == np.object_)
self.assertEqual(len(dates), len(self.ts))
def test_contains(self):
tm.assert_contains_all(self.ts.index, self.ts)
def test_pickle(self):
unp_series = self._pickle_roundtrip(self.series)
unp_ts = self._pickle_roundtrip(self.ts)
assert_series_equal(unp_series, self.series)
assert_series_equal(unp_ts, self.ts)
def _pickle_roundtrip(self, obj):
obj.save('__tmp__')
unpickled = Series.load('__tmp__')
os.remove('__tmp__')
return unpickled
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assert_(self.series.get(-1) is None)
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - datetools.bday
self.assertRaises(KeyError, self.ts.__getitem__, d)
def test_iget(self):
s = Series(np.random.randn(10), index=range(0, 20, 2))
for i in range(len(s)):
result = s.iget(i)
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iget(slice(1, 3))
expected = s.ix[2:4]
assert_series_equal(result, expected)
def test_getitem_regression(self):
s = Series(range(5), index=range(5))
result = s[range(5)]
assert_series_equal(result, s)
def test_getitem_slice_bug(self):
s = Series(range(10), range(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1,2,3]]
slice2 = self.objSeries[[1,2,3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_(np.array_equal(result.index, s.index[mask]))
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
|
assert_series_equal(result, expected)
|
pandas.util.testing.assert_series_equal
|
import pandas as pd
import numpy as np
import os, argparse, fnmatch, re, json
TRACK_PREFIX = "week"
GAME_PREFIX = "games"
PLAYER_PREFIX = "players"
PLAY_PREFIX = "plays"
GAME_COL = "gameId"
PLAY_COL = "playId"
PLAYER_COL = "nflId"
PREFIX_DELIM = "_"
NAN_VALUE = "NAN"
NUMERIC_COL = "number"
NORMALIZE_PREFIX = "normalizer"
## appends the items of new_list to the mapping stored in norm_map under `value`
## without creating duplicates, preserving the indices already assigned to
## existing items
def appendTo(norm_map, value, new_list):
new_list = sorted(new_list)
if value not in norm_map:
norm_map[value] = { NAN_VALUE: 0 }
old_list = list(norm_map[value].keys())
only_new_items = [item for item in new_list if item not in old_list]
index = len(old_list)
for item in new_list:
if item in old_list:
continue
norm_map[value][item] = index
index = index + 1
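# For illustration: starting from m = {}, appendTo(m, 'teams', ['NE', 'KC']) gives
# m['teams'] == {'NAN': 0, 'KC': 1, 'NE': 2}; a later appendTo(m, 'teams', ['SEA', 'KC'])
# only adds 'SEA': 3 and leaves the existing indices unchanged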
def normalize_same_type(data, data_format, norm_map):
if data_format == NUMERIC_COL:
data = data.replace([None, "NAN"], "0")
data = data.astype(np.int8)
return data
data = data.fillna(NAN_VALUE)
columns = list(data.columns)
for col in columns:
unique_values = list(data[col].unique())
appendTo(norm_map, data_format, unique_values)
data = data.replace({col: norm_map[data_format]})
data = data.astype(np.int8)
return data
def split_data(data, col, split, norm_map):
new_data = (data[col]
.str.split(split["delim"])
.str.join(" ")
.str.strip()
.str.replace(r"\s+", " ")
.str.split(" ", expand=True))
col_count = len(list(new_data.columns))
format_len = len(split["format"])
for start in range(format_len):
prefix = (split["prefix"] + PREFIX_DELIM
+ split["format"][start] + PREFIX_DELIM)
selected_columns = list(range(start, col_count, format_len))
cols_map = {}
for index in range(len(selected_columns)):
cols_index = start + index * format_len
cols_map[cols_index] = prefix + str(index)
new_data.rename(columns = cols_map, inplace=True)
new_cols = list(cols_map.values())
new_data[new_cols] = normalize_same_type(new_data[new_cols],
split["format"][start], norm_map)
data = data.drop(columns=[col])
data[list(new_data.columns)] = new_data
return data
def normalize_data(data, meta, norm_map, split={}):
object_cols = list(data.select_dtypes(["object"]).columns)
data[object_cols] = data[object_cols].fillna(NAN_VALUE)
boolean_cols = list(data.select_dtypes(["bool"]).columns)
data[boolean_cols] = data[boolean_cols].astype(np.int8)
for col in meta:
unique_values = list(data[col].unique())
value = meta[col]
appendTo(norm_map, value, unique_values)
data = data.replace({col: norm_map[value]})
meta_cols = list(meta.keys())
data[meta_cols] = data[meta_cols].astype(np.int8)
for col in split:
data = split_data(data, col, split[col], norm_map)
data.reset_index(drop=True, inplace=True)
return data
def normalize_play(data, normalize_map):
SKIP_COLS = ["playDescription", "gameClock"]
data = data.drop(columns=SKIP_COLS)
normalize_split = {
"personnelO": {
"delim": ",",
"prefix": "personnel_o",
"format": ["number", "positions"],
"style": "fixed"
},
"personnelD": {
"delim": ",",
"prefix": "personnel_d",
"format": ["number", "positions"],
"style": "fixed"
},
"penaltyCodes": {
"delim": ";",
"prefix": "penalty_code",
"format": ["codes"]
},
"penaltyJerseyNumbers": {
"delim": ";",
"prefix": "penalty_jn",
"format": ["teams", "number"]
}
}
normalize_meta = {
"possessionTeam": "teams",
"playType": "playtypes",
"yardlineSide": "teams",
"offenseFormation": "formations",
"typeDropback": "dropbacktypes",
"passResult": "results"
}
return normalize_data(data, normalize_meta, normalize_map,
split=normalize_split)
def normalize_game(data, normalize_map):
SKIP_COLS = ["gameDate", "gameTimeEastern", "week"]
data = data.drop(columns=SKIP_COLS)
normalize_meta = {
"homeTeamAbbr": "teams",
"visitorTeamAbbr": "teams"
}
return normalize_data(data, normalize_meta, normalize_map)
def normalize_track(data, normalize_map):
SKIP_COLS = ["time", "displayName"]
data = data.drop(columns=SKIP_COLS)
normalize_meta = {
"event": "events",
"position": "positions",
"team": "teamtypes",
"playDirection": "directions",
"route": "routes"
}
return normalize_data(data, normalize_meta, normalize_map)
def save_dataframe_as_json(dataframe, output_path, filename):
json_data = dataframe.to_json(orient="records", indent=2)
json_path = os.path.join(output_path, filename)
with open(json_path, "w") as output:
output.write(json_data)
def get_dataframes(data_path, output_path):
normalizer = {}
game_path = os.path.join(data_path, "{}.csv".format(GAME_PREFIX))
game_data = pd.read_csv(game_path)
save_dataframe_as_json(game_data, output_path,
"{}.json".format(GAME_PREFIX))
game_data = normalize_game(game_data, normalizer)
player_path = os.path.join(data_path, "{}.csv".format(PLAYER_PREFIX))
player_data = pd.read_csv(player_path)
save_dataframe_as_json(player_data, output_path,
"{}.json".format(PLAYER_PREFIX))
play_path = os.path.join(data_path, "{}.csv".format(PLAY_PREFIX))
play_data = pd.read_csv(play_path)
play_data = normalize_play(play_data, normalizer)
track_files = fnmatch.filter(os.listdir(data_path), "{}*.csv".format(
TRACK_PREFIX))
index = 0
for tf in track_files:
track_path = os.path.join(data_path, tf)
track_data =
|
pd.read_csv(track_path)
|
pandas.read_csv
|
import pandas as pd
#import seaborn as sns
from matplotlib import pyplot as plt
import pdb
import glob
def get_all_dataframe(
data_source='election_reporting_dot_com',
state='mi',
year=2020,
):
county_folder_list = [
x.split('/')[-2] for x in glob.glob(
'./election_reporting_com/2020/mi/*/'
)
]
state = 'mi'
df_dict_all = {
'party': pd.DataFrame(),
'president': pd.DataFrame(),
'senator': pd.DataFrame()
}
for county in county_folder_list:
print(f'getting dataframe for county {county}')
df_dict = get_county_dataframe(
data_source='election_reporting_dot_com',
state='mi',
county=county,
year=2020
)
# pdb.set_trace()
for x in df_dict.keys():
if x in df_dict_all:
df_dict_all[x] = pd.concat(
[df_dict_all[x], df_dict[x]],
ignore_index=True
)
else:
                print(f'key {x} not recognized. press c to continue')
pdb.set_trace()
return df_dict_all
def get_county_dataframe(
data_source='election_reporting_dot_com',
state='mi',
county='kent',
year=2020
):
'''
    Return a dictionary of pandas DataFrames for the given state, county, year and data source.
'''
if data_source == 'election_reporting_dot_com':
file_list = glob.glob(
f'./election_reporting_com/{year}/{state}/{county}/*cleaned.csv'
)
df_dict = {}
# df_dict_keys = [
# x.split('/')[-1].split('.')[0] for x in file_list
# ]
df_dict_keys = ['party', 'president', 'senator']
# for x, y in zip(df_dict_keys, file_list):
for y in file_list:
print(f'reading from {y}')
for x in df_dict_keys:
if x in y:
df_dict[x] = pd.read_csv(y)
else:
return None
return df_dict
def plot_president_vs_senator_all_counties(
fig_counter_base=100,
state='mi',
save_figure=True,
year=2020
):
fig_counter = fig_counter_base
df_dict = get_all_dataframe()
df_p = df_dict['president']
df_s = df_dict['senator']
# precincts = df_p['precinct'].unique()
df_merge = pd.merge(df_p, df_s, suffixes=('_p', '_s'), how='inner', on='precinct')
senator_dem_key = None
senator_rep_key = None
for x in df_p.columns:
if 'biden' in x:
biden_key = x
elif 'trump' in x:
trump_key = x
for x in df_s.columns:
if '_dem' in x:
senator_dem_key = x
elif '_rep' in x:
senator_rep_key = x
if senator_dem_key is None or senator_rep_key is None:
        print('senator data columns do not contain the expected _dem/_rep suffixes')
pdb.set_trace()
total_president_series = df_merge[biden_key] + df_merge[trump_key]
df_president_percentage_dem = df_merge[biden_key]/total_president_series
df_president_percentage_rep = df_merge[trump_key]/total_president_series
df_diff_dem = df_merge[biden_key]/total_president_series - df_merge[senator_dem_key]
df_diff_rep = df_merge[trump_key]/total_president_series - df_merge[senator_rep_key]
total_senator_series = df_merge[senator_dem_key]+df_merge[senator_rep_key]
df_senator_percentage_dem = df_merge[senator_dem_key]/total_senator_series
df_senator_percentage_rep = df_merge[senator_rep_key]/total_senator_series
pdb.set_trace()
fig_counter += 1
plt.figure(fig_counter)
party = 'republican'
    title = f'{party} {state} all available counties'
plt.title(title)
# plt.scatter(df_senator_percentage_rep, df_diff_rep)
plt.scatter(df_senator_percentage_rep, df_president_percentage_rep)
plt.xlabel(f'senator votes fractions for {party}')
plt.ylabel(f'president votes fractions for {party}')
if save_figure:
filename = 'pdfs/electioncom/'+title.replace(' ', '_')+'.pdf'
plt.savefig(filename)
print(f'saving figure {filename}')
else:
plt.draw()
plt.pause(0.001)
fig_counter += 1
plt.figure(fig_counter)
party = 'democrats'
    title = f'{party} {state} all available counties'
plt.title(title)
# plt.scatter(df_senator_percentage_dem, df_diff_dem)
plt.scatter(df_senator_percentage_dem, df_president_percentage_dem)
plt.xlabel(f'senator votes fractions for {party}')
plt.ylabel(f'president votes fractions for {party}')
plt.xlim(0, 1)
plt.ylim(0, 1)
if save_figure:
filename = 'pdfs/electioncom/'+title.replace(' ', '_')+'.pdf'
print(f'saving figure {filename}')
plt.savefig(filename)
else:
plt.draw()
plt.pause(0.001)
input("Press [enter] to continue.")
def plot_president_vs_senator_per_county(
county,
fig_counter_base,
state='mi',
save_figure=True,
year=2020
):
fig_counter = fig_counter_base
# file_list = glob.glob(
# f'./election_reporting_com/{year}/{state}/{county}/*cleaned.csv'
# )
# df_dict = {}
# df_dict_keys = [
# x.split('/')[-1].split('.')[0] for x in file_list
# ]
# for x, y in zip(df_dict_keys, file_list):
# print(f'reading from {y}, dict key: {x}')
# df_dict[x] = pd.read_csv(y)
# if 'party' in y:
# df_sp = df_dict[x]
# elif 'president' in y:
# df_p = df_dict[x]
# elif 'senator' in y:
# df_s = df_dict[x]
# else:
# print(f'unknown file {y} with key being {x}')
df_dict = get_county_dataframe(county=county,year=year,state=state)
df_p = df_dict['president']
df_s = df_dict['senator']
# precincts = df_p['precinct'].unique()
# df_merge = pd.merge(df_p, df_s, suffixes=('_p', '_s'))
df_merge =
|
pd.merge(df_p, df_s, suffixes=('_p', '_s'), how='inner', on='precinct')
|
pandas.merge
|
import pandas as pd
import numpy as np
import os, sys
in_dir = '../../' + sys.argv[1] + '/'#'../../goal-inference-simulations/'
out_dir = '../../synthetic-' + sys.argv[1] + '/'#'../../synthetic-goal-inference-simulations/'
subset = '1en01'
try:
os.makedirs(out_dir)
except:
pass
games = {1:{},2:{},3:{},4:{},5:{},6:{}}
for game in os.listdir(in_dir):
if game[-4:] != '.csv':
continue
bg_file = game.split('_')[-2]
if bg_file.split('-')[1] != subset:
continue
data = pd.io.parsers.read_csv(in_dir + game)
players = set(data['pid'])
n = len(players)
if bg_file in games[n]:
games[n][bg_file][game] = []
else:
games[n][bg_file] = {game:[]}
for p in players:
games[n][bg_file][game] += [data[data['pid'] == p].copy()]
count = 0
for n in games:
for bg in games[n]:
for g in games[n][bg]:
count += 1
assert len(games[n][bg][g]) == n
if n == 1:
df = games[n][bg][g][0]
else:
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
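The completion above only constructs an empty DataFrame, presumably as a starting point for combining the per-player frames stored in games[n][bg][g]; since the continuation is not shown, the following is just a generic sketch of that accumulation pattern with invented data (pid and score are hypothetical column names):
import pandas as pd
records = [(1, 0.4), (2, 0.7)]  # hypothetical (pid, score) pairs
blocks = [pd.DataFrame({'pid': [pid], 'score': [score]}) for pid, score in records]
# Concatenating once at the end is generally cheaper than growing a frame row by row.
df = pd.concat(blocks, ignore_index=True) if blocks else pd.DataFrame()
print(df)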
import pandas as pd
class MultiPassSieveModel():
def __init__(self, *models):
self.models = models
def predict(self, df):
preds = pd.DataFrame([[False]*2]*len(df), columns=['a_coref', 'b_coref'])
for model in self.models:
preds_ = model.predict(df)
preds_ =
|
pd.DataFrame(preds_, columns=['a_coref', 'b_coref'])
|
pandas.DataFrame
|
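As a quick illustration of the completed call, this wraps a raw prediction array in a frame with named columns (the boolean values below are made up, not real sieve output):
import numpy as np
import pandas as pd
preds_ = np.array([[True, False], [False, False], [False, True]])  # hypothetical per-example predictions
preds_ = pd.DataFrame(preds_, columns=['a_coref', 'b_coref'])
print(preds_.dtypes)  # both columns are bool
print(preds_.sum())   # a_coref: 1, b_coref: 1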
'''
Preprocessing Transformers based on scikit-learn's API
By <NAME>
Created on June 12, 2017
'''
import copy
import pandas as pd
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from pymo.rotation_tools import Rotation
class UnsupportedParamError(Exception): pass  # exception type raised by the transformers below
class MocapParameterizer(BaseEstimator, TransformerMixin):
def __init__(self, param_type = 'euler'):
'''
param_type = {'euler', 'quat', 'expmap', 'position', 'axis_angle'}
'''
self.param_type = param_type
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
if self.param_type == 'euler':
return X
elif self.param_type == 'expmap':
return self._to_expmap(X)
elif self.param_type == 'quat':
return X
elif self.param_type == 'position':
return self._to_pos(X)
elif self.param_type == 'axis_angle':
return self._to_axis_angle(X)
else:
raise UnsupportedParamError('Unsupported param: %s. Valid param types are: euler, quat, expmap, position, axis_angle' % self.param_type)
# return X
def inverse_transform(self, X, copy=None):
if self.param_type == 'euler':
return X
elif self.param_type == 'expmap':
return self._expmap_to_euler(X)
elif self.param_type == 'quat':
raise UnsupportedParamError('quat2euler is not supported')
elif self.param_type == 'position':
print('positions 2 eulers is not supported')
return X
else:
raise UnsupportedParamError('Unsupported param: %s. Valid param types are: euler, quat, expmap, position' % self.param_type)
def _to_pos(self, X):
'''Converts joints rotations in Euler angles to joint positions'''
Q = []
for track in X:
channels = []
titles = []
euler_df = track.values
# Create a new DataFrame to store the joint positions
pos_df = pd.DataFrame(index=euler_df.index)
# Copy the root rotations into the new DataFrame
# rxp = '%s_Xrotation'%track.root_name
# ryp = '%s_Yrotation'%track.root_name
# rzp = '%s_Zrotation'%track.root_name
# pos_df[rxp] = pd.Series(data=euler_df[rxp], index=pos_df.index)
# pos_df[ryp] = pd.Series(data=euler_df[ryp], index=pos_df.index)
# pos_df[rzp] = pd.Series(data=euler_df[rzp], index=pos_df.index)
# List the columns that contain rotation channels
rot_cols = [c for c in euler_df.columns if ('rotation' in c)]
# List the columns that contain position channels
pos_cols = [c for c in euler_df.columns if ('position' in c)]
# List the joints that are not end sites, i.e., have channels
joints = (joint for joint in track.skeleton)
tree_data = {}
for joint in track.traverse():
parent = track.skeleton[joint]['parent']
# Get the rotation columns that belong to this joint
rc = euler_df[[c for c in rot_cols if joint in c]]
# Get the position columns that belong to this joint
pc = euler_df[[c for c in pos_cols if joint in c]]
# Make sure the columns are organized in xyz order
if rc.shape[1] < 3:
euler_values = [[0,0,0] for f in rc.iterrows()]
else:
euler_values = [[f[1]['%s_Xrotation'%joint],
f[1]['%s_Yrotation'%joint],
f[1]['%s_Zrotation'%joint]] for f in rc.iterrows()]
################# in euler angle, the order of rotation axis is very important #####################
rotation_order = rc.columns[0][rc.columns[0].find('rotation') - 1] + rc.columns[1][rc.columns[1].find('rotation') - 1] + rc.columns[2][rc.columns[2].find('rotation') - 1] #rotation_order is string : 'XYZ' or'ZYX' or ...
####################################################################################################
if pc.shape[1] < 3:
pos_values = [[0,0,0] for f in pc.iterrows()]
else:
pos_values =[[f[1]['%s_Xposition'%joint],
f[1]['%s_Yposition'%joint],
f[1]['%s_Zposition'%joint]] for f in pc.iterrows()]
#euler_values = [[0,0,0] for f in rc.iterrows()] # for debugging
#pos_values = [[0,0,0] for f in pc.iterrows()] # for debugging
# Convert the eulers to rotation matrices
############################ input rotation order as Rotation class's argument #########################
rotmats = np.asarray([Rotation([f[0], f[1], f[2]], 'euler', rotation_order, from_deg=True).rotmat for f in euler_values])
########################################################################################################
tree_data[joint]=[
[], # to store the rotation matrix
[] # to store the calculated position
]
if track.root_name == joint:
tree_data[joint][0] = rotmats
# tree_data[joint][1] = np.add(pos_values, track.skeleton[joint]['offsets'])
tree_data[joint][1] = pos_values
else:
# for every frame i, multiply this joint's rotmat to the rotmat of its parent
tree_data[joint][0] = np.asarray([np.matmul(rotmats[i], tree_data[parent][0][i])
for i in range(len(tree_data[parent][0]))])
# add the position channel to the offset and store it in k, for every frame i
k = np.asarray([np.add(pos_values[i], track.skeleton[joint]['offsets'])
for i in range(len(tree_data[parent][0]))])
# multiply k to the rotmat of the parent for every frame i
q = np.asarray([np.matmul(k[i], tree_data[parent][0][i])
for i in range(len(tree_data[parent][0]))])
# add q to the position of the parent, for every frame i
tree_data[joint][1] = np.asarray([np.add(q[i], tree_data[parent][1][i])
for i in range(len(tree_data[parent][1]))])
# Create the corresponding columns in the new DataFrame
pos_df['%s_Xposition'%joint] = pd.Series(data=[e[0] for e in tree_data[joint][1]], index=pos_df.index)
pos_df['%s_Yposition'%joint] = pd.Series(data=[e[1] for e in tree_data[joint][1]], index=pos_df.index)
pos_df['%s_Zposition'%joint] = pd.Series(data=[e[2] for e in tree_data[joint][1]], index=pos_df.index)
new_track = track.clone()
new_track.values = pos_df
Q.append(new_track)
return Q
def _to_axis_angle(self, X):
'''Converts joints rotations in Euler angles to axis angle rotations'''
Q = []
for track in X:
# fix track names
# adapt joint name so that it's equal for either male or female
channels = []
titles = []
euler_df = track.values
# Create a new DataFrame to store the axis angle values
axis_angle_df = pd.DataFrame(index=euler_df.index)
# Copy the root rotations into the new DataFrame
# rxp = '%s_Xrotation'%track.root_name
# ryp = '%s_Yrotation'%track.root_name
# rzp = '%s_Zrotation'%track.root_name
# pos_df[rxp] = pd.Series(data=euler_df[rxp], index=pos_df.index)
# pos_df[ryp] = pd.Series(data=euler_df[ryp], index=pos_df.index)
# pos_df[rzp] = pd.Series(data=euler_df[rzp], index=pos_df.index)
# List the columns that contain rotation channels
rot_cols = [c for c in euler_df.columns if ('rotation' in c)]
# List the columns that contain position channels
pos_cols = [c for c in euler_df.columns if ('position' in c)]
# List the joints that are not end sites, i.e., have channels
joints = (joint for joint in track.skeleton)
tree_data = {}
for joint in track.traverse():
parent = track.skeleton[joint]['parent']
# Get the rotation columns that belong to this joint
rc = euler_df[[c for c in rot_cols if joint in c]]
# Get the position columns that belong to this joint
pc = euler_df[[c for c in pos_cols if joint in c]]
# Make sure the columns are organized in xyz order
if rc.shape[1] < 3:
euler_values = [[0,0,0] for f in rc.iterrows()]
else:
euler_values = [[f[1]['%s_Xrotation'%joint],
f[1]['%s_Yrotation'%joint],
f[1]['%s_Zrotation'%joint]] for f in rc.iterrows()]
################# in euler angle, the order of rotation axis is very important #####################
rotation_order = rc.columns[0][rc.columns[0].find('rotation') - 1] + rc.columns[1][rc.columns[1].find('rotation') - 1] + rc.columns[2][rc.columns[2].find('rotation') - 1] #rotation_order is string : 'XYZ' or'ZYX' or ...
####################################################################################################
if pc.shape[1] < 3:
pos_values = [[0,0,0] for f in pc.iterrows()]
else:
pos_values =[[f[1]['%s_Xposition'%joint],
f[1]['%s_Yposition'%joint],
f[1]['%s_Zposition'%joint]] for f in pc.iterrows()]
#euler_values = [[0,0,0] for f in rc.iterrows()] # for debugging
#pos_values = [[0,0,0] for f in pc.iterrows()] # for debugging
# Convert the eulers to axis angles
############################ input rotation order as Rotation class's argument #########################
axis_angles = np.asarray([Rotation([f[0], f[1], f[2]], 'euler', rotation_order, from_deg=True).get_euler_axis() for f in euler_values])
########################################################################################################
# Create the corresponding columns in the new DataFrame
axis_angle_df['%s_Xposition'%joint] = pd.Series(data=[e[0] for e in pos_values], index=axis_angle_df.index)
axis_angle_df['%s_Yposition'%joint] = pd.Series(data=[e[1] for e in pos_values], index=axis_angle_df.index)
axis_angle_df['%s_Zposition'%joint] = pd.Series(data=[e[2] for e in pos_values], index=axis_angle_df.index)
axis_angle_df['%s_Xrotation'%joint] = pd.Series(data=[e[0] for e in axis_angles], index=axis_angle_df.index)
axis_angle_df['%s_Yrotation'%joint] = pd.Series(data=[e[1] for e in axis_angles], index=axis_angle_df.index)
axis_angle_df['%s_Zrotation'%joint] = pd.Series(data=[e[2] for e in axis_angles], index=axis_angle_df.index)
new_track = track.clone()
new_track.values = axis_angle_df
Q.append(new_track)
return Q
def _to_expmap(self, X):
'''Converts Euler angles to Exponential Maps'''
Q = []
for track in X:
channels = []
titles = []
euler_df = track.values
# Create a new DataFrame to store the exponential map rep
exp_df = pd.DataFrame(index=euler_df.index)
# Copy the root positions into the new DataFrame
rxp = '%s_Xposition'%track.root_name
ryp = '%s_Yposition'%track.root_name
rzp = '%s_Zposition'%track.root_name
exp_df[rxp] = pd.Series(data=euler_df[rxp], index=exp_df.index)
exp_df[ryp] = pd.Series(data=euler_df[ryp], index=exp_df.index)
exp_df[rzp] = pd.Series(data=euler_df[rzp], index=exp_df.index)
# List the columns that contain rotation channels
rots = [c for c in euler_df.columns if ('rotation' in c and 'Nub' not in c)]
# List the joints that are not end sites, i.e., have channels
joints = (joint for joint in track.skeleton if 'Nub' not in joint)
for joint in joints:
r = euler_df[[c for c in rots if joint in c]] # Get the columns that belong to this joint
euler = [[f[1]['%s_Xrotation'%joint], f[1]['%s_Yrotation'%joint], f[1]['%s_Zrotation'%joint]] for f in r.iterrows()] # Make sure the columns are organized in xyz order
exps = [Rotation(f, 'euler', from_deg=True).to_expmap() for f in euler] # Convert the eulers to exp maps
# Create the corresponding columns in the new DataFrame
exp_df['%s_alpha'%joint] = pd.Series(data=[e[0] for e in exps], index=exp_df.index)
exp_df['%s_beta'%joint] = pd.Series(data=[e[1] for e in exps], index=exp_df.index)
exp_df['%s_gamma'%joint] = pd.Series(data=[e[2] for e in exps], index=exp_df.index)
new_track = track.clone()
new_track.values = exp_df
Q.append(new_track)
return Q
def _expmap_to_euler(self, X):
Q = []
for track in X:
channels = []
titles = []
exp_df = track.values
# Create a new DataFrame to store the recovered Euler angles
euler_df = pd.DataFrame(index=exp_df.index)
# Copy the root positions into the new DataFrame
rxp = '%s_Xposition'%track.root_name
ryp = '%s_Yposition'%track.root_name
rzp = '%s_Zposition'%track.root_name
euler_df[rxp] = pd.Series(data=exp_df[rxp], index=euler_df.index)
euler_df[ryp] = pd.Series(data=exp_df[ryp], index=euler_df.index)
euler_df[rzp] = pd.Series(data=exp_df[rzp], index=euler_df.index)
# List the columns that contain rotation channels
exp_params = [c for c in exp_df.columns if ( any(p in c for p in ['alpha', 'beta','gamma']) and 'Nub' not in c)]
# List the joints that are not end sites, i.e., have channels
joints = (joint for joint in track.skeleton if 'Nub' not in joint)
for joint in joints:
r = exp_df[[c for c in exp_params if joint in c]] # Get the columns that belong to this joint
expmap = [[f[1]['%s_alpha'%joint], f[1]['%s_beta'%joint], f[1]['%s_gamma'%joint]] for f in r.iterrows()] # Make sure the columns are organized in alpha, beta, gamma order
euler_rots = [Rotation(f, 'expmap').to_euler(True)[0] for f in expmap] # Convert the exp maps back to Euler angles
# Create the corresponding columns in the new DataFrame
euler_df['%s_Xrotation'%joint] = pd.Series(data=[e[0] for e in euler_rots], index=euler_df.index)
euler_df['%s_Yrotation'%joint] = pd.Series(data=[e[1] for e in euler_rots], index=euler_df.index)
euler_df['%s_Zrotation'%joint] = pd.Series(data=[e[2] for e in euler_rots], index=euler_df.index)
new_track = track.clone()
new_track.values = euler_df
Q.append(new_track)
return Q
class JointSelector(BaseEstimator, TransformerMixin):
'''
Allows for filtering the mocap data to include only the selected joints
'''
def __init__(self, joints, include_root=False):
self.joints = joints
self.include_root = include_root
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
selected_joints = []
selected_channels = []
if self.include_root:
selected_joints.append(X[0].root_name)
selected_joints.extend(self.joints)
for joint_name in selected_joints:
selected_channels.extend([o for o in X[0].values.columns if joint_name in o])
Q = []
for track in X:
t2 = track.clone()
for key in track.skeleton.keys():
if key not in selected_joints:
t2.skeleton.pop(key)
t2.values = track.values[selected_channels]
Q.append(t2)
return Q
class Numpyfier(BaseEstimator, TransformerMixin):
'''
Just converts the values in a MocapData object into a numpy array
Useful for the final stage of a pipeline before training
'''
def __init__(self):
pass
def fit(self, X, y=None):
self.org_mocap_ = X[0].clone()
self.org_mocap_.values.drop(self.org_mocap_.values.index, inplace=True)
return self
def transform(self, X, y=None):
Q = []
for track in X:
Q.append(track.values.values)
return np.array(Q)
def inverse_transform(self, X, copy=None):
Q = []
for track in X:
new_mocap = self.org_mocap_.clone()
time_index = pd.to_timedelta([f for f in range(track.shape[0])], unit='s')
new_df = pd.DataFrame(data=track, index=time_index, columns=self.org_mocap_.values.columns)
new_mocap.values = new_df
Q.append(new_mocap)
return Q
class RootTransformer(BaseEstimator, TransformerMixin):
def __init__(self, method):
"""
Accepted methods:
abdolute_translation_deltas
pos_rot_deltas
"""
self.method = method
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
Q = []
for track in X:
if self.method == 'abdolute_translation_deltas':
new_df = track.values.copy()
xpcol = '%s_Xposition'%track.root_name
ypcol = '%s_Yposition'%track.root_name
zpcol = '%s_Zposition'%track.root_name
dxpcol = '%s_dXposition'%track.root_name
dzpcol = '%s_dZposition'%track.root_name
dx = track.values[xpcol].diff()
dz = track.values[zpcol].diff()
dx[0] = 0
dz[0] = 0
new_df.drop([xpcol, zpcol], axis=1, inplace=True)
new_df[dxpcol] = dx
new_df[dzpcol] = dz
new_track = track.clone()
new_track.values = new_df
# end of abdolute_translation_deltas
elif self.method == 'pos_rot_deltas':
new_track = track.clone()
# Absolute columns
xp_col = '%s_Xposition'%track.root_name
yp_col = '%s_Yposition'%track.root_name
zp_col = '%s_Zposition'%track.root_name
xr_col = '%s_Xrotation'%track.root_name
yr_col = '%s_Yrotation'%track.root_name
zr_col = '%s_Zrotation'%track.root_name
# Delta columns
dxp_col = '%s_dXposition'%track.root_name
dzp_col = '%s_dZposition'%track.root_name
dxr_col = '%s_dXrotation'%track.root_name
dyr_col = '%s_dYrotation'%track.root_name
dzr_col = '%s_dZrotation'%track.root_name
new_df = track.values.copy()
root_pos_x_diff = pd.Series(data=track.values[xp_col].diff(), index=new_df.index)
root_pos_z_diff = pd.Series(data=track.values[zp_col].diff(), index=new_df.index)
root_rot_y_diff = pd.Series(data=track.values[yr_col].diff(), index=new_df.index)
root_rot_x_diff = pd.Series(data=track.values[xr_col].diff(), index=new_df.index)
root_rot_z_diff = pd.Series(data=track.values[zr_col].diff(), index=new_df.index)
root_pos_x_diff[0] = 0
root_pos_z_diff[0] = 0
root_rot_y_diff[0] = 0
root_rot_x_diff[0] = 0
root_rot_z_diff[0] = 0
new_df.drop([xr_col, yr_col, zr_col, xp_col, zp_col], axis=1, inplace=True)
new_df[dxp_col] = root_pos_x_diff
new_df[dzp_col] = root_pos_z_diff
new_df[dxr_col] = root_rot_x_diff
new_df[dyr_col] = root_rot_y_diff
new_df[dzr_col] = root_rot_z_diff
new_track.values = new_df
Q.append(new_track)
return Q
def inverse_transform(self, X, copy=None, start_pos=None):
Q = []
#TODO: simplify this implementation
startx = 0
startz = 0
if start_pos is not None:
startx, startz = start_pos
for track in X:
new_track = track.clone()
if self.method == 'abdolute_translation_deltas':
new_df = new_track.values
xpcol = '%s_Xposition'%track.root_name
ypcol = '%s_Yposition'%track.root_name
zpcol = '%s_Zposition'%track.root_name
dxpcol = '%s_dXposition'%track.root_name
dzpcol = '%s_dZposition'%track.root_name
dx = track.values[dxpcol].values
dz = track.values[dzpcol].values
recx = [startx]
recz = [startz]
for i in range(dx.shape[0]-1):
recx.append(recx[i]+dx[i+1])
recz.append(recz[i]+dz[i+1])
# recx = [recx[i]+dx[i+1] for i in range(dx.shape[0]-1)]
# recz = [recz[i]+dz[i+1] for i in range(dz.shape[0]-1)]
# recx = dx[:-1] + dx[1:]
# recz = dz[:-1] + dz[1:]
new_df[xpcol] = pd.Series(data=recx, index=new_df.index)
new_df[zpcol] = pd.Series(data=recz, index=new_df.index)
new_df.drop([dxpcol, dzpcol], axis=1, inplace=True)
new_track.values = new_df
# end of abdolute_translation_deltas
elif self.method == 'pos_rot_deltas':
new_track = track.clone()
# Absolute columns
xp_col = '%s_Xposition'%track.root_name
yp_col = '%s_Yposition'%track.root_name
zp_col = '%s_Zposition'%track.root_name
xr_col = '%s_Xrotation'%track.root_name
yr_col = '%s_Yrotation'%track.root_name
zr_col = '%s_Zrotation'%track.root_name
# Delta columns
dxp_col = '%s_dXposition'%track.root_name
dzp_col = '%s_dZposition'%track.root_name
dxr_col = '%s_dXrotation'%track.root_name
dyr_col = '%s_dYrotation'%track.root_name
dzr_col = '%s_dZrotation'%track.root_name
new_df = track.values.copy()
dx = track.values[dxp_col].values
dz = track.values[dzp_col].values
drx = track.values[dxr_col].values
dry = track.values[dyr_col].values
drz = track.values[dzr_col].values
rec_xp = [startx]
rec_zp = [startz]
rec_xr = [0]
rec_yr = [0]
rec_zr = [0]
for i in range(dx.shape[0]-1):
rec_xp.append(rec_xp[i]+dx[i+1])
rec_zp.append(rec_zp[i]+dz[i+1])
rec_xr.append(rec_xr[i]+drx[i+1])
rec_yr.append(rec_yr[i]+dry[i+1])
rec_zr.append(rec_zr[i]+drz[i+1])
new_df[xp_col] =
|
pd.Series(data=rec_xp, index=new_df.index)
|
pandas.Series
|
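The pd.Series(data=..., index=...) idiom completed above appears throughout this transformer: a plain Python list is aligned to an existing DataFrame's index before being assigned as a column. A minimal sketch with a hypothetical root joint name ('Hips') and made-up values:
import pandas as pd
# A frame whose (timedelta) index the reconstructed column must follow.
frame_index = pd.to_timedelta([0, 1, 2], unit='s')
new_df = pd.DataFrame({'Hips_Yposition': [1.00, 1.05, 1.10]}, index=frame_index)
rec_xp = [0.0, 0.5, 1.0]  # e.g. X positions rebuilt from accumulated deltas
new_df['Hips_Xposition'] = pd.Series(data=rec_xp, index=new_df.index)
print(new_df)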
## some utils functions for scanpy
import os
import anndata
import numpy as np
import pandas as pd
import scanpy as sc
from scipy import io
from scipy.sparse import hstack
def adata_hstack(blocks, sample_ids=None, layer_keys=None):
if layer_keys is None:
layer_keys = blocks[0].layers.keys()
layers = {}
for _key in layer_keys:
layers[_key] = hstack([adata.layers[_key].T for adata in blocks]).T
if len(layer_keys) == 0:
layers = None
X_blocks = [adata.X.transpose() for adata in blocks]
obs_blocks = [adata.obs for adata in blocks]
new_X = hstack(X_blocks).transpose()
new_obs = pd.concat(obs_blocks, axis=0)
new_var = blocks[0].var
new_adata = anndata.AnnData(X=new_X, obs=new_obs, var=new_var,
layers=layers)
sample_ids_default = []
for i in range(len(blocks)):
sample_ids_default += ["S%d" %i] * blocks[i].shape[0]
if sample_ids is not None:
if len(sample_ids) != len(new_obs):
print("sample ids has different size to observations, change to default.")
sample_ids = sample_ids_default
else:
sample_ids = sample_ids_default
cell_ids = [
new_adata.obs.index.values[i] + ":" +
sample_ids[i] for i in range(len(sample_ids))]
new_adata.obs['cell_id'] = cell_ids
new_adata.obs['sample_id'] = sample_ids
return new_adata
def adata_preprocess(adata, min_cells=3, min_genes=500, max_genes=5000,
max_percent_mito=0.1):
## first filtering
sc.pp.filter_cells(adata, min_genes=min_genes)
print(adata.shape)
sc.pp.filter_genes(adata, min_cells=min_cells)
print(adata.shape)
## basic info
mito_genes = [name for name in adata.var_names if name.startswith('MT-')]
adata.obs['n_counts'] = np.sum(adata.X, axis=1).A1
adata.obs['n_genes'] = np.sum(adata.X>=1, axis=1).A1
adata.obs['n_mito'] = np.sum(adata[:, mito_genes].X, axis=1).A1
adata.obs['percent_mito'] = adata.obs['n_mito'] / adata.obs['n_counts']
## filter cells
adata = adata[adata.obs['n_genes'] < max_genes, :]
adata = adata[adata.obs['percent_mito'] < max_percent_mito, :]
## log transform
adata.raw = sc.pp.log1p(adata, copy=True)
sc.pp.normalize_per_cell(adata, counts_per_cell_after=1e4)
## filter genes
filter_result = sc.pp.filter_genes_dispersion(adata.X, min_mean=0.0125,
max_mean=3, min_disp=0.2)
adata = adata[:, filter_result.gene_subset]
## regress and scale
sc.pp.log1p(adata)
sc.pp.regress_out(adata, ['n_counts', 'percent_mito'])
sc.pp.scale(adata, max_value=10)
### PCA, t-SNE, and UMAP
sc.tl.pca(adata)
adata.obsm['X_pca'] *= -1 # multiply by -1 to match Seurat
sc.tl.tsne(adata, random_state=2, n_pcs=10)
sc.pp.neighbors(adata, n_neighbors=10)
sc.tl.umap(adata)
return adata
def load_10X(path, min_counts=None, min_cells=None, version3=False):
"""
Load 10X data from cellranger output matrix, into
scipy csr matrix, arrays for genes and cell barcodes
Filter cells by min_counts and filter genes by min_cells
"""
## load 10X matrix folder
if version3:
mat = io.mmread(path + "/matrix.mtx.gz").tocsr()
genes = np.genfromtxt(path + "/features.tsv.gz", dtype="str", delimiter="\t")
cells = np.genfromtxt(path + "/barcodes.tsv.gz", dtype="str", delimiter="\t")
else:
mat = io.mmread(path + "/matrix.mtx").tocsr()
genes = np.genfromtxt(path + "/genes.tsv", dtype="str", delimiter="\t")
cells = np.genfromtxt(path + "/barcodes.tsv", dtype="str", delimiter="\t")
## filter cells
if min_counts is not None and min_counts > 0:
n_counts = np.array(np.sum(mat, axis=0)).reshape(-1)
idx = n_counts >= min_counts
mat = mat[:, idx]
cells = cells[idx]
## filter genes
if min_cells is not None and min_cells > 0:
n_cells = np.array(np.sum(mat, axis=1)).reshape(-1)
idx = n_cells >= min_cells
mat = mat[idx, :]
genes = genes[idx, ]
return mat, genes, cells
def save_10X(path, mat, genes, barcodes, version3=False):
"""
Save 10X matrix, genes and cell barcodes into files under the given path.
"""
if not os.path.exists(path):
os.makedirs(path)
io.mmwrite(path + '/matrix.mtx', mat)
if version3:
fid = open(path + '/features.tsv', 'w')
else:
fid = open(path + '/genes.tsv', 'w')
for ii in range(genes.shape[0]):
fid.writelines("\t".join(genes[ii, :]) + "\n")
fid.close()
fid = open(path + '/barcodes.tsv', 'w')
for _cell in barcodes:
fid.writelines("%s\n" %(_cell))
fid.close()
if version3:
import subprocess
bashCommand = "gzip -f %s %s %s" %(path + '/matrix.mtx',
path + '/features.tsv',
path + '/barcodes.tsv')
pro = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
pro.communicate()[0]
def read_dropEst(path, cell_file = 'barcodes.tsv',
gene_file = 'genes.tsv',
layer_keys = ['exon', 'intron', 'spanning'],
layer_files = ['cell.counts.exon.mtx',
'cell.counts.intron.mtx',
'cell.counts.spanning.mtx'],
combine_unspliced = True):
"""
Load dropEst matrices produced by this script:
"""
## load 10X matrix folder
# genes = np.genfromtxt(path + "/" + gene_file, dtype="str", delimiter="\t")
# cells = np.genfromtxt(path + "/" + cell_file, dtype="str", delimiter="\t")
genes =
|
pd.read_csv(path + "/" + gene_file, sep="\t", index_col=0, header=None)
|
pandas.read_csv
|
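The read_csv completion above swaps np.genfromtxt for a tab-separated read with the first column used as the index. A self-contained sketch of the same call on an in-memory stand-in for genes.tsv (the gene IDs and names below are invented):
import pandas as pd
from io import StringIO
tsv = "ENSG000001\tGENE_A\tGene Expression\nENSG000002\tGENE_B\tGene Expression\n"
genes = pd.read_csv(StringIO(tsv), sep="\t", index_col=0, header=None)
print(genes.index.tolist())  # ['ENSG000001', 'ENSG000002']
print(genes[1].tolist())     # ['GENE_A', 'GENE_B']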
# -*- coding: utf-8 -*-
"""
@author: <NAME>
@license: MIT
"""
from pandas import DataFrame
# Load good XXth data
good_XXth_data =
|
DataFrame.from_csv('good_XXth_data.csv', index_col=None)
|
pandas.DataFrame.from_csv
|
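Note that DataFrame.from_csv was deprecated in pandas 0.21 and removed in pandas 1.0, so the snippet above only runs on older releases. On current pandas the closest equivalent (assuming the same file) is plain read_csv, which already defaults to a RangeIndex:
import pandas as pd
good_XXth_data = pd.read_csv('good_XXth_data.csv')  # replaces DataFrame.from_csv(..., index_col=None)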
# Apache License 2.0
#
# Copyright (c) 2017 <NAME> & Dohme Corp. a subsidiary of Merck & Co., Inc., Kenilworth, NJ, USA.
# Written by <NAME> <<EMAIL>>
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the LICENSE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import csv
import os
import pickle
import pandas as pd
from io import StringIO
from pMPO import pMPOBuilder
########################################################################################################################
########################################################################################################################
# The DataFrame with the reference data used in the original model building
REFERENCE_DATAFRAME = os.path.join(os.path.join(os.path.dirname(__file__), 'assets'), 'CNS_pMPO.df.pkl')
# The reference pMPO values for each molecule in Hakan's paper
REFERENCE_CNS_PMPO_VALUES = {'Abacavir': {'CNS_pMPO': 0.44, 'CNS_pMPO_withSigmoidal': 0.157},
'Acetohexamide': {'CNS_pMPO': 0.51, 'CNS_pMPO_withSigmoidal': 0.163},
'Acetyldigitoxin': {'CNS_pMPO': 0.1, 'CNS_pMPO_withSigmoidal': 0.102},
'Acrivastine': {'CNS_pMPO': 0.97, 'CNS_pMPO_withSigmoidal': 0.949},
'Acyclovir': {'CNS_pMPO': 0.19, 'CNS_pMPO_withSigmoidal': 0.111},
'Adefovir': {'CNS_pMPO': 0.2, 'CNS_pMPO_withSigmoidal': 0.15},
'Albuterol': {'CNS_pMPO': 0.39, 'CNS_pMPO_withSigmoidal': 0.266},
'Alendronate': {'CNS_pMPO': 0.2, 'CNS_pMPO_withSigmoidal': 0.203},
'Alfuzosin': {'CNS_pMPO': 0.41, 'CNS_pMPO_withSigmoidal': 0.134},
'Aliskiren': {'CNS_pMPO': 0.18, 'CNS_pMPO_withSigmoidal': 0.086},
'Allopurinol': {'CNS_pMPO': 0.48, 'CNS_pMPO_withSigmoidal': 0.226},
'Alogliptin': {'CNS_pMPO': 0.66, 'CNS_pMPO_withSigmoidal': 0.483},
'Alosetron': {'CNS_pMPO': 0.96, 'CNS_pMPO_withSigmoidal': 0.754},
'Altretamine': {'CNS_pMPO': 0.77, 'CNS_pMPO_withSigmoidal': 0.694},
'Alvimopan': {'CNS_pMPO': 0.44, 'CNS_pMPO_withSigmoidal': 0.115},
'Ambenonium': {'CNS_pMPO': 0.48, 'CNS_pMPO_withSigmoidal': 0.327},
'Ambrisentan': {'CNS_pMPO': 0.69, 'CNS_pMPO_withSigmoidal': 0.438},
'Amiloride': {'CNS_pMPO': 0.23, 'CNS_pMPO_withSigmoidal': 0.224},
'Aminocaproic acid': {'CNS_pMPO': 0.54, 'CNS_pMPO_withSigmoidal': 0.379},
'Aminosalicylic acid': {'CNS_pMPO': 0.26, 'CNS_pMPO_withSigmoidal': 0.06},
'Amoxicillin': {'CNS_pMPO': 0.24, 'CNS_pMPO_withSigmoidal': 0.067},
'Amprenavir': {'CNS_pMPO': 0.1, 'CNS_pMPO_withSigmoidal': 0.059},
'Anagrelide': {'CNS_pMPO': 0.86, 'CNS_pMPO_withSigmoidal': 0.858},
'Anastrozole': {'CNS_pMPO': 0.63, 'CNS_pMPO_withSigmoidal': 0.376},
'Anisindione': {'CNS_pMPO': 0.7, 'CNS_pMPO_withSigmoidal': 0.701},
'Anisotropine': {'CNS_pMPO': 0.62, 'CNS_pMPO_withSigmoidal': 0.527},
'Apixaban': {'CNS_pMPO': 0.44, 'CNS_pMPO_withSigmoidal': 0.263},
'Aspirin': {'CNS_pMPO': 0.65, 'CNS_pMPO_withSigmoidal': 0.607},
'Astemizole': {'CNS_pMPO': 0.79, 'CNS_pMPO_withSigmoidal': 0.747},
'Atazanavir': {'CNS_pMPO': 0.06, 'CNS_pMPO_withSigmoidal': 0.027},
'Atenolol': {'CNS_pMPO': 0.44, 'CNS_pMPO_withSigmoidal': 0.254},
'Atorvastatin': {'CNS_pMPO': 0.15, 'CNS_pMPO_withSigmoidal': 0.039},
'Atovaquone': {'CNS_pMPO': 0.8, 'CNS_pMPO_withSigmoidal': 0.73},
'Avanafil': {'CNS_pMPO': 0.24, 'CNS_pMPO_withSigmoidal': 0.087},
'Azathioprine': {'CNS_pMPO': 0.54, 'CNS_pMPO_withSigmoidal': 0.415},
'Azilsartan': {'CNS_pMPO': 0.34, 'CNS_pMPO_withSigmoidal': 0.279},
'Azithromycin': {'CNS_pMPO': 0.24, 'CNS_pMPO_withSigmoidal': 0.234},
'Balsalazide': {'CNS_pMPO': 0.15, 'CNS_pMPO_withSigmoidal': 0.094},
'Bedaquiline': {'CNS_pMPO': 0.71, 'CNS_pMPO_withSigmoidal': 0.705},
'Benazepril': {'CNS_pMPO': 0.43, 'CNS_pMPO_withSigmoidal': 0.011},
'Bendroflumethiazide': {'CNS_pMPO': 0.24, 'CNS_pMPO_withSigmoidal': 0.127},
'Bentiromide': {'CNS_pMPO': 0.22, 'CNS_pMPO_withSigmoidal': 0.003},
'Betamethasone': {'CNS_pMPO': 0.35, 'CNS_pMPO_withSigmoidal': 0.136},
'Betaxolol': {'CNS_pMPO': 0.83, 'CNS_pMPO_withSigmoidal': 0.599},
'Bethanechol': {'CNS_pMPO': 0.65, 'CNS_pMPO_withSigmoidal': 0.647},
'Bicalutamide': {'CNS_pMPO': 0.29, 'CNS_pMPO_withSigmoidal': 0.044},
'Bisoprolol': {'CNS_pMPO': 0.79, 'CNS_pMPO_withSigmoidal': 0.564},
'Boceprevir': {'CNS_pMPO': 0.14, 'CNS_pMPO_withSigmoidal': 0.126},
'Bosentan': {'CNS_pMPO': 0.2, 'CNS_pMPO_withSigmoidal': 0.01},
'Bosutinib': {'CNS_pMPO': 0.6, 'CNS_pMPO_withSigmoidal': 0.432},
'Budesonide': {'CNS_pMPO': 0.41, 'CNS_pMPO_withSigmoidal': 0.111},
'Bufuralol': {'CNS_pMPO': 0.82, 'CNS_pMPO_withSigmoidal': 0.685},
'Busulfan': {'CNS_pMPO': 0.48, 'CNS_pMPO_withSigmoidal': 0.279},
'Cabozantinib': {'CNS_pMPO': 0.36, 'CNS_pMPO_withSigmoidal': 0.048},
'Canagliflozin': {'CNS_pMPO': 0.2, 'CNS_pMPO_withSigmoidal': 0.027},
'Capecitabine': {'CNS_pMPO': 0.23, 'CNS_pMPO_withSigmoidal': 0.087},
'Carbenicillin': {'CNS_pMPO': 0.15, 'CNS_pMPO_withSigmoidal': 0.029},
'Carbidopa': {'CNS_pMPO': 0.26, 'CNS_pMPO_withSigmoidal': 0.226},
'Carglumic acid': {'CNS_pMPO': 0.08, 'CNS_pMPO_withSigmoidal': 0.076},
'Carprofen': {'CNS_pMPO': 0.75, 'CNS_pMPO_withSigmoidal': 0.619},
'Carteolol': {'CNS_pMPO': 0.58, 'CNS_pMPO_withSigmoidal': 0.424},
'Cefaclor': {'CNS_pMPO': 0.27, 'CNS_pMPO_withSigmoidal': 0.059},
'Cefdinir': {'CNS_pMPO': 0.1, 'CNS_pMPO_withSigmoidal': 0.007},
'Cefditoren': {'CNS_pMPO': 0.06, 'CNS_pMPO_withSigmoidal': 0.0},
'Cefpodoxime': {'CNS_pMPO': 0.1, 'CNS_pMPO_withSigmoidal': 0.0},
'Cefuroxime': {'CNS_pMPO': 0.1, 'CNS_pMPO_withSigmoidal': 0.0},
'Celecoxib': {'CNS_pMPO': 0.65, 'CNS_pMPO_withSigmoidal': 0.404},
'Ceritinib': {'CNS_pMPO': 0.27, 'CNS_pMPO_withSigmoidal': 0.195},
'Cerivastatin': {'CNS_pMPO': 0.3, 'CNS_pMPO_withSigmoidal': 0.001},
'Cetirizine': {'CNS_pMPO': 0.87, 'CNS_pMPO_withSigmoidal': 0.725},
'Chenodiol': {'CNS_pMPO': 0.46, 'CNS_pMPO_withSigmoidal': 0.197},
'Chlorambucil': {'CNS_pMPO': 0.87, 'CNS_pMPO_withSigmoidal': 0.737},
'Chloroquine': {'CNS_pMPO': 0.85, 'CNS_pMPO_withSigmoidal': 0.854},
'Chlorotrianisene': {'CNS_pMPO': 0.5, 'CNS_pMPO_withSigmoidal': 0.41},
'Chlorphenesin carbamate': {'CNS_pMPO': 0.58, 'CNS_pMPO_withSigmoidal': 0.287},
'Chlorpropamide': {'CNS_pMPO': 0.62, 'CNS_pMPO_withSigmoidal': 0.259},
'Chlorthalidone': {'CNS_pMPO': 0.26, 'CNS_pMPO_withSigmoidal': 0.137},
'Cimetidine': {'CNS_pMPO': 0.47, 'CNS_pMPO_withSigmoidal': 0.143},
'Cinoxacin': {'CNS_pMPO': 0.58, 'CNS_pMPO_withSigmoidal': 0.411},
'Ciprofloxacin': {'CNS_pMPO': 0.68, 'CNS_pMPO_withSigmoidal': 0.406},
'Cisapride': {'CNS_pMPO': 0.56, 'CNS_pMPO_withSigmoidal': 0.244},
'Clavulanate': {'CNS_pMPO': 0.37, 'CNS_pMPO_withSigmoidal': 0.101},
'Clindamycin': {'CNS_pMPO': 0.35, 'CNS_pMPO_withSigmoidal': 0.114},
'Clofazimine': {'CNS_pMPO': 0.74, 'CNS_pMPO_withSigmoidal': 0.708},
'Clofibrate': {'CNS_pMPO': 0.65, 'CNS_pMPO_withSigmoidal': 0.654},
'Clomiphene': {'CNS_pMPO': 0.47, 'CNS_pMPO_withSigmoidal': 0.384},
'Clonidine': {'CNS_pMPO': 0.77, 'CNS_pMPO_withSigmoidal': 0.539},
'Cloxacillin': {'CNS_pMPO': 0.27, 'CNS_pMPO_withSigmoidal': 0.01},
'Cobicistat': {'CNS_pMPO': 0.18, 'CNS_pMPO_withSigmoidal': 0.109},
'Colchicine': {'CNS_pMPO': 0.65, 'CNS_pMPO_withSigmoidal': 0.3},
'Crizotinib': {'CNS_pMPO': 0.61, 'CNS_pMPO_withSigmoidal': 0.278},
'Cromolyn': {'CNS_pMPO': 0.07, 'CNS_pMPO_withSigmoidal': 0.0},
'Cyclacillin': {'CNS_pMPO': 0.32, 'CNS_pMPO_withSigmoidal': 0.181},
'Cyclophosphamide': {'CNS_pMPO': 0.84, 'CNS_pMPO_withSigmoidal': 0.741},
'Cysteamine': {'CNS_pMPO': 0.48, 'CNS_pMPO_withSigmoidal': 0.318},
'Dabrafenib': {'CNS_pMPO': 0.3, 'CNS_pMPO_withSigmoidal': 0.126},
'Dantrolene': {'CNS_pMPO': 0.56, 'CNS_pMPO_withSigmoidal': 0.523},
'Dapagliflozin': {'CNS_pMPO': 0.21, 'CNS_pMPO_withSigmoidal': 0.053},
'Darifenacin': {'CNS_pMPO': 0.88, 'CNS_pMPO_withSigmoidal': 0.805},
'Deferasirox': {'CNS_pMPO': 0.29, 'CNS_pMPO_withSigmoidal': 0.042},
'Delavirdine': {'CNS_pMPO': 0.22, 'CNS_pMPO_withSigmoidal': 0.103},
'Demeclocycline': {'CNS_pMPO': 0.09, 'CNS_pMPO_withSigmoidal': 0.048},
'Desogestrel': {'CNS_pMPO': 0.62, 'CNS_pMPO_withSigmoidal': 0.615},
'Dexlansoprazole': {'CNS_pMPO': 0.79, 'CNS_pMPO_withSigmoidal': 0.654},
'Diazoxide': {'CNS_pMPO': 0.82, 'CNS_pMPO_withSigmoidal': 0.741},
'Dichlorphenamide': {'CNS_pMPO': 0.43, 'CNS_pMPO_withSigmoidal': 0.18},
'Diclofenac': {'CNS_pMPO': 0.75, 'CNS_pMPO_withSigmoidal': 0.522},
'Dicumarol': {'CNS_pMPO': 0.5, 'CNS_pMPO_withSigmoidal': 0.152},
'Didanosine': {'CNS_pMPO': 0.44, 'CNS_pMPO_withSigmoidal': 0.136},
'Diethylcarbamazine': {'CNS_pMPO': 0.68, 'CNS_pMPO_withSigmoidal': 0.487},
'Diflunisal': {'CNS_pMPO': 0.72, 'CNS_pMPO_withSigmoidal': 0.576},
'<NAME>': {'CNS_pMPO': 0.62, 'CNS_pMPO_withSigmoidal': 0.512},
'Diphemanil': {'CNS_pMPO': 0.48, 'CNS_pMPO_withSigmoidal': 0.485},
'Dipyridamole': {'CNS_pMPO': 0.16, 'CNS_pMPO_withSigmoidal': 0.11},
'Dirithromycin': {'CNS_pMPO': 0.24, 'CNS_pMPO_withSigmoidal': 0.242},
'Disopyramide': {'CNS_pMPO': 0.91, 'CNS_pMPO_withSigmoidal': 0.787},
'Dofetilide': {'CNS_pMPO': 0.47, 'CNS_pMPO_withSigmoidal': 0.127},
'Dolutegravir': {'CNS_pMPO': 0.29, 'CNS_pMPO_withSigmoidal': 0.011},
'Domperidone': {'CNS_pMPO': 0.67, 'CNS_pMPO_withSigmoidal': 0.421},
'Doxazosin': {'CNS_pMPO': 0.54, 'CNS_pMPO_withSigmoidal': 0.263},
'Doxercalciferol': {'CNS_pMPO': 0.54, 'CNS_pMPO_withSigmoidal': 0.328},
'Drospirenone': {'CNS_pMPO': 0.69, 'CNS_pMPO_withSigmoidal': 0.63},
'Dydrogesterone': {'CNS_pMPO': 0.66, 'CNS_pMPO_withSigmoidal': 0.665},
'Dyphylline': {'CNS_pMPO': 0.4, 'CNS_pMPO_withSigmoidal': 0.147},
'Edoxaban': {'CNS_pMPO': 0.24, 'CNS_pMPO_withSigmoidal': 0.087},
'Eltrombopag': {'CNS_pMPO': 0.24, 'CNS_pMPO_withSigmoidal': 0.123},
'Empagliflozin': {'CNS_pMPO': 0.18, 'CNS_pMPO_withSigmoidal': 0.091},
'Emtricitabine': {'CNS_pMPO': 0.48, 'CNS_pMPO_withSigmoidal': 0.146},
'Enalapril': {'CNS_pMPO': 0.45, 'CNS_pMPO_withSigmoidal': 0.044},
'Entecavir': {'CNS_pMPO': 0.22, 'CNS_pMPO_withSigmoidal': 0.152},
'Eplerenone': {'CNS_pMPO': 0.52, 'CNS_pMPO_withSigmoidal': 0.21},
'Eprosartan': {'CNS_pMPO': 0.56, 'CNS_pMPO_withSigmoidal': 0.247},
'Estradiol': {'CNS_pMPO': 0.66, 'CNS_pMPO_withSigmoidal': 0.537},
'Estramustine': {'CNS_pMPO': 0.68, 'CNS_pMPO_withSigmoidal': 0.623},
'Ethacrynic acid': {'CNS_pMPO': 0.79, 'CNS_pMPO_withSigmoidal': 0.699},
'Ethambutol': {'CNS_pMPO': 0.49, 'CNS_pMPO_withSigmoidal': 0.45},
'Ethoxzolamide': {'CNS_pMPO': 0.71, 'CNS_pMPO_withSigmoidal': 0.552},
'Ethylestrenol': {'CNS_pMPO': 0.61, 'CNS_pMPO_withSigmoidal': 0.615},
'Ethynodiol diacetate': {'CNS_pMPO': 0.68, 'CNS_pMPO_withSigmoidal': 0.554},
'Etodolac': {'CNS_pMPO': 0.71, 'CNS_pMPO_withSigmoidal': 0.456},
'Etoposide': {'CNS_pMPO': 0.12, 'CNS_pMPO_withSigmoidal': 0.0},
'Etravirine': {'CNS_pMPO': 0.28, 'CNS_pMPO_withSigmoidal': 0.069},
'Ezetimibe': {'CNS_pMPO': 0.64, 'CNS_pMPO_withSigmoidal': 0.41},
'Fenoprofen': {'CNS_pMPO': 0.83, 'CNS_pMPO_withSigmoidal': 0.725},
'Fesoterodine': {'CNS_pMPO': 0.87, 'CNS_pMPO_withSigmoidal': 0.785},
'Fexofenadine': {'CNS_pMPO': 0.44, 'CNS_pMPO_withSigmoidal': 0.239},
'Flavoxate': {'CNS_pMPO': 0.74, 'CNS_pMPO_withSigmoidal': 0.643},
'Flecainide': {'CNS_pMPO': 0.74, 'CNS_pMPO_withSigmoidal': 0.412},
'Fludarabine': {'CNS_pMPO': 0.22, 'CNS_pMPO_withSigmoidal': 0.155},
'Fluoxymesterone': {'CNS_pMPO': 0.74, 'CNS_pMPO_withSigmoidal': 0.595},
'Fluvastatin': {'CNS_pMPO': 0.38, 'CNS_pMPO_withSigmoidal': 0.022},
'Fosfomycin': {'CNS_pMPO': 0.37, 'CNS_pMPO_withSigmoidal': 0.083},
'Fosinopril': {'CNS_pMPO': 0.4, 'CNS_pMPO_withSigmoidal': 0.381},
'Furazolidone': {'CNS_pMPO': 0.38, 'CNS_pMPO_withSigmoidal': 0.251},
'Furosemide': {'CNS_pMPO': 0.27, 'CNS_pMPO_withSigmoidal': 0.147},
'Gatifloxacin': {'CNS_pMPO': 0.59, 'CNS_pMPO_withSigmoidal': 0.181},
'Gefitinib': {'CNS_pMPO': 0.77, 'CNS_pMPO_withSigmoidal': 0.583},
'Gemifloxacin': {'CNS_pMPO': 0.38, 'CNS_pMPO_withSigmoidal': 0.124},
'Glimepiride': {'CNS_pMPO': 0.17, 'CNS_pMPO_withSigmoidal': 0.03},
'Glipizide': {'CNS_pMPO': 0.16, 'CNS_pMPO_withSigmoidal': 0.0},
'Glyburide': {'CNS_pMPO': 0.2, 'CNS_pMPO_withSigmoidal': 0.127},
'Glycopyrrolate': {'CNS_pMPO': 0.85, 'CNS_pMPO_withSigmoidal': 0.751},
'Guaifenesin': {'CNS_pMPO': 0.65, 'CNS_pMPO_withSigmoidal': 0.407},
'Guanadrel': {'CNS_pMPO': 0.46, 'CNS_pMPO_withSigmoidal': 0.147},
'Guanethidine': {'CNS_pMPO': 0.55, 'CNS_pMPO_withSigmoidal': 0.329},
'Hetacillin': {'CNS_pMPO': 0.45, 'CNS_pMPO_withSigmoidal': 0.024},
'Hexocyclium': {'CNS_pMPO': 0.78, 'CNS_pMPO_withSigmoidal': 0.711},
'Hydralazine': {'CNS_pMPO': 0.69, 'CNS_pMPO_withSigmoidal': 0.362},
'Hydrocortisone': {'CNS_pMPO': 0.37, 'CNS_pMPO_withSigmoidal': 0.2},
'Ibandronate': {'CNS_pMPO': 0.25, 'CNS_pMPO_withSigmoidal': 0.249},
'Ibrutinib': {'CNS_pMPO': 0.53, 'CNS_pMPO_withSigmoidal': 0.371},
'Idelalisib': {'CNS_pMPO': 0.43, 'CNS_pMPO_withSigmoidal': 0.117},
'Imatinib': {'CNS_pMPO': 0.55, 'CNS_pMPO_withSigmoidal': 0.258},
'Indapamide': {'CNS_pMPO': 0.51, 'CNS_pMPO_withSigmoidal': 0.202},
'Indinavir': {'CNS_pMPO': 0.22, 'CNS_pMPO_withSigmoidal': 0.109},
'Irbesartan': {'CNS_pMPO': 0.6, 'CNS_pMPO_withSigmoidal': 0.388},
'Isoniazid': {'CNS_pMPO': 0.52, 'CNS_pMPO_withSigmoidal': 0.265},
'Isopropamide': {'CNS_pMPO': 0.73, 'CNS_pMPO_withSigmoidal': 0.694},
'Itraconazole': {'CNS_pMPO': 0.36, 'CNS_pMPO_withSigmoidal': 0.197},
'Ivacaftor': {'CNS_pMPO': 0.34, 'CNS_pMPO_withSigmoidal': 0.071},
'Ketoconazole': {'CNS_pMPO': 0.61, 'CNS_pMPO_withSigmoidal': 0.436},
'Ketorolac': {'CNS_pMPO': 0.77, 'CNS_pMPO_withSigmoidal': 0.713},
'Labetalol': {'CNS_pMPO': 0.44, 'CNS_pMPO_withSigmoidal': 0.247},
'Lactulose': {'CNS_pMPO': 0.15, 'CNS_pMPO_withSigmoidal': 0.131},
'Lapatinib': {'CNS_pMPO': 0.32, 'CNS_pMPO_withSigmoidal': 0.04},
'Lenvatinib': {'CNS_pMPO': 0.24, 'CNS_pMPO_withSigmoidal': 0.091},
'Levofloxacin': {'CNS_pMPO': 0.78, 'CNS_pMPO_withSigmoidal': 0.526},
'Linagliptin': {'CNS_pMPO': 0.47, 'CNS_pMPO_withSigmoidal': 0.342},
'Linezolid': {'CNS_pMPO': 0.83, 'CNS_pMPO_withSigmoidal': 0.573},
'Lisinopril': {'CNS_pMPO': 0.18, 'CNS_pMPO_withSigmoidal': 0.066},
'Lomitapide': {'CNS_pMPO': 0.57, 'CNS_pMPO_withSigmoidal': 0.43},
'Loperamide': {'CNS_pMPO': 0.81, 'CNS_pMPO_withSigmoidal': 0.783},
'Lopinavir': {'CNS_pMPO': 0.03, 'CNS_pMPO_withSigmoidal': 0.009},
'Loracarbef': {'CNS_pMPO': 0.28, 'CNS_pMPO_withSigmoidal': 0.115},
'Lubiprostone': {'CNS_pMPO': 0.51, 'CNS_pMPO_withSigmoidal': 0.036},
'Macitentan': {'CNS_pMPO': 0.29, 'CNS_pMPO_withSigmoidal': 0.097},
'Medroxyprogesterone acetate': {'CNS_pMPO': 0.63, 'CNS_pMPO_withSigmoidal': 0.522},
'Mefenamic acid': {'CNS_pMPO': 0.72, 'CNS_pMPO_withSigmoidal': 0.594},
'Meloxicam': {'CNS_pMPO': 0.44, 'CNS_pMPO_withSigmoidal': 0.121},
'Melphalan': {'CNS_pMPO': 0.73, 'CNS_pMPO_withSigmoidal': 0.505},
'Mepenzolate': {'CNS_pMPO': 0.86, 'CNS_pMPO_withSigmoidal': 0.754},
'Mercaptopurine': {'CNS_pMPO': 0.68, 'CNS_pMPO_withSigmoidal': 0.494},
'Mesalamine': {'CNS_pMPO': 0.3, 'CNS_pMPO_withSigmoidal': 0.06},
'Mesna': {'CNS_pMPO': 0.63, 'CNS_pMPO_withSigmoidal': 0.62},
'Metaproterenol': {'CNS_pMPO': 0.46, 'CNS_pMPO_withSigmoidal': 0.332},
'Metaxalone': {'CNS_pMPO': 0.82, 'CNS_pMPO_withSigmoidal': 0.825},
'Methantheline': {'CNS_pMPO': 0.61, 'CNS_pMPO_withSigmoidal': 0.567},
'Methazolamide': {'CNS_pMPO': 0.51, 'CNS_pMPO_withSigmoidal': 0.385},
'Methenamine': {'CNS_pMPO': 0.49, 'CNS_pMPO_withSigmoidal': 0.438},
'Methimazole': {'CNS_pMPO': 0.51, 'CNS_pMPO_withSigmoidal': 0.438},
'Methscopolamine': {'CNS_pMPO': 0.75, 'CNS_pMPO_withSigmoidal': 0.731},
'Methyltestosterone': {'CNS_pMPO': 0.79, 'CNS_pMPO_withSigmoidal': 0.789},
'Metolazone': {'CNS_pMPO': 0.51, 'CNS_pMPO_withSigmoidal': 0.202},
'Metronidazole': {'CNS_pMPO': 0.58, 'CNS_pMPO_withSigmoidal': 0.336},
'Metyrosine': {'CNS_pMPO': 0.4, 'CNS_pMPO_withSigmoidal': 0.202},
'Midodrine': {'CNS_pMPO': 0.43, 'CNS_pMPO_withSigmoidal': 0.253},
'Miglitol': {'CNS_pMPO': 0.29, 'CNS_pMPO_withSigmoidal': 0.201},
'Miltefosine': {'CNS_pMPO': 0.56, 'CNS_pMPO_withSigmoidal': 0.419},
'Minocycline': {'CNS_pMPO': 0.09, 'CNS_pMPO_withSigmoidal': 0.047},
'Minoxidil': {'CNS_pMPO': 0.25, 'CNS_pMPO_withSigmoidal': 0.099},
'Mirabegron': {'CNS_pMPO': 0.35, 'CNS_pMPO_withSigmoidal': 0.114},
'Mitotane': {'CNS_pMPO': 0.39, 'CNS_pMPO_withSigmoidal': 0.385},
'Montelukast': {'CNS_pMPO': 0.44, 'CNS_pMPO_withSigmoidal': 0.22},
'Moxifloxacin': {'CNS_pMPO': 0.52, 'CNS_pMPO_withSigmoidal': 0.097},
'Mycophenolic acid': {'CNS_pMPO': 0.5, 'CNS_pMPO_withSigmoidal': 0.165},
'Nabumetone': {'CNS_pMPO': 0.6, 'CNS_pMPO_withSigmoidal': 0.598},
'Nadolol': {'CNS_pMPO': 0.48, 'CNS_pMPO_withSigmoidal': 0.269},
'Nalidixic acid': {'CNS_pMPO': 0.79, 'CNS_pMPO_withSigmoidal': 0.562},
'Naproxen': {'CNS_pMPO': 0.81, 'CNS_pMPO_withSigmoidal': 0.712},
'Nateglinide': {'CNS_pMPO': 0.69, 'CNS_pMPO_withSigmoidal': 0.411},
'Nelfinavir': {'CNS_pMPO': 0.19, 'CNS_pMPO_withSigmoidal': 0.105},
'Neostigmine': {'CNS_pMPO': 0.51, 'CNS_pMPO_withSigmoidal': 0.505},
'Nevirapine': {'CNS_pMPO': 0.88, 'CNS_pMPO_withSigmoidal': 0.853},
'Niacin': {'CNS_pMPO': 0.67, 'CNS_pMPO_withSigmoidal': 0.624},
'Niclosamide': {'CNS_pMPO': 0.42, 'CNS_pMPO_withSigmoidal': 0.188},
'Nilotinib': {'CNS_pMPO': 0.33, 'CNS_pMPO_withSigmoidal': 0.039},
'Nilutamide': {'CNS_pMPO': 0.64, 'CNS_pMPO_withSigmoidal': 0.545},
'Nintedanib': {'CNS_pMPO': 0.49, 'CNS_pMPO_withSigmoidal': 0.202},
'Nitisinone': {'CNS_pMPO': 0.42, 'CNS_pMPO_withSigmoidal': 0.289},
'Nizatidine': {'CNS_pMPO': 0.67, 'CNS_pMPO_withSigmoidal': 0.22},
'Norgestimate': {'CNS_pMPO': 0.74, 'CNS_pMPO_withSigmoidal': 0.663},
'Novobiocin': {'CNS_pMPO': 0.09, 'CNS_pMPO_withSigmoidal': 0.0},
'Olaparib': {'CNS_pMPO': 0.59, 'CNS_pMPO_withSigmoidal': 0.287},
'Olsalazine': {'CNS_pMPO': 0.23, 'CNS_pMPO_withSigmoidal': 0.158},
'Orlistat': {'CNS_pMPO': 0.47, 'CNS_pMPO_withSigmoidal': 0.289},
'Oseltamivir': {'CNS_pMPO': 0.64, 'CNS_pMPO_withSigmoidal': 0.28},
'Oxamniquine': {'CNS_pMPO': 0.43, 'CNS_pMPO_withSigmoidal': 0.24},
'Oxandrolone': {'CNS_pMPO': 0.85, 'CNS_pMPO_withSigmoidal': 0.847},
'Oxaprozin': {'CNS_pMPO': 0.84, 'CNS_pMPO_withSigmoidal': 0.772},
'Oxyphenbutazone': {'CNS_pMPO': 0.83, 'CNS_pMPO_withSigmoidal': 0.716},
'Oxyphenonium': {'CNS_pMPO': 0.78, 'CNS_pMPO_withSigmoidal': 0.715},
'Palbociclib': {'CNS_pMPO': 0.44, 'CNS_pMPO_withSigmoidal': 0.122},
'Paliperidone': {'CNS_pMPO': 0.74, 'CNS_pMPO_withSigmoidal': 0.404},
'Pantoprazole': {'CNS_pMPO': 0.66, 'CNS_pMPO_withSigmoidal': 0.417},
'Pargyline': {'CNS_pMPO': 0.48, 'CNS_pMPO_withSigmoidal': 0.392},
'Paricalcitol': {'CNS_pMPO': 0.43, 'CNS_pMPO_withSigmoidal': 0.318},
'Pemoline': {'CNS_pMPO': 0.72, 'CNS_pMPO_withSigmoidal': 0.593},
'Penicillamine': {'CNS_pMPO': 0.42, 'CNS_pMPO_withSigmoidal': 0.353},
'Phenazone': {'CNS_pMPO': 0.52, 'CNS_pMPO_withSigmoidal': 0.427},
'Phenazopyridine': {'CNS_pMPO': 0.49, 'CNS_pMPO_withSigmoidal': 0.205},
'Phenprocoumon': {'CNS_pMPO': 0.87, 'CNS_pMPO_withSigmoidal': 0.876},
'Phensuximide': {'CNS_pMPO': 0.62, 'CNS_pMPO_withSigmoidal': 0.52},
'Phenylephrine': {'CNS_pMPO': 0.53, 'CNS_pMPO_withSigmoidal': 0.49},
'Pilocarpine': {'CNS_pMPO': 0.73, 'CNS_pMPO_withSigmoidal': 0.563},
'Pinacidil': {'CNS_pMPO': 0.7, 'CNS_pMPO_withSigmoidal': 0.385},
'Pipobroman': {'CNS_pMPO': 0.71, 'CNS_pMPO_withSigmoidal': 0.58},
'Pirenzepine': {'CNS_pMPO': 0.85, 'CNS_pMPO_withSigmoidal': 0.604},
'Pitavastatin': {'CNS_pMPO': 0.35, 'CNS_pMPO_withSigmoidal': 0.003},
'Ponatinib': {'CNS_pMPO': 0.77, 'CNS_pMPO_withSigmoidal': 0.722},
'Pralidoxime': {'CNS_pMPO': 0.69, 'CNS_pMPO_withSigmoidal': 0.588},
'Pravastatin': {'CNS_pMPO': 0.11, 'CNS_pMPO_withSigmoidal': 0.0},
'Primaquine': {'CNS_pMPO': 0.74, 'CNS_pMPO_withSigmoidal': 0.524},
'Probenecid': {'CNS_pMPO': 0.74, 'CNS_pMPO_withSigmoidal': 0.525},
'Probucol': {'CNS_pMPO': 0.47, 'CNS_pMPO_withSigmoidal': 0.326},
'Proguanil': {'CNS_pMPO': 0.46, 'CNS_pMPO_withSigmoidal': 0.308},
'Propantheline': {'CNS_pMPO': 0.58, 'CNS_pMPO_withSigmoidal': 0.489},
'Propylthiouracil': {'CNS_pMPO': 0.74, 'CNS_pMPO_withSigmoidal': 0.535},
'Protokylol': {'CNS_pMPO': 0.46, 'CNS_pMPO_withSigmoidal': 0.255},
'Pyridostigmine': {'CNS_pMPO': 0.49, 'CNS_pMPO_withSigmoidal': 0.487},
'Quinestrol': {'CNS_pMPO': 0.65, 'CNS_pMPO_withSigmoidal': 0.593},
'Quinethazone': {'CNS_pMPO': 0.34, 'CNS_pMPO_withSigmoidal': 0.157},
'Quinidine': {'CNS_pMPO': 0.97, 'CNS_pMPO_withSigmoidal': 0.972},
'Rabeprazole': {'CNS_pMPO': 0.77, 'CNS_pMPO_withSigmoidal': 0.548},
'Raloxifene': {'CNS_pMPO': 0.57, 'CNS_pMPO_withSigmoidal': 0.334},
'Raltegravir': {'CNS_pMPO': 0.08, 'CNS_pMPO_withSigmoidal': 0.0},
'Ranolazine': {'CNS_pMPO': 0.65, 'CNS_pMPO_withSigmoidal': 0.328},
'Regorafenib': {'CNS_pMPO': 0.19, 'CNS_pMPO_withSigmoidal': 0.028},
'Repaglinide': {'CNS_pMPO': 0.59, 'CNS_pMPO_withSigmoidal': 0.187},
'Reserpine': {'CNS_pMPO': 0.47, 'CNS_pMPO_withSigmoidal': 0.352},
'Ribavirin': {'CNS_pMPO': 0.14, 'CNS_pMPO_withSigmoidal': 0.129},
'Rifaximin': {'CNS_pMPO': 0.12, 'CNS_pMPO_withSigmoidal': 0.0},
'Riociguat': {'CNS_pMPO': 0.29, 'CNS_pMPO_withSigmoidal': 0.01},
'Risedronate': {'CNS_pMPO': 0.2, 'CNS_pMPO_withSigmoidal': 0.154},
'Ritodrine': {'CNS_pMPO': 0.57, 'CNS_pMPO_withSigmoidal': 0.397},
'Ritonavir': {'CNS_pMPO': 0.03, 'CNS_pMPO_withSigmoidal': 0.025},
'Rivaroxaban': {'CNS_pMPO': 0.59, 'CNS_pMPO_withSigmoidal': 0.395},
'Roflumilast': {'CNS_pMPO': 0.71, 'CNS_pMPO_withSigmoidal': 0.608},
'Rosiglitazone': {'CNS_pMPO': 0.87, 'CNS_pMPO_withSigmoidal': 0.643},
'Rosuvastatin': {'CNS_pMPO': 0.06, 'CNS_pMPO_withSigmoidal': 0.0},
'Ruxolitinib': {'CNS_pMPO': 0.73, 'CNS_pMPO_withSigmoidal': 0.566},
'Sapropterin': {'CNS_pMPO': 0.23, 'CNS_pMPO_withSigmoidal': 0.228},
'Saquinavir': {'CNS_pMPO': 0.16, 'CNS_pMPO_withSigmoidal': 0.161},
'Sibutramine': {'CNS_pMPO': 0.56, 'CNS_pMPO_withSigmoidal': 0.563},
'Sildenafil': {'CNS_pMPO': 0.54, 'CNS_pMPO_withSigmoidal': 0.387},
'Silodosin': {'CNS_pMPO': 0.35, 'CNS_pMPO_withSigmoidal': 0.154},
'Simeprevir': {'CNS_pMPO': 0.25, 'CNS_pMPO_withSigmoidal': 0.113},
'Sitagliptin': {'CNS_pMPO': 0.8, 'CNS_pMPO_withSigmoidal': 0.443},
'Sodium phenylbutyrate': {'CNS_pMPO': 0.58, 'CNS_pMPO_withSigmoidal': 0.506},
'Sofosbuvir': {'CNS_pMPO': 0.16, 'CNS_pMPO_withSigmoidal': 0.126},
'Sotalol': {'CNS_pMPO': 0.4, 'CNS_pMPO_withSigmoidal': 0.204},
'Sparfloxacin': {'CNS_pMPO': 0.37, 'CNS_pMPO_withSigmoidal': 0.122},
'Spirapril': {'CNS_pMPO': 0.39, 'CNS_pMPO_withSigmoidal': 0.01},
'Spironolactone': {'CNS_pMPO': 0.63, 'CNS_pMPO_withSigmoidal': 0.547},
'Stanozolol': {'CNS_pMPO': 0.66, 'CNS_pMPO_withSigmoidal': 0.514},
'Stavudine': {'CNS_pMPO': 0.5, 'CNS_pMPO_withSigmoidal': 0.169},
'Succimer': {'CNS_pMPO': 0.31, 'CNS_pMPO_withSigmoidal': 0.177},
'Sulfacytine': {'CNS_pMPO': 0.36, 'CNS_pMPO_withSigmoidal': 0.167},
'Sulfadoxine': {'CNS_pMPO': 0.35, 'CNS_pMPO_withSigmoidal': 0.167},
'Sulfameter': {'CNS_pMPO': 0.4, 'CNS_pMPO_withSigmoidal': 0.163},
'Sulfamethizole': {'CNS_pMPO': 0.41, 'CNS_pMPO_withSigmoidal': 0.158},
'Sulfamethoxazole': {'CNS_pMPO': 0.43, 'CNS_pMPO_withSigmoidal': 0.147},
'Sulfaphenazole': {'CNS_pMPO': 0.54, 'CNS_pMPO_withSigmoidal': 0.194},
'Sulfasalazine': {'CNS_pMPO': 0.2, 'CNS_pMPO_withSigmoidal': 0.005},
'Sulfinpyrazone': {'CNS_pMPO': 0.63, 'CNS_pMPO_withSigmoidal': 0.463},
'Sulfoxone': {'CNS_pMPO': 0.1, 'CNS_pMPO_withSigmoidal': 0.003},
'Sumatriptan': {'CNS_pMPO': 0.72, 'CNS_pMPO_withSigmoidal': 0.523},
'Sunitinib': {'CNS_pMPO': 0.54, 'CNS_pMPO_withSigmoidal': 0.276},
'Tamsulosin': {'CNS_pMPO': 0.52, 'CNS_pMPO_withSigmoidal': 0.125},
'Tedizolid': {'CNS_pMPO': 0.56, 'CNS_pMPO_withSigmoidal': 0.439},
'Tegaserod': {'CNS_pMPO': 0.53, 'CNS_pMPO_withSigmoidal': 0.38},
'Telaprevir': {'CNS_pMPO': 0.07, 'CNS_pMPO_withSigmoidal': 0.069},
'Telithromycin': {'CNS_pMPO': 0.46, 'CNS_pMPO_withSigmoidal': 0.454},
'Tenofovir': {'CNS_pMPO': 0.21, 'CNS_pMPO_withSigmoidal': 0.156},
'Testolactone': {'CNS_pMPO': 0.74, 'CNS_pMPO_withSigmoidal': 0.743},
'Thiabendazole': {'CNS_pMPO': 0.8, 'CNS_pMPO_withSigmoidal': 0.79},
'Thioguanine': {'CNS_pMPO': 0.4, 'CNS_pMPO_withSigmoidal': 0.147},
'Ticagrelor': {'CNS_pMPO': 0.15, 'CNS_pMPO_withSigmoidal': 0.127},
'Ticlopidine': {'CNS_pMPO': 0.55, 'CNS_pMPO_withSigmoidal': 0.452},
'Tiludronate': {'CNS_pMPO': 0.16, 'CNS_pMPO_withSigmoidal': 0.155},
'Tinidazole': {'CNS_pMPO': 0.43, 'CNS_pMPO_withSigmoidal': 0.272},
'Tiopronin': {'CNS_pMPO': 0.36, 'CNS_pMPO_withSigmoidal': 0.297},
'Tipranavir': {'CNS_pMPO': 0.29, 'CNS_pMPO_withSigmoidal': 0.108},
'Tofacitinib': {'CNS_pMPO': 0.7, 'CNS_pMPO_withSigmoidal': 0.424},
'Tolazamide': {'CNS_pMPO': 0.59, 'CNS_pMPO_withSigmoidal': 0.22},
'Tolrestat': {'CNS_pMPO': 0.8, 'CNS_pMPO_withSigmoidal': 0.693},
'Torsemide': {'CNS_pMPO': 0.41, 'CNS_pMPO_withSigmoidal': 0.244},
'Tranexamic acid': {'CNS_pMPO': 0.57, 'CNS_pMPO_withSigmoidal': 0.408},
'Treprostinil': {'CNS_pMPO': 0.38, 'CNS_pMPO_withSigmoidal': 0.02},
'Triamterene': {'CNS_pMPO': 0.37, 'CNS_pMPO_withSigmoidal': 0.25},
'Tridihexethyl': {'CNS_pMPO': 0.63, 'CNS_pMPO_withSigmoidal': 0.607},
'Trimethobenzamide': {'CNS_pMPO': 0.86, 'CNS_pMPO_withSigmoidal': 0.591},
'Trimethoprim': {'CNS_pMPO': 0.56, 'CNS_pMPO_withSigmoidal': 0.182},
'Trioxsalen': {'CNS_pMPO': 0.64, 'CNS_pMPO_withSigmoidal': 0.641},
'Troleandomycin': {'CNS_pMPO': 0.37, 'CNS_pMPO_withSigmoidal': 0.367},
'Trospium': {'CNS_pMPO': 0.8, 'CNS_pMPO_withSigmoidal': 0.606},
'Trovafloxacin': {'CNS_pMPO': 0.45, 'CNS_pMPO_withSigmoidal': 0.127},
'Uracil mustard': {'CNS_pMPO': 0.7, 'CNS_pMPO_withSigmoidal': 0.51},
'Valsartan': {'CNS_pMPO': 0.32, 'CNS_pMPO_withSigmoidal': 0.01},
'Vandetanib': {'CNS_pMPO': 0.78, 'CNS_pMPO_withSigmoidal': 0.74},
'Vemurafenib': {'CNS_pMPO': 0.38, 'CNS_pMPO_withSigmoidal': 0.11},
'Vismodegib': {'CNS_pMPO': 0.68, 'CNS_pMPO_withSigmoidal': 0.454},
'Vorapaxar': {'CNS_pMPO': 0.56, 'CNS_pMPO_withSigmoidal': 0.375},
'Zafirlukast': {'CNS_pMPO': 0.22, 'CNS_pMPO_withSigmoidal': 0.066},
'Zidovudine': {'CNS_pMPO': 0.36, 'CNS_pMPO_withSigmoidal': 0.156},
'Zileuton': {'CNS_pMPO': 0.62, 'CNS_pMPO_withSigmoidal': 0.452},
'Abiraterone': {'CNS_pMPO': 0.75, 'CNS_pMPO_withSigmoidal': 0.674},
'Acebutolol': {'CNS_pMPO': 0.49, 'CNS_pMPO_withSigmoidal': 0.243},
'Acetaminophen': {'CNS_pMPO': 0.61, 'CNS_pMPO_withSigmoidal': 0.388},
'Acetazolamide': {'CNS_pMPO': 0.33, 'CNS_pMPO_withSigmoidal': 0.118},
'Acetophenazine': {'CNS_pMPO': 0.9, 'CNS_pMPO_withSigmoidal': 0.821},
'Acitretin': {'CNS_pMPO': 0.85, 'CNS_pMPO_withSigmoidal': 0.851},
'Afatinib': {'CNS_pMPO': 0.52, 'CNS_pMPO_withSigmoidal': 0.234},
'Albendazole': {'CNS_pMPO': 0.73, 'CNS_pMPO_withSigmoidal': 0.496},
'Almotriptan': {'CNS_pMPO': 0.92, 'CNS_pMPO_withSigmoidal': 0.824},
'Alprazolam': {'CNS_pMPO': 0.74, 'CNS_pMPO_withSigmoidal': 0.742},
'Alprenolol': {'CNS_pMPO': 0.8, 'CNS_pMPO_withSigmoidal': 0.562},
'Amantadine': {'CNS_pMPO': 0.65, 'CNS_pMPO_withSigmoidal': 0.591},
'Aminoglutethimide': {'CNS_pMPO': 0.66, 'CNS_pMPO_withSigmoidal': 0.4},
'Amitriptyline': {'CNS_pMPO': 0.58, 'CNS_pMPO_withSigmoidal': 0.577},
'Amlodipine': {'CNS_pMPO': 0.52, 'CNS_pMPO_withSigmoidal': 0.234},
'Amoxapine': {'CNS_pMPO': 0.95, 'CNS_pMPO_withSigmoidal': 0.927},
'Amphetamine': {'CNS_pMPO': 0.66, 'CNS_pMPO_withSigmoidal': 0.606},
'Anileridine': {'CNS_pMPO': 0.97, 'CNS_pMPO_withSigmoidal': 0.936},
'Aniracetam': {'CNS_pMPO': 0.67, 'CNS_pMPO_withSigmoidal': 0.579},
'Apomorphine': {'CNS_pMPO': 0.84, 'CNS_pMPO_withSigmoidal': 0.718},
'Aprepitant': {'CNS_pMPO': 0.46, 'CNS_pMPO_withSigmoidal': 0.167},
'Aripiprazole': {'CNS_pMPO': 0.78, 'CNS_pMPO_withSigmoidal': 0.732},
'Armodafinil': {'CNS_pMPO': 0.85, 'CNS_pMPO_withSigmoidal': 0.806},
'Atomoxetine': {'CNS_pMPO': 0.78, 'CNS_pMPO_withSigmoidal': 0.674},
'Atropine': {'CNS_pMPO': 0.88, 'CNS_pMPO_withSigmoidal': 0.835},
'Axitinib': {'CNS_pMPO': 0.59, 'CNS_pMPO_withSigmoidal': 0.264},
'Azatadine': {'CNS_pMPO': 0.65, 'CNS_pMPO_withSigmoidal': 0.654},
'Baclofen': {'CNS_pMPO': 0.66, 'CNS_pMPO_withSigmoidal': 0.459},
'Benzphetamine': {'CNS_pMPO': 0.56, 'CNS_pMPO_withSigmoidal': 0.563},
'Benztropine': {'CNS_pMPO': 0.62, 'CNS_pMPO_withSigmoidal': 0.621},
'Bepridil': {'CNS_pMPO': 0.59, 'CNS_pMPO_withSigmoidal': 0.527},
'Bethanidine': {'CNS_pMPO': 0.57, 'CNS_pMPO_withSigmoidal': 0.382},
'Bexarotene': {'CNS_pMPO': 0.72, 'CNS_pMPO_withSigmoidal': 0.701},
'Biperiden': {'CNS_pMPO': 0.85, 'CNS_pMPO_withSigmoidal': 0.849},
'Bromazepam': {'CNS_pMPO': 0.88, 'CNS_pMPO_withSigmoidal': 0.876},
'Bromocriptine': {'CNS_pMPO': 0.17, 'CNS_pMPO_withSigmoidal': 0.03},
'Bromodiphenhydramine': {'CNS_pMPO': 0.64, 'CNS_pMPO_withSigmoidal': 0.632},
'Brompheniramine': {'CNS_pMPO': 0.68, 'CNS_pMPO_withSigmoidal': 0.68},
'Buclizine': {'CNS_pMPO': 0.42, 'CNS_pMPO_withSigmoidal': 0.251},
'Budipine': {'CNS_pMPO': 0.59, 'CNS_pMPO_withSigmoidal': 0.589},
'Bumetanide': {'CNS_pMPO': 0.27, 'CNS_pMPO_withSigmoidal': 0.07},
'Buprenorphine': {'CNS_pMPO': 0.72, 'CNS_pMPO_withSigmoidal': 0.537},
'Bupropion': {'CNS_pMPO': 0.84, 'CNS_pMPO_withSigmoidal': 0.74},
'Buspirone': {'CNS_pMPO': 0.72, 'CNS_pMPO_withSigmoidal': 0.456},
'Butabarbital': {'CNS_pMPO': 0.59, 'CNS_pMPO_withSigmoidal': 0.327},
'Cabergoline': {'CNS_pMPO': 0.64, 'CNS_pMPO_withSigmoidal': 0.267},
'Caffeine': {'CNS_pMPO': 0.62, 'CNS_pMPO_withSigmoidal': 0.536},
'Carbamazepine': {'CNS_pMPO': 0.83, 'CNS_pMPO_withSigmoidal': 0.833},
'Carbinoxamine': {'CNS_pMPO': 0.76, 'CNS_pMPO_withSigmoidal': 0.76},
'Carisoprodol': {'CNS_pMPO': 0.53, 'CNS_pMPO_withSigmoidal': 0.279},
'Carvedilol': {'CNS_pMPO': 0.55, 'CNS_pMPO_withSigmoidal': 0.309},
'Cevimeline': {'CNS_pMPO': 0.5, 'CNS_pMPO_withSigmoidal': 0.454},
'Chlophedianol': {'CNS_pMPO': 0.85, 'CNS_pMPO_withSigmoidal': 0.853},
'Chloramphenicol': {'CNS_pMPO': 0.32, 'CNS_pMPO_withSigmoidal': 0.188},
'Chlordiazepoxide': {'CNS_pMPO': 0.88, 'CNS_pMPO_withSigmoidal': 0.879},
'Chlormezanone': {'CNS_pMPO': 0.73, 'CNS_pMPO_withSigmoidal': 0.629},
'Chlorphentermine': {'CNS_pMPO': 0.75, 'CNS_pMPO_withSigmoidal': 0.65},
'Chlorpromazine': {'CNS_pMPO': 0.59, 'CNS_pMPO_withSigmoidal': 0.587},
'Chlorprothixene': {'CNS_pMPO': 0.54, 'CNS_pMPO_withSigmoidal': 0.535},
'Chlorzoxazone': {'CNS_pMPO': 0.75, 'CNS_pMPO_withSigmoidal': 0.751},
'Cilostazol': {'CNS_pMPO': 0.68, 'CNS_pMPO_withSigmoidal': 0.445},
'Cinacalcet': {'CNS_pMPO': 0.7, 'CNS_pMPO_withSigmoidal': 0.66},
'Citalopram': {'CNS_pMPO': 0.78, 'CNS_pMPO_withSigmoidal': 0.681},
'Clemastine': {'CNS_pMPO': 0.6, 'CNS_pMPO_withSigmoidal': 0.58},
'Clidinium': {'CNS_pMPO': 0.85, 'CNS_pMPO_withSigmoidal': 0.71},
'Clobazam': {'CNS_pMPO': 0.74, 'CNS_pMPO_withSigmoidal': 0.742},
'Clomipramine': {'CNS_pMPO': 0.58, 'CNS_pMPO_withSigmoidal': 0.582},
'Clonazepam': {'CNS_pMPO': 0.69, 'CNS_pMPO_withSigmoidal': 0.548},
'Clozapine': {'CNS_pMPO': 0.91, 'CNS_pMPO_withSigmoidal': 0.807},
'Cycloserine': {'CNS_pMPO': 0.55, 'CNS_pMPO_withSigmoidal': 0.296},
'Danazol': {'CNS_pMPO': 0.78, 'CNS_pMPO_withSigmoidal': 0.776},
'Dapsone': {'CNS_pMPO': 0.54, 'CNS_pMPO_withSigmoidal': 0.168},
'Dasatinib': {'CNS_pMPO': 0.31, 'CNS_pMPO_withSigmoidal': 0.127},
'Desloratadine': {'CNS_pMPO': 0.78, 'CNS_pMPO_withSigmoidal': 0.776},
'Desvenlafaxine': {'CNS_pMPO': 0.81, 'CNS_pMPO_withSigmoidal': 0.58},
'Dexmethylphenidate': {'CNS_pMPO': 0.88, 'CNS_pMPO_withSigmoidal': 0.782},
'Dextromethorphan': {'CNS_pMPO': 0.65, 'CNS_pMPO_withSigmoidal': 0.65},
'Dicyclomine': {'CNS_pMPO': 0.71, 'CNS_pMPO_withSigmoidal': 0.712},
'Diethylpropion': {'CNS_pMPO': 0.66, 'CNS_pMPO_withSigmoidal': 0.662},
'Difenoxin': {'CNS_pMPO': 0.85, 'CNS_pMPO_withSigmoidal': 0.755},
'Dihydrocodeine': {'CNS_pMPO': 0.95, 'CNS_pMPO_withSigmoidal': 0.857},
'Diltiazem': {'CNS_pMPO': 0.77, 'CNS_pMPO_withSigmoidal': 0.685},
'Diphenylpyraline': {'CNS_pMPO': 0.63, 'CNS_pMPO_withSigmoidal': 0.627},
'Disulfiram': {'CNS_pMPO': 0.47, 'CNS_pMPO_withSigmoidal': 0.469},
'Dolasetron': {'CNS_pMPO': 0.92, 'CNS_pMPO_withSigmoidal': 0.816},
'Donepezil': {'CNS_pMPO': 0.77, 'CNS_pMPO_withSigmoidal': 0.681},
'Dronabinol': {'CNS_pMPO': 0.67, 'CNS_pMPO_withSigmoidal': 0.675},
'Dronedarone': {'CNS_pMPO': 0.52, 'CNS_pMPO_withSigmoidal': 0.386},
'Duloxetine': {'CNS_pMPO': 0.82, 'CNS_pMPO_withSigmoidal': 0.8},
'Dutasteride': {'CNS_pMPO': 0.49, 'CNS_pMPO_withSigmoidal': 0.346},
'Efavirenz': {'CNS_pMPO': 0.76, 'CNS_pMPO_withSigmoidal': 0.763},
'Eletriptan': {'CNS_pMPO': 0.87, 'CNS_pMPO_withSigmoidal': 0.686},
'Eliglustat': {'CNS_pMPO': 0.71, 'CNS_pMPO_withSigmoidal': 0.402},
'Entacapone': {'CNS_pMPO': 0.4, 'CNS_pMPO_withSigmoidal': 0.168},
'Enzalutamide': {'CNS_pMPO': 0.65, 'CNS_pMPO_withSigmoidal': 0.468},
'Erlotinib': {'CNS_pMPO': 0.77, 'CNS_pMPO_withSigmoidal': 0.499},
'Eszopiclone': {'CNS_pMPO': 0.51, 'CNS_pMPO_withSigmoidal': 0.155},
'Ethchlorvynol': {'CNS_pMPO': 0.62, 'CNS_pMPO_withSigmoidal': 0.615},
'Ethinamate': {'CNS_pMPO': 0.78, 'CNS_pMPO_withSigmoidal': 0.776},
'Ethionamide': {'CNS_pMPO': 0.77, 'CNS_pMPO_withSigmoidal': 0.726},
'Ethopropazine': {'CNS_pMPO': 0.57, 'CNS_pMPO_withSigmoidal': 0.566},
'Ethosuximide': {'CNS_pMPO': 0.73, 'CNS_pMPO_withSigmoidal': 0.631},
'Ethotoin': {'CNS_pMPO': 0.8, 'CNS_pMPO_withSigmoidal': 0.691},
'Ezogabine': {'CNS_pMPO': 0.56, 'CNS_pMPO_withSigmoidal': 0.242},
'Famotidine': {'CNS_pMPO': 0.31, 'CNS_pMPO_withSigmoidal': 0.255},
'Febuxostat': {'CNS_pMPO': 0.73, 'CNS_pMPO_withSigmoidal': 0.564},
'Felbamate': {'CNS_pMPO': 0.44, 'CNS_pMPO_withSigmoidal': 0.229},
'Felodipine': {'CNS_pMPO': 0.71, 'CNS_pMPO_withSigmoidal': 0.586},
'Fenofibrate': {'CNS_pMPO': 0.64, 'CNS_pMPO_withSigmoidal': 0.595},
'Fentanyl': {'CNS_pMPO': 0.72, 'CNS_pMPO_withSigmoidal': 0.715},
'Finasteride': {'CNS_pMPO': 0.68, 'CNS_pMPO_withSigmoidal': 0.468},
'Fingolimod': {'CNS_pMPO': 0.69, 'CNS_pMPO_withSigmoidal': 0.625},
'Flibanserin': {'CNS_pMPO': 0.86, 'CNS_pMPO_withSigmoidal': 0.765},
'Fluconazole': {'CNS_pMPO': 0.71, 'CNS_pMPO_withSigmoidal': 0.448},
'Flucytosine': {'CNS_pMPO': 0.46, 'CNS_pMPO_withSigmoidal': 0.268},
'Fluoxetine': {'CNS_pMPO': 0.82, 'CNS_pMPO_withSigmoidal': 0.82},
'Flurazepam': {'CNS_pMPO': 0.75, 'CNS_pMPO_withSigmoidal': 0.66},
'Flurbiprofen': {'CNS_pMPO': 0.81, 'CNS_pMPO_withSigmoidal': 0.719},
'Fluvoxamine': {'CNS_pMPO': 0.96, 'CNS_pMPO_withSigmoidal': 0.919},
'Frovatriptan': {'CNS_pMPO': 0.49, 'CNS_pMPO_withSigmoidal': 0.37},
'Gabapentin': {'CNS_pMPO': 0.61, 'CNS_pMPO_withSigmoidal': 0.42},
'Galantamine': {'CNS_pMPO': 0.97, 'CNS_pMPO_withSigmoidal': 0.926},
'Gemfibrozil': {'CNS_pMPO': 0.86, 'CNS_pMPO_withSigmoidal': 0.857},
'Granisetron': {'CNS_pMPO': 0.85, 'CNS_pMPO_withSigmoidal': 0.819},
'Guanabenz': {'CNS_pMPO': 0.72, 'CNS_pMPO_withSigmoidal': 0.44},
'Guanfacine': {'CNS_pMPO': 0.63, 'CNS_pMPO_withSigmoidal': 0.294},
'Halazepam': {'CNS_pMPO': 0.62, 'CNS_pMPO_withSigmoidal': 0.582},
'Halofantrine': {'CNS_pMPO': 0.59, 'CNS_pMPO_withSigmoidal': 0.576},
'Haloperidol': {'CNS_pMPO': 0.93, 'CNS_pMPO_withSigmoidal': 0.853},
'Hydroxyzine': {'CNS_pMPO': 0.92, 'CNS_pMPO_withSigmoidal': 0.837},
'Ibuprofen': {'CNS_pMPO': 0.77, 'CNS_pMPO_withSigmoidal': 0.66},
'Iloperidone': {'CNS_pMPO': 0.73, 'CNS_pMPO_withSigmoidal': 0.631},
'Indomethacin': {'CNS_pMPO': 0.75, 'CNS_pMPO_withSigmoidal': 0.57},
'Isocarboxazid': {'CNS_pMPO': 0.67, 'CNS_pMPO_withSigmoidal': 0.401},
'Isotretinoin': {'CNS_pMPO': 0.78, 'CNS_pMPO_withSigmoidal': 0.784},
'Isradipine': {'CNS_pMPO': 0.53, 'CNS_pMPO_withSigmoidal': 0.394},
'Ketoprofen': {'CNS_pMPO': 0.8, 'CNS_pMPO_withSigmoidal': 0.731},
'Lacosamide': {'CNS_pMPO': 0.67, 'CNS_pMPO_withSigmoidal': 0.387},
'Lamotrigine': {'CNS_pMPO': 0.53, 'CNS_pMPO_withSigmoidal': 0.151},
'Lenalidomide': {'CNS_pMPO': 0.43, 'CNS_pMPO_withSigmoidal': 0.152},
'Letrozole': {'CNS_pMPO': 0.63, 'CNS_pMPO_withSigmoidal': 0.478},
'Levamisole': {'CNS_pMPO': 0.54, 'CNS_pMPO_withSigmoidal': 0.466},
'Levetiracetam': {'CNS_pMPO': 0.68, 'CNS_pMPO_withSigmoidal': 0.6},
'Levodopa': {'CNS_pMPO': 0.26, 'CNS_pMPO_withSigmoidal': 0.192},
'Levomepromazine': {'CNS_pMPO': 0.65, 'CNS_pMPO_withSigmoidal': 0.649},
'Levomethadyl': {'CNS_pMPO': 0.72, 'CNS_pMPO_withSigmoidal': 0.685},
'Levomilnacipran': {'CNS_pMPO': 0.82, 'CNS_pMPO_withSigmoidal': 0.795},
'Levopropoxyphene': {'CNS_pMPO': 0.72, 'CNS_pMPO_withSigmoidal': 0.714},
'Lidocaine': {'CNS_pMPO': 0.9, 'CNS_pMPO_withSigmoidal': 0.897},
'Lofexidine': {'CNS_pMPO': 0.9, 'CNS_pMPO_withSigmoidal': 0.897},
'Lomustine': {'CNS_pMPO': 0.81, 'CNS_pMPO_withSigmoidal': 0.789},
'Loratadine': {'CNS_pMPO': 0.61, 'CNS_pMPO_withSigmoidal': 0.497},
'Lorazepam': {'CNS_pMPO': 0.73, 'CNS_pMPO_withSigmoidal': 0.579},
'Lorcainide': {'CNS_pMPO': 0.67, 'CNS_pMPO_withSigmoidal': 0.6},
'Lorcaserin': {'CNS_pMPO': 0.65, 'CNS_pMPO_withSigmoidal': 0.556},
'Losartan': {'CNS_pMPO': 0.48, 'CNS_pMPO_withSigmoidal': 0.137},
'Lovastatin': {'CNS_pMPO': 0.66, 'CNS_pMPO_withSigmoidal': 0.469},
'Lurasidone': {'CNS_pMPO': 0.68, 'CNS_pMPO_withSigmoidal': 0.659},
'Maprotiline': {'CNS_pMPO': 0.73, 'CNS_pMPO_withSigmoidal': 0.734},
'Maraviroc': {'CNS_pMPO': 0.77, 'CNS_pMPO_withSigmoidal': 0.671},
'Mazindol': {'CNS_pMPO': 0.77, 'CNS_pMPO_withSigmoidal': 0.721},
'Mecamylamine': {'CNS_pMPO': 0.57, 'CNS_pMPO_withSigmoidal': 0.489},
'Mefloquine': {'CNS_pMPO': 0.8, 'CNS_pMPO_withSigmoidal': 0.548},
'Memantine': {'CNS_pMPO': 0.7, 'CNS_pMPO_withSigmoidal': 0.613},
'Meperidine': {'CNS_pMPO': 0.77, 'CNS_pMPO_withSigmoidal': 0.769},
'Mesoridazine': {'CNS_pMPO': 0.68, 'CNS_pMPO_withSigmoidal': 0.585},
'Metergoline': {'CNS_pMPO': 0.84, 'CNS_pMPO_withSigmoidal': 0.757},
'Metformin': {'CNS_pMPO': 0.17, 'CNS_pMPO_withSigmoidal': 0.041},
'Methamphetamine': {'CNS_pMPO': 0.56, 'CNS_pMPO_withSigmoidal': 0.503},
'Metharbital': {'CNS_pMPO': 0.75, 'CNS_pMPO_withSigmoidal': 0.665},
'Methdilazine': {'CNS_pMPO': 0.59, 'CNS_pMPO_withSigmoidal': 0.595},
'Methixene': {'CNS_pMPO': 0.55, 'CNS_pMPO_withSigmoidal': 0.555},
'Methocarbamol': {'CNS_pMPO': 0.49, 'CNS_pMPO_withSigmoidal': 0.139},
'Methylergonovine': {'CNS_pMPO': 0.68, 'CNS_pMPO_withSigmoidal': 0.587},
'Methyprylon': {'CNS_pMPO': 0.75, 'CNS_pMPO_withSigmoidal': 0.664},
'Metoclopramide': {'CNS_pMPO': 0.76, 'CNS_pMPO_withSigmoidal': 0.487},
'Metoprolol': {'CNS_pMPO': 0.79, 'CNS_pMPO_withSigmoidal': 0.588},
'Metyrapone': {'CNS_pMPO': 0.73, 'CNS_pMPO_withSigmoidal': 0.67},
'Mexiletine': {'CNS_pMPO': 0.85, 'CNS_pMPO_withSigmoidal': 0.753},
'Mifepristone': {'CNS_pMPO': 0.74, 'CNS_pMPO_withSigmoidal': 0.614},
'Minaprine': {'CNS_pMPO': 0.99, 'CNS_pMPO_withSigmoidal': 0.885},
'Mirtazapine': {'CNS_pMPO': 0.71, 'CNS_pMPO_withSigmoidal': 0.71},
'Moclobemide': {'CNS_pMPO': 0.94, 'CNS_pMPO_withSigmoidal': 0.733},
'Molindone': {'CNS_pMPO': 0.97, 'CNS_pMPO_withSigmoidal': 0.872},
'Nabilone': {'CNS_pMPO': 0.72, 'CNS_pMPO_withSigmoidal': 0.644},
'Nalmefene': {'CNS_pMPO': 0.86, 'CNS_pMPO_withSigmoidal': 0.718},
'Naloxegol': {'CNS_pMPO': 0.28, 'CNS_pMPO_withSigmoidal': 0.01},
'Naratriptan': {'CNS_pMPO': 0.73, 'CNS_pMPO_withSigmoidal': 0.503},
'Nebivolol': {'CNS_pMPO': 0.6, 'CNS_pMPO_withSigmoidal': 0.411},
'Nefazodone': {'CNS_pMPO': 0.72, 'CNS_pMPO_withSigmoidal': 0.686},
'Nemonapride': {'CNS_pMPO': 0.82, 'CNS_pMPO_withSigmoidal': 0.597},
'Nicardipine': {'CNS_pMPO': 0.47, 'CNS_pMPO_withSigmoidal': 0.326},
'Nicergoline': {'CNS_pMPO': 0.66, 'CNS_pMPO_withSigmoidal': 0.635},
'Nicotine': {'CNS_pMPO': 0.53, 'CNS_pMPO_withSigmoidal': 0.467},
'Nifedipine': {'CNS_pMPO': 0.56, 'CNS_pMPO_withSigmoidal': 0.492},
'Nortriptyline': {'CNS_pMPO': 0.72, 'CNS_pMPO_withSigmoidal': 0.718},
'Noscapine': {'CNS_pMPO': 0.64, 'CNS_pMPO_withSigmoidal': 0.344},
'Ondansetron': {'CNS_pMPO': 0.85, 'CNS_pMPO_withSigmoidal': 0.833},
'Ospemifene': {'CNS_pMPO': 0.64, 'CNS_pMPO_withSigmoidal': 0.549},
'Oxprenolol': {'CNS_pMPO': 0.8, 'CNS_pMPO_withSigmoidal': 0.586},
'Oxybate': {'CNS_pMPO': 0.48, 'CNS_pMPO_withSigmoidal': 0.347},
'Oxybutynin': {'CNS_pMPO': 0.9, 'CNS_pMPO_withSigmoidal': 0.864},
'Oxyphencyclimine': {'CNS_pMPO': 0.88, 'CNS_pMPO_withSigmoidal': 0.847},
'Palonosetron': {'CNS_pMPO': 0.71, 'CNS_pMPO_withSigmoidal': 0.601},
'Panobinostat': {'CNS_pMPO': 0.58, 'CNS_pMPO_withSigmoidal': 0.412},
'Paramethadione': {'CNS_pMPO': 0.63, 'CNS_pMPO_withSigmoidal': 0.526},
'Paroxetine': {'CNS_pMPO': 0.94, 'CNS_pMPO_withSigmoidal': 0.939},
'Pazopanib': {'CNS_pMPO': 0.43, 'CNS_pMPO_withSigmoidal': 0.137},
'Penbutolol': {'CNS_pMPO': 0.83, 'CNS_pMPO_withSigmoidal': 0.699},
'Pentazocine': {'CNS_pMPO': 0.84, 'CNS_pMPO_withSigmoidal': 0.843},
'Pentoxifylline': {'CNS_pMPO': 0.61, 'CNS_pMPO_withSigmoidal': 0.387},
'Perampanel': {'CNS_pMPO': 0.72, 'CNS_pMPO_withSigmoidal': 0.658},
'Pergolide': {'CNS_pMPO': 0.79, 'CNS_pMPO_withSigmoidal': 0.791},
'Perindopril': {'CNS_pMPO': 0.5, 'CNS_pMPO_withSigmoidal': 0.067},
'Phenacemide': {'CNS_pMPO': 0.57, 'CNS_pMPO_withSigmoidal': 0.233},
'Phenelzine': {'CNS_pMPO': 0.69, 'CNS_pMPO_withSigmoidal': 0.463},
'Phenmetrazine': {'CNS_pMPO': 0.73, 'CNS_pMPO_withSigmoidal': 0.636},
'Phenobarbital': {'CNS_pMPO': 0.61, 'CNS_pMPO_withSigmoidal': 0.348},
'Phenoxybenzamine': {'CNS_pMPO': 0.57, 'CNS_pMPO_withSigmoidal': 0.475},
'Phenylpropanolamine': {'CNS_pMPO': 0.67, 'CNS_pMPO_withSigmoidal': 0.499},
'Phenytoin': {'CNS_pMPO': 0.72, 'CNS_pMPO_withSigmoidal': 0.582},
'Pimozide': {'CNS_pMPO': 0.72, 'CNS_pMPO_withSigmoidal': 0.678},
'Pioglitazone': {'CNS_pMPO': 0.86, 'CNS_pMPO_withSigmoidal': 0.705},
'Pirfenidone': {'CNS_pMPO': 0.53, 'CNS_pMPO_withSigmoidal': 0.528},
'Pramipexole': {'CNS_pMPO': 0.73, 'CNS_pMPO_withSigmoidal': 0.537},
'Prasugrel': {'CNS_pMPO': 0.71, 'CNS_pMPO_withSigmoidal': 0.614},
'Praziquantel': {'CNS_pMPO': 0.73, 'CNS_pMPO_withSigmoidal': 0.734},
'Procainamide': {'CNS_pMPO': 0.72, 'CNS_pMPO_withSigmoidal': 0.531},
'Procarbazine': {'CNS_pMPO': 0.66, 'CNS_pMPO_withSigmoidal': 0.556},
'Prochlorperazine': {'CNS_pMPO': 0.56, 'CNS_pMPO_withSigmoidal': 0.478},
'Propofol': {'CNS_pMPO': 0.58, 'CNS_pMPO_withSigmoidal': 0.577},
'Propranolol': {'CNS_pMPO': 0.81, 'CNS_pMPO_withSigmoidal': 0.592},
'Pseudoephedrine': {'CNS_pMPO': 0.61, 'CNS_pMPO_withSigmoidal': 0.433},
'Pyrazinamide': {'CNS_pMPO': 0.63, 'CNS_pMPO_withSigmoidal': 0.497},
'Pyrilamine': {'CNS_pMPO': 0.76, 'CNS_pMPO_withSigmoidal': 0.656},
'Pyrimethamine': {'CNS_pMPO': 0.7, 'CNS_pMPO_withSigmoidal': 0.322},
'Pyrvinium': {'CNS_pMPO': 0.38, 'CNS_pMPO_withSigmoidal': 0.294},
'Quetiapine': {'CNS_pMPO': 0.93, 'CNS_pMPO_withSigmoidal': 0.742},
'Ramelteon': {'CNS_pMPO': 0.83, 'CNS_pMPO_withSigmoidal': 0.828},
'Rasagiline': {'CNS_pMPO': 0.69, 'CNS_pMPO_withSigmoidal': 0.672},
'Reboxetine': {'CNS_pMPO': 0.97, 'CNS_pMPO_withSigmoidal': 0.974},
'Remifentanil': {'CNS_pMPO': 0.7, 'CNS_pMPO_withSigmoidal': 0.385},
'Riluzole': {'CNS_pMPO': 0.84, 'CNS_pMPO_withSigmoidal': 0.828},
'Rimantadine': {'CNS_pMPO': 0.7, 'CNS_pMPO_withSigmoidal': 0.614},
'Rimonabant': {'CNS_pMPO': 0.66, 'CNS_pMPO_withSigmoidal': 0.611},
'Risperidone': {'CNS_pMPO': 0.77, 'CNS_pMPO_withSigmoidal': 0.677},
'Rivastigmine': {'CNS_pMPO': 0.77, 'CNS_pMPO_withSigmoidal': 0.679},
'Rizatriptan': {'CNS_pMPO': 0.88, 'CNS_pMPO_withSigmoidal': 0.841},
'Rofecoxib': {'CNS_pMPO': 0.74, 'CNS_pMPO_withSigmoidal': 0.719},
'Ropinirole': {'CNS_pMPO': 0.89, 'CNS_pMPO_withSigmoidal': 0.869},
'Ropivacaine': {'CNS_pMPO': 0.91, 'CNS_pMPO_withSigmoidal': 0.916},
'Rufinamide': {'CNS_pMPO': 0.71, 'CNS_pMPO_withSigmoidal': 0.509},
'Saxagliptin': {'CNS_pMPO': 0.58, 'CNS_pMPO_withSigmoidal': 0.284},
'Selegiline': {'CNS_pMPO': 0.53, 'CNS_pMPO_withSigmoidal': 0.514},
'Sertindole': {'CNS_pMPO': 0.8, 'CNS_pMPO_withSigmoidal': 0.747},
'Sertraline': {'CNS_pMPO': 0.76, 'CNS_pMPO_withSigmoidal': 0.761},
'Sulindac': {'CNS_pMPO': 0.83, 'CNS_pMPO_withSigmoidal': 0.691},
'Suvorexant': {'CNS_pMPO': 0.49, 'CNS_pMPO_withSigmoidal': 0.259},
'Tacrine': {'CNS_pMPO': 0.87, 'CNS_pMPO_withSigmoidal': 0.854},
'Tadalafil': {'CNS_pMPO': 0.73, 'CNS_pMPO_withSigmoidal': 0.501},
'Talipexole': {'CNS_pMPO': 0.9, 'CNS_pMPO_withSigmoidal': 0.726},
'Tamoxifen': {'CNS_pMPO': 0.52, 'CNS_pMPO_withSigmoidal': 0.441},
'Tapentadol': {'CNS_pMPO': 0.8, 'CNS_pMPO_withSigmoidal': 0.776},
'Tasimelteon': {'CNS_pMPO': 0.82, 'CNS_pMPO_withSigmoidal': 0.826},
'Telmisartan': {'CNS_pMPO': 0.63, 'CNS_pMPO_withSigmoidal': 0.441},
'Temozolomide': {'CNS_pMPO': 0.43, 'CNS_pMPO_withSigmoidal': 0.342},
'Terbinafine': {'CNS_pMPO': 0.49, 'CNS_pMPO_withSigmoidal': 0.387},
'Terguride': {'CNS_pMPO': 0.82, 'CNS_pMPO_withSigmoidal': 0.681},
'Teriflunomide': {'CNS_pMPO': 0.62, 'CNS_pMPO_withSigmoidal': 0.292},
'Tetrabenazine': {'CNS_pMPO': 0.78, 'CNS_pMPO_withSigmoidal': 0.694},
'Thiopental': {'CNS_pMPO': 0.68, 'CNS_pMPO_withSigmoidal': 0.489},
'Thiothixene': {'CNS_pMPO': 0.72, 'CNS_pMPO_withSigmoidal': 0.674},
'Tiagabine': {'CNS_pMPO': 0.89, 'CNS_pMPO_withSigmoidal': 0.809},
'Tianeptine': {'CNS_pMPO': 0.54, 'CNS_pMPO_withSigmoidal': 0.105},
'Timolol': {'CNS_pMPO': 0.62, 'CNS_pMPO_withSigmoidal': 0.299},
'Tizanidine': {'CNS_pMPO': 0.79, 'CNS_pMPO_withSigmoidal': 0.55},
'Tocainide': {'CNS_pMPO': 0.76, 'CNS_pMPO_withSigmoidal': 0.532},
'Tolcapone': {'CNS_pMPO': 0.48, 'CNS_pMPO_withSigmoidal': 0.287},
'Tolmetin': {'CNS_pMPO': 0.75, 'CNS_pMPO_withSigmoidal': 0.714},
'Topotecan': {'CNS_pMPO': 0.49, 'CNS_pMPO_withSigmoidal': 0.122},
'Tramadol': {'CNS_pMPO': 0.87, 'CNS_pMPO_withSigmoidal': 0.774},
'Tranylcypromine': {'CNS_pMPO': 0.73, 'CNS_pMPO_withSigmoidal': 0.634},
'Trazodone': {'CNS_pMPO': 0.82, 'CNS_pMPO_withSigmoidal': 0.75},
'Trimipramine': {'CNS_pMPO': 0.59, 'CNS_pMPO_withSigmoidal': 0.592},
'Triprolidine': {'CNS_pMPO': 0.67, 'CNS_pMPO_withSigmoidal': 0.674},
'Troglitazone': {'CNS_pMPO': 0.43, 'CNS_pMPO_withSigmoidal': 0.091},
'Tropisetron': {'CNS_pMPO': 0.94, 'CNS_pMPO_withSigmoidal': 0.87},
'Valdecoxib': {'CNS_pMPO': 0.7, 'CNS_pMPO_withSigmoidal': 0.555},
'Valproic acid': {'CNS_pMPO': 0.69, 'CNS_pMPO_withSigmoidal': 0.602},
'Varenicline': {'CNS_pMPO': 0.79, 'CNS_pMPO_withSigmoidal': 0.755},
'Vilazodone': {'CNS_pMPO': 0.37, 'CNS_pMPO_withSigmoidal': 0.123},
'Vinpocetine': {'CNS_pMPO': 0.73, 'CNS_pMPO_withSigmoidal': 0.702},
'Voriconazole': {'CNS_pMPO': 0.75, 'CNS_pMPO_withSigmoidal': 0.471},
'Vorinostat': {'CNS_pMPO': 0.48, 'CNS_pMPO_withSigmoidal': 0.206},
'Vortioxetine': {'CNS_pMPO': 0.79, 'CNS_pMPO_withSigmoidal': 0.786},
'Zaleplon': {'CNS_pMPO': 0.65, 'CNS_pMPO_withSigmoidal': 0.42},
'Ziprasidone': {'CNS_pMPO': 0.89, 'CNS_pMPO_withSigmoidal': 0.813},
'Zolmitriptan': {'CNS_pMPO': 0.78, 'CNS_pMPO_withSigmoidal': 0.58},
'Zolpidem': {'CNS_pMPO': 0.79, 'CNS_pMPO_withSigmoidal': 0.706},
'Zonisamide': {'CNS_pMPO': 0.59, 'CNS_pMPO_withSigmoidal': 0.369}}
# The reference intermediate CSV values from the model building in Hakan's paper
REFERENCE_INTERMEDIATE_VALUES_CSV = """name,p_value,good_mean,good_std,bad_mean,bad_std,good_nsamples,bad_nsamples,cutoff,b,c,z,w
TPSA,1.53302825374e-37,50.7017727635,28.3039124335,86.6483879508,39.3310947469,299,366,65.7447200619,0.151695487107,0.793679783519,0.531479431818,0.333254
HBD,2.6491093047e-25,1.08695652174,0.891691734071,2.03825136612,1.35245625655,299,366,1.46494485092,0.0940054673368,9.51515104848e-05,0.423900227773,0.265798
MW,2.32807969696e-10,304.703053545,94.0468619927,362.423958393,135.961231175,299,366,328.304266431,0.0319893623019,0.829287962918,0.250951625455,0.157354
cLogD_ACD_v15,2.66954664227e-07,1.80861953019,1.93092146089,0.838442630309,2.84658487103,297,366,1.41650379728,0.0208331236351,131996.985929,0.203071818744,0.127332
mbpKa,0.000219929547677,8.07348212768,2.20894961173,7.17077320052,2.65960541699,224,194,7.66390710544,0.0173381729161,1459310.7835,0.185416190602,0.116262
"""
########################################################################################################################
########################################################################################################################
# Transformation of the intermediate CSV values above into something usable
def _read_csv_to_dict(csv_text: str) -> dict:
data = {}
csv_io = StringIO(csv_text)
reader = csv.DictReader(csv_io, delimiter=',')
for row in reader:
name = row.pop('name')
data[name] = row
return data
REFERENCE_INTERMEDIATE_VALUES = _read_csv_to_dict(REFERENCE_INTERMEDIATE_VALUES_CSV)
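# Illustrative note (added sketch, not from the original source): csv.DictReader leaves
# every field as a string, so numeric use of REFERENCE_INTERMEDIATE_VALUES typically
# needs an explicit conversion first, e.g.:
# tpsa_cutoff = float(REFERENCE_INTERMEDIATE_VALUES['TPSA']['cutoff'])  # ~65.74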
########################################################################################################################
########################################################################################################################
class test_suite001_building(unittest.TestCase):
def setUp(self):
self.df =
|
pd.read_pickle(REFERENCE_DATAFRAME)
|
pandas.read_pickle
|
from lxml import etree
import numpy as np
import pandas as pd
import re
from sklearn.model_selection import train_test_split
import Bio
from Bio import SeqIO
from pathlib import Path
import glob
#console
from tqdm import tqdm as tqdm
import os
import itertools
#jupyter
#from tqdm import tqdm_notebook as tqdm
#not supported in current tqdm version
#from tqdm.autonotebook import tqdm
#import logging
#logging.getLogger('proteomics_utils').addHandler(logging.NullHandler())
#logger=logging.getLogger('proteomics_utils')
#for cd-hit
import subprocess
from sklearn.metrics import f1_score
import hashlib #for mhcii datasets
from utils.dataset_utils import split_clusters_single,pick_all_members_from_clusters
#######################################################################################################
#Parsing all sorts of protein data
#######################################################################################################
def parse_uniprot_xml(filename,max_entries=0,parse_features=[]):
'''parse uniprot xml file, which contains the full uniprot information (e.g. ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete/uniprot_sprot.xml.gz)
using a custom low-level parser (see https://www.ibm.com/developerworks/xml/library/x-hiperfparse/)
c.f. for full format https://www.uniprot.org/docs/uniprot.xsd
parse_features: a list of strings specifying the kind of features to be parsed such as "modified residue" for phosphorylation sites etc. (see https://www.uniprot.org/help/mod_res)
(see the xsd file for all possible entries)
'''
context = etree.iterparse(str(filename), events=["end"], tag="{http://uniprot.org/uniprot}entry")
context = iter(context)
rows =[]
for _, elem in tqdm(context):
parse_func_uniprot(elem,rows,parse_features=parse_features)
elem.clear()
while elem.getprevious() is not None:
del elem.getparent()[0]
if(max_entries > 0 and len(rows)==max_entries):
break
df=pd.DataFrame(rows).set_index("ID")
df['name'] = df.name.astype(str)
df['dataset'] = df.dataset.astype('category')
df['organism'] = df.organism.astype('category')
df['sequence'] = df.sequence.astype(str)
return df
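# Minimal usage sketch (illustration only; assumes a locally downloaded and unzipped
# copy of the Swiss-Prot XML dump, the file name below is an assumption):
# df_sprot = parse_uniprot_xml("uniprot_sprot.xml", max_entries=1000,
#                              parse_features=["modified residue"])
# df_sprot[["name", "organism", "sequence"]].head()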
def parse_func_uniprot(elem, rows, parse_features=[]):
'''extracting a single record from uniprot xml'''
#Sequence & fragment
sequence=""
fragment_map = {"single":1, "multiple":2}
fragment = 0
seqs = elem.findall("{http://uniprot.org/uniprot}sequence")
for s in seqs:
if 'fragment' in s.attrib:
fragment = fragment_map[s.attrib["fragment"]]
sequence=s.text
if sequence != "":
break
#print("sequence:",sequence)
#print("fragment:",fragment)
#dataset
dataset=elem.attrib["dataset"]
#accession
accession = ""
accessions = elem.findall("{http://uniprot.org/uniprot}accession")
for a in accessions:
accession=a.text
if accession !="":#primary accession! https://www.uniprot.org/help/accession_numbers!!!
break
#print("accession",accession)
#protein existence (PE in plain text)
proteinexistence_map = {"evidence at protein level":5,"evidence at transcript level":4,"inferred from homology":3,"predicted":2,"uncertain":1}
proteinexistence = -1
accessions = elem.findall("{http://uniprot.org/uniprot}proteinExistence")
for a in accessions:
proteinexistence=proteinexistence_map[a.attrib["type"]]
break
#print("protein existence",proteinexistence)
#name
name = ""
names = elem.findall("{http://uniprot.org/uniprot}name")
for n in names:
name=n.text
break
#print("name",name)
#organism
organism = ""
organisms = elem.findall("{http://uniprot.org/uniprot}organism")
for s in organisms:
s1=s.findall("{http://uniprot.org/uniprot}name")
for s2 in s1:
if(s2.attrib["type"]=='scientific'):
organism=s2.text
break
if organism !="":
break
#print("organism",organism)
#dbReference: PMP,GO,Pfam, EC
ids = elem.findall("{http://uniprot.org/uniprot}dbReference")
pfams = []
gos =[]
ecs = []
pdbs =[]
for i in ids:
#print(i.attrib["id"],i.attrib["type"])
#cf. http://geneontology.org/external2go/uniprotkb_kw2go for Uniprot Keyword<->GO mapping
#http://geneontology.org/ontology/go-basic.obo for List of go terms
#https://www.uniprot.org/help/keywords_vs_go keywords vs. go
if(i.attrib["type"]=="GO"):
tmp1 = i.attrib["id"]
for i2 in i:
if i2.attrib["type"]=="evidence":
tmp2= i2.attrib["value"]
gos.append([int(tmp1[3:]),int(tmp2[4:])]) #first value is go code, second eco evidence ID (see mapping below)
elif(i.attrib["type"]=="Pfam"):
pfams.append(i.attrib["id"])
elif(i.attrib["type"]=="EC"):
ecs.append(i.attrib["id"])
elif(i.attrib["type"]=="PDB"):
pdbs.append(i.attrib["id"])
#print("PMP: ", pmp)
#print("GOs:",gos)
#print("Pfams:",pfam)
#print("ECs:",ecs)
#print("PDBs:",pdbs)
#keyword
keywords = elem.findall("{http://uniprot.org/uniprot}keyword")
keywords_lst = []
#print(keywords)
for k in keywords:
keywords_lst.append(int(k.attrib["id"][-4:]))#remove the KW-
#print("keywords: ",keywords_lst)
#comments = elem.findall("{http://uniprot.org/uniprot}comment")
#comments_lst=[]
##print(comments)
#for c in comments:
# if(c.attrib["type"]=="function"):
# for c1 in c:
# comments_lst.append(c1.text)
#print("function: ",comments_lst)
#ptm etc
if len(parse_features)>0:
ptms=[]
features = elem.findall("{http://uniprot.org/uniprot}feature")
for f in features:
if(f.attrib["type"] in parse_features):#only add features of the requested type
locs=[]
for l in f[0]:
locs.append(int(l.attrib["position"]))
ptms.append([f.attrib["type"],f.attrib["description"] if 'description' in f.attrib else "NaN",locs, f.attrib['evidence'] if 'evidence' in f.attrib else "NaN"])
#print(ptms)
data_dict={"ID": accession, "name": name, "dataset":dataset, "proteinexistence":proteinexistence, "fragment":fragment, "organism":organism, "ecs": ecs, "pdbs": pdbs, "pfams" : pfams, "keywords": keywords_lst, "gos": gos, "sequence": sequence}
if len(parse_features)>0:
data_dict["features"]=ptms
#print("all children:")
#for c in elem:
# print(c)
# print(c.tag)
# print(c.attrib)
rows.append(data_dict)
def parse_uniprot_seqio(filename,max_entries=0):
'''parse uniprot xml file using the SeqIO parser (reduced functionality, e.g. it does not extract evidence codes for GO terms)'''
sprot = SeqIO.parse(filename, "uniprot-xml")
rows = []
for p in tqdm(sprot):
accession = str(p.name)
name = str(p.id)
dataset = str(p.annotations['dataset'])
organism = str(p.annotations['organism'])
ecs, pdbs, pfams, gos = [],[],[],[]
for ref in p.dbxrefs:
k = ref.split(':')
if k[0] == 'GO':
gos.append(':'.join(k[1:]))
elif k[0] == 'Pfam':
pfams.append(k[1])
elif k[0] == 'EC':
ecs.append(k[1])
elif k[0] == 'PDB':
pdbs.append(k[1:])
if 'keywords' in p.annotations.keys():
keywords = p.annotations['keywords']
else:
keywords = []
sequence = str(p.seq)
row = {
'ID': accession,
'name':name,
'dataset':dataset,
'organism':organism,
'ecs':ecs,
'pdbs':pdbs,
'pfams':pfams,
'keywords':keywords,
'gos':gos,
'sequence':sequence}
rows.append(row)
if(max_entries>0 and len(rows)==max_entries):
break
df=pd.DataFrame(rows).set_index("ID")
df['name'] = df.name.astype(str)
df['dataset'] = df.dataset.astype('category')
df['organism'] = df.organism.astype('category')
df['sequence'] = df.sequence.astype(str)
return df
def filter_human_proteome(df_sprot):
'''extracts the human proteome from Swiss-Prot proteins in a DataFrame with an organism column'''
is_Human = np.char.find(df_sprot.organism.values.astype(str), "Human") !=-1
is_human = np.char.find(df_sprot.organism.values.astype(str), "human") !=-1
is_sapiens = np.char.find(df_sprot.organism.values.astype(str), "sapiens") !=-1
is_Sapiens = np.char.find(df_sprot.organism.values.astype(str), "Sapiens") !=-1
return df_sprot[is_Human|is_human|is_sapiens|is_Sapiens]
def filter_aas(df, exclude_aas=["B","J","X","Z"]):
'''excludes sequences containing exclude_aas: B = D or N, J = I or L, X = unknown, Z = E or Q'''
return df[~df.sequence.apply(lambda x: any([e in x for e in exclude_aas]))]
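# Hedged example of chaining the helpers above (df_sprot is assumed to come from
# parse_uniprot_xml or parse_uniprot_fasta):
# df_human = filter_human_proteome(df_sprot)
# df_clean = filter_aas(df_human)  # drop sequences with ambiguous amino acids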
######################################################################################################
def explode_clusters_df(df_cluster):
'''aux. function to convert cluster dataframe from one row per cluster to one row per ID'''
df=df_cluster.reset_index(level=0)
rows = []
if('repr_accession' in df.columns):#include representative if it exists
_ = df.apply(lambda row: [rows.append([nn,row['entry_id'], row['repr_accession']==nn ]) for nn in row.members], axis=1)
df_exploded = pd.DataFrame(rows, columns=['ID',"cluster_ID","representative"]).set_index(['ID'])
else:
_ = df.apply(lambda row: [rows.append([nn,row['entry_id']]) for nn in row.members], axis=1)
df_exploded = pd.DataFrame(rows, columns=['ID',"cluster_ID"]).set_index(['ID'])
return df_exploded
def parse_uniref(filename,max_entries=0,parse_sequence=False, df_selection=None, exploded=True):
'''parse uniref (clustered sequences) xml ftp://ftp.ebi.ac.uk/pub/databases/uniprot/uniref/uniref50/uniref50.xml.gz unzipped 100GB file
using custom low-level parser https://www.ibm.com/developerworks/xml/library/x-hiperfparse/
max_entries: only return first max_entries entries (0=all)
parse_sequences: return also representative sequence
df_selection: only include entries with accessions that are present in df_selection.index (None keeps all records)
exploded: return one row per ID instead of one row per cluster
c.f. for full format ftp://ftp.ebi.ac.uk/pub/databases/uniprot/uniref/uniref50/README
'''
#issue with long texts https://stackoverflow.com/questions/30577796/etree-incomplete-child-text
#wait for end rather than start tag
context = etree.iterparse(str(filename), events=["end"], tag="{http://uniprot.org/uniref}entry")
context = iter(context)
rows =[]
for _, elem in tqdm(context):
parse_func_uniref(elem,rows,parse_sequence=parse_sequence, df_selection=df_selection)
elem.clear()
while elem.getprevious() is not None:
del elem.getparent()[0]
if(max_entries > 0 and len(rows)==max_entries):
break
df=pd.DataFrame(rows).set_index("entry_id")
df["num_members"]=df.members.apply(len)
if(exploded):
return explode_clusters_df(df)
return df
def parse_func_uniref(elem, rows, parse_sequence=False, df_selection=None):
'''extract a single uniref entry'''
#entry ID
entry_id = elem.attrib["id"]
#print("cluster id",entry_id)
#name
name = ""
names = elem.findall("{http://uniprot.org/uniref}name")
for n in names:
name=n.text[9:]
break
#print("cluster name",name)
members=[]
#representative member
repr_accession = ""
repr_sequence =""
repr = elem.findall("{http://uniprot.org/uniref}representativeMember")
for r in repr:
s1=r.findall("{http://uniprot.org/uniref}dbReference")
for s2 in s1:
for s3 in s2:
if s3.attrib["type"]=="UniProtKB accession":
if(repr_accession == ""):
repr_accession = s3.attrib["value"]#pick primary accession
members.append(s3.attrib["value"])
if parse_sequence is True:
s1=r.findall("{http://uniprot.org/uniref}sequence")
for s2 in s1:
repr_sequence = s2.text
if repr_sequence !="":
break
#print("representative member accession:",repr_accession)
#print("representative member sequence:",repr_sequence)
#all members
repr = elem.findall("{http://uniprot.org/uniref}member")
for r in repr:
s1=r.findall("{http://uniprot.org/uniref}dbReference")
for s2 in s1:
for s3 in s2:
if s3.attrib["type"]=="UniProtKB accession":
members.append(s3.attrib["value"]) #add primary and secondary accessions
#print("members", members)
if(not(df_selection is None)): #apply selection filter
members = [y for y in members if y in df_selection.index]
#print("all children")
#for c in elem:
# print(c)
# print(c.tag)
# print(c.attrib)
if(len(members)>0):
data_dict={"entry_id": entry_id, "name": name, "repr_accession":repr_accession, "members":members}
if parse_sequence is True:
data_dict["repr_sequence"]=repr_sequence
rows.append(data_dict)
###########################################################################################################################
#proteins and peptides from fasta
###########################################################################################################################
def parse_uniprot_fasta(fasta_path, max_entries=0):
'''parse uniprot from fasta file (which contains less information than the corresponding xml but is also much smaller e.g. ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete/uniprot_sprot.fasta)'''
rows=[]
dataset_dict={"sp":"Swiss-Prot","tr":"TrEMBL"}
for seq_record in tqdm(SeqIO.parse(fasta_path, "fasta")):
sid=seq_record.id.split("|")
accession = sid[1]
dataset = dataset_dict[sid[0]]
name = sid[2]
description = seq_record.description
sequence=str(seq_record.seq)
#print(description)
m = re.search('PE=\d', description)
pe=int(m.group(0).split("=")[1])
m = re.search('OS=.* (?=OX=)', description)
organism=m.group(0).split("=")[1].strip()
data_dict={"ID": accession, "name": name, "dataset":dataset, "proteinexistence":pe, "organism":organism, "sequence": sequence}
rows.append(data_dict)
if(max_entries > 0 and len(rows)==max_entries):
break
df=pd.DataFrame(rows).set_index("ID")
df['name'] = df.name.astype(str)
df['dataset'] = df.dataset.astype('category')
df['organism'] = df.organism.astype('category')
df['sequence'] = df.sequence.astype(str)
return df
def proteins_from_fasta(fasta_path):
'''load proteins (as seqrecords) from fasta (just redirects)'''
return seqrecords_from_fasta(fasta_path)
def seqrecords_from_fasta(fasta_path):
'''load seqrecords from fasta file'''
seqrecords = list(SeqIO.parse(fasta_path, "fasta"))
return seqrecords
def seqrecords_to_sequences(seqrecords):
'''converts biopythons seqrecords into a plain list of sequences'''
return [str(p.seq) for p in seqrecords]
def sequences_to_fasta(sequences, fasta_path, sequence_id_prefix="s"):
'''save plain list of sequences to fasta'''
with open(fasta_path, "w") as output_handle:
for i,s in tqdm(enumerate(sequences)):
record = Bio.SeqRecord.SeqRecord(Bio.Seq.Seq(s), id=sequence_id_prefix+str(i), description="")
SeqIO.write(record, output_handle, "fasta")
def df_to_fasta(df, fasta_path):
'''Save column "sequence" from pandas DataFrame to fasta file using the index of the DataFrame as ID. Preserves original IDs in contrast to the function sequences_to_fasta()'''
with open(fasta_path, "w") as output_handle:
for row in df.iterrows():
record = Bio.SeqRecord.SeqRecord(Bio.Seq.Seq(row[1]["sequence"]), id=str(row[0]), description="")
SeqIO.write(record, output_handle, "fasta")
def sequences_to_df(sequences, sequence_id_prefix="s"):
data = {'ID': [(sequence_id_prefix+str(i) if sequence_id_prefix!="" else i) for i in range(len(sequences))], 'sequence': sequences}
df=pd.DataFrame.from_dict(data)
return df.set_index("ID")
def fasta_to_df(fasta_path):
seqs=SeqIO.parse(fasta_path, "fasta")
res=[]
for s in seqs:
res.append({"ID":s.id,"sequence":str(s.seq)})
return pd.DataFrame(res)
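# Hedged round-trip sketch using the fasta helpers above (writes a small temporary
# fasta file in the working directory; the file name is an assumption):
# sequences_to_fasta(["MKV", "MKKLL"], "tmp_peptides.fasta", sequence_id_prefix="pep")
# df_pep = fasta_to_df("tmp_peptides.fasta")  # columns: ID ("pep0", "pep1"), sequence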
def peptides_from_proteins(protein_seqrecords, miss_cleavage=2,min_length=5,max_length=300):
'''extract peptides from proteins seqrecords by trypsin digestion
min_length: only return peptides of length min_length or greater (0 for all)
max_length: only return peptides of length max_length or smaller (0 for all)
'''
peptides = []
for seq in tqdm(protein_seqrecords):
peps = trypsin_digest(str(seq.seq), miss_cleavage)
peptides.extend(peps)
tmp=list(set(peptides))
if(min_length>0 and max_length>0):
tmp=[t for t in tmp if (len(t)>=min_length and len(t)<=max_length)]
elif(min_length==0 and max_length>0):
tmp=[t for t in tmp if len(t)<=max_length]
elif(min_length>0 and max_length==0):
tmp=[t for t in tmp if len(t)>=min_length]
print("Extracted",len(tmp),"unique peptides.")
return tmp
def trypsin_digest(proseq, miss_cleavage):
'''trypsin digestion of protein seqrecords
TRYPSIN from https://github.com/yafeng/trypsin/blob/master/trypsin.py'''
peptides=[]
cut_sites=[0]
for i in range(0,len(proseq)-1):
if proseq[i]=='K' and proseq[i+1]!='P':
cut_sites.append(i+1)
elif proseq[i]=='R' and proseq[i+1]!='P':
cut_sites.append(i+1)
if cut_sites[-1]!=len(proseq):
cut_sites.append(len(proseq))
if len(cut_sites)>2:
if miss_cleavage==0:
for j in range(0,len(cut_sites)-1):
peptides.append(proseq[cut_sites[j]:cut_sites[j+1]])
elif miss_cleavage==1:
for j in range(0,len(cut_sites)-2):
peptides.append(proseq[cut_sites[j]:cut_sites[j+1]])
peptides.append(proseq[cut_sites[j]:cut_sites[j+2]])
peptides.append(proseq[cut_sites[-2]:cut_sites[-1]])
elif miss_cleavage==2:
for j in range(0,len(cut_sites)-3):
peptides.append(proseq[cut_sites[j]:cut_sites[j+1]])
peptides.append(proseq[cut_sites[j]:cut_sites[j+2]])
peptides.append(proseq[cut_sites[j]:cut_sites[j+3]])
peptides.append(proseq[cut_sites[-3]:cut_sites[-2]])
peptides.append(proseq[cut_sites[-3]:cut_sites[-1]])
peptides.append(proseq[cut_sites[-2]:cut_sites[-1]])
else: #there is no trypsin site in the protein sequence
peptides.append(proseq)
return list(set(peptides))
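def _trypsin_digest_example():
    '''Hedged worked example added for illustration (not part of the original module):
    trypsin cuts after K or R unless the next residue is P, so "AKRPC" with zero
    missed cleavages yields AK and RPC (the cut after R is blocked by the proline).'''
    peps = trypsin_digest("AKRPC", 0)
    assert sorted(peps) == ["AK", "RPC"]
    return peps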
###########################################################################
# Processing CD-HIT clusters
###########################################################################
def clusters_df_from_sequence_df(df,threshold=[1.0,0.9,0.5],alignment_coverage=[0.0,0.9,0.8],memory=16000, threads=8, exploded=True, verbose=False):
'''create clusters df from sequence df (using cd hit)
df: dataframe with sequence information
threshold: similarity threshold for clustering (pass a list for hierarchical clustering e.g [1.0, 0.9, 0.5])
alignment_coverage: required minimum coverage of the longer sequence (to mimic uniref https://www.uniprot.org/help/uniref)
memory: limit available memory
threads: limit number of threads
exploded: return exploded view of the dataframe (one row for every member vs. one row for every cluster)
uses CD-HIT for clustering
https://github.com/weizhongli/cdhit/wiki/3.-User's-Guide
copy cd-hit into ~/bin
TODO: extend to psi-cd-hit for thresholds smaller than 0.4
'''
if verbose:
print("Exporting original dataframe as fasta...")
fasta_file = "cdhit.fasta"
df_original_index = list(df.index) #reindex the dataframe since cd-hit truncates sequence IDs longer than 19 characters in its output
df = df.reset_index(drop=True)
df_to_fasta(df, fasta_file)
if(not(isinstance(threshold, list))):
threshold=[threshold]
alignment_coverage=[alignment_coverage]
assert(len(threshold)==len(alignment_coverage))
fasta_files=[]
for i,thr in enumerate(threshold):
if(thr< 0.4):#use psi-cd-hit here
print("thresholds lower than 0.4 require psi-cd-hit.pl require psi-cd-hit.pl (building on BLAST) which is currently not supported")
return pd.DataFrame()
elif(thr<0.5):
wl = 2
elif(thr<0.6):
wl = 3
elif(thr<0.7):
wl = 4
else:
wl = 5
aL = alignment_coverage[i]
#cd-hit -i nr -o nr80 -c 0.8 -n 5
#cd-hit -i nr80 -o nr60 -c 0.6 -n 4
#psi-cd-hit.pl -i nr60 -o nr30 -c 0.3
if verbose:
print("Clustering using cd-hit at threshold", thr, "using wordlength", wl, "and alignment coverage", aL, "...")
fasta_file_new= "cdhit"+str(int(thr*100))+".fasta"
command = "cd-hit -i "+fasta_file+" -o "+fasta_file_new+" -c "+str(thr)+" -n "+str(wl)+" -aL "+str(aL)+" -M "+str(memory)+" -T "+str(threads)
if(verbose):
print(command)
process= subprocess.Popen(command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
output, error = process.communicate()
if(verbose):
print(output)
if(error !=""):
print(error)
fasta_files.append(fasta_file)
if(i==len(threshold)-1):
fasta_files.append(fasta_file_new)
fasta_file= fasta_file_new
#join results from all clustering steps
if verbose:
print("Joining results from different clustering steps...")
for i,f in enumerate(reversed(fasta_files[1:])):
if verbose:
print("Processing",f,"...")
if(i==0):
df_clusters = parse_cdhit_clstr(f+".clstr",exploded=False)
else:
df_clusters2 = parse_cdhit_clstr(f+".clstr",exploded=False)
for id,row in df_clusters.iterrows():
members = row['members']
new_members = [list(df_clusters2[df_clusters2.repr_accession==y].members)[0] for y in members]
new_members = [item for sublist in new_members for item in sublist] #flattened
row['members']=new_members
df_clusters["members"]=df_clusters["members"].apply(lambda x:[df_original_index[int(y)] for y in x])
df_clusters["repr_accession"]=df_clusters["repr_accession"].apply(lambda x:df_original_index[int(x)])
if(exploded):
return explode_clusters_df(df_clusters)
return df_clusters
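# Hedged usage sketch (requires the cd-hit binary on the PATH and a dataframe with a
# "sequence" column, e.g. the output of parse_uniprot_fasta):
# df_clusters = clusters_df_from_sequence_df(df_sprot, threshold=[1.0, 0.9, 0.5],
#                                            alignment_coverage=[0.0, 0.9, 0.8], verbose=True)
# df_clusters.head()  # exploded=True: one row per ID with its cluster_ID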
def parse_cdhit_clstr(filename, exploded=True):
'''Aux. function (used by clusters_df_from_sequence_df) to parse CD-HIT's .clstr output file in a similar way as the uniref data
for the format see https://github.com/weizhongli/cdhit/wiki/3.-User's-Guide#CDHIT
exploded: one row for every ID instead of one row for every cluster
'''
def save_cluster(rows,members,representative):
if(len(members)>0):
rows.append({"entry_id":filename[:-6]+"_"+representative, "members":members, "repr_accession":representative})
rows=[]
with open(filename, 'r') as f:
members=[]
representative=""
for l in tqdm(f):
if(l[0]==">"):
save_cluster(rows,members,representative)
members=[]
representative=""
else:
member=(l.split(">")[1]).split("...")[0]
members.append(member)
if "*" in l:
representative = member
save_cluster(rows,members,representative)
df=pd.DataFrame(rows).set_index("entry_id")
if(exploded):
return explode_clusters_df(df)
return df
###########################################################################
# MHC DATA
###########################################################################
######### Helper functions ##########
def _label_binder(data, threshold=500, measurement_column="meas"):
# Drop entries whose measurement contradicts the inequality relative to the threshold (e.g. meas > threshold but flagged '<'), since their binder label is ambiguous
to_drop = (( (data['inequality']=='<')&(data[measurement_column]>threshold))|((data['inequality']=='>')&(data[measurement_column]<threshold))).mean()
if to_drop > 0:
print('Dropping {:.2%} of entries because of ambiguous inequality'.format(to_drop))
data = data[~(( (data['inequality']=='<')&(data[measurement_column]>threshold))|((data['inequality']=='>')&(data[measurement_column]<threshold)))]
# Labeling
data['label'] = (data[measurement_column] <= threshold).astype(int)
return data
def _transform_ic50(data, how="log",max_ic50=50000.0, inequality_offset=True, label_column="meas"):
"""Transform ic50 measurements
how: "log" logarithmic transform, inequality "=" mapped to [0,1], inequality ">" mapped to [2,3], inequality "<" mapped to [4,5]
"norm"
"cap"
"""
x = data[label_column]
if how=="cap":
x = np.minimum(x, 50000)
elif how=="norm":
x = np.minimum(x, 50000)
x = (x - x.mean()) / x.std()
elif how=="log":
# log transform
x = 1 - (np.log(x)/np.log(max_ic50))
x = np.minimum(1.0, np.maximum(0.0,x))
if(inequality_offset):
# add offsets for loss
offsets = pd.Series(data['inequality']).map({'=': 0, '>': 2, '<': 4,}).values
x += offsets
return x
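def _transform_ic50_example():
    '''Hedged worked example (illustration only) of the "log" transform above:
    label = 1 - log(ic50)/log(max_ic50), clipped to [0, 1]; with inequality_offset
    the values for ">" and "<" rows are shifted by +2 and +4 respectively.'''
    demo = pd.DataFrame({"meas": [1.0, 500.0, 50000.0],
                         "inequality": ["=", "=", "="]})
    # approximately [1.0, 0.43, 0.0]
    return _transform_ic50(demo, how="log")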
def _string_index(data):
# Add prefix letter "a" to the numerical index (such that it is clearly a string in order to avoid later errors).
data["ID"] = data.index
data["ID"] = data["ID"].apply(lambda x: "a"+ str(x))
data = data.set_index(["ID"])
return data
def _format_alleles(x):
if x[:3]=='HLA':
return x[:5]+'-'+x[6:8]+x[9:]
if x[:4]=='Mamu':
return x[:6]+'-'+x[7:]
else:
return x
def _get_allele_ranking(data_dir='.'):
'''
Allele ranking should be the same across different datasets (noMS, withMS) to avoid confusion.
Thus, the ranking is based on the larger withMS dataset
'''
data_path = Path(data_dir)
curated_withMS_path = data_path/'data_curated.20180219'/'curated_training_data.with_mass_spec.csv'
df = pd.read_csv(curated_withMS_path)
# Drop duplicates
df = df.drop_duplicates(["allele", "peptide","measurement_value"])
lens = df['peptide'].apply(len)
df = df[(lens>7) & (lens<16)]
# Keep only alleles with min 25 peptides like MHC flurry
peptides_per_allele = df.groupby('allele').size()
alleles_select = peptides_per_allele[peptides_per_allele>24].index
df = df[df['allele'].isin(alleles_select)]
mhc_rank = df.groupby('allele').size().sort_values(ascending=False).reset_index()['allele']
return mhc_rank
def netmhpan_4_0_special_allele_map(allele):
minus_idx = allele.find("-")
pre, post = allele[:minus_idx], allele[minus_idx+1:]
if pre=="Mamu":
special_map = {"A01": "A1*00101",
"A02": "A1*00201",
"A07": "A1*00701",
"A11": "A1*01101",
"A2201": "A1*02201",
"A2601": "A1*02601",
'A20102': "A2*00102", # "A2*0102"
"A70103": "A7*00103", # "A7*0103"
"B01": "B*00101",
"B03": "B*00301",
"B04": "B*00401",
"B08": "B*00801",
"B17": "B*01701",
"B52": "B*05201",
"B1001": "B*01001",
'B3901': "B*03901", #?
'B6601': "B*06601", #?
'B8301': "B*08301", #?
'B8701': "B*08701", #?
}
if post in special_map.keys():
post = special_map[post]
elif pre=="BoLA":
#source: select allele menu on http://www.cbs.dtu.dk/services/NetMHCpan-4.0/
special_map = {
"D18.4": "1:02301",
"T2a": "2:01201",
"AW10": "3:00101",
"JSP.1": "3:00201",
"HD6": "6:01301",
"T2b": "6:04101"
}
if post in special_map.keys():
post = special_map[post]
return pre + "-" + post
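def _netmhcpan_allele_map_examples():
    '''Hedged examples (illustration only) for netmhpan_4_0_special_allele_map:
    Mamu and BoLA short names are rewritten, anything else passes through unchanged.'''
    assert netmhpan_4_0_special_allele_map("Mamu-A01") == "Mamu-A1*00101"
    assert netmhpan_4_0_special_allele_map("BoLA-T2a") == "BoLA-2:01201"
    assert netmhpan_4_0_special_allele_map("HLA-A02:01") == "HLA-A02:01"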
def prepare_pseudo_mhc_sequences(mhc_class, data_dir='.'):
"""
The pseudo sequences are provided with the NetMHCpan4.1/NetMHCIIpan4.0 data.
"""
data_path = Path(data_dir)
if mhc_class=="II":
pseudo_seq_file = "NetMHCIIpan_train/pseudosequence.2016.all.X.dat"
else:
pseudo_seq_file = "NetMHCpan_4_1_train/MHC_pseudo.dat"
pseudo_mhc = []
with open(data_path/pseudo_seq_file, "r") as f:
for line in f:
allele, seq = line.split()
pseudo_mhc.append((allele,seq))
pseudo_mhc = pd.DataFrame(pseudo_mhc, columns=("allele", "sequence1"))
pseudo_mhc = pseudo_mhc[~pseudo_mhc["allele"].duplicated()]
return pseudo_mhc
########## Generate DataFrame ##########
def generate_mhc_kim(cv_type=None, mhc_select=0, regression=False, transform_ic50=None, to_csv=False, filename=None, data_dir='.', keep_all_alleles=False):
'''
cv_type: string, strategy for 5-fold cross validation, options:
- None: No cv-strategy, cv column is filled with 'TBD'
- sr: removal of similar peptides separately in the binder/non-binder sets, using a similarity threshold of 80% ('Hobohm 1'-like algorithm)
- gs: grouping similar peptides in the same cv-partition
- rnd: random partitioning
transform_ic50: string, ignored if not regression
- None: use raw ic50 measurements as labels
- cap: cap ic50 meas at 50000
- norm: cap ic50 meas at 50000 and normalize
- log: take log_50000 and cap at 50000
mhc_select: int between 0 and 50, choose allele by frequency rank in Binding Data 2009
'''
# Binding Data 2009. Used by Kim et al for Cross Validation. Used by MHCnugget for training.
bd09_file = 'bdata.2009.mhci.public.1.txt'
# Similar peptides removed
bd09_cv_sr_file = 'bdata.2009.mhci.public.1.cv_sr.txt'
# Random partitioning
bd09_cv_rnd_file = 'bdata.2009.mhci.public.1.cv_rnd.txt'
# Similar peptides grouped
bd09_cv_gs_file = 'bdata.2009.mhci.public.1.cv_gs.txt'
# 'blind' used by Kim et al to estimate true predictive accuracy. Used by MHCnugget for testing.
# Generated by subtracting BD2009 from BD 2013 and removing similar peptides with respect to BD2009
# (similar = at least 80% similarity and same length)
bdblind_file = 'bdata.2013.mhci.public.blind.1.txt'
data_dir = Path(data_dir)/"benchmark_mhci_reliability/binding"
# Read in data with specified cv type
if cv_type=='sr':
bd09 = pd.read_csv(data_dir/'bd2009.1'/bd09_cv_sr_file, sep='\t')
elif cv_type=='gs':
bd09 = pd.read_csv(data_dir/'bd2009.1'/bd09_cv_gs_file, sep='\t')
elif cv_type=='rnd':
bd09 = pd.read_csv(data_dir/'bd2009.1'/bd09_cv_rnd_file, sep='\t')
else:
bd09 = pd.read_csv(data_dir/'bd2009.1'/bd09_file, sep='\t')
# Read in blind data
bdblind = pd.read_csv(data_dir/'blind.1'/bdblind_file, sep='\t')
# alleles are spelled differently in bdblind and bd2009, change spelling in bdblind
bdblind['mhc'] = bdblind['mhc'].apply(_format_alleles)
# Confirm there is no overlap
print('{} entries from the blind data set are in the 2009 data set'.format(bdblind[['sequence', 'mhc']].isin(bd09[['sequence', 'mhc']]).all(axis=1).sum()))
if regression:
# For now: use only quantitative measurements, later tuple (label, inequality as int)
#print('Using quantitative {} % percent of the data.'.format((bd09['inequality']=='=').mean()))
#bd09 = bd09[bd09['inequality']=='=']
#bd09.rename(columns={'meas':'label'}, inplace=True)
#bdblind = bdblind[bdblind['inequality']=='=']
#bdblind.rename(columns={'meas':'label'}, inplace=True)
# Convert ic50 measurements to range [0,1]
if transform_ic50 is not None:
bd09['label'] = _transform_ic50(bd09, how=transform_ic50)
bdblind['label'] = _transform_ic50(bdblind, how=transform_ic50)
else:
# Labeling for binder/NonBinder
bd09 = _label_binder(bd09)[['mhc', 'sequence', 'label', 'cv']]
#bdblind = _label_binder(bdblind)[['mhc', 'sequence', 'label', 'cv']]
bdblind = bdblind.rename(columns={"meas":"label"})
if not keep_all_alleles:
# in bd09 (train set) keep only entries with mhc also occurring in bdblind (test set)
bd09 = bd09[bd09['mhc'].isin(bdblind['mhc'])]
# Combine
bdblind['cv'] = 'blind'
bd = pd.concat([bd09, bdblind], ignore_index=True)
if not(regression):
# Test if there is at least one binder in bd09 AND bdblind
min_one_binder = pd.concat([(bd09.groupby('mhc')['label'].sum() > 0), (bdblind.groupby('mhc')['label'].sum() > 0)], axis=1).all(axis=1)
print('For {} alleles there is not at least one binder in bd 2009 AND bd blind. These will be dismissed.'.format((~min_one_binder).sum()))
alleles = bd['mhc'].unique()
alleles_to_keep = alleles[min_one_binder]
# Dismiss alleles without at least one binder
bd = bd[bd['mhc'].isin(alleles_to_keep)]
# Make allele ranking based on binding data 2009
mhc_rank = bd[bd['cv']!='blind'].groupby('mhc').size().sort_values(ascending=False).reset_index()['mhc']
# Select allele
if mhc_select is not None:
print('Selecting allele {}'.format(mhc_rank.loc[mhc_select]))
bd = bd[bd['mhc']==mhc_rank.loc[mhc_select]][['sequence', 'label', 'cv']]
# Turn indices into strings
bd = _string_index(bd)
if to_csv and filename is not None:
bd.to_csv(filename)
return bd
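# Hedged usage sketch (assumes the Kim et al. benchmark files are unpacked under
# data_dir/benchmark_mhci_reliability/binding as described above):
# bd = generate_mhc_kim(cv_type="gs", mhc_select=0, data_dir="../data")
# bd[bd.cv != "blind"].shape, bd[bd.cv == "blind"].shape  # train vs. blind test sizes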
def generate_mhc_flurry(ms='noMS', mhc_select=0, regression=False, transform_ic50=None, binder_threshold=500, filter_length=True, label_binary=False, random_seed=42,data_dir='.'):
'''
Load the MHC I data curated and uploaded to https://data.mendeley.com/datasets/8pz43nvvxh/1 by MHCFlurry
Used by them for training and model selection
ms: string, specifies if mass spectrometry data should be included, options:
- noMS: MHCFlurry no MS dataset
- withMS: MHCFlurry with MS dataset
mhc_select: int between 0 and 150 (noMS)/ 188 (withMS), choose allele by frequency rank
filter_length: boolean, MHCFlurry selected peptides of length 8-15 (their model only deals with these lengths)
'''
data_path = Path(data_dir)
curated_noMS_path = data_path/'data_curated.20180219'/'curated_training_data.no_mass_spec.csv'
curated_withMS_path = data_path/'data_curated.20180219'/'curated_training_data.with_mass_spec.csv'
if ms=='noMS':
df = pd.read_csv(curated_noMS_path)
elif ms=='withMS':
df = pd.read_csv(curated_withMS_path)
if filter_length:
lens = df['peptide'].apply(len)
df = df[(lens>7) & (lens<16)]
# Keep only alleles with min 25 peptides
peptides_per_allele = df.groupby('allele').size()
alleles_select = peptides_per_allele[peptides_per_allele>24].index
df = df[df['allele'].isin(alleles_select)]
df.rename(columns={'measurement_value':'meas', 'measurement_inequality':'inequality', 'peptide':'sequence'}, inplace=True)
# label binder/non binder
if label_binary:
df = _label_binder(df, threshold=binder_threshold, measurement_column='meas')
if regression:
df["label"] = _transform_ic50(df, how=transform_ic50)
if mhc_select is not None:
if type(mhc_select)==int:
mhc_rank = df.groupby('allele').size().sort_values(ascending=False).reset_index()['allele']
print('Selecting allele {}'.format(mhc_rank.loc[mhc_select]))
df = df[df['allele']==mhc_rank.loc[mhc_select]]
else:
print('Selecting allele {}'.format(mhc_select))
df = df[df['allele']==mhc_select]
# Mark roughly 10% of the data as validation set (indices drawn with replacement, so the unique fraction can be slightly smaller)
np.random.seed(seed=random_seed)
val_ind = np.random.randint(0,high=df.shape[0],size=int(df.shape[0]/10))
df['cluster_ID'] = (df.reset_index().index.isin(val_ind))*1
df["ID"]=df.sequence.apply(lambda x: hashlib.md5(x.encode('utf-8')).hexdigest())
#df = _string_index(df)
return df
def generate_abelin(mhc_select=0, data_dir='.'):
'''
mhc_select: int in [1, 2, 4, 6, 8, 10, 13, 14, 15, 16, 17, 21, 22, 36, 50, 63]
'''
data_path = Path(data_dir)
abelin = pd.read_csv(data_path/"abelin_peptides.all_predictions.csv")[['hit', 'allele', 'peptide']]
abelin.rename(columns={'peptide':'sequence'}, inplace=True)
# Remove entries present in training set (training data here: noMS as only MHCFlurry noMS is benchmarked with Abelin data)
train = generate_mhc_flurry(ms='noMS',mhc_select=None, data_dir=data_dir)[['allele', 'sequence']]
overlap_ind = abelin[['allele', 'sequence']].merge(train.drop_duplicates(['allele','sequence']).assign(vec=True),how='left', on=['allele', 'sequence']).fillna(False)['vec']
#print(abelin.shape[0], overlap_ind.shape, overlap_ind.sum() )
abelin = abelin[~overlap_ind.values]
# Select allele specific data
if type(mhc_select)==int:
allele_ranking = _get_allele_ranking(data_dir=data_dir)
mhc_select = allele_ranking.iloc[mhc_select]
abelin = abelin[abelin['allele']==mhc_select]
abelin.rename(columns={'hit':'label'}, inplace=True)
abelin['cluster_ID'] = 2
return abelin
def prepare_hpv(mhc_select, data_dir='.'):
'''
To run, download Table S2 from Supplementary Material of [<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, et al. Performance evaluation of MHC class-I binding prediction tools based on an experimentally validated MHC–peptide binding data set. Cancer Immunol Res 2019;7:719–36.] and save as HPV_data.csv in ./data
mhc_select: string from ['HLAA1', 'HLAA2', 'HLAA3', 'HLAA11', 'HLAA24', 'HLAB7', 'HLAB15']
'''
data_path = Path(data_dir)
df = pd.read_csv(data_path/"HPV_data.csv")
df["label"] = df["Experimental binding capacity"].mask(df["Experimental binding capacity"]=="nb")
return df[df["allele"]==mhc_select][["sequence","label"]]
def prepare_sars_cov(mhc_select, mhc_class="I", with_MHC_seq=False, data_dir='.'):
'''
To run, download https://www.immunitrack.com/wp/wp-content/uploads/Covid19-Intavis-Immunitrack-datasetV2.xlsx from
[<NAME>., <NAME>., <NAME>.B. et al.
Identification and validation of 174 COVID-19 vaccine candidate epitopes reveals
low performance of common epitope prediction tools. Sci Rep 10, 20465 (2020).
https://doi.org/10.1038/s41598-020-77466-4]
and save in datadir
mhc_select: string from ["1 A0101",
"2 A0201",
"3 A0301",
"4 A1101",
"5 A2402",
"6 B4001",
"7 C0401",
"8 C0701",
"9 C0702",
"10 C0102",
"11 DRB10401"]
'''
allele_sheets = ["1 A0101",
"2 A0201",
"3 A0301",
"4 A1101",
"5 A2402",
"6 B4001",
"7 C0401",
"8 C0701",
"9 C0702",
"10 C0102",
"11 DRB10401"]
data_path = Path(data_dir)
df = pd.read_excel(data_path/"Covid19-Intavis-Immunitrack-datasetV2.xlsx", sheet_name=allele_sheets)
df = pd.concat(df, sort=True)[["Sequence","Stab %"]]
df.rename(columns={"Sequence":"sequence","Stab %":"label"}, inplace=True)
if mhc_select is not None:
df["allele"] = mhc_select
df = df.loc[mhc_select]
if with_MHC_seq:
df = df.reset_index(level=0).rename(columns={"level_0":"allele"})
if mhc_class=="I":
covid_allele_map = {"1 A0101": 'HLA-A01:01',
"2 A0201": 'HLA-A02:01',
"3 A0301": 'HLA-A03:01',
"4 A1101": 'HLA-A11:01',
"5 A2402": 'HLA-A24:02',
"6 B4001": 'HLA-B40:01',
"7 C0401": 'HLA-C04:01',
"8 C0701": 'HLA-C07:01',
"9 C0702": 'HLA-C07:02'
}
elif mhc_class=="II":
covid_allele_map = {"11 DRB10401": "DRB1_0401"
}
df.allele = df.allele.map(covid_allele_map)
# filter out nan alleles (eg. class i (ii) alleles for class ii (i))
df = df[~df.allele.isnull()]
allele_df = prepare_pseudo_mhc_sequences(mhc_class, data_dir)
df = df.merge(allele_df, on="allele", how="left")
return df
def prepare_mhci_netmhcpan_4(mhc_select, MS=False, with_MHC_seq=False, data_dir="./", netmhc_data_version="4.0"):
"""
Prepare training data of NetMHCpan4.0/NetMHCpan4.1 with test data from
<NAME>., <NAME>., <NAME>. et al.
Identification and validation of 174 COVID-19 vaccine candidate epitopes reveals low performance
of common epitope prediction tools.
Sci Rep 10, 20465 (2020).
https://doi.org/10.1038/s41598-020-77466-4
Download
- Train/Val Data Affinity measurements
- either NetMHCpan4.1 data:
http://www.cbs.dtu.dk/suppl/immunology/NAR_NetMHCpan_NetMHCIIpan/NetMHCpan_train.tar.gz
unpack and rename as NetMHCpan_4_1_train
- or NetMhCpan4.0 data:
from http://www.cbs.dtu.dk/suppl/immunology/NetMHCpan-4.0/ download 0th CV split files f000_ba (train) and c000_ba (val, 20%)
store in data_dir/NetMHCpan_4_0_train
save everything in data_dir
"""
datatype = "ba" if not MS else "el"
data = []
if netmhc_data_version=="4.0":
for file in glob.glob(str(data_dir/"NetMHCpan_4_0_train/*000_{}".format(datatype))):
df = pd.read_csv(file, header=None, delimiter=" " if not MS else "\t", names=("sequence","label","allele","ic50"))
df["cluster_ID"] = 0 if file.split("/")[-1][0]=="f" else 1
data.append(df)
elif netmhc_data_version=="4.1":
for file in glob.glob(str(data_dir/"NetMHCpan_4_1_train/*_ba")):
df = pd.read_csv(file, header=None, delimiter=" ", names=("sequence","label","allele"))
# use one partition as validation set
df["cluster_ID"] = 1 if file.split("/")[-1]=="c004_ba" else 0
data.append(df)
data = pd.concat(data, ignore_index=True, sort=True)
if mhc_select is not None:
data = data[data["allele"]==mhc_select]
if with_MHC_seq:
if netmhc_data_version=="4.0":
# some BoLA and Mamu alleles in NetMhCpan4.0 have names that can't be mapped to ipd alleles names with simple rules
# map these to the convention of NetMhCpan4.1 first (these names can be mapped to ipd allele names)
data.allele = data.allele.apply(netmhpan_4_0_special_allele_map)
allele_df = prepare_pseudo_mhc_sequences("I", data_dir)
data = data.merge(allele_df, on="allele", how="left")
return data
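# Hedged usage sketch (assumes the NetMHCpan training archives were downloaded and
# unpacked into data_dir as described in the docstring; the allele spelling follows
# the training files):
# df_ba = prepare_mhci_netmhcpan_4("HLA-A02:01", MS=False,
#                                  data_dir=Path("../data"), netmhc_data_version="4.1")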
def prepare_mhcii_netmhcpan(mhc_select, MS=False, with_MHC_seq=False, data_dir="./", netmhc_data_version="3.2"):
"""
Prepare training data of NetMHCIIpan3.2/NetMHCIIpan4.0 with test data from
<NAME>., <NAME>., <NAME>. et al.
Identification and validation of 174 COVID-19 vaccine candidate epitopes reveals low performance
of common epitope prediction tools.
Sci Rep 10, 20465 (2020).
https://doi.org/10.1038/s41598-020-77466-4
Download
- Train/Val Data Affinity measurements
- either NetMHCpan4.1 data:
http://www.cbs.dtu.dk/suppl/immunology/NAR_NetMHCpan_NetMHCIIpan/NetMHCpan_train.tar.gz
unpack and rename as NetMHCpan_4_1_train
- or NetMhCpan4.0 data:
from http://www.cbs.dtu.dk/suppl/immunology/NetMHCpan-4.0/ download 0th CV split files f000_ba (train) and c000_ba (val, 20%)
store in data_dir/NetMHCpan_4_0_train
- MS measurement data:
Download http://www.cbs.dtu.dk/suppl/immunology/NAR_NetMHCpan_NetMHCIIpan/NetMHCIIpan_train.tar.gz, unpack and rename as NetMHCIIpan_train
save everything in data_dir
"""
data_dir = Path(data_dir)
data = []
if MS:
for file in glob.glob(str(data_dir/"NetMHCIIpan_train/*_EL1.txt")):
df = pd.read_csv(file, header=None, delimiter="\t", names=("sequence","label","allele","context"))
# use one partition as validation set
df["cluster_ID"] = 1 if file.split("/")[-1]=="test_EL1.txt" else 0
data.append(df)
else:
if netmhc_data_version=="3.2":
for file in glob.glob(str(data_dir/"NetMHCIIpan_3_2_train/*1.txt")):
#print(file)
df = pd.read_csv(file, header=None, delimiter="\t", names=("sequence","label","allele"))
# use one partition as validation set
df["cluster_ID"] = 1 if file.split("/")[-1]=="test1.txt" else 0
data.append(df)
elif netmhc_data_version=="4.0":
for file in glob.glob(str(data_dir/"NetMHCIIpan_train/*_BA1.txt")):
df = pd.read_csv(file, header=None, delimiter="\t", names=("sequence","label","allele","context"))
# use one partition as validation set
df["cluster_ID"] = 1 if file.split("/")[-1]=="test_BA1.txt" else 0
data.append(df)
data = pd.concat(data, ignore_index=True, sort=True)
if mhc_select is not None:
if MS:
data = data[data["allele"].apply(lambda x: mhc_select in x)]
else:
data = data[data["allele"]==mhc_select]
if with_MHC_seq:
allele_df = prepare_pseudo_mhc_sequences("II", data_dir)
data = data.merge(allele_df, on="allele", how="left")
return data
def prepare_mhcii_iedb2016(mhc_select, cv_fold, path_iedb="../data/iedb2016", path_jensen_csv="../data/jensen_et_al_2018_immunology_supplTabl3.csv"):
'''prepares mhcii iedb 2016 dataset using train1 ... test5 from http://www.cbs.dtu.dk/suppl/immunology/NetMHCIIpan-3.2/'''
def prepare_df(filename):
df =
|
pd.read_csv(filename,header=None,sep="\t")
|
pandas.read_csv
|
import pandas as pd
import numpy as np
import os
import math
import random
import pickle
import time
import feather
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
import seaborn as sns
import prismx as px
from prismx.utils import read_gmt, load_correlation, loadPrediction
from prismx.prediction import correlation_scores, loadPredictionsRange
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.preprocessing import MinMaxScaler
from sklearn import mixture
from sklearn.metrics.cluster import homogeneity_score
from scipy import stats
gene_auc = pd.read_csv("test_data/gene_auc.tsv", sep="\t", index_col=0)
set_auc = pd.read_csv("test_data/set_auc.tsv", sep="\t", index_col=0)
diff = gene_auc.iloc[:,5] - gene_auc.iloc[:,0]
diff.sort_values(0,ascending=False).iloc[0:20]
diff = set_auc.iloc[:,5] - set_auc.iloc[:,0]
diff.sort_values(0,ascending=False).iloc[0:20]
nx = "GO_Biological_Process_2018"
set_auc.loc[nx,:]
gene_auc.loc[nx,:]
p1 = pd.read_feather("prediction_folder_300_umap/prediction_0.f").set_index("index")
correlationFolder = "correlation_folder_300"
predictionFolder = "prediction_folder_300_umap"
outfolder = "prismxresult"
clustn = 300
libs = px.list_libraries()
gmt_file = px.load_library(libs[111], overwrite=True)
outname = libs[111]
#px.predict_gmt("gobp_model_"+str(clustn)+".pkl", gmt_file, correlationFolder, predictionFolder, outfolder, libs[111], step_size=200, intersect=True, verbose=True)
gop = pd.read_feather("prismxresult/GO_Biological_Process_2018.f")  # pandas.read_feather
#!/usr/bin/env python3
import argparse
import glob
import numpy as np
import os
import pandas as pd
import quaternion
import sys
import trimesh
import json
from tqdm import tqdm
from scipy.spatial.distance import cdist
import warnings
warnings.filterwarnings("ignore")
__dir__ = os.path.normpath(
os.path.join(
os.path.dirname(os.path.realpath(__file__)), '..')
)
sys.path[1:1] = [__dir__]
top_classes = {
"03211117": "display", "04379243": "table",
"02747177": "trashbin", "03001627": "chair",
# "04256520": "sofa", "02808440": "bathtub",
"02933112": "cabinet", "02871439": "bookshelf"
}
from shapefit.utils.utils import get_validation_appearance, get_symmetries, get_gt_dir, \
get_scannet, get_shapenet, make_M_from_tqs, make_tqs_from_M
# helper function to calculate difference between two quaternions
def calc_rotation_diff(q, q00):
rotation_dot = np.dot(quaternion.as_float_array(q00), quaternion.as_float_array(q))
rotation_dot_abs = np.abs(rotation_dot)
    # clip guards against floating-point dot products marginally above 1.0
    error_rotation_rad = 2 * np.arccos(np.clip(rotation_dot_abs, 0.0, 1.0))
error_rotation = np.rad2deg(error_rotation_rad)
return error_rotation
def rotation_error(row):
    # row layout: [qw, qx, qy, qz, qw_gt, qx_gt, qy_gt, qz_gt, symmetry]
    q = quaternion.quaternion(*row[:4])
    q_gt = quaternion.quaternion(*row[4:8])
    sym = row[-1]
    # number of equivalent orientations about the up axis for each symmetry tag
    sym_steps = {"__SYM_ROTATE_UP_2": 2, "__SYM_ROTATE_UP_4": 4, "__SYM_ROTATE_UP_INF": 36}
    if sym not in sym_steps:
        return calc_rotation_diff(q, q_gt)
    m = sym_steps[sym]
    return np.min([
        calc_rotation_diff(q, q_gt * quaternion.from_rotation_vector([0, (i * 2.0 / m) * np.pi, 0]))
        for i in range(m)
    ])
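# --- hedged sketch (illustration only) ---
# rotation_error() consumes a flat row [qw, qx, qy, qz, qw_gt, qx_gt, qy_gt, qz_gt, symmetry];
# for a shape with 2-fold symmetry about the up axis, a 180-degree yaw should count as a match.
def _demo_rotation_error():
    q_gt = quaternion.quaternion(1.0, 0.0, 0.0, 0.0)             # identity orientation
    q_pred = quaternion.from_rotation_vector([0.0, np.pi, 0.0])  # 180 deg about the up axis
    row = np.array(
        list(quaternion.as_float_array(q_pred)) +
        list(quaternion.as_float_array(q_gt)) +
        ["__SYM_ROTATE_UP_2"],
        dtype=object,
    )
    return rotation_error(row)  # ~0 degrees thanks to the symmetry handling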
def print_to_(verbose, log_file, string):
if verbose:
print(string)
sys.stdout.flush()
with open(log_file, 'a+') as f:
f.write(string + '\n')
def get_init_mesh(scan_id, key):
path = glob.glob(os.path.join(
'/home/ishvlad/workspace/Scan2CAD/MeshDeformation/ARAP/',
'arap_output_GT', scan_id, key + '*', 'init.obj'
))
if len(path) == 0:
return None
return trimesh.load_mesh(path[0], process=False)
def DAME(mesh_1, mesh_2, k=0.59213):
    # DAME-style dihedral-angle difference between two meshes with identical connectivity;
    # k up-weights edges with sharper dihedral angles in the first mesh.
    def dihedral(mesh):
        # for every unique edge, collect the two triangles that share it (a "bitriangle")
        unique_faces, _ = np.unique(np.sort(mesh.faces, axis=1), axis=0, return_index=True)
parts_bitriangles_map = []
bitriangles = {}
for face in unique_faces:
edge_1 = tuple(sorted([face[0], face[1]]))
if edge_1 not in bitriangles:
bitriangles[edge_1] = set([face[0], face[1], face[2]])
else:
bitriangles[edge_1].add(face[2])
edge_2 = tuple(sorted([face[1], face[2]]))
if edge_2 not in bitriangles:
bitriangles[edge_2] = set([face[0], face[1], face[2]])
else:
bitriangles[edge_2].add(face[0])
edge_3 = tuple(sorted([face[0], face[2]]))
if edge_3 not in bitriangles:
bitriangles[edge_3] = set([face[0], face[1], face[2]])
else:
bitriangles[edge_3].add(face[1])
bitriangles_aligned = np.empty((len(mesh.edges_unique), 4), dtype=int)
for j, edge in enumerate(mesh.edges_unique):
bitriangle = [*sorted(edge)]
bitriangle += [x for x in list(bitriangles[tuple(sorted(edge))]) if x not in bitriangle]
bitriangles_aligned[j] = bitriangle
vertices_bitriangles_aligned = mesh.vertices[bitriangles_aligned]
normals_1 = np.cross((vertices_bitriangles_aligned[:, 2] - vertices_bitriangles_aligned[:, 0]),
(vertices_bitriangles_aligned[:, 2] - vertices_bitriangles_aligned[:, 1]))
normals_1 = normals_1 / np.sqrt(np.sum(normals_1 ** 2, axis=1)[:, None])
normals_2 = np.cross((vertices_bitriangles_aligned[:, 3] - vertices_bitriangles_aligned[:, 0]),
(vertices_bitriangles_aligned[:, 3] - vertices_bitriangles_aligned[:, 1]))
normals_2 = normals_2 / np.sqrt(np.sum(normals_2 ** 2, axis=1)[:, None])
n1_n2_arccos = np.arccos(np.sum(normals_1 * normals_2, axis=1).clip(-1, 1))
n1_n2_signs = np.sign(
np.sum(normals_1 * (vertices_bitriangles_aligned[:, 3] - vertices_bitriangles_aligned[:, 1]), axis=1))
D_n1_n2 = n1_n2_arccos * n1_n2_signs
return D_n1_n2
D_mesh_1 = dihedral(mesh_1)
D_mesh_2 = dihedral(mesh_2)
mask_1 = np.exp((k * D_mesh_1) ** 2)
per_edge = np.abs(D_mesh_1 - D_mesh_2) * mask_1
result = np.sum(per_edge) / len(mesh_1.edges_unique)
return result, per_edge
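# --- hedged sketch (illustration only) ---
# DAME compares dihedral angles edge by edge, so both meshes must share the same
# connectivity; here the second mesh is a slightly jittered copy of an icosphere.
def _demo_dame():
    base = trimesh.creation.icosphere(subdivisions=2)
    noisy = base.copy()
    noisy.vertices = noisy.vertices + np.random.normal(scale=1e-3, size=noisy.vertices.shape)
    score, per_edge = DAME(base, noisy)
    return score  # 0.0 for identical meshes, a small positive value here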
def calc_ATSD(dists, border):
    # mean shape-to-scan nearest-neighbour distance, truncated at border
    return np.minimum(dists.min(1), border).mean()
def calc_F1(dists, border):
    # fraction of shape vertices whose nearest scan vertex lies within border
    return np.sum(dists.min(1) < border) / len(dists)
def calc_CD(dists, border):
    # symmetric truncated Chamfer-style distance (worst of the two directions)
    return max(np.minimum(dists.min(1), border).mean(), np.minimum(dists.min(0), border).mean())
def calc_metric(scan_mesh, shape_mesh, method='all', border=0.1):
area = border * 2
    # padded bounding box around the CAD shape (border * 2 of slack on each side)
    bbox = np.array([shape_mesh.vertices.min(0), shape_mesh.vertices.max(0)])
    bbox += [[-area], [area]]
batch = np.array([np.diag(bbox[0]), np.diag(bbox[1]), np.eye(3), -np.eye(3)])
slice_mesh = scan_mesh.copy()
# xyz
for i in range(3):
slice_mesh = slice_mesh.slice_plane(batch[0, i], batch[2, i])
slice_mesh = slice_mesh.slice_plane(batch[1, i], batch[3, i])
if len(slice_mesh.vertices) == 0:
if method == 'all':
return {'ATSD': border, 'CD': border, 'F1': 0.0}
else:
return border
scan_vertices = np.array(slice_mesh.vertices)
if len(scan_vertices) > 20000:
scan_vertices = scan_vertices[::len(scan_vertices) // 20000]
dists = cdist(np.array(shape_mesh.vertices), scan_vertices, metric='minkowski', p=1)
if method == 'ATSD':
return calc_ATSD(dists, border)
elif method == 'CD':
return calc_CD(dists, border)
elif method == 'F1':
return calc_F1(dists, border)
else:
return {
'ATSD': calc_ATSD(dists, border),
'CD': calc_CD(dists, border),
'F1': calc_F1(dists, border),
}
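# --- hedged sketch (illustration only) ---
# calc_metric() crops the scan to a padded box around the CAD mesh and compares vertex sets
# with an L1 nearest-neighbour distance; two unit boxes with a small offset illustrate it.
def _demo_calc_metric():
    scan = trimesh.creation.box(extents=[1.0, 1.0, 1.0])
    shape = trimesh.creation.box(extents=[1.0, 1.0, 1.0])
    shape.apply_translation([0.02, 0.0, 0.0])
    return calc_metric(scan, shape, border=0.1)  # roughly {'ATSD': 0.02, 'CD': 0.02, 'F1': 1.0}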
def metric_on_deformation(options):
output_name = options.output_name + '_' + str(options.border) + \
'_' + str(options.val_set) + '_' + str(options.metric_type)
if options.output_type == 'align':
# load needed models
appearance = get_validation_appearance(options.val_set)
# LOAD list of all aligned scenes
csv_files = glob.glob(os.path.join(options.input_dir, '*.csv'))
scenes = [x.split('/')[-1][:-4] for x in csv_files]
# Which scenes do we want to calculate?
scenes = np.intersect1d(scenes, list(appearance.keys()))
batch = []
for s in scenes:
df_scan = pd.read_csv(
os.path.join(options.input_dir, s + '.csv'),
index_col=0, dtype={'objectCategory': str}
)
# Filter: take only objects from appearance
df_scan['key'] = df_scan.objectCategory + '_' + df_scan.alignedModelId
df_scan = df_scan[np.in1d(df_scan['key'].values, list(appearance[s].keys()))]
batch.extend([{
'scan_id': s,
'key': row['key'],
'objectCategory': row['objectCategory'],
'alignedModelId': row['alignedModelId'],
'path': 'path to origin ShapeNet mesh',
'object_num': i,
'T': [row['tx'], row['ty'], row['tz']],
'Q': [row['qw'], row['qx'], row['qy'], row['qz']],
'S': [row['sx'], row['sy'], row['sz']]
} for i, row in df_scan.iterrows()])
df = pd.DataFrame(batch)
else:
# LOAD list of all aligned scenes
in_files = glob.glob(os.path.join(options.input_dir, 'scene*/*/approx.obj'))
if len(in_files) == 0:
in_files = glob.glob(os.path.join(options.input_dir, '*/scene*/*/approx.obj'))
info = []
for x in in_files:
parts = x.split('/')[-3:-1]
if len(parts[1].split('_')) == 3:
category_id, shape_id, object_num = parts[1].split('_')
else:
category_id, shape_id = parts[1].split('_')
object_num = -1
row = [
parts[0], # scan_id
category_id + '_' + shape_id, # key
category_id,
shape_id,
object_num,
x, # path
]
info.append(row)
df = pd.DataFrame(info, columns=['scan_id', 'key', 'objectCategory', 'alignedModelId', 'object_num', 'path'])
transform_files = ['/'.join(x.split('/')[:-1]) + '/transform.json' for x in in_files]
Ts, Qs, Ss = [], [], []
for f in transform_files:
if os.path.exists(f):
matrix = np.array(json.load(open(f, 'rb'))['transform'])
else:
Ts.append(None)
Qs.append(None)
Ss.append(None)
continue
t, q, s = make_tqs_from_M(matrix)
q = quaternion.as_float_array(q)
Ts.append(t)
Qs.append(q)
Ss.append(s)
df['T'] = Ts
df['Q'] = Qs
df['S'] = Ss
metrics = {}
batch = df.groupby('scan_id')
if options.verbose:
batch = tqdm(batch, desc='Scenes')
# CALCULATE METRICS
for scan_id, df_scan in batch:
scan_mesh = get_scannet(scan_id, 'mesh')
scan_batch = df_scan.iterrows()
if options.verbose:
scan_batch = tqdm(scan_batch, total=len(df_scan), desc='Shapes', leave=False)
for i, row in scan_batch:
if options.output_type == 'align':
shape_mesh = get_shapenet(row['objectCategory'], row['alignedModelId'], 'mesh')
else:
try:
shape_mesh = trimesh.load_mesh(row['path'])
except Exception:
metrics[i] = {'ATSD': np.nan, 'CD': np.nan, 'F1': np.nan}
continue
if row['T'] is None:
metrics[i] = {'ATSD': np.nan, 'CD': np.nan, 'F1': np.nan}
continue
T = make_M_from_tqs(row['T'], row['Q'], row['S'])
shape_mesh.apply_transform(T)
metrics[i] = calc_metric(scan_mesh, shape_mesh, border=options.border)
df_final = df.merge(pd.DataFrame(metrics).T, left_index=True, right_index=True)
df_final.to_csv(output_name + '.csv')
if len(df_final) == 0:
print_to_(options.verbose, output_name + '.log', 'No aligned shapes')
return
df_final = df_final[~pd.isna(df_final['ATSD'])]
# Calculate INSTANCE accuracy
acc = df_final[['ATSD', 'CD', 'F1']].mean().values
acc[-1] *= 100
print_string = '#' * 57 + '\nINSTANCE MEAN. ATSD: {:>4.2f}, CD: {:>4.2f}, F1: {:6.2f}\n'.format(
*acc) + '#' * 57
print_to_(options.verbose, output_name + '.log', print_string)
df_final['name'] = [top_classes.get(x, 'zother') for x in df_final.objectCategory]
    df_class = df_final.groupby('name')[['ATSD', 'CD', 'F1']].mean()
print_string = '###' + ' ' * 7 + 'CLASS' + ' ' * 4 + '# ATSD # CD # F1 ###'
print_to_(options.verbose, output_name + '.log', print_string)
for name, row in df_class.iterrows():
print_string = '###\t{:10} # {:>4.2f} # {:>4.2f} # {:6.2f} ###'.format(
name, row['ATSD'], row['CD'], row['F1']*100
)
print_to_(options.verbose, output_name + '.log', print_string)
acc = df_class.mean().values
acc[-1] *= 100
print_string = '#' * 57 + '\n CLASS MEAN. ATSD: {:>4.2f}, CD: {:>4.2f}, F1: {:6.2f}\n'.format(
*acc) + '#' * 57
print_to_(options.verbose, output_name + '.log', print_string)
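# --- hedged sketch (illustration only) ---
# metric_on_deformation() reads its settings from an options object; the original script
# presumably builds it via argparse, so an equivalent Namespace is sketched here. The
# attribute names are exactly the ones accessed above; the values are placeholders.
def _demo_deformation_options():
    return argparse.Namespace(
        input_dir='path/to/alignment_csvs_or_approx_meshes',  # placeholder
        output_name='deformation_metrics',                    # placeholder
        output_type='align',   # 'align' for CSV alignments, anything else for approx.obj folders
        metric_type='all',
        border=0.1,
        val_set='val',         # placeholder; passed to get_validation_appearance()
        verbose=True,
    )
# metric_on_deformation(_demo_deformation_options())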
def metric_on_alignment(options):
output_name = options.output_name + '_' + str(options.border) + \
'_' + str(options.val_set) + '_' + str(options.metric_type)
if options.output_type == 'deform':
        raise Exception('alignment metric is not defined for deformation output')
# LOAD list of all aligned scenes
csv_files = glob.glob(os.path.join(options.input_dir, '*.csv'))
scenes = [x.split('/')[-1][:-4] for x in csv_files]
# Which scenes do we want to calculate?
appearances_cad = get_validation_appearance(options.val_set)
df_appearance = pd.DataFrame(np.concatenate([
[(k, kk, appearances_cad[k][kk]) for kk in appearances_cad[k]] for k in appearances_cad
]), columns=['scan_id', 'key', 'count'])
scenes = np.intersect1d(scenes, list(set(df_appearance.scan_id)))
# LOAD GT and target alignments
gt_dir = get_gt_dir('align')
batch = []
batch_gt = []
for s in scenes:
df = pd.read_csv(os.path.join(gt_dir, s + '.csv'), index_col=0, dtype={'objectCategory': str})
df['scan_id'] = s
# Filter: take only objects from appearance
df['key'] = df.objectCategory + '_' + df.alignedModelId
df = df[np.in1d(df['key'].values, list(appearances_cad[s].keys()))]
batch_gt.append(df)
df = pd.read_csv(os.path.join(options.input_dir, s + '.csv'), index_col=0, dtype={'objectCategory': str})
df['scan_id'] = s
# Filter: take only objects from appearance
df['key'] = df.objectCategory + '_' + df.alignedModelId
df = df[np.in1d(df['key'].values, list(appearances_cad[s].keys()))]
batch.append(df)
df_alignment = pd.concat(batch)
df_alignment.reset_index(drop=True, inplace=True)
df_alignment_gt = pd.concat(batch_gt)
df_alignment_gt.reset_index(drop=True, inplace=True)
# Create index for each GT object
df_alignment_gt.reset_index(inplace=True)
# LOAD Symmetry info
df_symmetry = get_symmetries()
df_alignment = df_alignment.merge(df_symmetry, how='left', on='key')
df_alignment_gt = df_alignment_gt.merge(df_symmetry, how='left', on='key')
# Make pairs for difference calculation
df_mutual = df_alignment_gt.merge(df_alignment, how='left', on=['scan_id', 'objectCategory'], suffixes=('_gt', ''))
# Calculate the difference
t_dist = df_mutual[['tx_gt', 'ty_gt', 'tz_gt']].values - df_mutual[['tx', 'ty', 'tz']].values
df_mutual['t_dist'] = np.linalg.norm(t_dist, ord=2, axis=-1)
s_diff = df_mutual[['sx', 'sy', 'sz']].values / df_mutual[['sx_gt', 'sy_gt', 'sz_gt']].values
df_mutual['s_dist'] = 100 * np.abs(s_diff.mean(-1) - 1)
cols = ['qw', 'qx', 'qy', 'qz', 'qw_gt', 'qx_gt', 'qy_gt', 'qz_gt', 'symmetry']
df_mutual['q_dist'] = [rotation_error(row) for row in df_mutual[cols].values]
df_mutual.q_dist.fillna(0, inplace=True)
    # is the aligned shape close enough to the GT shape (translation, rotation and scale thresholds)?
df_mutual['is_fitted'] = (df_mutual.t_dist <= 0.2) & (df_mutual.q_dist <= 20) & (df_mutual.s_dist <= 20)
# GET and SAVE the result
df_fit = df_mutual[['index', 'is_fitted']].groupby('index').max().reset_index()
df_fit = df_alignment_gt.merge(df_fit, on='index')
df_final = df_fit[['scan_id', 'objectCategory', 'alignedModelId', 'is_fitted']]
df_final.to_csv(output_name + '.csv')
# Calculate INSTANCE accuracy
df_scan = df_final.groupby('scan_id').agg({'is_fitted': ('sum', 'count')})
df_scan.columns = ['sum', 'count']
total = df_scan.sum()
acc = total['sum'] / total['count'] * 100
print_string = '#' * 80 + '\n# scenes: {}, # fitted: {}, # total: {}, INSTANCE ACCURACY: {:>4.2f}\n'.format(
len(df_scan), int(total['sum']), int(total['count']), acc
) + '#' * 80
print_to_(options.verbose, output_name + '.log', print_string)
# Calculate CLASS accuracy
df_final['name'] = [top_classes.get(x, 'zother') for x in df_final.objectCategory]
df_class = df_final.groupby('name').agg({'is_fitted': ('sum', 'count')})
df_class.columns = ['sum', 'count']
    df_class = df_class.sort_index()
for name, row in df_class.iterrows():
print_string = '\t{:10} # fitted: {:4}, # total: {:4}, CLASS ACCURACY: {:6.2f}'.format(
name, int(row['sum']), int(row['count']), row['sum'] / row['count'] * 100
)
print_to_(options.verbose, output_name + '.log', print_string)
print_string = '#' * 80 + '\n CLASS MEAN ACCURACY: {:>4.2f}'.format(
(df_class['sum'] / df_class['count']).mean() * 100
)
print_to_(options.verbose, output_name + '.log', print_string)
def metric_on_perceptual(options):
output_name = options.output_name + '_' + str(options.border) + \
'_' + str(options.val_set) + '_' + str(options.metric_type)
if options.output_type == 'align':
raise Exception('Perceptual is zero!')
# LOAD list of all aligned scenes
is_zhores = False
paths = glob.glob(os.path.join(options.input_dir, 'scene*/*'))
if len(paths) == 0:
paths = glob.glob(os.path.join(options.input_dir, '*/scene*/*'))
is_zhores = True
if options.verbose:
paths = tqdm(paths)
rows = []
for p in paths:
scan_id = p.split('/')[-2]
if is_zhores:
align, method = p.split('_output_')[-1].split('/')[0].split('_50_')
else:
method, align = p.split('/')[-3].split('_output_')
if len(p.split('/')[-1].split('_')) == 3:
category_id, shape_id, object_num = p.split('/')[-1].split('_')
else:
category_id, shape_id = p.split('/')[-1].split('_')
object_num = -1
init = os.path.join(p, 'init.obj')
if not os.path.exists(init):
continue
init_mesh = get_init_mesh(scan_id, category_id + '_' + shape_id)
if init_mesh is None:
continue
approx_mesh = trimesh.load_mesh(os.path.join(p, 'approx.obj'), process=False)
dame = np.array(DAME(init_mesh, approx_mesh, 0.59213)[1])
dame = dame[~pd.isna(dame)].mean()
rows.append([
scan_id,
category_id + '_' + shape_id, # key
category_id,
shape_id,
object_num,
p, # path
dame
])
cols = ['scan_id', 'key', 'objectCategory', 'alignedModelId', 'object_num', 'path', 'DAME']
    df_final = pd.DataFrame(rows, columns=cols)  # pandas.DataFrame