## Bot for adding Prop65 ID
from wikidataintegrator import wdi_core, wdi_login, wdi_helpers
from wikidataintegrator.ref_handlers import update_retrieved_if_new_multiple_refs
import pandas as pd
from pandas import read_csv
import os
import requests
import time
from datetime import datetime
import copy
## Here are the object QIDs, assuming that a chemical is the subject
object_qid = {'femrep':'Q55427776',
'menrep': 'Q55427774',
'devtox': 'Q72941151',
'cancer': 'Q187661',
'reptox': 'Q55427767'}
list_date = {'femrep':'Female Reproductive Toxicity - Date of Listing',
'menrep':'Male Reproductive Toxicity - Date of Listing',
'devtox':'Developmental Toxicity - Date of Listing',  # assumed column name (developmental-toxicity listing date)
'cancer': 'None',
'reptox': 'None'}
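## Note: 'cancer' and 'reptox' map to the literal string 'None'; the OEHHA export,
## as read here, carries no listing-date column for them, and generate_statements()
## checks for that sentinel before adding a start-time (P580) qualifier.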
list_prop = "P31"
def create_reference(prop65_url):
refStatedIn = wdi_core.WDItemID(value="Q28455381", prop_nr="P248", is_reference=True)
timeStringNow = datetime.now().strftime("+%Y-%m-%dT00:00:00Z")
refRetrieved = wdi_core.WDTime(timeStringNow, prop_nr="P813", is_reference=True)
refURL = wdi_core.WDUrl(value=prop65_url, prop_nr="P854", is_reference=True)
return [refStatedIn, refRetrieved, refURL]
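## Illustrative use only; the URL below is a placeholder, not a real Prop 65 entry:
# reference = create_reference('https://oehha.ca.gov/chemicals/example-chemical')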
def check_statement_status(wd_item,object_qid):
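## Sorts each Prop 65 listing type for the current chemical into one of six buckets:
## brand-new statements, newly deprecated, already deprecated, unchanged, to be
## delisted (rank change) and to be relisted (rank change).
## Note: as written, this relies on module-level globals set by the calling loop
## ('eachitem', the current chemical's WDID, plus 'prop_65_mapped', 'deprecated_df'
## and 'current_df'); the 'wd_item' parameter is unused here.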
new_statetypes = []
new_dep_types = []
dep_no_change = []
no_change=[]
rank_delist = []
rank_relist = []
for object_type in object_qid.keys():
if eachitem in prop_65_mapped['WDID'].loc[prop_65_mapped[object_type+' delisted']==True].tolist():
if eachitem in deprecated_df['WDID'].loc[deprecated_df['deprecated_type']==object_type+' delisted'].tolist():
dep_no_change.append(object_type)
elif eachitem in current_df['WDID'].loc[current_df['ObjectType']==object_type+' current'].tolist():
rank_delist.append(object_type)
else:
new_dep_types.append(object_type)
if eachitem in prop_65_mapped['WDID'].loc[prop_65_mapped[object_type+' current']==True].tolist():
if eachitem in deprecated_df['WDID'].loc[deprecated_df['deprecated_type']==object_type+' delisted'].tolist():
rank_relist.append(object_type)
elif eachitem in current_df['WDID'].loc[current_df['ObjectType']==object_type+' current'].tolist():
no_change.append(object_type)
else:
new_statetypes.append(object_type)
comparison_dict = {'new_statetypes':new_statetypes,
'new_dep_types':new_dep_types,
'dep_no_change':dep_no_change,
'no_change':no_change,
'rank_delist':rank_delist,
'rank_relist':rank_relist}
return comparison_dict
#### reptox is the umbrella reproductive-toxicity type; when a more specific type
#### (femrep, menrep or devtox) already appears in the same change bucket, the
#### redundant reptox entry is dropped from that bucket
def rep_redundancy_check(repcheck, comparison_dict):
if (((len(repcheck.intersection(set(comparison_dict['new_statetypes']))) >= 1) or
(len(repcheck.intersection(set(comparison_dict['no_change']))) >= 1)) and
('reptox' in comparison_dict['new_statetypes'])):
comparison_dict['new_statetypes'].remove('reptox')
if (((len(repcheck.intersection(set(comparison_dict['new_dep_types']))) >= 1) or
(len(repcheck.intersection(set(comparison_dict['dep_no_change']))) >= 1)) and
('reptox' in comparison_dict['new_dep_types'])):
comparison_dict['new_dep_types'].remove('reptox')
if len(repcheck.intersection(set(comparison_dict['rank_delist']))) >= 1 and 'reptox' in comparison_dict['rank_delist']:
comparison_dict['rank_delist'].remove('reptox')
if len(repcheck.intersection(set(comparison_dict['rank_relist']))) >= 1 and 'reptox' in comparison_dict['rank_relist']:
comparison_dict['rank_relist'].remove('reptox')
return comparison_dict
#### Include statement on why it's deprecated if it's deprecated
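## Note (assumption): 'delist_reason' (a deprecation-reason qualifier) and 'reference'
## (the output of create_reference) are expected to be module-level globals defined
## elsewhere in the full script before this function runs.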
def generate_statements(statetype_set,dep_list,eachitem_row):
statements_to_add = []
for run_type in statetype_set:
run_object_wdid = object_qid[run_type]
date_type = list_date[run_type]
qualifier_list = []
if date_type != 'None':
runlist_date = str(eachitem_row.iloc[0][date_type])
if runlist_date != 'None':
list_qualifier = wdi_core.WDTime(datetime.strptime(runlist_date,'%m/%d/%Y').strftime("+%Y-%m-%dT00:00:00Z"), prop_nr='P580', is_qualifier=True)
qualifier_list.append(list_qualifier)
if run_type in dep_list:
qualifier_list.append(delist_reason)
state_rank = 'deprecated'
else:
state_rank = 'normal'
prop65_statement = wdi_core.WDItemID(value=run_object_wdid, prop_nr=list_prop, rank=state_rank,
qualifiers = qualifier_list, references=[copy.deepcopy(reference)])
statements_to_add.append(prop65_statement)
return statements_to_add
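## Illustrative call (hypothetical combination, mirroring the function signature):
# statements_to_add = generate_statements(comparison_dict['new_statetypes'] +
#                                         comparison_dict['new_dep_types'],
#                                         comparison_dict['new_dep_types'], eachitem_row)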
## Retrieve previous statements that need to be changed
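## Keeps every existing P31 claim whose object QID is not being deprecated; the raw
## JSON qualifiers and references are passed through so those claims survive the rewrite.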
def retrieve_prev_state_list(subject_qid,dep_object_qid_list):
wd_item = wdi_core.WDItemEngine(wd_item_id=subject_qid)
mass_statement = wd_item.get_wd_json_representation()['claims'][list_prop]
states_to_keep = []
for i in range(len(mass_statement)):
sub_qid = mass_statement[i]['mainsnak']['datavalue']['value']['id']
state_rank = mass_statement[i]['rank']
# claims without qualifiers or references omit these keys in the Wikidata JSON
qualifier_list = mass_statement[i].get('qualifiers', {})
reference = mass_statement[i].get('references', [])
if sub_qid in dep_object_qid_list:
continue
else:
saved_statement = wdi_core.WDItemID(value=sub_qid, prop_nr=list_prop, rank=state_rank,
qualifiers = qualifier_list, references=[copy.deepcopy(reference)])
states_to_keep.append(saved_statement)
return states_to_keep
## Login for Scheduled bot
print("Logging in...")
try:
from scheduled_bots.local import WDUSER, WDPASS
except ImportError:
if "WDUSER" in os.environ and "WDPASS" in os.environ:
WDUSER = os.environ['WDUSER']
WDPASS = os.environ['WDPASS']
else:
raise ValueError("WDUSER and WDPASS must be specified in local.py or as environment variables")
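## Sketch (assumptions): with credentials in hand, the bot would authenticate and write
## roughly as below. The QID and the write options are illustrative, not taken from the source.
# login = wdi_login.WDLogin(WDUSER, WDPASS)
# item = wdi_core.WDItemEngine(wd_item_id='Q000000', data=statements_to_add,
#                              global_ref_mode='CUSTOM',
#                              ref_handler=update_retrieved_if_new_multiple_refs)
# item.write(login)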
## Add Prop 65 CA ID relations from the CA OEHHA chemicals list to Wikidata
prop65_chems = read_csv('data/prop65_chems.tsv', delimiter='\t', header=0, encoding='utf-8', index_col=0)
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# the desired behavior is to iterate until everything would be NaN on the
# next iteration, so make sure the last element of the iterator is 'l' in
# this case, since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over, so the loop body never runs and the sentinel
# values remain unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# single_series_name_is_preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'<EMAIL> some text <EMAIL>',
'<EMAIL> some text c@d.<EMAIL> and <EMAIL>',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0, "second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
# an Index should return the same result as a default index without a name,
# so index.name doesn't affect the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
# 0xFF13: ３ FULLWIDTH DIGIT THREE
values = ['A', '3', u'¼', u'★', u'፸', u'３', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'３', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
values = ['A', np.nan, u'¼', u'★', np.nan, u'３', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
import pandas as pd
import pytest
from feature_engine.encoding import OneHotEncoder
def test_encode_categories_in_k_binary_plus_select_vars_automatically(df_enc_big):
# test case 1: encode all categories into k binary variables, select variables
# automatically
encoder = OneHotEncoder(top_categories=None, variables=None, drop_last=False)
X = encoder.fit_transform(df_enc_big)
# test init params
assert encoder.top_categories is None
assert encoder.variables is None
assert encoder.drop_last is False
# test fit attr
transf = {
"var_A_A": 6,
"var_A_B": 10,
"var_A_C": 4,
"var_A_D": 10,
"var_A_E": 2,
"var_A_F": 2,
"var_A_G": 6,
"var_B_A": 10,
"var_B_B": 6,
"var_B_C": 4,
"var_B_D": 10,
"var_B_E": 2,
"var_B_F": 2,
"var_B_G": 6,
"var_C_A": 4,
"var_C_B": 6,
"var_C_C": 10,
"var_C_D": 10,
"var_C_E": 2,
"var_C_F": 2,
"var_C_G": 6,
}
assert encoder.variables_ == ["var_A", "var_B", "var_C"]
assert encoder.variables_binary_ == []
assert encoder.n_features_in_ == 3
assert encoder.encoder_dict_ == {
"var_A": ["A", "B", "C", "D", "E", "F", "G"],
"var_B": ["A", "B", "C", "D", "E", "F", "G"],
"var_C": ["A", "B", "C", "D", "E", "F", "G"],
}
# test transform output
assert X.sum().to_dict() == transf
assert "var_A" not in X.columns
def test_encode_categories_in_k_minus_1_binary_plus_list_of_variables(df_enc_big):
# test case 2: encode all categories into k-1 binary variables,
# pass list of variables
encoder = OneHotEncoder(
top_categories=None, variables=["var_A", "var_B"], drop_last=True
)
X = encoder.fit_transform(df_enc_big)
# test init params
assert encoder.top_categories is None
assert encoder.variables == ["var_A", "var_B"]
assert encoder.drop_last is True
# test fit attr
transf = {
"var_A_A": 6,
"var_A_B": 10,
"var_A_C": 4,
"var_A_D": 10,
"var_A_E": 2,
"var_A_F": 2,
"var_B_A": 10,
"var_B_B": 6,
"var_B_C": 4,
"var_B_D": 10,
"var_B_E": 2,
"var_B_F": 2,
}
assert encoder.variables_ == ["var_A", "var_B"]
assert encoder.variables_binary_ == []
assert encoder.n_features_in_ == 3
assert encoder.encoder_dict_ == {
"var_A": ["A", "B", "C", "D", "E", "F"],
"var_B": ["A", "B", "C", "D", "E", "F"],
}
# test transform output
for col in transf.keys():
assert X[col].sum() == transf[col]
assert "var_B" not in X.columns
assert "var_B_G" not in X.columns
assert "var_C" in X.columns
def test_encode_top_categories():
# test case 3: encode only the most popular categories
df = pd.DataFrame(
{
"var_A": ["A"] * 5
+ ["B"] * 11
+ ["C"] * 4
+ ["D"] * 9
+ ["E"] * 2
+ ["F"] * 2
+ ["G"] * 7,
"var_B": ["A"] * 11
+ ["B"] * 7
+ ["C"] * 4
+ ["D"] * 9
+ ["E"] * 2
+ ["F"] * 2
+ ["G"] * 5,
"var_C": ["A"] * 4
+ ["B"] * 5
+ ["C"] * 11
+ ["D"] * 9
+ ["E"] * 2
+ ["F"] * 2
+ ["G"] * 7,
}
)
encoder = OneHotEncoder(top_categories=4, variables=None, drop_last=False)
X = encoder.fit_transform(df)
# test init params
assert encoder.top_categories == 4
# test fit attr
transf = {
"var_A_D": 9,
"var_A_B": 11,
"var_A_A": 5,
"var_A_G": 7,
"var_B_A": 11,
"var_B_D": 9,
"var_B_G": 5,
"var_B_B": 7,
"var_C_D": 9,
"var_C_C": 11,
"var_C_G": 7,
"var_C_B": 5,
}
# test fit attr
assert encoder.variables_ == ["var_A", "var_B", "var_C"]
assert encoder.variables_binary_ == []
assert encoder.n_features_in_ == 3
assert encoder.encoder_dict_ == {
"var_A": ["B", "D", "G", "A"],
"var_B": ["A", "D", "B", "G"],
"var_C": ["C", "D", "G", "B"],
}
# test transform output
for col in transf.keys():
assert X[col].sum() == transf[col]
assert "var_B" not in X.columns
assert "var_B_F" not in X.columns
def test_error_if_top_categories_not_integer():
with pytest.raises(ValueError):
OneHotEncoder(top_categories=0.5)
def test_error_if_drop_last_not_bool():
with pytest.raises(ValueError):
OneHotEncoder(drop_last=0.5)
def test_raises_error_if_df_contains_na(df_enc_big, df_enc_big_na):
# test case 4: when dataset contains na, fit method
with pytest.raises(ValueError):
encoder = OneHotEncoder()
encoder.fit(df_enc_big_na)
# test case 4: when dataset contains na, transform method
with pytest.raises(ValueError):
encoder = OneHotEncoder()
encoder.fit(df_enc_big)
encoder.transform(df_enc_big_na)
def test_encode_numerical_variables(df_enc_numeric):
encoder = OneHotEncoder(
top_categories=None,
variables=None,
drop_last=False,
ignore_format=True,
)
X = encoder.fit_transform(df_enc_numeric[["var_A", "var_B"]])
# test fit attr
transf = {
"var_A_1": [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
"var_A_2": [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
"var_A_3": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
"var_B_1": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
"var_B_2": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
"var_B_3": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
}
transf = pd.DataFrame(transf).astype("int32")
X = pd.DataFrame(X).astype("int32")
assert encoder.variables_ == ["var_A", "var_B"]
assert encoder.variables_binary_ == []
assert encoder.n_features_in_ == 2
assert encoder.encoder_dict_ == {"var_A": [1, 2, 3], "var_B": [1, 2, 3]}
# test transform output
pd.testing.assert_frame_equal(X, transf)
def test_variables_cast_as_category(df_enc_numeric):
encoder = OneHotEncoder(
top_categories=None,
variables=None,
drop_last=False,
ignore_format=True,
)
df = df_enc_numeric.copy()
df[["var_A", "var_B"]] = df[["var_A", "var_B"]].astype("category")
X = encoder.fit_transform(df[["var_A", "var_B"]])
# test fit attr
transf = {
"var_A_1": [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
"var_A_2": [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
"var_A_3": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
"var_B_1": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
"var_B_2": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
"var_B_3": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
}
transf = pd.DataFrame(transf)
from datetime import (
datetime,
timedelta,
timezone,
)
import numpy as np
import pytest
import pytz
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
class TestSeriesFillNA:
def test_fillna_nat(self):
series = Series([0, 1, 2, NaT.value], dtype="M8[ns]")
filled = series.fillna(method="pad")
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="pad")
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
series = Series([NaT.value, 0, 1, 2], dtype="M8[ns]")
filled = series.fillna(method="bfill")
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="bfill")
filled2 = df.fillna(value=series[1])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
def test_fillna_value_or_method(self, datetime_series):
msg = "Cannot specify both 'value' and 'method'"
with pytest.raises(ValueError, match=msg):
datetime_series.fillna(value=0, method="ffill")
def test_fillna(self):
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
tm.assert_series_equal(ts, ts.fillna(method="ffill"))
ts[2] = np.NaN
exp = Series([0.0, 1.0, 1.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="ffill"), exp)
exp = Series([0.0, 1.0, 3.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="backfill"), exp)
exp = Series([0.0, 1.0, 5.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(value=5), exp)
msg = "Must specify a fill 'value' or 'method'"
with pytest.raises(ValueError, match=msg):
ts.fillna()
def test_fillna_nonscalar(self):
# GH#5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.0])
tm.assert_series_equal(result, expected)
result = s1.fillna({})
tm.assert_series_equal(result, s1)
result = s1.fillna(Series((), dtype=object))
tm.assert_series_equal(result, s1)
result = s2.fillna(s1)
tm.assert_series_equal(result, s2)
result = s1.fillna({0: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna({1: 1})
tm.assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
tm.assert_series_equal(result, s1)
def test_fillna_aligns(self):
s1 = Series([0, 1, 2], list("abc"))
s2 = Series([0, np.nan, 2], list("bac"))
result = s2.fillna(s1)
expected = Series([0, 0, 2.0], list("bac"))
tm.assert_series_equal(result, expected)
def test_fillna_limit(self):
ser = Series(np.nan, index=[0, 1, 2])
result = ser.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
result = ser.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
def test_fillna_dont_cast_strings(self):
# GH#9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ["0", "1.5", "-0.3"]
for val in vals:
ser = Series([0, 1, np.nan, np.nan, 4], dtype="float64")
result = ser.fillna(val)
expected = Series([0, 1, val, val, 4], dtype="object")
tm.assert_series_equal(result, expected)
def test_fillna_consistency(self):
# GH#16402
# fillna with a tz aware to a tz-naive, should result in object
ser = Series([Timestamp("20130101"), NaT])
result = ser.fillna(Timestamp("20130101", tz="US/Eastern"))
expected = Series(
[Timestamp("20130101"), Timestamp("2013-01-01", tz="US/Eastern")],
dtype="object",
)
tm.assert_series_equal(result, expected)
msg = "The 'errors' keyword in "
with tm.assert_produces_warning(FutureWarning, match=msg):
# where (we ignore the errors=)
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
# with a non-datetime
result = ser.fillna("foo")
expected = Series([Timestamp("20130101"), "foo"])
tm.assert_series_equal(result, expected)
# assignment
ser2 = ser.copy()
ser2[1] = "foo"
tm.assert_series_equal(ser2, expected)
def test_fillna_downcast(self):
# GH#15277
# infer int64 from float64
ser = Series([1.0, np.nan])
result = ser.fillna(0, downcast="infer")
expected = Series([1, 0])
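        # Worked example: the filled data is [1.0, 0.0]; all values are integral,
        # so downcast="infer" narrows float64 to int64, giving [1, 0].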
| tm.assert_series_equal(result, expected) | pandas._testing.assert_series_equal |
# -*- coding: utf-8 -*-
"""
@author: hkaneko
"""
import math
import sys
import numpy as np
import pandas as pd
import sample_functions
from sklearn import metrics, model_selection, svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
# Set y_name below to either 'pIC50_class' or 'pIGC50_class'.
# A file named descriptors_with_[y_name].csv is read as the dataset for the calculation.
# If you change y_name to some other name and prepare your own descriptors_with_[y_name].csv
# in the same format as the other files, using sample_program_6_8_0_csv.py or
# sample_program_6_8_0_sdf.py, the same calculation can be run on it.
y_name = 'pIC50_class'
# 'pIC50_class' : pharmacological-activity dataset for classification
# 'pIGC50_class' : environmental-toxicity dataset for classification
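# Expected layout of descriptors_with_[y_name].csv, inferred from the read_csv call
# below: the first column is the sample index (index_col=0), the next column holds
# the y_name class label, and all remaining columns are molecular descriptors.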
rate_of_test_samples = 0.25  # fraction of samples used as test data; greater than 0 and less than 1
method_name = 'rf' # 'knn' or 'svm' or 'rf'
number_of_submodels = 50  # number of sub-models
rate_of_selected_x_variables = 0.7  # fraction of explanatory variables selected in each sub-dataset; greater than 0 and less than 1
add_nonlinear_terms_flag = False  # True (add squared and cross terms) or False (do not add them)
fold_number = 5  # N in N-fold CV
max_number_of_k = 20  # maximum value of k to try
svm_cs = 2 ** np.arange(-5, 11, dtype=float)
svm_gammas = 2 ** np.arange(-20, 11, dtype=float)
rf_number_of_trees = 300  # number of decision trees in RF
rf_x_variables_rates = np.arange(1, 11, dtype=float) / 10  # candidate fractions of explanatory variables per decision tree
if method_name != 'knn' and method_name != 'svm' and method_name != 'rf':
    sys.exit('\'{0}\' is not a supported classification method. Please check method_name.'.format(method_name))
dataset = pd.read_csv('descriptors_with_{0}.csv'.format(y_name), index_col=0)  # load the dataset of activities and descriptors
y = dataset.iloc[:, 0].copy()
x = dataset.iloc[:, 1:]
x = x.replace(np.inf, np.nan).fillna(np.nan)  # replace inf with NaN
nan_variable_flags = x.isnull().any()  # variables that contain NaN
x = x.drop(x.columns[nan_variable_flags], axis=1)  # drop variables that contain NaN
number_of_test_samples = round(dataset.shape[0] * rate_of_test_samples)
# randomly split the samples into training and test data
# giving random_state a number makes the "random" split reproducible across runs
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=number_of_test_samples, shuffle=True,
random_state=0)
class_types = list(set(y_train))  # class labels
class_types.sort(reverse=True)  # sort them
# drop explanatory variables whose standard deviation is 0
std_0_variable_flags = x_train.std() == 0
x_train = x_train.drop(x_train.columns[std_0_variable_flags], axis=1)
x_test = x_test.drop(x_test.columns[std_0_variable_flags], axis=1)
if add_nonlinear_terms_flag:
    x_train = pd.read_csv('x_train_{0}.csv'.format(y_name), index_col=0)  # load the dataset of activities and descriptors
    x_test = pd.read_csv('x_test_{0}.csv'.format(y_name), index_col=0)  # load the dataset of activities and descriptors
    # x_train = sample_functions.add_nonlinear_terms(x_train)  # add squared and cross terms of the explanatory variables
    # x_test = sample_functions.add_nonlinear_terms(x_test)  # add squared and cross terms of the explanatory variables
    # drop explanatory variables whose standard deviation is 0
    std_0_nonlinear_variable_flags = x_train.std() == 0
    x_train = x_train.drop(x_train.columns[std_0_nonlinear_variable_flags], axis=1)
    x_test = x_test.drop(x_test.columns[std_0_nonlinear_variable_flags], axis=1)
# autoscaling (standardization)
autoscaled_x_train = (x_train - x_train.mean()) / x_train.std()
autoscaled_x_test = (x_test - x_train.mean()) / x_train.std()
if method_name == 'svm':
    # to save time, optimize γ only once up-front, by maximizing the variance of the Gram matrix
optimal_svm_gamma = sample_functions.gamma_optimization_with_variance(autoscaled_x_train, svm_gammas)
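    # The RBF kernel is K(x_i, x_j) = exp(-gamma * ||x_i - x_j||^2); picking the gamma
    # that maximizes the variance of the Gram-matrix entries spreads the similarities
    # out and avoids a degenerate matrix of (nearly) all zeros or all ones.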
number_of_x_variables = int(np.ceil(x_train.shape[1] * rate_of_selected_x_variables))
print('Number of explanatory variables in each sub-dataset:', number_of_x_variables)
estimated_y_train_all = pd.DataFrame()  # empty DataFrame collecting each sub-model's estimates of y for the training data
selected_x_variable_numbers = []  # empty list collecting the explanatory-variable indices of each sub-dataset
submodels = []  # empty list collecting each fitted sub-model
for submodel_number in range(number_of_submodels):
    print(submodel_number + 1, '/', number_of_submodels)  # show progress
    # selection of explanatory variables
    # draw one uniform random number in [0, 1) per explanatory variable and select the variables with the smallest values
random_x_variables = np.random.rand(x_train.shape[1])
selected_x_variable_numbers_tmp = random_x_variables.argsort()[:number_of_x_variables]
selected_autoscaled_x_train = autoscaled_x_train.iloc[:, selected_x_variable_numbers_tmp]
selected_x_variable_numbers.append(selected_x_variable_numbers_tmp)
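    # Worked example (hypothetical numbers): with 5 columns and number_of_x_variables == 3,
    # np.random.rand(5) might give [0.62, 0.11, 0.85, 0.30, 0.47]; argsort() yields
    # [1, 3, 4, 0, 2], so columns [1, 3, 4] are selected for this sub-dataset.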
if method_name == 'knn':
        # optimize k by cross-validation
        accuracy_in_cv_all = []  # empty list collecting the cross-validated accuracy for each k
        ks = []  # likewise collecting the value of k itself
for k in range(1, max_number_of_k + 1):
model = KNeighborsClassifier(n_neighbors=k, metric='euclidean') # k-NN モデルの宣言
            # compute the cross-validation estimates and convert them to a DataFrame
estimated_y_in_cv = pd.DataFrame(
model_selection.cross_val_predict(model, selected_autoscaled_x_train, y_train,
cv=fold_number))
            accuracy_in_cv = metrics.accuracy_score(y_train, estimated_y_in_cv)  # compute the accuracy
            accuracy_in_cv_all.append(accuracy_in_cv)  # append the accuracy
            ks.append(k)  # append the value of k
optimal_k = ks[accuracy_in_cv_all.index(max(accuracy_in_cv_all))]
        submodel = KNeighborsClassifier(n_neighbors=optimal_k, metric='euclidean')  # declare the k-NN model
elif method_name == 'svm':
        # optimize C by cross-validation
model_in_cv = GridSearchCV(svm.SVC(kernel='rbf', gamma=optimal_svm_gamma),
{'C': svm_cs}, cv=fold_number)
model_in_cv.fit(selected_autoscaled_x_train, y_train)
optimal_svm_c = model_in_cv.best_params_['C']
        # optimize γ by cross-validation
model_in_cv = GridSearchCV(svm.SVC(kernel='rbf', C=optimal_svm_c),
{'gamma': svm_gammas}, cv=fold_number)
model_in_cv.fit(selected_autoscaled_x_train, y_train)
optimal_svm_gamma = model_in_cv.best_params_['gamma']
        submodel = svm.SVC(kernel='rbf', C=optimal_svm_c, gamma=optimal_svm_gamma)  # declare the SVM model
elif method_name == 'rf':
        # optimize the fraction of explanatory variables using the OOB (Out-Of-Bag) accuracy
accuracy_oob = []
for index, x_variables_rate in enumerate(rf_x_variables_rates):
model_in_validation = RandomForestClassifier(n_estimators=rf_number_of_trees, max_features=int(
max(math.ceil(selected_autoscaled_x_train.shape[1] * x_variables_rate), 1)), oob_score=True)
model_in_validation.fit(selected_autoscaled_x_train, y_train)
accuracy_oob.append(model_in_validation.oob_score_)
optimal_x_variables_rate = rf_x_variables_rates[accuracy_oob.index(max(accuracy_oob))]
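        # Worked example (hypothetical numbers): with 70 selected columns and a rate
        # of 0.3, max_features = max(ceil(70 * 0.3), 1) = 21 features per tree.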
        submodel = RandomForestClassifier(n_estimators=rf_number_of_trees,
                                          max_features=int(max(math.ceil(
                                              selected_autoscaled_x_train.shape[1] * optimal_x_variables_rate), 1)),
                                          oob_score=True)  # declare the RF model
    submodel.fit(selected_autoscaled_x_train, y_train)  # fit the model
submodels.append(submodel)
# save the selected explanatory variables of each sub-dataset and the sub-models; note that existing files with the same names are overwritten
pd.to_pickle(selected_x_variable_numbers, 'selected_x_variable_numbers.bin')
pd.to_pickle(submodels, 'submodels.bin')
# load the selected explanatory variables of each sub-dataset and the sub-models
# since they were saved just above, reloading here is not strictly necessary, but saving them
# lets you skip the model-building step later when predicting new samples
selected_x_variable_numbers = pd.read_pickle('selected_x_variable_numbers.bin')
submodels = pd.read_pickle('submodels.bin')
# estimate y for the test data
# estimated_y_test_all = pd.DataFrame()  # empty DataFrame collecting each sub-model's estimates of y for the test data
estimated_y_test_count = np.zeros([x_test.shape[0], len(class_types)])  # per class, count how many sub-models predicted it
for submodel_number in range(number_of_submodels):
    # selection of explanatory variables
selected_autoscaled_x_test = autoscaled_x_test.iloc[:, selected_x_variable_numbers[submodel_number]]
    # estimate y for the test data
    estimated_y_test = pd.DataFrame(
        submodels[submodel_number].predict(selected_autoscaled_x_test))  # estimate y for the test data and convert to a pandas DataFrame
    # estimated_y_test_all = pd.concat([estimated_y_test_all, estimated_y_test], axis=1)
for sample_number in range(estimated_y_test.shape[0]):
estimated_y_test_count[sample_number, class_types.index(estimated_y_test.iloc[sample_number, 0])] += 1
# for the test data, the number of sub-models that predicted each class
estimated_y_test_count = | pd.DataFrame(estimated_y_test_count, index=x_test.index, columns=class_types) | pandas.DataFrame |
import os
import random
import soundfile as sf
import torch
import yaml
import json
import argparse
import numpy as np
import pandas as pd
from tqdm import tqdm
from pprint import pprint
from asteroid import DCUNet
from asteroid.metrics import get_metrics
from asteroid.losses import PITLossWrapper, pairwise_neg_sisdr
from asteroid.data.bbcso_dataset import BBCSODataset
from asteroid.utils import tensors_to_device
from asteroid.models import save_publishable
from asteroid.dsp.normalization import normalize_estimates
parser = argparse.ArgumentParser()
parser.add_argument(
"--json_dir", type=str, required=True, help="directory including the wav source and mix files"
)
parser.add_argument("--n_src", type=int, default=2, help="Number of sources")
parser.add_argument("--batch_size", type=int, default=1, help="Batch size")
parser.add_argument(
"--use_gpu", type=int, default=1, help="Whether to use the GPU for model execution"
)
parser.add_argument("--exp_dir", default="exp/tmp", help="Experiment root")
parser.add_argument(
"--n_save_ex", type=int, default=-1, help="Number of audio examples to save, -1 means all"
)
compute_metrics = ["si_sdr", "sdr", "sir", "sar"]
def main(conf):
model_path = os.path.join(conf["exp_dir"], "best_model.pth")
#model = ConvTasNet.from_pretrained(model_path)
model = DCUNet.from_pretrained(model_path)
# Handle device placement
if conf["use_gpu"]:
model.cuda()
model_device = next(model.parameters()).device
test_set = BBCSODataset(
conf["json_dir"],
conf["n_src"],
conf["sample_rate"],
conf["batch_size"],
220500,
train = False
)
    # evaluation uses the full segment length
    # the PIT loss below is used only to reorder the estimated sources
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
# Randomly choose the indexes of sentences to save.
ex_save_dir = os.path.join(conf["exp_dir"], "examples/")
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf["n_save_ex"])
series_list = []
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources = tensors_to_device(test_set[idx], device=model_device)
mix = mix.unsqueeze(0)
sources = sources.unsqueeze(0)
est_sources = model(mix)
loss, reordered_sources = loss_func(est_sources, sources, return_est=True)
#mix_np = mix.squeeze(0).cpu().data.numpy()
mix_np = mix.cpu().data.numpy()
sources_np = sources.squeeze(0).cpu().data.numpy()
est_sources_np = reordered_sources.squeeze(0).cpu().data.numpy()
utt_metrics = get_metrics(
mix_np,
sources_np,
est_sources_np,
sample_rate=conf["sample_rate"],
metrics_list=compute_metrics,
)
#utt_metrics["mix_path"] = test_set.mix[idx][0]
series_list.append( | pd.Series(utt_metrics) | pandas.Series |
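    # A minimal hedged sketch of the usual aggregation step (the file is truncated
    # here, so these lines are assumptions, not part of the original source):
    # all_metrics_df = pd.DataFrame(series_list)
    # print(all_metrics_df[compute_metrics].mean())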
import unittest
import qteasy as qt
import pandas as pd
from pandas import Timestamp
import numpy as np
import math
from numpy import int64
import itertools
import datetime
from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list
from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day
from qteasy.utilfuncs import next_market_trade_day, unify, mask_to_signal, list_or_slice, labels_to_dict
from qteasy.utilfuncs import weekday_name, prev_market_trade_day, is_number_like, list_truncate, input_to_list
from qteasy.space import Space, Axis, space_around_centre, ResultPool
from qteasy.core import apply_loop
from qteasy.built_in import SelectingFinanceIndicator, TimingDMA, TimingMACD, TimingCDL, TimingTRIX
from qteasy.tsfuncs import income, indicators, name_change, get_bar
from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index
from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite
from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily
from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic, stock_company
from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv
from qteasy.evaluate import eval_info_ratio, eval_max_drawdown, eval_sharp
from qteasy.evaluate import eval_volatility
from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point
from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr
from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc
from qteasy.tafuncs import macdfix, mfi, minus_di, minus_dm, mom, plus_di, plus_dm
from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi
from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange
from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod
from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows
from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside
from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby
from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu
from qteasy.tafuncs import cdlconcealbabyswall, cdlcounterattack, cdldarkcloudcover
from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing
from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite
from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami
from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod
from qteasy.tafuncs import cdlhomingpigeon, cdlidentical3crows, cdlinneck
from qteasy.tafuncs import cdlinvertedhammer, cdlkicking, cdlkickingbylength
from qteasy.tafuncs import cdlladderbottom, cdllongleggeddoji, cdllongline, cdlmarubozu
from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar
from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlrisefall3methods
from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop
from qteasy.tafuncs import cdlstalledpattern, cdlsticksandwich, cdltakuri, cdltasukigap
from qteasy.tafuncs import cdlthrusting, cdltristar, cdlunique3river, cdlupsidegap2crows
from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle
from qteasy.tafuncs import linearreg_intercept, linearreg_slope, stddev, tsf, var, acos
from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh
from qteasy.tafuncs import sqrt, tan, tanh, add, div, max, maxindex, min, minindex, minmax
from qteasy.tafuncs import minmaxindex, mult, sub, sum
from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data
from qteasy.history import stack_dataframes, dataframe_to_hp, HistoryPanel
from qteasy.database import DataSource
from qteasy.strategy import Strategy, SimpleTiming, RollingTiming, SimpleSelecting, FactoralSelecting
from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs
from qteasy.blender import _exp_to_token, blender_parser, signal_blend
class TestCost(unittest.TestCase):
def setUp(self):
self.amounts = np.array([10000., 20000., 10000.])
self.op = np.array([0., 1., -0.33333333])
self.amounts_to_sell = np.array([0., 0., -3333.3333])
self.cash_to_spend = np.array([0., 20000., 0.])
self.prices = np.array([10., 20., 10.])
self.r = qt.Cost(0.0)
def test_rate_creation(self):
"""测试对象生成"""
print('testing rates objects\n')
self.assertIsInstance(self.r, qt.Cost, 'Type should be Rate')
self.assertEqual(self.r.buy_fix, 0)
self.assertEqual(self.r.sell_fix, 0)
def test_rate_operations(self):
"""测试交易费率对象"""
self.assertEqual(self.r['buy_fix'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['sell_fix'], 0.0, 'Item got is wrong')
self.assertEqual(self.r['buy_rate'], 0.003, 'Item got is incorrect')
self.assertEqual(self.r['sell_rate'], 0.001, 'Item got is incorrect')
self.assertEqual(self.r['buy_min'], 5., 'Item got is incorrect')
self.assertEqual(self.r['sell_min'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['slipage'], 0.0, 'Item got is incorrect')
self.assertEqual(np.allclose(self.r.calculate(self.amounts),
[0.003, 0.003, 0.003]),
True,
'fee calculation wrong')
def test_rate_fee(self):
"""测试买卖交易费率"""
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 0.
self.r.sell_min = 0.
self.r.slipage = 0.
print('\nSell result with fixed rate = 0.001 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect')
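        # Worked numbers: 3333.3333 shares * 10 = 33333.333 gross proceeds;
        # fee = 33333.333 * 0.001 = 33.3333; net cash received = 33299.99967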
print('\nSell result with fixed rate = 0.001 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1.))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect')
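        # Worked numbers: shares = 20000 / (20 * 1.003) = 997.00897;
        # fee = 20000 - 997.00897 * 20 = 59.8205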
print('\nPurchase result with fixed rate = 0.003 and moq = 1:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 1))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect')
def test_min_fee(self):
"""测试最低交易费用"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 300
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
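        # Worked numbers: the flat 300 minimum fee applies, so
        # shares = (20000 - 300) / 20 = 985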
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33033.333)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33030)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 32700)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
def test_rate_with_min(self):
"""测试最低交易费用对其他交易费率参数的影响"""
self.r.buy_rate = 0.0153
self.r.sell_rate = 0.01
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 333
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 301.3887520929774, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32999.99967)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.33333)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32996.7)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.3)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32667.0)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.0)
def test_fixed_fee(self):
"""测试固定交易费用"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 200
self.r.sell_fix = 150
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nselling result of fixed cost with fixed fee = 150 and moq=0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 0))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], 33183.333, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150.0, msg='result incorrect')
print('\nselling result of fixed cost with fixed fee = 150 and moq=100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
        self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3300.]), True,
                      f'result incorrect, {test_fixed_fee_result[0]} does not equal to [0, 0, -3300]')
self.assertAlmostEqual(test_fixed_fee_result[1], 32850., msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150., msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 990., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18200.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
def test_slipage(self):
"""测试交易滑点"""
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.slipage = 1E-9
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
print('\nselling result with fixed rate = 0.001 and slipage = 1E-10:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
        self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True,
                      f'{test_fixed_fee_result[0]} does not equal to [0, 0, -3333.3333]')
        self.assertAlmostEqual(test_fixed_fee_result[1], 33298.88855591,
                               msg=f'{test_fixed_fee_result[1]} does not equal to 33298.88855591.')
        self.assertAlmostEqual(test_fixed_fee_result[2], 34.44444409,
                               msg=f'{test_fixed_fee_result[2]} does not equal to 34.44444409.')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 996.98909294, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 60.21814121353513, msg='result incorrect')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18054.36, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 54.36, msg='result incorrect')
class TestSpace(unittest.TestCase):
def test_creation(self):
"""
test if creation of space object is fine
"""
# first group of inputs, output Space with two discr axis from [0,10]
print('testing space objects\n')
# pars_list = [[(0, 10), (0, 10)],
# [[0, 10], [0, 10]]]
#
# types_list = ['discr',
# ['discr', 'discr']]
#
# input_pars = itertools.product(pars_list, types_list)
# for p in input_pars:
# # print(p)
# s = qt.Space(*p)
# b = s.boes
# t = s.types
# # print(s, t)
# self.assertIsInstance(s, qt.Space)
# self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
# self.assertEqual(t, ['discr', 'discr'], 'types incorrect')
#
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = ['foo, bar',
['foo', 'bar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['enum', 'enum'], 'types incorrect')
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = [['discr', 'foobar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['discr', 'enum'], 'types incorrect')
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types='conti, enum')
self.assertEqual(s.types, ['conti', 'enum'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 2))
self.assertEqual(s.shape, (np.inf, 2))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(1, 2), (2, 3), (3, 4)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['discr', 'discr', 'discr'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (2, 2, 2))
self.assertEqual(s.shape, (2, 2, 2))
self.assertEqual(s.count, 8)
self.assertEqual(s.boes, [(1, 2), (2, 3), (3, 4)])
pars_list = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
pars_list = [((1, 2, 3), (2, 3, 4), (3, 4, 5))]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum'])
self.assertEqual(s.dim, 1)
self.assertEqual(s.size, (3,))
self.assertEqual(s.shape, (3,))
self.assertEqual(s.count, 3)
pars_list = ((1, 2, 3), (2, 3, 4), (3, 4, 5))
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
def test_extract(self):
"""
:return:
"""
pars_list = [(0, 10), (0, 10)]
types_list = ['discr', 'discr']
s = Space(pars=pars_list, par_types=types_list)
extracted_int, count = s.extract(3, 'interval')
extracted_int_list = list(extracted_int)
print('extracted int\n', extracted_int_list)
self.assertEqual(count, 16, 'extraction count wrong!')
self.assertEqual(extracted_int_list, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
extracted_rand, count = s.extract(10, 'rand')
extracted_rand_list = list(extracted_rand)
self.assertEqual(count, 10, 'extraction count wrong!')
print('extracted rand\n', extracted_rand_list)
for point in list(extracted_rand_list):
self.assertEqual(len(point), 2)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
extracted_int2, count = s.extract(3, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list2 = list(extracted_int2)
self.assertEqual(extracted_int_list2, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
print('extracted int list 2\n', extracted_int_list2)
self.assertIsInstance(extracted_int_list2[0][0], float)
self.assertIsInstance(extracted_int_list2[0][1], (int, int64))
extracted_rand2, count = s.extract(10, 'rand')
self.assertEqual(count, 10, 'extraction count wrong!')
extracted_rand_list2 = list(extracted_rand2)
print('extracted rand list 2:\n', extracted_rand_list2)
for point in extracted_rand_list2:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], float)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], (int, int64))
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), ('a', 'b')]
s = Space(pars=pars_list, par_types='enum, enum')
extracted_int3, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list3 = list(extracted_int3)
self.assertEqual(extracted_int_list3, [(0., 'a'), (0., 'b'), (10, 'a'), (10, 'b')],
'space extraction wrong!')
print('extracted int list 3\n', extracted_int_list3)
self.assertIsInstance(extracted_int_list3[0][0], float)
self.assertIsInstance(extracted_int_list3[0][1], str)
extracted_rand3, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list3 = list(extracted_rand3)
print('extracted rand list 3:\n', extracted_rand_list3)
for point in extracted_rand_list3:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (float, int))
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], str)
self.assertIn(point[1], ['a', 'b'])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14))]
s = Space(pars=pars_list, par_types='enum')
extracted_int4, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list4 = list(extracted_int4)
        expected4 = [(0, 10), (1, 'c'), ('a', 'b'), (1, 14)]
        for item, item2 in zip(extracted_int_list4, expected4):
            print(item, item2)
        # a zip iterator is exhausted by the loop above, so build a fresh one for the check
        self.assertTrue(all([tuple(ext_item) == item
                             for ext_item, item in zip(extracted_int_list4, expected4)]))
print('extracted int list 4\n', extracted_int_list4)
self.assertIsInstance(extracted_int_list4[0], tuple)
extracted_rand4, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list4 = list(extracted_rand4)
print('extracted rand list 4:\n', extracted_rand_list4)
for point in extracted_rand_list4:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (int, str))
self.assertIn(point[0], [0, 1, 'a'])
self.assertIsInstance(point[1], (int, str))
self.assertIn(point[1], [10, 14, 'b', 'c'])
self.assertIn(point, [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14)), (1, 4)]
s = Space(pars=pars_list, par_types='enum, discr')
extracted_int5, count = s.extract(1, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list5 = list(extracted_int5)
        for item, item2 in extracted_int_list5:
            print(item, item2)
        # each point pairs one value of the enum axis with one value of the discr axis (1, 4)
        self.assertTrue(all([(item in [(0, 10), (1, 'c'), ('a', 'b'), (1, 14)]) and (1 <= item2 <= 4)
                             for item, item2 in extracted_int_list5]))
print('extracted int list 5\n', extracted_int_list5)
self.assertIsInstance(extracted_int_list5[0], tuple)
extracted_rand5, count = s.extract(5, 'rand')
self.assertEqual(count, 5, 'extraction count wrong!')
extracted_rand_list5 = list(extracted_rand5)
print('extracted rand list 5:\n', extracted_rand_list5)
for point in extracted_rand_list5:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], tuple)
print(f'type of point[1] is {type(point[1])}')
self.assertIsInstance(point[1], (int, np.int64))
self.assertIn(point[0], [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
print(f'test incremental extraction')
pars_list = [(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]
s = Space(pars_list)
ext, count = s.extract(64, 'interval')
self.assertEqual(count, 4096)
points = list(ext)
        # all points have been extracted; build ten subspaces around 10 of them
        # check that each subspace is a Space and lies inside s; extract with 32 and verify the point count
for point in points[1000:1010]:
subspace = s.from_point(point, 64)
self.assertIsInstance(subspace, Space)
self.assertTrue(subspace in s)
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
ext, count = subspace.extract(32)
points = list(ext)
self.assertGreaterEqual(count, 512)
self.assertLessEqual(count, 4096)
print(f'\n---------------------------------'
f'\nthe space created around point <{point}> is'
f'\n{subspace.boes}'
f'\nand extracted {count} points, the first 5 are:'
f'\n{points[:5]}')
def test_axis_extract(self):
# test axis object with conti type
axis = Axis((0., 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'conti')
self.assertEqual(axis.axis_boe, (0., 5.))
self.assertEqual(axis.count, np.inf)
self.assertEqual(axis.size, 5.0)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [0., 1., 2., 3., 4.]))
self.assertTrue(np.allclose(axis.extract(0.5, 'int'), [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]))
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(0 <= item <= 5) for item in extracted]))
# test axis object with discrete type
axis = Axis((1, 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'discr')
self.assertEqual(axis.axis_boe, (1, 5))
self.assertEqual(axis.count, 5)
self.assertEqual(axis.size, 5)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [1, 2, 3, 4, 5]))
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 2, 3, 4, 5]) for item in extracted]))
# test axis object with enumerate type
axis = Axis((1, 5, 7, 10, 'A', 'F'))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'enum')
self.assertEqual(axis.axis_boe, (1, 5, 7, 10, 'A', 'F'))
self.assertEqual(axis.count, 6)
self.assertEqual(axis.size, 6)
self.assertEqual(axis.extract(1, 'int'), [1, 5, 7, 10, 'A', 'F'])
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 5, 7, 10, 'A', 'F']) for item in extracted]))
def test_from_point(self):
"""测试从一个点生成一个space"""
# 生成一个space,指定space中的一个点以及distance,生成一个sub-space
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10., 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
print('create subspace from a point in space')
p = (3, 3)
distance = 2
subspace = s.from_point(p, distance)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'discr'])
self.assertEqual(subspace.dim, 2)
self.assertEqual(subspace.size, (4.0, 5))
self.assertEqual(subspace.shape, (np.inf, 5))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(1, 5), (1, 5)])
print('create subspace from a 6 dimensional discrete space')
s = Space(pars=[(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 65345616)
self.assertEqual(subspace.size, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.shape, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.count, 65345616)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
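        # Worked numbers: the subspace holds 16 * 21 ** 5 = 65,345,616 grid points
        # (16 values on the first axis, 21 on each of the other five)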
print('create subspace from a 6 dimensional continuous space')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 48000000)
self.assertEqual(subspace.size, (15.0, 20.0, 20.0, 20.0, 20.0, 20.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace with different distances on each dimension')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = [10, 5, 5, 10, 10, 5]
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 6000000)
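        # Worked numbers: volume = 15 * 10 * 10 * 20 * 20 * 10 = 6,000,000,
        # the product of the per-dimension widths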
self.assertEqual(subspace.size, (15.0, 10.0, 10.0, 20.0, 20.0, 10.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (195, 205), (145, 155), (140, 160), (140, 160), (145, 155)])
class TestCashPlan(unittest.TestCase):
def setUp(self):
self.cp1 = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1)
self.cp1.info()
self.cp2 = qt.CashPlan(['20100501'], 10000)
self.cp2.info()
self.cp3 = qt.CashPlan(pd.date_range(start='2019-01-01',
freq='Y',
periods=12),
[i * 1000 + 10000 for i in range(12)],
0.035)
self.cp3.info()
def test_creation(self):
self.assertIsInstance(self.cp1, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp2, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp3, qt.CashPlan, 'CashPlan object creation wrong')
# test __repr__()
print(self.cp1)
print(self.cp2)
print(self.cp3)
# test __str__()
self.cp1.info()
self.cp2.info()
self.cp3.info()
        # test assertion errors
self.assertRaises(AssertionError, qt.CashPlan, '2016-01-01', [10000, 10000])
self.assertRaises(KeyError, qt.CashPlan, '2020-20-20', 10000)
def test_properties(self):
self.assertEqual(self.cp1.amounts, [20000, 10000], 'property wrong')
self.assertEqual(self.cp1.first_day, Timestamp('2010-01-01'))
self.assertEqual(self.cp1.last_day, Timestamp('2012-01-01'))
self.assertEqual(self.cp1.investment_count, 2)
self.assertEqual(self.cp1.period, 730)
self.assertEqual(self.cp1.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01')])
self.assertEqual(self.cp1.ir, 0.1)
self.assertAlmostEqual(self.cp1.closing_value, 34200)
self.assertAlmostEqual(self.cp2.closing_value, 10000)
self.assertAlmostEqual(self.cp3.closing_value, 220385.3483685)
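        # Worked compounding example, assuming annual compounding at ir from each
        # investment date to the last day:
        # cp1: 20000 * 1.1 ** 2 + 10000 = 24200 + 10000 = 34200
        # cp2: a single investment made on its own last day accrues nothing: 10000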
self.assertIsInstance(self.cp1.plan, pd.DataFrame)
self.assertIsInstance(self.cp2.plan, pd.DataFrame)
self.assertIsInstance(self.cp3.plan, pd.DataFrame)
def test_operation(self):
cp_self_add = self.cp1 + self.cp1
cp_add = self.cp1 + self.cp2
cp_add_int = self.cp1 + 10000
cp_mul_int = self.cp1 * 2
cp_mul_float = self.cp2 * 1.5
cp_mul_time = 3 * self.cp2
cp_mul_time2 = 2 * self.cp1
cp_mul_time3 = 2 * self.cp3
cp_mul_float2 = 2. * self.cp3
self.assertIsInstance(cp_self_add, qt.CashPlan)
self.assertEqual(cp_self_add.amounts, [40000, 20000])
self.assertEqual(cp_add.amounts, [20000, 10000, 10000])
self.assertEqual(cp_add_int.amounts, [30000, 20000])
self.assertEqual(cp_mul_int.amounts, [40000, 20000])
self.assertEqual(cp_mul_float.amounts, [15000])
self.assertEqual(cp_mul_float.dates, [Timestamp('2010-05-01')])
self.assertEqual(cp_mul_time.amounts, [10000, 10000, 10000])
self.assertEqual(cp_mul_time.dates, [Timestamp('2010-05-01'),
Timestamp('2011-05-01'),
Timestamp('2012-04-30')])
self.assertEqual(cp_mul_time2.amounts, [20000, 10000, 20000, 10000])
self.assertEqual(cp_mul_time2.dates, [Timestamp('2010-01-01'),
Timestamp('2012-01-01'),
Timestamp('2014-01-01'),
Timestamp('2016-01-01')])
self.assertEqual(cp_mul_time3.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31'),
Timestamp('2031-12-29'),
Timestamp('2032-12-29'),
Timestamp('2033-12-29'),
Timestamp('2034-12-29'),
Timestamp('2035-12-29'),
Timestamp('2036-12-29'),
Timestamp('2037-12-29'),
Timestamp('2038-12-29'),
Timestamp('2039-12-29'),
Timestamp('2040-12-29'),
Timestamp('2041-12-29'),
Timestamp('2042-12-29')])
self.assertEqual(cp_mul_float2.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31')])
self.assertEqual(cp_mul_float2.amounts, [20000.0,
22000.0,
24000.0,
26000.0,
28000.0,
30000.0,
32000.0,
34000.0,
36000.0,
38000.0,
40000.0,
42000.0])
class TestPool(unittest.TestCase):
def setUp(self):
self.p = ResultPool(5)
self.items = ['first', 'second', (1, 2, 3), 'this', 24]
self.perfs = [1, 2, 3, 4, 5]
self.additional_result1 = ('abc', 12)
self.additional_result2 = ([1, 2], -1)
self.additional_result3 = (12, 5)
def test_create(self):
self.assertIsInstance(self.p, ResultPool)
def test_operation(self):
self.p.in_pool(self.additional_result1[0], self.additional_result1[1])
self.p.cut()
self.assertEqual(self.p.item_count, 1)
self.assertEqual(self.p.items, ['abc'])
for item, perf in zip(self.items, self.perfs):
self.p.in_pool(item, perf)
self.assertEqual(self.p.item_count, 6)
self.assertEqual(self.p.items, ['abc', 'first', 'second', (1, 2, 3), 'this', 24])
self.p.cut()
self.assertEqual(self.p.items, ['second', (1, 2, 3), 'this', 24, 'abc'])
self.assertEqual(self.p.perfs, [2, 3, 4, 5, 12])
self.p.in_pool(self.additional_result2[0], self.additional_result2[1])
self.p.in_pool(self.additional_result3[0], self.additional_result3[1])
self.assertEqual(self.p.item_count, 7)
self.p.cut(keep_largest=False)
self.assertEqual(self.p.items, [[1, 2], 'second', (1, 2, 3), 'this', 24])
self.assertEqual(self.p.perfs, [-1, 2, 3, 4, 5])
class TestCoreSubFuncs(unittest.TestCase):
"""Test all functions in core.py"""
def setUp(self):
pass
def test_input_to_list(self):
print('Testing input_to_list() function')
input_str = 'first'
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 3), ['first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 4), ['first', 'first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 2, None), ['first', 'first'])
input_list = ['first', 'second']
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 3), ['first', 'second', None])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 4, 'padder'), ['first', 'second', 'padder', 'padder'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 1), ['first', 'second'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, -5), ['first', 'second'])
def test_point_in_space(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
p2 = (-1, 3, 10)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
self.assertFalse(p2 in sp)
print(f'point {p2} is not in space {sp}')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)], 'conti, conti, enum')
p1 = (5.5, 3.2, 8)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
def test_space_in_space(self):
print('test if a space is in another space')
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
sp2 = Space([(0., 10.), (0., 10.), (0., 10.)])
self.assertTrue(sp2 in sp)
self.assertTrue(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is in space {sp2}\n'
f'they are equal to each other\n')
sp2 = Space([(0, 5.), (2, 7.), (3., 9.)])
self.assertTrue(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'{sp2} is a sub space of {sp}\n')
sp2 = Space([(0, 5), (2, 7), (3., 9)])
self.assertFalse(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)])
self.assertFalse(sp in sp2)
self.assertFalse(sp2 in sp)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
def test_space_around_centre(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
ssp = space_around_centre(space=sp, centre=p1, radius=1.2)
print(ssp.boes)
print('\ntest multiple diameters:')
self.assertEqual(ssp.boes, [(4.3, 6.7), (2.0, 4.4), (5.8, 8.2)])
ssp = space_around_centre(space=sp, centre=p1, radius=[1, 2, 1])
print(ssp.boes)
self.assertEqual(ssp.boes, [(4.5, 6.5), (1.2000000000000002, 5.2), (6.0, 8.0)])
print('\ntest points on edge:')
p2 = (5.5, 3.2, 10)
ssp = space_around_centre(space=sp, centre=p1, radius=3.9)
print(ssp.boes)
self.assertEqual(ssp.boes, [(1.6, 9.4), (0.0, 7.1), (3.1, 10.0)])
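        # Worked numbers: centre (5.5, 3.2, 7) with radius 3.9 gives (1.6, 9.4);
        # 3.2 - 3.9 = -0.7 is clipped to 0.0 and 7 + 3.9 = 10.9 is clipped to 10.0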
print('\ntest enum spaces')
sp = Space([(0, 100), range(40, 3, -2)], 'discr, enum')
p1 = [34, 12]
ssp = space_around_centre(space=sp, centre=p1, radius=5, ignore_enums=False)
self.assertEqual(ssp.boes, [(29, 39), (22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(ssp.boes)
print('\ntest enum space and ignore enum axis')
ssp = space_around_centre(space=sp, centre=p1, radius=5)
self.assertEqual(ssp.boes, [(29, 39),
(40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(sp.boes)
def test_get_stock_pool(self):
print(f'start test building stock pool function\n')
share_basics = stock_basic(fields='ts_code,symbol,name,area,industry,market,list_date,exchange')
print(f'\nselect all stocks by area')
stock_pool = qt.get_stock_pool(area='上海')
print(f'{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "上海"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].eq('上海').all())
print(f'\nselect all stocks by multiple areas')
stock_pool = qt.get_stock_pool(area='贵州,北京,天津')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are in list of ["贵州", "北京", "天津"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['贵州',
'北京',
'天津']).all())
print(f'\nselect all stocks by area and industry')
stock_pool = qt.get_stock_pool(area='四川', industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "四川", and industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['四川']).all())
print(f'\nselect all stocks by industry')
stock_pool = qt.get_stock_pool(industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stocks industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
print(f'\nselect all stocks by market')
stock_pool = qt.get_stock_pool(market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock market is "主板"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
print(f'\nselect all stocks by market and list date')
stock_pool = qt.get_stock_pool(date='2000-01-01', market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock market is "主板", and list date after "2000-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('2000-01-01').all())
print(f'\nselect all stocks by list date')
stock_pool = qt.get_stock_pool(date='1997-01-01')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all list dates are on or before "1997-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1997-01-01').all())
print(f'\nselect all stocks by exchange')
stock_pool = qt.get_stock_pool(exchange='SSE')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['exchange'].eq('SSE').all())
print(f'\nselect all stocks by industry, area and list date')
industry_list = ['银行', '全国地产', '互联网', '环境保护', '区域地产',
'酒店餐饮', '运输设备', '综合类', '建筑工程', '玻璃',
'家用电器', '文教休闲', '其他商业', '元器件', 'IT设备',
'其他建材', '汽车服务', '火力发电', '医药商业', '汽车配件',
'广告包装', '轻工机械', '新型电力', '多元金融', '饲料']
area_list = ['深圳', '北京', '吉林', '江苏', '辽宁', '广东',
'安徽', '四川', '浙江', '湖南', '河北', '新疆',
'山东', '河南', '山西', '江西', '青海', '湖北',
'内蒙', '海南', '重庆', '陕西', '福建', '广西',
'上海']
stock_pool = qt.get_stock_pool(date='19980101',
industry=industry_list,
area=area_list)
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1998-01-01').all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(industry_list).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(area_list).all())
self.assertRaises(KeyError, qt.get_stock_pool, industry=25)
self.assertRaises(KeyError, qt.get_stock_pool, share_name='000300.SH')
self.assertRaises(KeyError, qt.get_stock_pool, markets='SSE')
class TestEvaluations(unittest.TestCase):
"""Test all evaluation functions in core.py"""
# the manually calculated expected results below are kept in an Excel file
def setUp(self):
"""用np.random生成测试用数据,使用cumsum()模拟股票走势"""
self.test_data1 = pd.DataFrame([5.34892759, 5.65768696, 5.79227076, 5.56266871, 5.88189632,
6.24795001, 5.92755558, 6.38748165, 6.31331899, 5.86001665,
5.61048472, 5.30696736, 5.40406792, 5.03180571, 5.37886353,
5.78608307, 6.26540339, 6.59348026, 6.90943801, 6.70911677,
6.33015954, 6.06697417, 5.9752499, 6.45786408, 6.95273763,
6.7691991, 6.70355481, 6.28048969, 6.61344541, 6.24620003,
6.47409983, 6.4522311, 6.8773094, 6.99727832, 6.59262674,
6.59014938, 6.63758237, 6.38331869, 6.09902105, 6.35390109,
6.51993567, 6.87244592, 6.83963485, 7.08797815, 6.88003144,
6.83657323, 6.97819483, 7.01600276, 7.12554256, 7.58941523,
7.61014457, 7.21224091, 7.48174399, 7.66490854, 7.51371968,
7.11586198, 6.97147399, 6.67453301, 6.2042138, 6.33967015,
6.22187938, 5.98426993, 6.37096079, 6.55897161, 6.26422645,
6.69363762, 7.12668015, 6.83232926, 7.30524081, 7.4262041,
7.54031383, 7.17545919, 7.20659257, 7.44886016, 7.37094393,
6.88011022, 7.08142491, 6.74992833, 6.5967097, 6.21336693,
6.35565105, 6.82347596, 6.44773408, 6.84538053, 6.47966466,
6.09699528, 5.63927014, 6.01081024, 6.20585303, 6.60528206,
7.01594726, 7.03684251, 6.76574977, 7.08740846, 6.65336462,
7.07126686, 6.80058956, 6.79241977, 6.47843472, 6.39245474],
columns=['value'])
self.test_data2 = pd.DataFrame([5.09276527, 4.83828592, 4.6000911, 4.63170487, 4.63566451,
4.50546921, 4.96390044, 4.64557907, 4.25787855, 3.76585551,
3.38826334, 3.76243422, 4.06365426, 3.87084726, 3.91400935,
4.13438822, 4.27064542, 4.56776104, 5.03800296, 5.31070529,
5.39902276, 5.21186286, 5.05683114, 4.68842046, 5.11895168,
5.27151571, 5.72294993, 6.09961056, 6.26569635, 6.48806151,
6.16058885, 6.2582459, 6.38934791, 6.57831057, 6.19508831,
5.70155153, 5.20435735, 5.36538825, 5.40450056, 5.2227697,
5.37828693, 5.53058991, 6.02996797, 5.76802181, 5.66166713,
6.07988994, 5.61794367, 5.63218151, 6.10728013, 6.0324168,
6.27164431, 6.27551239, 6.52329665, 7.00470007, 7.34163113,
7.33699083, 7.67661334, 8.09395749, 7.68086668, 7.58341161,
7.46219819, 7.58671899, 7.19348298, 7.40088323, 7.47562005,
7.93342043, 8.2286081, 8.3521632, 8.43590025, 8.34977395,
8.57563095, 8.81586328, 9.08738649, 9.01542031, 8.8653815,
9.21763111, 9.04233017, 8.59533999, 8.47590075, 8.70857222,
8.78890756, 8.92697606, 9.35743773, 9.68280866, 10.15622021,
10.55908549, 10.6337894, 10.55197128, 10.65435176, 10.54611045,
10.19432562, 10.48320884, 10.36176768, 10.03186854, 10.23656092,
10.0062843, 10.13669686, 10.30758958, 9.87904176, 10.05126375],
columns=['value'])
self.test_data3 = pd.DataFrame([5.02851874, 5.20700348, 5.02410709, 5.49836387, 5.06834371,
5.10956737, 5.15314979, 5.02256472, 5.09746382, 5.23909247,
4.93410336, 4.96316186, 5.40026682, 5.7353255, 5.53438319,
5.79092139, 5.67528173, 5.89840855, 5.75379463, 6.10855386,
5.77322365, 5.84538021, 5.6103973, 5.7518655, 5.49729695,
5.13610628, 5.30524121, 5.68093462, 5.73251319, 6.04420783,
6.26929843, 6.59610234, 6.09872345, 6.25475121, 6.72927396,
6.91395783, 7.00693283, 7.36217783, 7.71516676, 7.67580263,
7.62477511, 7.73600568, 7.53457914, 7.46170277, 7.83658014,
8.11481319, 8.03705544, 7.64948845, 7.52043731, 7.67247943,
7.46511982, 7.43541798, 7.58856517, 7.9392717, 8.25406287,
7.77031632, 8.03223447, 7.86799055, 7.57630999, 7.33230519,
7.22378732, 6.85972264, 7.17548456, 7.5387846, 7.2392632,
6.8455644, 6.59557185, 6.6496796, 6.73685623, 7.18598015,
7.13619128, 6.88060157, 7.1399681, 7.30308077, 6.94942434,
7.0247815, 7.37567798, 7.50080197, 7.59719284, 7.14520561,
7.29913484, 7.79551341, 8.15497781, 8.40456095, 8.86516528,
8.53042688, 8.94268762, 8.52048006, 8.80036284, 8.91602364,
9.19953385, 8.70828953, 8.24613093, 8.18770453, 7.79548389,
7.68627967, 7.23205036, 6.98302636, 7.06515819, 6.95068113],
columns=['value'])
self.test_data4 = pd.DataFrame([4.97926539, 5.44016005, 5.45122915, 5.74485615, 5.45600553,
5.44858945, 5.2435413, 5.47315161, 5.58464303, 5.36179749,
5.38236326, 5.29614981, 5.76523508, 5.75102892, 6.15316618,
6.03852528, 6.01442228, 5.70510182, 5.22748133, 5.46762379,
5.78926267, 5.8221362, 5.61236849, 5.30615725, 5.24200611,
5.41042642, 5.59940342, 5.28306781, 4.99451932, 5.08799266,
5.38865647, 5.58229139, 5.33492845, 5.48206276, 5.09721379,
5.39190493, 5.29965087, 5.0374415, 5.50798022, 5.43107577,
5.22759507, 4.991809, 5.43153084, 5.39966868, 5.59916352,
5.66412137, 6.00611838, 5.63564902, 5.66723484, 5.29863863,
4.91115153, 5.3749929, 5.75082334, 6.08308148, 6.58091182,
6.77848803, 7.19588758, 7.64862286, 7.99818347, 7.91824794,
8.30341071, 8.45984973, 7.98700002, 8.18924931, 8.60755649,
8.66233396, 8.91018407, 9.0782739, 9.33515448, 8.95870245,
8.98426422, 8.50340317, 8.64916085, 8.93592407, 8.63145745,
8.65322862, 8.39543204, 8.37969997, 8.23394504, 8.04062872,
7.91259763, 7.57252171, 7.72670114, 7.74486117, 8.06908188,
7.99166889, 7.92155906, 8.39956136, 8.80181323, 8.47464091,
8.06557064, 7.87145573, 8.0237959, 8.39481998, 8.68525692,
8.81185461, 8.98632237, 9.0989835, 8.89787405, 8.86508591],
columns=['value'])
self.test_data5 = pd.DataFrame([4.50258923, 4.35142568, 4.07459514, 3.87791297, 3.73715985,
3.98455684, 4.07587908, 4.00042472, 4.28276612, 4.01362051,
4.13713565, 4.49312372, 4.48633159, 4.4641207, 4.13444605,
3.79107217, 4.22941629, 4.56548511, 4.92472163, 5.27723158,
5.67409193, 6.00176917, 5.88889928, 5.55256103, 5.39308314,
5.2610492, 5.30738908, 5.22222408, 4.90332238, 4.57499908,
4.96097146, 4.81531011, 4.39115442, 4.63200662, 5.04588813,
4.67866025, 5.01705123, 4.83562258, 4.60381702, 4.66187576,
4.41292828, 4.86604507, 4.42280124, 4.07517294, 4.16317319,
4.10316596, 4.42913598, 4.06609666, 3.96725913, 4.15965746,
4.12379564, 4.04054068, 3.84342851, 3.45902867, 3.17649855,
3.09773586, 3.5502119, 3.66396995, 3.66306483, 3.29131401,
2.79558533, 2.88319542, 3.03671098, 3.44645857, 3.88167161,
3.57961874, 3.60180276, 3.96702102, 4.05429995, 4.40056979,
4.05653231, 3.59600456, 3.60792477, 4.09989922, 3.73503663,
4.01892626, 3.94597242, 3.81466605, 3.71417992, 3.93767156,
4.42806557, 4.06988106, 4.03713636, 4.34408673, 4.79810156,
5.18115011, 4.89798406, 5.3960077, 5.72504875, 5.61894017,
5.1958197, 4.85275896, 5.17550207, 4.71548987, 4.62408567,
4.55488535, 4.36532649, 4.26031979, 4.25225607, 4.58627048],
columns=['value'])
self.test_data6 = pd.DataFrame([5.08639513, 5.05761083, 4.76160923, 4.62166504, 4.62923183,
4.25070173, 4.13447513, 3.90890013, 3.76687608, 3.43342482,
3.67648224, 3.6274775, 3.9385404, 4.39771627, 4.03199346,
3.93265288, 3.50059789, 3.3851961, 3.29743973, 3.2544872,
2.93692949, 2.70893003, 2.55461976, 2.20922332, 2.29054475,
2.2144714, 2.03726827, 2.39007617, 2.29866155, 2.40607111,
2.40440444, 2.79374649, 2.66541922, 2.27018079, 2.08505127,
2.55478864, 2.22415625, 2.58517923, 2.58802256, 2.94870959,
2.69301739, 2.19991535, 2.69473146, 2.64704637, 2.62753542,
2.14240825, 2.38565154, 1.94592117, 2.32243877, 2.69337246,
2.51283854, 2.62484451, 2.15559054, 2.35410875, 2.31219177,
1.96018265, 2.34711266, 2.58083322, 2.40290041, 2.20439791,
2.31472425, 2.16228248, 2.16439749, 2.20080737, 1.73293206,
1.9264407, 2.25089861, 2.69269101, 2.59296687, 2.1420998,
1.67819153, 1.98419023, 2.14479494, 1.89055376, 1.96720648,
1.9916694, 2.37227761, 2.14446036, 2.34573903, 1.86162546,
2.1410721, 2.39204939, 2.52529064, 2.47079939, 2.9299031,
3.09452923, 2.93276708, 3.21731309, 3.06248964, 2.90413406,
2.67844632, 2.45621213, 2.41463398, 2.7373913, 3.14917045,
3.4033949, 3.82283446, 4.02285451, 3.7619638, 4.10346795],
columns=['value'])
self.test_data7 = pd.DataFrame([4.75233583, 4.47668283, 4.55894263, 4.61765848, 4.622892,
4.58941116, 4.32535872, 3.88112797, 3.47237806, 3.50898953,
3.82530406, 3.6718017, 3.78918195, 4.1800752, 4.01818557,
4.40822582, 4.65474654, 4.89287256, 4.40879274, 4.65505126,
4.36876403, 4.58418934, 4.75687172, 4.3689799, 4.16126498,
4.0203982, 3.77148242, 3.38198096, 3.07261764, 2.9014741,
2.5049543, 2.756105, 2.28779058, 2.16986991, 1.8415962,
1.83319008, 2.20898291, 2.00128981, 1.75747025, 1.26676663,
1.40316876, 1.11126484, 1.60376367, 1.22523829, 1.58816681,
1.49705679, 1.80244138, 1.55128293, 1.35339409, 1.50985759,
1.0808451, 1.05892796, 1.43414812, 1.43039101, 1.73631655,
1.43940867, 1.82864425, 1.71088265, 2.12015154, 2.45417128,
2.84777618, 2.7925612, 2.90975121, 3.25920745, 3.13801182,
3.52733677, 3.65468491, 3.69395211, 3.49862035, 3.24786017,
3.64463138, 4.00331929, 3.62509565, 3.78013949, 3.4174012,
3.76312271, 3.62054004, 3.67206716, 3.60596058, 3.38636199,
3.42580676, 3.32921095, 3.02976759, 3.28258676, 3.45760838,
3.24917528, 2.94618304, 2.86980011, 2.63191259, 2.39566759,
2.53159917, 2.96273967, 3.25626185, 2.97425402, 3.16412191,
3.58280763, 3.23257727, 3.62353556, 3.12806399, 2.92532313],
columns=['value'])
# build a test series of 500 data points to test the evaluation process when there are more than 250 data points
self.long_data = pd.DataFrame([9.879, 9.916, 10.109, 10.214, 10.361, 10.768, 10.594, 10.288,
10.082, 9.994, 10.125, 10.126, 10.384, 10.734, 10.4, 10.87,
11.338, 11.061, 11.415, 11.724, 12.077, 12.196, 12.064, 12.423,
12.19, 11.729, 11.677, 11.448, 11.485, 10.989, 11.242, 11.239,
11.113, 11.075, 11.471, 11.745, 11.754, 11.782, 12.079, 11.97,
12.178, 11.95, 12.438, 12.612, 12.804, 12.952, 12.612, 12.867,
12.832, 12.832, 13.015, 13.315, 13.249, 12.904, 12.776, 12.64,
12.543, 12.287, 12.225, 11.844, 11.985, 11.945, 11.542, 11.871,
12.245, 12.228, 12.362, 11.899, 11.962, 12.374, 12.816, 12.649,
12.252, 12.579, 12.3, 11.988, 12.177, 12.312, 12.744, 12.599,
12.524, 12.82, 12.67, 12.876, 12.986, 13.271, 13.606, 13.82,
14.161, 13.833, 13.831, 14.137, 13.705, 13.414, 13.037, 12.759,
12.642, 12.948, 13.297, 13.483, 13.836, 14.179, 13.709, 13.655,
13.198, 13.508, 13.953, 14.387, 14.043, 13.987, 13.561, 13.391,
12.923, 12.555, 12.503, 12.292, 11.877, 12.34, 12.141, 11.687,
11.992, 12.458, 12.131, 11.75, 11.739, 11.263, 11.762, 11.976,
11.578, 11.854, 12.136, 12.422, 12.311, 12.56, 12.879, 12.861,
12.973, 13.235, 13.53, 13.531, 13.137, 13.166, 13.31, 13.103,
13.007, 12.643, 12.69, 12.216, 12.385, 12.046, 12.321, 11.9,
11.772, 11.816, 11.871, 11.59, 11.518, 11.94, 11.803, 11.924,
12.183, 12.136, 12.361, 12.406, 11.932, 11.684, 11.292, 11.388,
11.874, 12.184, 12.002, 12.16, 11.741, 11.26, 11.123, 11.534,
11.777, 11.407, 11.275, 11.679, 11.62, 11.218, 11.235, 11.352,
11.366, 11.061, 10.661, 10.582, 10.899, 11.352, 11.792, 11.475,
11.263, 11.538, 11.183, 10.936, 11.399, 11.171, 11.214, 10.89,
10.728, 11.191, 11.646, 11.62, 11.195, 11.178, 11.18, 10.956,
11.205, 10.87, 11.098, 10.639, 10.487, 10.507, 10.92, 10.558,
10.119, 9.882, 9.573, 9.515, 9.845, 9.852, 9.495, 9.726,
10.116, 10.452, 10.77, 11.225, 10.92, 10.824, 11.096, 11.542,
11.06, 10.568, 10.585, 10.884, 10.401, 10.068, 9.964, 10.285,
10.239, 10.036, 10.417, 10.132, 9.839, 9.556, 9.084, 9.239,
9.304, 9.067, 8.587, 8.471, 8.007, 8.321, 8.55, 9.008,
9.138, 9.088, 9.434, 9.156, 9.65, 9.431, 9.654, 10.079,
10.411, 10.865, 10.51, 10.205, 10.519, 10.367, 10.855, 10.642,
10.298, 10.622, 10.173, 9.792, 9.995, 9.904, 9.771, 9.597,
9.506, 9.212, 9.688, 10.032, 9.723, 9.839, 9.918, 10.332,
10.236, 9.989, 10.192, 10.685, 10.908, 11.275, 11.72, 12.158,
12.045, 12.244, 12.333, 12.246, 12.552, 12.958, 13.11, 13.53,
13.123, 13.138, 13.57, 13.389, 13.511, 13.759, 13.698, 13.744,
13.467, 13.795, 13.665, 13.377, 13.423, 13.772, 13.295, 13.073,
12.718, 12.388, 12.399, 12.185, 11.941, 11.818, 11.465, 11.811,
12.163, 11.86, 11.935, 11.809, 12.145, 12.624, 12.768, 12.321,
12.277, 11.889, 12.11, 12.606, 12.943, 12.945, 13.112, 13.199,
13.664, 14.051, 14.189, 14.339, 14.611, 14.656, 15.112, 15.086,
15.263, 15.021, 15.346, 15.572, 15.607, 15.983, 16.151, 16.215,
16.096, 16.089, 16.32, 16.59, 16.657, 16.752, 16.583, 16.743,
16.373, 16.662, 16.243, 16.163, 16.491, 16.958, 16.977, 17.225,
17.637, 17.344, 17.684, 17.892, 18.036, 18.182, 17.803, 17.588,
17.101, 17.538, 17.124, 16.787, 17.167, 17.138, 16.955, 17.148,
17.135, 17.635, 17.718, 17.675, 17.622, 17.358, 17.754, 17.729,
17.576, 17.772, 18.239, 18.441, 18.729, 18.319, 18.608, 18.493,
18.069, 18.122, 18.314, 18.423, 18.709, 18.548, 18.384, 18.391,
17.988, 17.986, 17.653, 17.249, 17.298, 17.06, 17.36, 17.108,
17.348, 17.596, 17.46, 17.635, 17.275, 17.291, 16.933, 17.337,
17.231, 17.146, 17.148, 16.751, 16.891, 17.038, 16.735, 16.64,
16.231, 15.957, 15.977, 16.077, 16.054, 15.797, 15.67, 15.911,
16.077, 16.17, 15.722, 15.258, 14.877, 15.138, 15., 14.811,
14.698, 14.407, 14.583, 14.704, 15.153, 15.436, 15.634, 15.453,
15.877, 15.696, 15.563, 15.927, 16.255, 16.696, 16.266, 16.698,
16.365, 16.493, 16.973, 16.71, 16.327, 16.605, 16.486, 16.846,
16.935, 17.21, 17.389, 17.546, 17.773, 17.641, 17.485, 17.794,
17.354, 16.904, 16.675, 16.43, 16.898, 16.819, 16.921, 17.201,
17.617, 17.368, 17.864, 17.484],
columns=['value'])
self.long_bench = pd.DataFrame([9.7, 10.179, 10.321, 9.855, 9.936, 10.096, 10.331, 10.662,
10.59, 11.031, 11.154, 10.945, 10.625, 10.233, 10.284, 10.252,
10.221, 10.352, 10.444, 10.773, 10.904, 11.104, 10.797, 10.55,
10.943, 11.352, 11.641, 11.983, 11.696, 12.138, 12.365, 12.379,
11.969, 12.454, 12.947, 13.119, 13.013, 12.763, 12.632, 13.034,
12.681, 12.561, 12.938, 12.867, 13.202, 13.132, 13.539, 13.91,
13.456, 13.692, 13.771, 13.904, 14.069, 13.728, 13.97, 14.228,
13.84, 14.041, 13.963, 13.689, 13.543, 13.858, 14.118, 13.987,
13.611, 14.028, 14.229, 14.41, 14.74, 15.03, 14.915, 15.207,
15.354, 15.665, 15.877, 15.682, 15.625, 15.175, 15.105, 14.893,
14.86, 15.097, 15.178, 15.293, 15.238, 15., 15.283, 14.994,
14.907, 14.664, 14.888, 15.297, 15.313, 15.368, 14.956, 14.802,
14.506, 14.257, 14.619, 15.019, 15.049, 14.625, 14.894, 14.978,
15.434, 15.578, 16.038, 16.107, 16.277, 16.365, 16.204, 16.465,
16.401, 16.895, 17.057, 16.621, 16.225, 16.075, 15.863, 16.292,
16.551, 16.724, 16.817, 16.81, 17.192, 16.86, 16.745, 16.707,
16.552, 16.133, 16.301, 16.08, 15.81, 15.75, 15.909, 16.127,
16.457, 16.204, 16.329, 16.748, 16.624, 17.011, 16.548, 16.831,
16.653, 16.791, 16.57, 16.778, 16.928, 16.932, 17.22, 16.876,
17.301, 17.422, 17.689, 17.316, 17.547, 17.534, 17.409, 17.669,
17.416, 17.859, 17.477, 17.307, 17.245, 17.352, 17.851, 17.412,
17.144, 17.138, 17.085, 16.926, 16.674, 16.854, 17.064, 16.95,
16.609, 16.957, 16.498, 16.552, 16.175, 15.858, 15.697, 15.781,
15.583, 15.36, 15.558, 16.046, 15.968, 15.905, 16.358, 16.783,
17.048, 16.762, 17.224, 17.363, 17.246, 16.79, 16.608, 16.423,
15.991, 15.527, 15.147, 14.759, 14.792, 15.206, 15.148, 15.046,
15.429, 14.999, 15.407, 15.124, 14.72, 14.713, 15.022, 15.092,
14.982, 15.001, 14.734, 14.713, 14.841, 14.562, 15.005, 15.483,
15.472, 15.277, 15.503, 15.116, 15.12, 15.442, 15.476, 15.789,
15.36, 15.764, 16.218, 16.493, 16.642, 17.088, 16.816, 16.645,
16.336, 16.511, 16.2, 15.994, 15.86, 15.929, 16.316, 16.416,
16.746, 17.173, 17.531, 17.627, 17.407, 17.49, 17.768, 17.509,
17.795, 18.147, 18.63, 18.945, 19.021, 19.518, 19.6, 19.744,
19.63, 19.32, 18.933, 19.297, 19.598, 19.446, 19.236, 19.198,
19.144, 19.159, 19.065, 19.032, 18.586, 18.272, 18.119, 18.3,
17.894, 17.744, 17.5, 17.083, 17.092, 16.864, 16.453, 16.31,
16.681, 16.342, 16.447, 16.715, 17.068, 17.067, 16.822, 16.673,
16.675, 16.592, 16.686, 16.397, 15.902, 15.597, 15.357, 15.162,
15.348, 15.603, 15.283, 15.257, 15.082, 14.621, 14.366, 14.039,
13.957, 14.141, 13.854, 14.243, 14.414, 14.033, 13.93, 14.104,
14.461, 14.249, 14.053, 14.165, 14.035, 14.408, 14.501, 14.019,
14.265, 14.67, 14.797, 14.42, 14.681, 15.16, 14.715, 14.292,
14.411, 14.656, 15.094, 15.366, 15.055, 15.198, 14.762, 14.294,
13.854, 13.811, 13.549, 13.927, 13.897, 13.421, 13.037, 13.32,
13.721, 13.511, 13.999, 13.529, 13.418, 13.881, 14.326, 14.362,
13.987, 14.015, 13.599, 13.343, 13.307, 13.689, 13.851, 13.404,
13.577, 13.395, 13.619, 13.195, 12.904, 12.553, 12.294, 12.649,
12.425, 11.967, 12.062, 11.71, 11.645, 12.058, 12.136, 11.749,
11.953, 12.401, 12.044, 11.901, 11.631, 11.396, 11.036, 11.244,
10.864, 11.207, 11.135, 11.39, 11.723, 12.084, 11.8, 11.471,
11.33, 11.504, 11.295, 11.3, 10.901, 10.494, 10.825, 11.054,
10.866, 10.713, 10.875, 10.846, 10.947, 11.422, 11.158, 10.94,
10.521, 10.36, 10.411, 10.792, 10.472, 10.305, 10.525, 10.853,
10.556, 10.72, 10.54, 10.583, 10.299, 10.061, 10.004, 9.903,
9.796, 9.472, 9.246, 9.54, 9.456, 9.177, 9.484, 9.557,
9.493, 9.968, 9.536, 9.39, 8.922, 8.423, 8.518, 8.686,
8.771, 9.098, 9.281, 8.858, 9.027, 8.553, 8.784, 8.996,
9.379, 9.846, 9.855, 9.502, 9.608, 9.761, 9.409, 9.4,
9.332, 9.34, 9.284, 8.844, 8.722, 8.376, 8.775, 8.293,
8.144, 8.63, 8.831, 8.957, 9.18, 9.601, 9.695, 10.018,
9.841, 9.743, 9.292, 8.85, 9.316, 9.288, 9.519, 9.738,
9.289, 9.785, 9.804, 10.06, 10.188, 10.095, 9.739, 9.881,
9.7, 9.991, 10.391, 10.002],
columns=['value'])
def test_performance_stats(self):
"""test the function performance_statistics()
"""
pass
def test_fv(self):
print(f'test with test data and empty DataFrame')
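# eval_fv apparently returns the terminal net value of the series: each expected
# figure below is simply the last 'value' entry of the corresponding test data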
self.assertAlmostEqual(eval_fv(self.test_data1), 6.39245474)
self.assertAlmostEqual(eval_fv(self.test_data2), 10.05126375)
self.assertAlmostEqual(eval_fv(self.test_data3), 6.95068113)
self.assertAlmostEqual(eval_fv(self.test_data4), 8.86508591)
self.assertAlmostEqual(eval_fv(self.test_data5), 4.58627048)
self.assertAlmostEqual(eval_fv(self.test_data6), 4.10346795)
self.assertAlmostEqual(eval_fv(self.test_data7), 2.92532313)
self.assertAlmostEqual(eval_fv(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
def test_max_drawdown(self):
print(f'test with test data and empty DataFrame')
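# eval_max_drawdown appears to return a 4-tuple: (drawdown ratio, peak position,
# valley position, recovery position), with NaN in the last slot when the series
# never recovers its previous peak. A minimal pandas sketch of the ratio itself
# (an illustrative assumption, not necessarily the core.py implementation):
#     peak = df['value'].cummax()
#     mdd = (1 - df['value'] / peak).max()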
self.assertAlmostEqual(eval_max_drawdown(self.test_data1)[0], 0.264274308)
self.assertEqual(eval_max_drawdown(self.test_data1)[1], 53)
self.assertEqual(eval_max_drawdown(self.test_data1)[2], 86)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data1)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data2)[0], 0.334690849)
self.assertEqual(eval_max_drawdown(self.test_data2)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data2)[2], 10)
self.assertEqual(eval_max_drawdown(self.test_data2)[3], 19)
self.assertAlmostEqual(eval_max_drawdown(self.test_data3)[0], 0.244452899)
self.assertEqual(eval_max_drawdown(self.test_data3)[1], 90)
self.assertEqual(eval_max_drawdown(self.test_data3)[2], 99)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data3)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data4)[0], 0.201849684)
self.assertEqual(eval_max_drawdown(self.test_data4)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4)[2], 50)
self.assertEqual(eval_max_drawdown(self.test_data4)[3], 54)
self.assertAlmostEqual(eval_max_drawdown(self.test_data5)[0], 0.534206456)
self.assertEqual(eval_max_drawdown(self.test_data5)[1], 21)
self.assertEqual(eval_max_drawdown(self.test_data5)[2], 60)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data5)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data6)[0], 0.670062689)
self.assertEqual(eval_max_drawdown(self.test_data6)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data6)[2], 70)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data6)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data7)[0], 0.783577449)
self.assertEqual(eval_max_drawdown(self.test_data7)[1], 17)
self.assertEqual(eval_max_drawdown(self.test_data7)[2], 51)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data7)[3]))
self.assertEqual(eval_max_drawdown(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_max_drawdown, 15)
self.assertRaises(KeyError,
eval_max_drawdown,
pd.DataFrame([1, 2, 3], columns=['non_value']))
# test max drawdown == 0:
# TODO: investigate how division by zero changes the result in this case
self.assertAlmostEqual(eval_max_drawdown(self.test_data4 - 5)[0], 1.0770474121951792)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[2], 50)
def test_info_ratio(self):
reference = self.test_data1
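# the information ratio is conventionally mean(active return) / std(active return),
# where active return is the portfolio return minus the reference return; the
# expected figures below come from the manual Excel calculations noted in setUp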
self.assertAlmostEqual(eval_info_ratio(self.test_data2, reference, 'value'), 0.075553316)
self.assertAlmostEqual(eval_info_ratio(self.test_data3, reference, 'value'), 0.018949457)
self.assertAlmostEqual(eval_info_ratio(self.test_data4, reference, 'value'), 0.056328143)
self.assertAlmostEqual(eval_info_ratio(self.test_data5, reference, 'value'), -0.004270068)
self.assertAlmostEqual(eval_info_ratio(self.test_data6, reference, 'value'), 0.009198027)
self.assertAlmostEqual(eval_info_ratio(self.test_data7, reference, 'value'), -0.000890283)
def test_volatility(self):
self.assertAlmostEqual(eval_volatility(self.test_data1), 0.748646166)
self.assertAlmostEqual(eval_volatility(self.test_data2), 0.75527442)
self.assertAlmostEqual(eval_volatility(self.test_data3), 0.654188853)
self.assertAlmostEqual(eval_volatility(self.test_data4), 0.688375814)
self.assertAlmostEqual(eval_volatility(self.test_data5), 1.089989522)
self.assertAlmostEqual(eval_volatility(self.test_data6), 1.775419308)
self.assertAlmostEqual(eval_volatility(self.test_data7), 1.962758406)
self.assertAlmostEqual(eval_volatility(self.test_data1, logarithm=False), 0.750993311)
self.assertAlmostEqual(eval_volatility(self.test_data2, logarithm=False), 0.75571473)
self.assertAlmostEqual(eval_volatility(self.test_data3, logarithm=False), 0.655331424)
self.assertAlmostEqual(eval_volatility(self.test_data4, logarithm=False), 0.692683021)
self.assertAlmostEqual(eval_volatility(self.test_data5, logarithm=False), 1.09602969)
self.assertAlmostEqual(eval_volatility(self.test_data6, logarithm=False), 1.774789504)
self.assertAlmostEqual(eval_volatility(self.test_data7, logarithm=False), 2.003329156)
self.assertEqual(eval_volatility(pd.DataFrame()), -np.inf)
self.assertRaises(AssertionError, eval_volatility, [1, 2, 3])
# test volatility calculation on the long data series
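# the first 250 expected values are NaN because the rolling window needs a full
# 250 data points; hence the 500-point long_data series built in setUp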
expected_volatility = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
0.39955371, 0.39974258, 0.40309866, 0.40486593, 0.4055514,
0.40710639, 0.40708157, 0.40609006, 0.4073625, 0.40835305,
0.41155304, 0.41218193, 0.41207489, 0.41300276, 0.41308415,
0.41292392, 0.41207645, 0.41238397, 0.41229291, 0.41164056,
0.41316317, 0.41348842, 0.41462249, 0.41474574, 0.41652625,
0.41649176, 0.41701556, 0.4166593, 0.41684221, 0.41491689,
0.41435209, 0.41549087, 0.41849338, 0.41998049, 0.41959106,
0.41907311, 0.41916103, 0.42120773, 0.42052391, 0.42111225,
0.42124589, 0.42356445, 0.42214672, 0.42324022, 0.42476639,
0.42621689, 0.42549439, 0.42533678, 0.42539414, 0.42545038,
0.42593637, 0.42652095, 0.42665489, 0.42699563, 0.42798159,
0.42784512, 0.42898006, 0.42868781, 0.42874188, 0.42789631,
0.4277768, 0.42776827, 0.42685216, 0.42660989, 0.42563155,
0.42618281, 0.42606281, 0.42505222, 0.42653242, 0.42555378,
0.42500842, 0.42561939, 0.42442059, 0.42395414, 0.42384356,
0.42319135, 0.42397497, 0.42488579, 0.42449729, 0.42508766,
0.42509878, 0.42456616, 0.42535577, 0.42681884, 0.42688552,
0.42779918, 0.42706058, 0.42792887, 0.42762114, 0.42894045,
0.42977398, 0.42919859, 0.42829041, 0.42780946, 0.42825318,
0.42858952, 0.42858315, 0.42805601, 0.42764751, 0.42744107,
0.42775518, 0.42707283, 0.4258592, 0.42615335, 0.42526286,
0.4248906, 0.42368986, 0.4232565, 0.42265079, 0.42263954,
0.42153046, 0.42132051, 0.41995353, 0.41916605, 0.41914271,
0.41876945, 0.41740175, 0.41583884, 0.41614026, 0.41457908,
0.41472411, 0.41310876, 0.41261041, 0.41212369, 0.41211677,
0.4100645, 0.40852504, 0.40860297, 0.40745338, 0.40698661,
0.40644546, 0.40591375, 0.40640744, 0.40620663, 0.40656649,
0.40727154, 0.40797605, 0.40807137, 0.40808913, 0.40809676,
0.40711767, 0.40724628, 0.40713077, 0.40772698, 0.40765157,
0.40658297, 0.4065991, 0.405011, 0.40537645, 0.40432626,
0.40390177, 0.40237701, 0.40291623, 0.40301797, 0.40324145,
0.40312864, 0.40328316, 0.40190955, 0.40246506, 0.40237663,
0.40198407, 0.401969, 0.40185623, 0.40198313, 0.40005643,
0.39940743, 0.39850438, 0.39845398, 0.39695093, 0.39697295,
0.39663201, 0.39675444, 0.39538699, 0.39331959, 0.39326074,
0.39193287, 0.39157266, 0.39021327, 0.39062591, 0.38917591,
0.38976991, 0.38864187, 0.38872158, 0.38868096, 0.38868377,
0.38842057, 0.38654784, 0.38649517, 0.38600464, 0.38408115,
0.38323049, 0.38260215, 0.38207663, 0.38142669, 0.38003262,
0.37969367, 0.37768092, 0.37732108, 0.37741991, 0.37617779,
0.37698504, 0.37606784, 0.37499276, 0.37533731, 0.37350437,
0.37375172, 0.37385382, 0.37384003, 0.37338938, 0.37212288,
0.37273075, 0.370559, 0.37038506, 0.37062153, 0.36964661,
0.36818564, 0.3656634, 0.36539259, 0.36428672, 0.36502487,
0.3647148, 0.36551435, 0.36409919, 0.36348181, 0.36254383,
0.36166601, 0.36142665, 0.35954942, 0.35846915, 0.35886759,
0.35813867, 0.35642888, 0.35375231, 0.35061783, 0.35078463,
0.34995508, 0.34688918, 0.34548257, 0.34633158, 0.34622833,
0.34652111, 0.34622774, 0.34540951, 0.34418809, 0.34276593,
0.34160916, 0.33811193, 0.33822709, 0.3391685, 0.33883381])
test_volatility = eval_volatility(self.long_data)
test_volatility_roll = self.long_data['volatility'].values
self.assertAlmostEqual(test_volatility, np.nanmean(expected_volatility))
self.assertTrue(np.allclose(expected_volatility, test_volatility_roll, equal_nan=True))
def test_sharp(self):
self.assertAlmostEqual(eval_sharp(self.test_data1, 5, 0), 0.06135557)
self.assertAlmostEqual(eval_sharp(self.test_data2, 5, 0), 0.167858667)
self.assertAlmostEqual(eval_sharp(self.test_data3, 5, 0), 0.09950547)
self.assertAlmostEqual(eval_sharp(self.test_data4, 5, 0), 0.154928241)
self.assertAlmostEqual(eval_sharp(self.test_data5, 5, 0.002), 0.007868673)
self.assertAlmostEqual(eval_sharp(self.test_data6, 5, 0.002), 0.018306537)
self.assertAlmostEqual(eval_sharp(self.test_data7, 5, 0.002), 0.006259971)
# test Sharpe ratio calculation on the long data series
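# as with volatility above, the first 250 rolling values are NaN until the
# 250-point window fills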
expected_sharp = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.02346815, -0.02618783, -0.03763912, -0.03296276, -0.03085698,
-0.02851101, -0.02375842, -0.02016746, -0.01107885, -0.01426613,
-0.00787204, -0.01135784, -0.01164232, -0.01003481, -0.00022512,
-0.00046792, -0.01209378, -0.01278892, -0.01298135, -0.01938214,
-0.01671044, -0.02120509, -0.0244281, -0.02416067, -0.02763238,
-0.027579, -0.02372774, -0.02215294, -0.02467094, -0.02091266,
-0.02590194, -0.03049876, -0.02077131, -0.01483653, -0.02488144,
-0.02671638, -0.02561547, -0.01957986, -0.02479803, -0.02703162,
-0.02658087, -0.01641755, -0.01946472, -0.01647757, -0.01280889,
-0.00893643, -0.00643275, -0.00698457, -0.00549962, -0.00654677,
-0.00494757, -0.0035633, -0.00109037, 0.00750654, 0.00451208,
0.00625502, 0.01221367, 0.01326454, 0.01535037, 0.02269538,
0.02028715, 0.02127712, 0.02333264, 0.02273159, 0.01670643,
0.01376513, 0.01265342, 0.02211647, 0.01612449, 0.00856706,
-0.00077147, -0.00268848, 0.00210993, -0.00443934, -0.00411912,
-0.0018756, -0.00867461, -0.00581601, -0.00660835, -0.00861137,
-0.00678614, -0.01188408, -0.00589617, -0.00244323, -0.00201891,
-0.01042846, -0.01471016, -0.02167034, -0.02258554, -0.01306809,
-0.00909086, -0.01233746, -0.00595166, -0.00184208, 0.00750497,
0.01481886, 0.01761972, 0.01562886, 0.01446414, 0.01285826,
0.01357719, 0.00967613, 0.01636272, 0.01458437, 0.02280183,
0.02151903, 0.01700276, 0.01597368, 0.02114336, 0.02233297,
0.02585631, 0.02768459, 0.03519235, 0.04204535, 0.04328161,
0.04672855, 0.05046191, 0.04619848, 0.04525853, 0.05381529,
0.04598861, 0.03947394, 0.04665006, 0.05586077, 0.05617728,
0.06495018, 0.06205172, 0.05665466, 0.06500615, 0.0632062,
0.06084328, 0.05851466, 0.05659229, 0.05159347, 0.0432977,
0.0474047, 0.04231723, 0.03613176, 0.03618391, 0.03591012,
0.03885674, 0.0402686, 0.03846423, 0.04534014, 0.04721458,
0.05130912, 0.05026281, 0.05394312, 0.05529349, 0.05949243,
0.05463304, 0.06195165, 0.06767606, 0.06880985, 0.07048996,
0.07078815, 0.07420767, 0.06773439, 0.0658441, 0.06470875,
0.06302349, 0.06456876, 0.06411282, 0.06216669, 0.067094,
0.07055075, 0.07254976, 0.07119253, 0.06173308, 0.05393352,
0.05681246, 0.05250643, 0.06099845, 0.0655544, 0.06977334,
0.06636514, 0.06177949, 0.06869908, 0.06719767, 0.06178738,
0.05915714, 0.06882277, 0.06756821, 0.06507994, 0.06489791,
0.06553941, 0.073123, 0.07576757, 0.06805446, 0.06063571,
0.05033801, 0.05206971, 0.05540306, 0.05249118, 0.05755587,
0.0586174, 0.05051288, 0.0564852, 0.05757284, 0.06358355,
0.06130082, 0.04925482, 0.03834472, 0.04163981, 0.04648316,
0.04457858, 0.04324626, 0.04328791, 0.04156207, 0.04818652,
0.04972634, 0.06024123, 0.06489556, 0.06255485, 0.06069815,
0.06466389, 0.07081163, 0.07895358, 0.0881782, 0.09374151,
0.08336506, 0.08764795, 0.09080174, 0.08808926, 0.08641158,
0.07811943, 0.06885318, 0.06479503, 0.06851185, 0.07382819,
0.07047903, 0.06658251, 0.07638379, 0.08667974, 0.08867918,
0.08245323, 0.08961866, 0.09905298, 0.0961908, 0.08562706,
0.0839014, 0.0849072, 0.08338395, 0.08783487, 0.09463609,
0.10332336, 0.11806497, 0.11220297, 0.11589097, 0.11678405])
test_sharp = eval_sharp(self.long_data, 5, 0.00035)
self.assertAlmostEqual(np.nanmean(expected_sharp), test_sharp)
self.assertTrue(np.allclose(self.long_data['sharp'].values, expected_sharp, equal_nan=True))
def test_beta(self):
reference = self.test_data1
self.assertAlmostEqual(eval_beta(self.test_data2, reference, 'value'), -0.017148939)
self.assertAlmostEqual(eval_beta(self.test_data3, reference, 'value'), -0.042204233)
self.assertAlmostEqual(eval_beta(self.test_data4, reference, 'value'), -0.15652986)
self.assertAlmostEqual(eval_beta(self.test_data5, reference, 'value'), -0.049195532)
self.assertAlmostEqual(eval_beta(self.test_data6, reference, 'value'), -0.026995082)
self.assertAlmostEqual(eval_beta(self.test_data7, reference, 'value'), -0.01147809)
self.assertRaises(TypeError, eval_beta, [1, 2, 3], reference, 'value')
self.assertRaises(TypeError, eval_beta, self.test_data3, [1, 2, 3], 'value')
self.assertRaises(KeyError, eval_beta, self.test_data3, reference, 'not_found_value')
# test beta calculation on the long data series
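# beta is conventionally cov(portfolio returns, reference returns) / var(reference
# returns); the rolling series again starts with 250 NaNs while the window fills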
expected_beta = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.04988841, -0.05127618, -0.04692104, -0.04272652, -0.04080598,
-0.0493347, -0.0460858, -0.0416761, -0.03691527, -0.03724924,
-0.03678865, -0.03987324, -0.03488321, -0.02567672, -0.02690303,
-0.03010128, -0.02437967, -0.02571932, -0.02455681, -0.02839811,
-0.03358653, -0.03396697, -0.03466321, -0.03050966, -0.0247583,
-0.01629325, -0.01880895, -0.01480403, -0.01348783, -0.00544294,
-0.00648176, -0.00467036, -0.01135331, -0.0156841, -0.02340763,
-0.02615705, -0.02730771, -0.02906174, -0.02860664, -0.02412914,
-0.02066416, -0.01744816, -0.02185133, -0.02145285, -0.02681765,
-0.02827694, -0.02394581, -0.02744096, -0.02778825, -0.02703065,
-0.03160023, -0.03615371, -0.03681072, -0.04265126, -0.04344738,
-0.04232421, -0.04705272, -0.04533344, -0.04605934, -0.05272737,
-0.05156463, -0.05134196, -0.04730733, -0.04425352, -0.03869831,
-0.04159571, -0.04223998, -0.04346747, -0.04229844, -0.04740093,
-0.04992507, -0.04621232, -0.04477644, -0.0486915, -0.04598224,
-0.04943463, -0.05006391, -0.05362256, -0.04994067, -0.05464769,
-0.05443275, -0.05513493, -0.05173594, -0.04500994, -0.04662891,
-0.03903505, -0.0419592, -0.04307773, -0.03925718, -0.03711574,
-0.03992631, -0.0433058, -0.04533641, -0.0461183, -0.05600344,
-0.05758377, -0.05959874, -0.05605942, -0.06002859, -0.06253002,
-0.06747014, -0.06427915, -0.05931947, -0.05769974, -0.04791515,
-0.05175088, -0.05748039, -0.05385232, -0.05072975, -0.05052637,
-0.05125567, -0.05005785, -0.05325104, -0.04977727, -0.04947867,
-0.05148544, -0.05739156, -0.05742069, -0.06047279, -0.0558414,
-0.06086126, -0.06265151, -0.06411129, -0.06828052, -0.06781762,
-0.07083409, -0.07211207, -0.06799162, -0.06913295, -0.06775162,
-0.0696265, -0.06678248, -0.06867502, -0.06581961, -0.07055823,
-0.06448184, -0.06097973, -0.05795587, -0.0618383, -0.06130145,
-0.06050652, -0.05936661, -0.05749424, -0.0499, -0.05050495,
-0.04962687, -0.05033439, -0.05070116, -0.05422009, -0.05369759,
-0.05548943, -0.05907353, -0.05933035, -0.05927918, -0.06227663,
-0.06011455, -0.05650432, -0.05828134, -0.05620949, -0.05715323,
-0.05482478, -0.05387113, -0.05095559, -0.05377999, -0.05334267,
-0.05220438, -0.04001521, -0.03892434, -0.03660782, -0.04282708,
-0.04324623, -0.04127048, -0.04227559, -0.04275226, -0.04347049,
-0.04125853, -0.03806295, -0.0330632, -0.03155531, -0.03277152,
-0.03304518, -0.03878731, -0.03830672, -0.03727434, -0.0370571,
-0.04509224, -0.04207632, -0.04116198, -0.04545179, -0.04584584,
-0.05287341, -0.05417433, -0.05175836, -0.05005509, -0.04268674,
-0.03442321, -0.03457309, -0.03613426, -0.03524391, -0.03629479,
-0.04361312, -0.02626705, -0.02406115, -0.03046384, -0.03181044,
-0.03375164, -0.03661673, -0.04520779, -0.04926951, -0.05726738,
-0.0584486, -0.06220608, -0.06800563, -0.06797431, -0.07562211,
-0.07481996, -0.07731229, -0.08413381, -0.09031826, -0.09691925,
-0.11018071, -0.11952675, -0.10826026, -0.11173895, -0.10756359,
-0.10775916, -0.11664559, -0.10505051, -0.10606547, -0.09855355,
-0.10004159, -0.10857084, -0.12209301, -0.11605758, -0.11105113,
-0.1155195, -0.11569505, -0.10513348, -0.09611072, -0.10719791,
-0.10843965, -0.11025856, -0.10247839, -0.10554044, -0.10927647,
-0.10645088, -0.09982498, -0.10542734, -0.09631372, -0.08229695])
test_beta_mean = eval_beta(self.long_data, self.long_bench, 'value')
test_beta_roll = self.long_data['beta'].values
self.assertAlmostEqual(test_beta_mean, np.nanmean(expected_beta))
self.assertTrue(np.allclose(test_beta_roll, expected_beta, equal_nan=True))
def test_alpha(self):
reference = self.test_data1
self.assertAlmostEqual(eval_alpha(self.test_data2, 5, reference, 'value', 0.5), 11.63072977)
self.assertAlmostEqual(eval_alpha(self.test_data3, 5, reference, 'value', 0.5), 1.886590071)
self.assertAlmostEqual(eval_alpha(self.test_data4, 5, reference, 'value', 0.5), 6.827021872)
self.assertAlmostEqual(eval_alpha(self.test_data5, 5, reference, 'value', 0.92), -1.192265168)
self.assertAlmostEqual(eval_alpha(self.test_data6, 5, reference, 'value', 0.92), -1.437142359)
self.assertAlmostEqual(eval_alpha(self.test_data7, 5, reference, 'value', 0.92), -1.781311545)
# test alpha calculation on the long data series
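# alpha is presumably a CAPM-style (Jensen's) alpha measured against the reference;
# as above, the first 250 rolling values are NaN while the window fills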
expected_alpha = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.09418119, -0.11188463, -0.17938358, -0.15588172, -0.1462678,
-0.13089586, -0.10780125, -0.09102891, -0.03987585, -0.06075686,
-0.02459503, -0.04104284, -0.0444565, -0.04074585, 0.02191275,
0.02255955, -0.05583375, -0.05875539, -0.06055551, -0.09648245,
-0.07913737, -0.10627829, -0.12320965, -0.12368335, -0.1506743,
-0.15768033, -0.13638829, -0.13065298, -0.14537834, -0.127428,
-0.15504529, -0.18184636, -0.12652146, -0.09190138, -0.14847221,
-0.15840648, -0.1525789, -0.11859418, -0.14700954, -0.16295761,
-0.16051645, -0.10364859, -0.11961134, -0.10258267, -0.08090148,
-0.05727746, -0.0429945, -0.04672356, -0.03581408, -0.0439215,
-0.03429495, -0.0260362, -0.01075022, 0.04931808, 0.02779388,
0.03984083, 0.08311951, 0.08995566, 0.10522428, 0.16159058,
0.14238174, 0.14759783, 0.16257712, 0.158908, 0.11302115,
0.0909566, 0.08272888, 0.15261884, 0.10546376, 0.04990313,
-0.01284111, -0.02720704, 0.00454725, -0.03965491, -0.03818265,
-0.02186992, -0.06574751, -0.04846454, -0.05204211, -0.06316498,
-0.05095099, -0.08502656, -0.04681162, -0.02362027, -0.02205091,
-0.07706374, -0.10371841, -0.14434688, -0.14797935, -0.09055402,
-0.06739549, -0.08824959, -0.04855888, -0.02291244, 0.04027138,
0.09370505, 0.11472939, 0.10243593, 0.0921445, 0.07662648,
0.07946651, 0.05450718, 0.10497677, 0.09068334, 0.15462924,
0.14231034, 0.10544952, 0.09980256, 0.14035223, 0.14942974,
0.17624102, 0.19035477, 0.2500807, 0.30724652, 0.31768915,
0.35007521, 0.38412975, 0.34356521, 0.33614463, 0.41206165,
0.33999177, 0.28045963, 0.34076789, 0.42220356, 0.42314636,
0.50790423, 0.47713348, 0.42520169, 0.50488411, 0.48705211,
0.46252601, 0.44325578, 0.42640573, 0.37986783, 0.30652822,
0.34503393, 0.2999069, 0.24928617, 0.24730218, 0.24326897,
0.26657905, 0.27861168, 0.26392824, 0.32552649, 0.34177792,
0.37837011, 0.37025267, 0.4030612, 0.41339361, 0.45076809,
0.40383354, 0.47093422, 0.52505036, 0.53614256, 0.5500943,
0.55319293, 0.59021451, 0.52358459, 0.50605947, 0.49359168,
0.47895956, 0.49320243, 0.4908336, 0.47310767, 0.51821564,
0.55105932, 0.57291504, 0.5599809, 0.46868842, 0.39620087,
0.42086934, 0.38317217, 0.45934108, 0.50048866, 0.53941991,
0.50676751, 0.46500915, 0.52993663, 0.51668366, 0.46405428,
0.44100603, 0.52726147, 0.51565458, 0.49186248, 0.49001081,
0.49367648, 0.56422294, 0.58882785, 0.51334664, 0.44386256,
0.35056709, 0.36490029, 0.39205071, 0.3677061, 0.41134736,
0.42315067, 0.35356394, 0.40324562, 0.41340007, 0.46503322,
0.44355762, 0.34854314, 0.26412842, 0.28633753, 0.32335224,
0.30761141, 0.29709569, 0.29570487, 0.28000063, 0.32802547,
0.33967726, 0.42511212, 0.46252357, 0.44244974, 0.42152907,
0.45436727, 0.50482359, 0.57339198, 0.6573356, 0.70912003,
0.60328917, 0.6395092, 0.67015805, 0.64241557, 0.62779142,
0.55028063, 0.46448736, 0.43709245, 0.46777983, 0.51789439,
0.48594916, 0.4456216, 0.52008189, 0.60548684, 0.62792473,
0.56645031, 0.62766439, 0.71829315, 0.69481356, 0.59550329,
0.58133754, 0.59014148, 0.58026655, 0.61719273, 0.67373203,
0.75573056, 0.89501633, 0.8347253, 0.87964685, 0.89015835])
test_alpha_mean = eval_alpha(self.long_data, 100, self.long_bench, 'value')
test_alpha_roll = self.long_data['alpha'].values
self.assertAlmostEqual(test_alpha_mean, np.nanmean(expected_alpha))
self.assertTrue(np.allclose(test_alpha_roll, expected_alpha, equal_nan=True))
def test_calmar(self):
"""test evaluate function eval_calmar()"""
pass
def test_benchmark(self):
reference = self.test_data1
tr, yr = eval_benchmark(self.test_data2, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data3, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data4, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data5, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data6, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data7, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
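# all six calls return the identical pair because the same reference is passed each
# time: 6.39245474 / 5.34892759 - 1 ≈ 0.19509091 is the total return of test_data1
# itself, with 0.929154957 presumably its annualized counterpart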
def test_evaluate(self):
pass
class TestLoop(unittest.TestCase):
"""通过一个假设但精心设计的例子来测试loop_step以及loop方法的正确性"""
def setUp(self):
# carefully designed mock share names, trading dates, and stock prices
self.shares = ['share1', 'share2', 'share3', 'share4', 'share5', 'share6', 'share7']
self.dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08',
'2016/09/09', '2016/09/12', '2016/09/13', '2016/09/14', '2016/09/15',
'2016/09/16', '2016/09/19', '2016/09/20', '2016/09/21', '2016/09/22',
'2016/09/23', '2016/09/26', '2016/09/27', '2016/09/28', '2016/09/29',
'2016/09/30', '2016/10/10', '2016/10/11', '2016/10/12', '2016/10/13',
'2016/10/14', '2016/10/17', '2016/10/18', '2016/10/19', '2016/10/20',
'2016/10/21', '2016/10/23', '2016/10/24', '2016/10/25', '2016/10/26',
'2016/10/27', '2016/10/29', '2016/10/30', '2016/10/31', '2016/11/01',
'2016/11/02', '2016/11/05', '2016/11/06', '2016/11/07', '2016/11/08',
'2016/11/09', '2016/11/12', '2016/11/13', '2016/11/14', '2016/11/15',
'2016/11/16', '2016/11/19', '2016/11/20', '2016/11/21', '2016/11/22']
self.dates = [pd.Timestamp(date_text) for date_text in self.dates]
self.prices = np.array([[5.35, 5.09, 5.03, 4.98, 4.50, 5.09, 4.75],
[5.66, 4.84, 5.21, 5.44, 4.35, 5.06, 4.48],
[5.79, 4.60, 5.02, 5.45, 4.07, 4.76, 4.56],
[5.56, 4.63, 5.50, 5.74, 3.88, 4.62, 4.62],
[5.88, 4.64, 5.07, 5.46, 3.74, 4.63, 4.62],
[6.25, 4.51, 5.11, 5.45, 3.98, 4.25, 4.59],
[5.93, 4.96, 5.15, 5.24, 4.08, 4.13, 4.33],
[6.39, 4.65, 5.02, 5.47, 4.00, 3.91, 3.88],
[6.31, 4.26, 5.10, 5.58, 4.28, 3.77, 3.47],
[5.86, 3.77, 5.24, 5.36, 4.01, 3.43, 3.51],
[5.61, 3.39, 4.93, 5.38, 4.14, 3.68, 3.83],
[5.31, 3.76, 4.96, 5.30, 4.49, 3.63, 3.67],
[5.40, 4.06, 5.40, 5.77, 4.49, 3.94, 3.79],
[5.03, 3.87, 5.74, 5.75, 4.46, 4.40, 4.18],
[5.38, 3.91, 5.53, 6.15, 4.13, 4.03, 4.02],
[5.79, 4.13, 5.79, 6.04, 3.79, 3.93, 4.41],
[6.27, 4.27, 5.68, 6.01, 4.23, 3.50, 4.65],
[6.59, 4.57, 5.90, 5.71, 4.57, 3.39, 4.89],
[6.91, 5.04, 5.75, 5.23, 4.92, 3.30, 4.41],
[6.71, 5.31, 6.11, 5.47, 5.28, 3.25, 4.66],
[6.33, 5.40, 5.77, 5.79, 5.67, 2.94, 4.37],
[6.07, 5.21, 5.85, 5.82, 6.00, 2.71, 4.58],
[5.98, 5.06, 5.61, 5.61, 5.89, 2.55, 4.76],
[6.46, 4.69, 5.75, 5.31, 5.55, 2.21, 4.37],
[6.95, 5.12, 5.50, 5.24, 5.39, 2.29, 4.16],
[6.77, 5.27, 5.14, 5.41, 5.26, 2.21, 4.02],
[6.70, 5.72, 5.31, 5.60, 5.31, 2.04, 3.77],
[6.28, 6.10, 5.68, 5.28, 5.22, 2.39, 3.38],
[6.61, 6.27, 5.73, 4.99, 4.90, 2.30, 3.07],
[6.25, 6.49, 6.04, 5.09, 4.57, 2.41, 2.90],
[6.47, 6.16, 6.27, 5.39, 4.96, 2.40, 2.50],
[6.45, 6.26, 6.60, 5.58, 4.82, 2.79, 2.76],
[6.88, 6.39, 6.10, 5.33, 4.39, 2.67, 2.29],
[7.00, 6.58, 6.25, 5.48, 4.63, 2.27, 2.17],
[6.59, 6.20, 6.73, 5.10, 5.05, 2.09, 1.84],
[6.59, 5.70, 6.91, 5.39, 4.68, 2.55, 1.83],
[6.64, 5.20, 7.01, 5.30, 5.02, 2.22, 2.21],
[6.38, 5.37, 7.36, 5.04, 4.84, 2.59, 2.00],
[6.10, 5.40, 7.72, 5.51, 4.60, 2.59, 1.76],
[6.35, 5.22, 7.68, 5.43, 4.66, 2.95, 1.27],
[6.52, 5.38, 7.62, 5.23, 4.41, 2.69, 1.40],
[6.87, 5.53, 7.74, 4.99, 4.87, 2.20, 1.11],
[6.84, 6.03, 7.53, 5.43, 4.42, 2.69, 1.60],
[7.09, 5.77, 7.46, 5.40, 4.08, 2.65, 1.23],
[6.88, 5.66, 7.84, 5.60, 4.16, 2.63, 1.59],
[6.84, 6.08, 8.11, 5.66, 4.10, 2.14, 1.50],
[6.98, 5.62, 8.04, 6.01, 4.43, 2.39, 1.80],
[7.02, 5.63, 7.65, 5.64, 4.07, 1.95, 1.55],
[7.13, 6.11, 7.52, 5.67, 3.97, 2.32, 1.35],
[7.59, 6.03, 7.67, 5.30, 4.16, 2.69, 1.51],
[7.61, 6.27, 7.47, 4.91, 4.12, 2.51, 1.08],
[7.21, 6.28, 7.44, 5.37, 4.04, 2.62, 1.06],
[7.48, 6.52, 7.59, 5.75, 3.84, 2.16, 1.43],
[7.66, 7.00, 7.94, 6.08, 3.46, 2.35, 1.43],
[7.51, 7.34, 8.25, 6.58, 3.18, 2.31, 1.74],
[7.12, 7.34, 7.77, 6.78, 3.10, 1.96, 1.44],
[6.97, 7.68, 8.03, 7.20, 3.55, 2.35, 1.83],
[6.67, 8.09, 7.87, 7.65, 3.66, 2.58, 1.71],
[6.20, 7.68, 7.58, 8.00, 3.66, 2.40, 2.12],
[6.34, 7.58, 7.33, 7.92, 3.29, 2.20, 2.45],
[6.22, 7.46, 7.22, 8.30, 2.80, 2.31, 2.85],
[5.98, 7.59, 6.86, 8.46, 2.88, 2.16, 2.79],
[6.37, 7.19, 7.18, 7.99, 3.04, 2.16, 2.91],
[6.56, 7.40, 7.54, 8.19, 3.45, 2.20, 3.26],
[6.26, 7.48, 7.24, 8.61, 3.88, 1.73, 3.14],
[6.69, 7.93, 6.85, 8.66, 3.58, 1.93, 3.53],
[7.13, 8.23, 6.60, 8.91, 3.60, 2.25, 3.65],
[6.83, 8.35, 6.65, 9.08, 3.97, 2.69, 3.69],
[7.31, 8.44, 6.74, 9.34, 4.05, 2.59, 3.50],
[7.43, 8.35, 7.19, 8.96, 4.40, 2.14, 3.25],
[7.54, 8.58, 7.14, 8.98, 4.06, 1.68, 3.64],
[7.18, 8.82, 6.88, 8.50, 3.60, 1.98, 4.00],
[7.21, 9.09, 7.14, 8.65, 3.61, 2.14, 3.63],
[7.45, 9.02, 7.30, 8.94, 4.10, 1.89, 3.78],
[7.37, 8.87, 6.95, 8.63, 3.74, 1.97, 3.42],
[6.88, 9.22, 7.02, 8.65, 4.02, 1.99, 3.76],
[7.08, 9.04, 7.38, 8.40, 3.95, 2.37, 3.62],
[6.75, 8.60, 7.50, 8.38, 3.81, 2.14, 3.67],
[6.60, 8.48, 7.60, 8.23, 3.71, 2.35, 3.61],
[6.21, 8.71, 7.15, 8.04, 3.94, 1.86, 3.39],
[6.36, 8.79, 7.30, 7.91, 4.43, 2.14, 3.43],
[6.82, 8.93, 7.80, 7.57, 4.07, 2.39, 3.33],
[6.45, 9.36, 8.15, 7.73, 4.04, 2.53, 3.03],
[6.85, 9.68, 8.40, 7.74, 4.34, 2.47, 3.28],
[6.48, 10.16, 8.87, 8.07, 4.80, 2.93, 3.46],
[6.10, 10.56, 8.53, 7.99, 5.18, 3.09, 3.25],
[5.64, 10.63, 8.94, 7.92, 4.90, 2.93, 2.95],
[6.01, 10.55, 8.52, 8.40, 5.40, 3.22, 2.87],
[6.21, 10.65, 8.80, 8.80, 5.73, 3.06, 2.63],
[6.61, 10.55, 8.92, 8.47, 5.62, 2.90, 2.40],
[7.02, 10.19, 9.20, 8.07, 5.20, 2.68, 2.53],
[7.04, 10.48, 8.71, 7.87, 4.85, 2.46, 2.96],
[6.77, 10.36, 8.25, 8.02, 5.18, 2.41, 3.26],
[7.09, 10.03, 8.19, 8.39, 4.72, 2.74, 2.97],
[6.65, 10.24, 7.80, 8.69, 4.62, 3.15, 3.16],
[7.07, 10.01, 7.69, 8.81, 4.55, 3.40, 3.58],
[6.80, 10.14, 7.23, 8.99, 4.37, 3.82, 3.23],
[6.79, 10.31, 6.98, 9.10, 4.26, 4.02, 3.62],
[6.48, 9.88, 7.07, 8.90, 4.25, 3.76, 3.13],
[6.39, 10.05, 6.95, 8.87, 4.59, 4.10, 2.93]])
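# 100 trading days × 7 shares, matching self.dates and self.shares above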
# carefully designed mock PT (position target) holding signals:
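# (one row of target position weights per trading day, one column per share)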
self.pt_signals = np.array([[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.000, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.250, 0.150, 0.000, 0.300, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.370, 0.193, 0.120, 0.072, 0.072, 0.072, 0.096],
[0.000, 0.222, 0.138, 0.222, 0.083, 0.222, 0.111],
[0.000, 0.222, 0.138, 0.222, 0.083, 0.222, 0.111],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.047, 0.380, 0.238, 0.000, 0.142, 0.000, 0.190],
[0.047, 0.380, 0.238, 0.000, 0.142, 0.000, 0.190],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.045, 0.454, 0.227, 0.000, 0.000, 0.000, 0.272],
[0.045, 0.454, 0.227, 0.000, 0.000, 0.000, 0.272],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300]])
# Carefully crafted simulated PS (proportion) trading signals, closely mirroring the simulated PT signals
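# (Presumed semantics: a positive PS entry buys with that proportion of the
# portfolio value, a negative entry sells that proportion of the current
# holding (so -1.000 clears the position), and 0.000 means no trade.)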
self.ps_signals = np.array([[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.100, 0.150],
[0.200, 0.200, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.100, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, -0.750, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.333, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, -0.500, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, -1.000],
[0.000, 0.000, 0.000, 0.000, 0.200, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.500, 0.000, 0.000, 0.150, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.200, 0.000, -1.000, 0.200, 0.000],
[0.500, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.200, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, -0.500, 0.200],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.200, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.150, 0.000, 0.000],
[-1.000, 0.000, 0.000, 0.250, 0.000, 0.250, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.250, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, -1.000, 0.000, -1.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.800, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.100, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, -1.000, 0.000, 0.100],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, -1.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-1.000, 0.000, 0.150, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000]])
# Carefully crafted simulated VS (volume) trading signals, similar to the simulated PS signals
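# (Presumed semantics: VS entries are absolute share volumes -- positive values
# are numbers of shares to buy, negative values numbers of shares to sell.)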
self.vs_signals = np.array([[000, 000, 000, 000, 500, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 300, 300],
[400, 400, 000, 000, 000, 000, 000],
[000, 000, 250, 000, 000, 000, 000],
[000, 000, 000, 000, -400, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, -200, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, -300],
[000, 000, 000, 000, 500, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 000, 300, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 400, 000, -300, 600, 000],
[500, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[600, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, -400, 600],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 500, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 300, 000, 000],
[-500, 000, 000, 500, 000, 200, 000],
[000, 000, 000, 000, 000, 000, 000],
[500, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, -700, 000, -600, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-400, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 300, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, -600, 000, 300],
[000, 000, 000, 000, 000, 000, 000],
[000, -300, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 700, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000]])
# Carefully crafted simulated multi-price trading signals: 50 trading days of operations on three stocks
self.multi_shares = ['000010', '000030', '000039']
self.multi_dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08']
self.multi_dates = [pd.Timestamp(date_text) for date_text in self.multi_dates]
# Trades are executed at three price types: open, high, and close
self.multi_prices_open = np.array([[10.02, 9.88, 7.26],
[10.00, 9.88, 7.00],
[9.98, 9.89, 6.88],
[9.97, 9.75, 6.91],
[9.99, 9.74, np.nan],
[10.01, 9.80, 6.81],
[10.04, 9.62, 6.63],
[10.06, 9.65, 6.45],
[10.06, 9.58, 6.16],
[10.11, 9.67, 6.24],
[10.11, 9.81, 5.96],
[10.07, 9.80, 5.97],
[10.06, 10.00, 5.96],
[10.09, 9.95, 6.20],
[10.03, 10.10, 6.35],
[10.02, 10.06, 6.11],
[10.06, 10.14, 6.37],
[10.08, 9.90, 5.58],
[9.99, 10.20, 5.65],
[10.00, 10.29, 5.65],
[10.03, 9.86, 5.19],
[10.02, 9.48, 5.42],
[10.06, 10.01, 6.30],
[10.03, 10.24, 6.15],
[9.97, 10.26, 6.05],
[9.94, 10.24, 5.89],
[9.83, 10.12, 5.22],
[9.78, 10.65, 5.20],
[9.77, 10.64, 5.07],
[9.91, 10.56, 6.04],
[9.92, 10.42, 6.12],
[9.97, 10.43, 5.85],
[9.91, 10.29, 5.67],
[9.90, 10.30, 6.02],
[9.88, 10.44, 6.04],
[9.91, 10.60, 7.07],
[9.63, 10.67, 7.64],
[9.64, 10.46, 7.99],
[9.57, 10.39, 7.59],
[9.55, 10.90, 8.73],
[9.58, 11.01, 8.72],
[9.61, 11.01, 8.97],
[9.62, np.nan, 8.58],
[9.55, np.nan, 8.71],
[9.57, 10.82, 8.77],
[9.61, 11.02, 8.40],
[9.63, 10.96, 7.95],
[9.64, 11.55, 7.76],
[9.61, 11.74, 8.25],
[9.56, 11.80, 7.51]])
self.multi_prices_high = np.array([[10.07, 9.91, 7.41],
[10.00, 10.04, 7.31],
[10.00, 9.93, 7.14],
[10.00, 10.04, 7.00],
[10.03, 9.84, np.nan],
[10.03, 9.88, 6.82],
[10.04, 9.99, 6.96],
[10.09, 9.70, 6.85],
[10.10, 9.67, 6.50],
[10.14, 9.71, 6.34],
[10.11, 9.85, 6.04],
[10.10, 9.90, 6.02],
[10.09, 10.00, 6.12],
[10.09, 10.20, 6.38],
[10.10, 10.11, 6.43],
[10.05, 10.18, 6.46],
[10.07, 10.21, 6.43],
[10.09, 10.26, 6.27],
[10.10, 10.38, 5.77],
[10.00, 10.47, 6.01],
[10.04, 10.42, 5.67],
[10.04, 10.07, 5.67],
[10.06, 10.24, 6.35],
[10.09, 10.27, 6.32],
[10.05, 10.38, 6.43],
[9.97, 10.43, 6.36],
[9.96, 10.39, 5.79],
[9.86, 10.65, 5.47],
[9.77, 10.84, 5.65],
[9.92, 10.65, 6.04],
[9.94, 10.73, 6.14],
[9.97, 10.63, 6.23],
[9.97, 10.51, 5.83],
[9.92, 10.35, 6.25],
[9.92, 10.46, 6.27],
[9.92, 10.63, 7.12],
[9.93, 10.74, 7.82],
[9.64, 10.76, 8.14],
[9.58, 10.54, 8.27],
[9.60, 11.02, 8.92],
[9.58, 11.12, 8.76],
[9.62, 11.17, 9.15],
[9.62, np.nan, 8.90],
[9.64, np.nan, 9.01],
[9.59, 10.92, 9.16],
[9.62, 11.15, 9.00],
[9.63, 11.11, 8.27],
[9.70, 11.55, 7.99],
[9.66, 11.95, 8.33],
[9.64, 11.93, 8.25]])
self.multi_prices_close = np.array([[10.04, 9.68, 6.64],
[10.00, 9.87, 7.26],
[10.00, 9.86, 7.03],
[9.99, 9.87, 6.87],
[9.97, 9.79, np.nan],
[9.99, 9.82, 6.64],
[10.03, 9.80, 6.85],
[10.03, 9.66, 6.70],
[10.06, 9.62, 6.39],
[10.06, 9.58, 6.22],
[10.11, 9.69, 5.92],
[10.09, 9.78, 5.91],
[10.07, 9.75, 6.11],
[10.06, 9.96, 5.91],
[10.09, 9.90, 6.23],
[10.03, 10.04, 6.28],
[10.03, 10.06, 6.28],
[10.06, 10.08, 6.27],
[10.08, 10.24, 5.70],
[10.00, 10.24, 5.56],
[9.99, 10.24, 5.67],
[10.03, 9.86, 5.16],
[10.03, 10.13, 5.69],
[10.06, 10.12, 6.32],
[10.03, 10.10, 6.14],
[9.97, 10.25, 6.25],
[9.94, 10.24, 5.79],
[9.83, 10.22, 5.26],
[9.77, 10.75, 5.05],
[9.84, 10.64, 5.45],
[9.91, 10.56, 6.06],
[9.93, 10.60, 6.21],
[9.96, 10.42, 5.69],
[9.91, 10.25, 5.46],
[9.91, 10.24, 6.02],
[9.88, 10.49, 6.69],
[9.91, 10.57, 7.43],
[9.64, 10.63, 7.72],
[9.56, 10.48, 8.16],
[9.57, 10.37, 7.83],
[9.55, 10.96, 8.70],
[9.57, 11.02, 8.71],
[9.61, np.nan, 8.88],
[9.61, np.nan, 8.54],
[9.55, 10.88, 8.87],
[9.57, 10.87, 8.87],
[9.63, 11.01, 8.18],
[9.64, 11.01, 7.80],
[9.65, 11.58, 7.97],
[9.62, 11.80, 8.25]])
# The trading signals come in three groups, applied to the open, high, and close prices respectively
# The key point here is the handling of the stock delivery period: when it is non-zero, delivery settles in units of trading days
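# (That is, with a 2-day stock delivery period, shares bought on day T are
# presumably not sellable until day T+2, and an analogous lag applies to cash;
# the expected-result arrays further below encode that behaviour.)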
self.multi_signals = []
# The first group of multi-signals trades at the open price
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.000, 0.000],
[0.000, -0.500, 0.000],
[0.000, -0.500, 0.000],
[0.000, 0.000, 0.000],
[0.150, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.300, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.300],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.350, 0.250],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.100, 0.000, 0.350],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.050, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
# The second group trades at the high price
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.150, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, -0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.200],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
# The third group trades at the close price
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-0.500, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, -0.800],
[0.000, 0.000, 0.000],
[0.000, -1.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-0.750, 0.000, 0.000],
[0.000, 0.000, -0.850],
[0.000, 0.000, 0.000],
[0.000, -0.700, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, -1.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-1.000, 0.000, 0.000],
[0.000, -1.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
# The backtest also needs three groups of prices: open, high, and close
self.multi_histories = []
# The first price history holds the open prices
self.multi_histories.append(
pd.DataFrame(self.multi_prices_open,
columns=self.multi_shares,
index=self.multi_dates
)
)
# The second price history holds the high prices
self.multi_histories.append(
pd.DataFrame(self.multi_prices_high,
columns=self.multi_shares,
index=self.multi_dates
)
)
# The third price history holds the close prices
self.multi_histories.append(
pd.DataFrame(self.multi_prices_close,
columns=self.multi_shares,
index=self.multi_dates
)
)
# Set up the backtest parameters
self.cash = qt.CashPlan(['2016/07/01', '2016/08/12', '2016/09/23'], [10000, 10000, 10000])
self.rate = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=0,
sell_min=0,
slipage=0)
self.rate2 = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=10,
sell_min=5,
slipage=0)
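# (The two cost schemes differ only in minimum charges: self.rate is completely
# frictionless, while self.rate2 additionally imposes a minimum fee of 10 per
# buy and 5 per sell -- presumably so tests can isolate the effect of minimum
# transaction fees. "slipage" is kept as spelled in the qt.Cost signature.)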
self.pt_signal_hp = dataframe_to_hp(
pd.DataFrame(self.pt_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.ps_signal_hp = dataframe_to_hp(
pd.DataFrame(self.ps_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.vs_signal_hp = dataframe_to_hp(
pd.DataFrame(self.vs_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.multi_signal_hp = stack_dataframes(
self.multi_signals,
stack_along='htypes',
htypes='open, high, close'
)
self.history_list = dataframe_to_hp(
pd.DataFrame(self.prices, index=self.dates, columns=self.shares),
htypes='close'
)
self.multi_history_list = stack_dataframes(
self.multi_histories,
stack_along='htypes',
htypes='open, high, close'
)
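# (dataframe_to_hp presumably wraps a single DataFrame into a one-htype
# HistoryPanel, while stack_dataframes stacks several DataFrames along the
# htype axis, yielding 'open'/'high'/'close' layers for the multi-price tests.)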
# Expected backtest results for the simulated PT signals
# PT signals, sell-before-buy, delivery period 0
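# Each row of the result arrays below is one trading day. Judging by the opening
# balances, the ten columns are presumably the holdings of the seven shares,
# followed by remaining cash, accumulated fee (always 0 here, since self.rate is
# frictionless), and total portfolio value.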
self.pt_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 6035.8333, 0.0000, 9761.1111],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9674.8209],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9712.5872],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9910.7240],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9919.3782],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9793.0692],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9513.8217],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9123.5935],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9000.5995],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9053.4865],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9248.7142],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9161.1372],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9197.3369],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9504.6981],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9875.2461],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10241.5400],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10449.2398],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10628.3269],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10500.7893],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 0.0000, 5233.1396, 0.0000, 10449.2776],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10338.2857],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10194.3474],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10471.0008],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10411.2629],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10670.0618],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10652.4799],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10526.1488],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10458.6614],
[101.4983, 417.9188, 821.7315, 288.6672, 0.0000, 2576.1284, 0.0000, 4487.0722, 0.0000, 20609.0270],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21979.4972],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21880.9628],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21630.0454],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 20968.0007],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21729.9339],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21107.6400],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21561.1745],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21553.0916],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22316.9366],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22084.2862],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21777.3543],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22756.8225],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22843.4697],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22762.1766],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22257.0973],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 23136.5259],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 21813.7852],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22395.3204],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 23717.6858],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22715.4263],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 22498.3254],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 23341.1733],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24162.3941],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24847.1508],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 23515.9755],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24555.8997],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24390.6372],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24073.3309],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24394.6500],
[2076.3314, 903.0334, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 3487.5655, 0.0000, 34904.8150],
[0.0000, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 4608.8037, 0.0000, 34198.4475],
[0.0000, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 4608.8037, 0.0000, 33753.0190],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 34953.8178],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 33230.2498],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 35026.7819],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 36976.2649],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 38673.8147],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 38717.3429],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 36659.0854],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 35877.9607],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36874.4840],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37010.2695],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 38062.3510],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36471.1357],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37534.9927],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37520.2569],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36747.7952],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36387.9409],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 35925.9715],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36950.7028],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 37383.2463],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 37761.2724],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 39548.2653],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41435.1291],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41651.6261],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41131.9920],
[644.7274, 1657.3981, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 0.0000, 0.0000, 41286.4702],
[644.7274, 1657.3981, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 0.0000, 0.0000, 40978.7259],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 40334.5453],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 41387.9172],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42492.6707],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42953.7188],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42005.1092],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42017.9106],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 43750.2824],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 41766.8679],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 42959.1150],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 41337.9320],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 40290.3688]])
# PT signals, buy-before-sell, delivery period 0
self.pt_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 6035.8333, 0.0000, 9761.1111],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9674.8209],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9712.5872],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9910.7240],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9919.3782],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9793.0692],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9513.8217],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9123.5935],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9000.5995],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9053.4865],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9248.7142],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9161.1372],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9197.3369],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9504.6981],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9875.2461],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10241.5400],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10449.2398],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10628.3269],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10500.7893],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 0.0000, 5233.1396, 0.0000, 10449.2776],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10338.2857],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10194.3474],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10471.0008],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10411.2629],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10670.0618],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10652.4799],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10526.1488],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10458.6614],
[101.4983, 417.9188, 821.7315, 288.6672, 0.0000, 2576.1284, 0.0000, 4487.0722, 0.0000, 20609.0270],
[797.1684, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 2703.5808, 0.0000, 21979.4972],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21700.7241],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21446.6630],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 20795.3593],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21557.2924],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 20933.6887],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21392.5581],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21390.2918],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22147.7562],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21910.9053],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21594.2980],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22575.4380],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22655.8312],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22578.4365],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22073.2661],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22955.2367],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 21628.1647],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22203.4237],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 23516.2598],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 22505.8428],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 22199.1042],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23027.9302],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23848.5806],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24540.8871],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23205.6838],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24267.6685],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24115.3796],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23814.3667],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24133.6611],
[2061.6837, 896.6628, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 3285.8830, 0.0000, 34658.5742],
[0.0000, 896.6628, 507.6643, 466.6033, 0.0000, 1523.7106, 1467.7407, 12328.8684, 0.0000, 33950.7917],
[0.0000, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 4380.3797, 0.0000, 33711.4045],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 34922.0959],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 33237.1081],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 35031.8071],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 36976.3376],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 38658.5245],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 38712.2854],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 36655.3125],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 35904.3692],
[644.1423, 902.2617, 514.8253, 0.0000, 15.5990, 0.0000, 1467.7407, 14821.9004, 0.0000, 36873.9080],
[644.1423, 902.2617, 514.8253, 0.0000, 1220.8683, 0.0000, 1467.7407, 10470.8781, 0.0000, 36727.7895],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37719.9840],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36138.1277],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37204.0760],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37173.1201],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36398.2298],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36034.2178],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 35583.6399],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36599.2645],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 37013.3408],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 37367.7449],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 39143.8273],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 41007.3074],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 41225.4657],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 40685.9525],
[644.1423, 1646.4805, 1033.4242, 0.0000, 0.0000, 0.0000, 1467.7407, 6592.6891, 0.0000, 40851.5435],
[644.1423, 1646.4805, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 0.0000, 0.0000, 41082.1210],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 40385.0135],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 41455.1513],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42670.6769],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 43213.7233],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42205.2480],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42273.9386],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 44100.0777],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42059.7208],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 43344.9653],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 41621.0324],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 40528.0648]])
# PT signals, sell-before-buy, delivery period of 2 days for stock and 0 days for cash, so that cash from prior sales can immediately fund further purchases
self.pt_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 321.089, 6035.833, 0.000, 9761.111],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9674.821],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9712.587],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9910.724],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9919.378],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9793.069],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9513.822],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9123.593],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9000.600],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9053.487],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9248.714],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9161.137],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9197.337],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9504.698],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9875.246],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10241.540],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10449.240],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10628.327],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10500.789],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 0.000, 5233.140, 0.000, 10449.278],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10338.286],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10194.347],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10471.001],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10411.263],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10670.062],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10652.480],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10526.149],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10458.661],
[101.498, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 4487.072, 0.000, 20609.027],
[797.168, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 0.000, 0.000, 21979.497],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21584.441],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21309.576],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 20664.323],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21445.597],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 20806.458],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21288.441],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21294.365],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 22058.784],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21805.540],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21456.333],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22459.720],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22611.602],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22470.912],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21932.634],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22425.864],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21460.103],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22376.968],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23604.295],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 22704.826],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 22286.293],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23204.755],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24089.017],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24768.185],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23265.196],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24350.540],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24112.706],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23709.076],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24093.545],
[2060.275, 896.050, 504.579, 288.667, 0.000, 763.410, 1577.904, 2835.944, 0.000, 34634.888],
[578.327, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 732.036, 0.000, 33912.261],
[0.000, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 4415.981, 0.000, 33711.951],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 34951.433],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 33224.596],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 35065.209],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 37018.699],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 38706.035],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 38724.569],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 36647.268],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 35928.930],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36967.229],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37056.598],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 38129.862],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36489.333],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37599.602],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37566.823],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36799.280],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36431.196],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 35940.942],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36973.050],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 37393.292],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 37711.276],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 39515.991],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41404.440],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41573.523],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41011.613],
[644.683, 1606.361, 1074.629, 0.000, 0.000, 0.000, 3896.406, 0.000, 0.000, 41160.181],
[644.683, 1606.361, 1074.629, 0.000, 0.000, 0.000, 3896.406, 0.000, 0.000, 40815.512],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 40145.531],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41217.281],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 42379.061],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 42879.589],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41891.452],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41929.003],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 43718.052],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41685.916],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 42930.410],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 41242.589],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 40168.084]])
# PT signals, buy-before-sell, delivery period of 2 days for stock and 1 day for cash
self.pt_res_bs21 = np.array([
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 321.089, 6035.833, 0.000, 9761.111],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9674.821],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9712.587],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9910.724],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9919.378],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9793.069],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9513.822],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9123.593],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9000.600],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9053.487],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9248.714],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9161.137],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9197.337],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9504.698],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9875.246],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10241.540],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10449.240],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10628.327],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10500.789],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 0.000, 5233.140, 0.000, 10449.278],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10338.286],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10194.347],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10471.001],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10411.263],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10670.062],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10652.480],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10526.149],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10458.661],
[101.498, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 4487.072, 0.000, 20609.027],
[797.168, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 0.000, 0.000, 21979.497],
[797.168, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 2475.037, 0.000, 21584.441],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21266.406],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 20623.683],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21404.957],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 20765.509],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21248.748],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21256.041],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 22018.958],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21764.725],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21413.241],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22417.021],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22567.685],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22427.699],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21889.359],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22381.938],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21416.358],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22332.786],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 0.000, 2386.698, 0.000, 23557.595],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 23336.992],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 22907.742],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24059.201],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24941.902],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25817.514],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24127.939],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25459.688],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25147.370],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25005.842],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 1086.639, 2752.004, 0.000, 25598.700],
[2138.154, 929.921, 503.586, 288.667, 0.000, 761.900, 1086.639, 4818.835, 0.000, 35944.098],
[661.356, 929.921, 503.586, 553.843, 0.000, 1954.237, 1086.639, 8831.252, 0.000, 35237.243],
[0.000, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 9460.955, 0.000, 35154.442],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 36166.632],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 34293.883],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 35976.901],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 37848.552],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 39512.574],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 39538.024],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 37652.984],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 36687.909],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37749.277],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37865.518],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38481.190],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37425.087],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38051.341],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38065.478],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37429.495],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37154.479],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 36692.717],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 37327.055],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 37937.630],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 38298.645],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 39689.369],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 40992.397],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 41092.265],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 40733.622],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 3726.579, 0.000, 0.000, 40708.515],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 3726.579, 0.000, 0.000, 40485.321],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 39768.059],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 40519.595],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 41590.937],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 42354.983],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 41175.149],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 41037.902],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 42706.213],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 40539.205],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 41608.692],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 39992.148],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 39134.828]])
# Simulated PS-signal backtest results
# PS signal, sell first then buy, delivery period: 0 days
self.ps_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 5059.7222, 0.0000, 9761.1111],
[346.9824, 416.6787, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 1201.2775, 0.0000, 9646.1118],
[346.9824, 416.6787, 191.0372, 0.0000, 555.5556, 205.0654, 321.0892, 232.7189, 0.0000, 9685.5858],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9813.2184],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9803.1288],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9608.0198],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9311.5727],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8883.6246],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8751.3900],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8794.1811],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9136.5704],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9209.3588],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9093.8294],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9387.5537],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9585.9589],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 9928.7771],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10060.3806],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10281.0021],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10095.5613],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 0.0000, 4506.3926, 0.0000, 10029.9571],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9875.6133],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9614.9463],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9824.1722],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9732.5743],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9968.3391],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 10056.1579],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9921.4925],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9894.1621],
[115.7186, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 6179.7742, 0.0000, 20067.9370],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21133.5080],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20988.8485],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20596.7429],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 19910.7730],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20776.7070],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20051.7969],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20725.3884],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20828.8795],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21647.1811],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21310.1687],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20852.0993],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21912.3952],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21937.8282],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21962.4576],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21389.4018],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22027.4535],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 20939.9992],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21250.0636],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22282.7812],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21407.0658],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21160.2373],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21826.7682],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22744.9403],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23466.1185],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22017.8821],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23191.4662],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23099.0822],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22684.7671],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22842.1346],
[1073.8232, 416.6787, 735.6442, 269.8496, 1785.2055, 938.6967, 1339.2073, 5001.4246, 0.0000, 33323.8359],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 32820.2901],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 32891.2308],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 34776.5296],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 33909.0325],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 34560.1906],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 36080.4552],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 38618.4454],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 38497.9230],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 37110.0991],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 35455.2467],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35646.1860],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35472.3020],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36636.4694],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35191.7035],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36344.2242],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36221.6005],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35943.5708],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35708.2608],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35589.0286],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36661.0285],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 36310.5909],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 36466.7637],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 37784.4918],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 39587.6766],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 40064.0191],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 39521.6439],
[0.0000, 823.2923, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 17142.1018, 0.0000, 39932.2761],
[0.0000, 823.2923, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 17142.1018, 0.0000, 39565.2475],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 38943.1632],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39504.1184],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40317.8004],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40798.5768],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39962.5711],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40194.4793],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 41260.4003],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39966.3024],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 40847.3160],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 39654.5445],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 38914.8151]])
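# The column layout of these expected-result matrices is an assumption read
# off the data, not stated in this file: seven per-share holding columns,
# then cash, then a (here always zero) column, then total portfolio value.
# A minimal, runnable sanity sketch under that assumption:
assert self.ps_res_sb00.shape[1] == 10          # 7 holdings + cash + spare + total value
assert self.ps_res_sb00[0, -1] == 10000.0       # backtest starts from the initial 10000
assert (self.ps_res_sb00[:, -1] > 0).all()      # portfolio value stays positive throughout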
# PS signal, buy first then sell, delivery period: 0 days
self.ps_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 5059.7222, 0.0000, 9761.1111],
[346.9824, 416.6787, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 1201.2775, 0.0000, 9646.1118],
[346.9824, 416.6787, 191.0372, 0.0000, 555.5556, 205.0654, 321.0892, 232.7189, 0.0000, 9685.5858],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9813.2184],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9803.1288],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9608.0198],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9311.5727],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8883.6246],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8751.3900],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8794.1811],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9136.5704],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9209.3588],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9093.8294],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9387.5537],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9585.9589],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 9928.7771],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10060.3806],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10281.0021],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10095.5613],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 0.0000, 4506.3926, 0.0000, 10029.9571],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9875.6133],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9614.9463],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9824.1722],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9732.5743],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9968.3391],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 10056.1579],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9921.4925],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9894.1621],
[115.7186, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 6179.7742, 0.0000, 20067.9370],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21133.5080],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20988.8485],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20596.7429],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 19910.7730],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20776.7070],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20051.7969],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20725.3884],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20828.8795],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21647.1811],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21310.1687],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20852.0993],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21912.3952],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21937.8282],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21962.4576],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21389.4018],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21625.6913],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 20873.0389],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21450.9447],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 22269.3892],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21969.5329],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21752.6924],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22000.6088],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23072.5655],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23487.5201],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22441.0460],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23201.2700],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23400.9485],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22306.2008],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21989.5913],
[1073.8232, 737.0632, 735.6442, 269.8496, 1708.7766, 938.6967, 0.0000, 5215.4255, 0.0000, 31897.1636],
[0.0000, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 6421.4626, 0.0000, 31509.5059],
[0.0000, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 6421.4626, 0.0000, 31451.7888],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32773.4592],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32287.0318],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32698.1938],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 34031.5183],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 35537.8336],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 36212.6487],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 36007.5294],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 34691.3797],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 33904.8810],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34341.6098],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 35479.9505],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34418.4455],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34726.7182],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34935.0407],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34136.7505],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 33804.1575],
[195.7763, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 14025.8697, 0.0000, 33653.8970],
[195.7763, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 14025.8697, 0.0000, 34689.8757],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 34635.7841],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 35253.2755],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 36388.1051],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 37987.4204],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 38762.2103],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 38574.0544],
[195.7763, 1124.9219, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 15879.4935, 0.0000, 39101.9156],
[195.7763, 1124.9219, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 15879.4935, 0.0000, 39132.5587],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 38873.2941],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39336.6594],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39565.9568],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39583.4317],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39206.8350],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39092.6551],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39666.1834],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 38798.0749],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 39143.5561],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 38617.8779],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 38156.1701]])
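# Until the first order whose execution order matters, the sell-first (sb00)
# and buy-first (bs00) runs should coincide. The row count 45 below is read
# off the data above (the two trajectories first diverge at row 45), not
# taken from any backtest specification; a hedged spot check, not an
# assertion made by the original suite:
assert np.allclose(self.ps_res_sb00[:45], self.ps_res_bs00[:45])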
# PS signal, sell first then buy, delivery period: 2 days (stock), 1 day (cash)
self.ps_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 205.065, 321.089, 5059.722, 0.000, 9761.111],
[346.982, 416.679, 0.000, 0.000, 555.556, 205.065, 321.089, 1201.278, 0.000, 9646.112],
[346.982, 416.679, 191.037, 0.000, 555.556, 205.065, 321.089, 232.719, 0.000, 9685.586],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9813.218],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9803.129],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9608.020],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9311.573],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8883.625],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8751.390],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8794.181],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9136.570],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9209.359],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9093.829],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9387.554],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9585.959],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 9928.777],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10060.381],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10281.002],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10095.561],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 0.000, 4506.393, 0.000, 10029.957],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9875.613],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9614.946],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9824.172],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9732.574],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9968.339],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 10056.158],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9921.492],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9894.162],
[115.719, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 6179.774, 0.000, 20067.937],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21133.508],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20988.848],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20596.743],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 19910.773],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20776.707],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20051.797],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20725.388],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20828.880],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21647.181],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21310.169],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20852.099],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21912.395],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21937.828],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21962.458],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21389.402],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22027.453],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 20939.999],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21250.064],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22282.781],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21407.066],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21160.237],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21826.768],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22744.940],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23466.118],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22017.882],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23191.466],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23099.082],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22684.767],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22842.135],
[1073.823, 416.679, 735.644, 269.850, 1785.205, 938.697, 1339.207, 5001.425, 0.000, 33323.836],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 32820.290],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 32891.231],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 34776.530],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 33909.032],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 34560.191],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 36080.455],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 38618.445],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 38497.923],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 37110.099],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 35455.247],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35646.186],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35472.302],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36636.469],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35191.704],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36344.224],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36221.601],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35943.571],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35708.261],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35589.029],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36661.029],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 36310.591],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 36466.764],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 37784.492],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 39587.677],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 40064.019],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 39521.644],
[0.000, 823.292, 735.644, 0.000, 0.000, 0.000, 2730.576, 17142.102, 0.000, 39932.276],
[0.000, 823.292, 735.644, 0.000, 0.000, 0.000, 2730.576, 17142.102, 0.000, 39565.248],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 38943.163],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39504.118],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40317.800],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40798.577],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39962.571],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40194.479],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 41260.400],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39966.302],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 40847.316],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 39654.544],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 38914.815]])
# PS signal, buy first then sell, delivery period: 2 days (stock), 1 day (cash)
self.ps_res_bs21 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 208.333, 326.206, 5020.833, 0.000, 9761.111],
[351.119, 421.646, 0.000, 0.000, 555.556, 208.333, 326.206, 1116.389, 0.000, 9645.961],
[351.119, 421.646, 190.256, 0.000, 555.556, 208.333, 326.206, 151.793, 0.000, 9686.841],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9813.932],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9803.000],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9605.334],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9304.001],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8870.741],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8738.282],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8780.664],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9126.199],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9199.746],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9083.518],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9380.932],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9581.266],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 9927.154],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10059.283],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10281.669],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10093.263],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 0.000, 4453.525, 0.000, 10026.289],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9870.523],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9606.437],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9818.691],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9726.556],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9964.547],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 10053.449],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9917.440],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9889.495],
[117.098, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 6189.948, 0.000, 20064.523],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21124.484],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20827.077],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20396.124],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 19856.445],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20714.156],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 19971.485],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20733.948],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20938.903],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21660.772],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21265.298],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20684.378],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21754.770],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21775.215],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21801.488],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21235.427],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21466.714],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 20717.431],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21294.450],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 22100.247],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21802.552],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21593.608],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21840.028],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22907.725],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23325.945],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22291.942],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23053.050],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23260.084],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22176.244],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21859.297],
[1055.763, 740.051, 729.561, 272.237, 1706.748, 932.896, 0.000, 5221.105, 0.000, 31769.617],
[0.000, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 6313.462, 0.000, 31389.961],
[0.000, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 6313.462, 0.000, 31327.498],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32647.140],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32170.095],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32577.742],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 33905.444],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 35414.492],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 36082.120],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 35872.293],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 34558.132],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 33778.138],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34213.578],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 35345.791],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34288.014],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34604.406],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34806.850],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34012.232],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 33681.345],
[192.484, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 13958.345, 0.000, 33540.463],
[192.484, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 13958.345, 0.000, 34574.280],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 34516.781],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 35134.412],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 36266.530],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 37864.376],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 38642.633],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 38454.227],
[192.484, 1127.221, 729.561, 0.000, 0.000, 0.000, 1339.869, 15871.934, 0.000, 38982.227],
[192.484, 1127.221, 729.561, 0.000, 0.000, 0.000, 1339.869, 15871.934, 0.000, 39016.154],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38759.803],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39217.182],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39439.690],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39454.081],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39083.341],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38968.694],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39532.030],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38675.507],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 39013.741],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 38497.668],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 38042.410]])
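# With a non-zero delivery period, day 0 still precedes any settlement, so
# the opening row should match the zero-delivery run. The matrices are
# rounded to different precisions (4 vs 3 decimals), hence the loose
# tolerance; a sketch of the intended relationship, not an original test:
assert np.allclose(self.ps_res_sb00[0], self.ps_res_sb20[0], atol=1e-2)
assert np.allclose(self.ps_res_bs00[0], self.ps_res_bs21[0], atol=1e-2)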
# Simulated VS-signal backtest results
# VS signal, sell first then buy, delivery period: 0 days
self.vs_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750.0000, 0.0000, 9925.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 4954.0000, 0.0000, 9785.0000],
[400.0000, 400.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 878.0000, 0.0000, 9666.0000],
[400.0000, 400.0000, 173.1755, 0.0000, 500.0000, 300.0000, 300.0000, 0.0000, 0.0000, 9731.0000],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9830.9270],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9785.8540],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9614.3412],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9303.1953],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8834.4398],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8712.7554],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8717.9507],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9079.1479],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9166.0276],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9023.6607],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9291.6864],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9411.6371],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9706.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9822.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9986.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9805.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 0.0000, 4993.7357, 0.0000, 9704.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9567.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9209.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9407.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9329.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9545.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9652.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9414.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9367.7357],
[0.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 9319.7357, 0.0000, 19556.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20094.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19849.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19802.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19487.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19749.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19392.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19671.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19756.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20111.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19867.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19775.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20314.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20310.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20253.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20044.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20495.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 19798.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20103.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20864.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20425.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20137.8405],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20711.3567],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21470.3891],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21902.9538],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20962.9538],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21833.5184],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21941.8169],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21278.5184],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21224.4700],
[1100.0000, 710.4842, 400.0000, 300.0000, 600.0000, 500.0000, 600.0000, 9160.0000, 0.0000, 31225.2119],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488.0000, 0.0000, 30894.5748],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488.0000, 0.0000, 30764.3811],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 31815.5828],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 31615.4215],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 32486.1394],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 33591.2847],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34056.5428],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34756.4863],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34445.5428],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34433.9541],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 33870.4703],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 34014.3010],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 34680.5671],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 33890.9945],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 34004.6640],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 34127.7768],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 33421.1638],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 33120.9057],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830.0000, 0.0000, 32613.3171],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830.0000, 0.0000, 33168.1558],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 33504.6236],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 33652.1318],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 34680.4867],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 35557.5191],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 35669.7128],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 35211.4466],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530.0000, 0.0000, 35550.6079],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530.0000, 0.0000, 35711.6563],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35682.6079],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35880.8336],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36249.8740],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36071.6159],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35846.1562],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35773.3578],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36274.9465],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35739.3094],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 36135.0917],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 35286.5835],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 35081.3658]])
# VS signal, buy first then sell, delivery period: 0 days
self.vs_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750, 0.0000, 10000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750, 0.0000, 9925],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 4954, 0.0000, 9785],
[400.0000, 400.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 878, 0.0000, 9666],
[400.0000, 400.0000, 173.1755, 0.0000, 500.0000, 300.0000, 300.0000, 0, 0.0000, 9731],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9830.927022],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9785.854043],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9614.341223],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9303.195266],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8834.439842],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8712.755424],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8717.95069],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9079.147929],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9166.027613],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9023.66075],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9291.686391],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9411.637081],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9706.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9822.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9986.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9805.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 0.0000, 4993.7357, 0.0000, 9704.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9567.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9209.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9407.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9329.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9545.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9652.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9414.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9367.7357],
[0.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 9319.7357, 0.0000, 19556.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20094.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19849.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19802.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19487.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19749.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19392.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19671.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19756.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20111.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19867.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19775.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20314.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20310.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20253.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20044.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20495.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 19798.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20103.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20864.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20425.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20137.84054],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20711.35674],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21470.38914],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21902.95375],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20962.95375],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21833.51837],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21941.81688],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21278.51837],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21224.46995],
[1100.0000, 710.4842, 400.0000, 300.0000, 600.0000, 500.0000, 600.0000, 9160, 0.0000, 31225.21185],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488, 0.0000, 30894.57479],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488, 0.0000, 30764.38113],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 31815.5828],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 31615.42154],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 32486.13941],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 33591.28466],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34056.54276],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34756.48633],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34445.54276],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34433.95412],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33870.47032],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34014.30104],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34680.56715],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33890.99452],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34004.66398],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34127.77683],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33421.1638],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33120.9057],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830, 0.0000, 32613.31706],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830, 0.0000, 33168.15579],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 33504.62357],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 33652.13176],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 34680.4867],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35557.51909],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35669.71276],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35211.44665],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530, 0.0000, 35550.60792],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530, 0.0000, 35711.65633],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35682.60792],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35880.83362],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36249.87403],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36071.61593],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35846.15615],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35773.35783],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36274.94647],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35739.30941],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 36135.09172],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 35286.58353],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 35081.36584]])
        # VS signals, sell first then buy; delivery delay: 2 days (stock), 1 day (cash)
self.vs_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 9925.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 300.000, 300.000, 4954.000, 0.000, 9785.000],
[400.000, 400.000, 0.000, 0.000, 500.000, 300.000, 300.000, 878.000, 0.000, 9666.000],
[400.000, 400.000, 173.176, 0.000, 500.000, 300.000, 300.000, 0.000, 0.000, 9731.000],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9830.927],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9785.854],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9614.341],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9303.195],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8834.440],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8712.755],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8717.951],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9079.148],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9166.028],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9023.661],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9291.686],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9411.637],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9706.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9822.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9986.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9805.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 0.000, 4993.736, 0.000, 9704.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9567.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9209.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9407.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9329.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9545.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9652.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9414.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9367.736],
[0.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 9319.736, 0.000, 19556.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20094.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19849.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19802.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19487.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19749.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19392.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19671.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19756.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20111.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19867.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19775.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20314.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20310.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20253.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20044.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20495.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 19798.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20103.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20864.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20425.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20137.841],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20711.357],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21470.389],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21902.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20962.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21833.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21941.817],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21278.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21224.470],
[1100.000, 710.484, 400.000, 300.000, 600.000, 500.000, 600.000, 9160.000, 0.000, 31225.212],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30894.575],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30764.381],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31815.583],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31615.422],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 32486.139],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 33591.285],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34056.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34756.486],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34445.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34433.954],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33870.470],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34014.301],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34680.567],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33890.995],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34004.664],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34127.777],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33421.164],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33120.906],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 32613.317],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 33168.156],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33504.624],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33652.132],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 34680.487],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35557.519],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35669.713],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35211.447],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35550.608],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35711.656],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35682.608],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35880.834],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36249.874],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36071.616],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35846.156],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35773.358],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36274.946],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35739.309],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 36135.092],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35286.584],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35081.366]])
        # VS signals, buy first then sell; delivery delay: 2 days (stock), 1 day (cash)
self.vs_res_bs21 = np.array(
[[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 9925.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 300.000, 300.000, 4954.000, 0.000, 9785.000],
[400.000, 400.000, 0.000, 0.000, 500.000, 300.000, 300.000, 878.000, 0.000, 9666.000],
[400.000, 400.000, 173.176, 0.000, 500.000, 300.000, 300.000, 0.000, 0.000, 9731.000],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9830.927],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9785.854],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9614.341],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9303.195],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8834.440],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8712.755],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8717.951],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9079.148],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9166.028],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9023.661],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9291.686],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9411.637],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9706.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9822.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9986.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9805.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 0.000, 4993.736, 0.000, 9704.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9567.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9209.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9407.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9329.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9545.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9652.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9414.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9367.736],
[0.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 9319.736, 0.000, 19556.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20094.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19849.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19802.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19487.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19749.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19392.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19671.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19756.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20111.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19867.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19775.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20314.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20310.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20253.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20044.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20495.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 19798.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20103.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20864.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20425.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20137.841],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20711.357],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21470.389],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21902.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20962.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21833.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21941.817],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21278.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21224.470],
[1100.000, 710.484, 400.000, 300.000, 600.000, 500.000, 600.000, 9160.000, 0.000, 31225.212],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30894.575],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30764.381],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31815.583],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31615.422],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 32486.139],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 33591.285],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34056.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34756.486],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34445.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34433.954],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33870.470],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34014.301],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34680.567],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33890.995],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34004.664],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34127.777],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33421.164],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33120.906],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 32613.317],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 33168.156],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33504.624],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33652.132],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 34680.487],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35557.519],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35669.713],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35211.447],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35550.608],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35711.656],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35682.608],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35880.834],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36249.874],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36071.616],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35846.156],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35773.358],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36274.946],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35739.309],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 36135.092],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35286.584],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35081.366]])
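        # Naming note, inferred from the tests below rather than stated
        # anywhere explicitly: the 'sb'/'bs' part of a fixture name means
        # sell-first / buy-first, and the trailing digits appear to encode
        # the stock and cash delivery delays in days (e.g. 'bs21' = buy
        # first, 2-day stock delay, 1-day cash delay).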
        # Results for multi-type signals, sell first then buy, using the cash from sales for purchases; delivery delay: 2 days (stock), 0 days (cash)
self.multi_res = np.array(
[[0.0000, 357.2545, 0.0000, 6506.9627, 0.0000, 9965.1867],
[0.0000, 357.2545, 0.0000, 6506.9627, 0.0000, 10033.0650],
[0.0000, 178.6273, 0.0000, 8273.5864, 0.0000, 10034.8513],
[0.0000, 178.6273, 0.0000, 8273.5864, 0.0000, 10036.6376],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10019.3404],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10027.7062],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10030.1477],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10005.1399],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10002.5054],
[150.3516, 489.4532, 0.0000, 3765.8877, 0.0000, 9967.3860],
[75.1758, 391.5625, 0.0000, 5490.1377, 0.0000, 10044.4059],
[75.1758, 391.5625, 0.0000, 5490.1377, 0.0000, 10078.1430],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10138.2709],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10050.4768],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10300.0711],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10392.6970],
[75.1758, 391.5625, 169.2705, 4644.3773, 0.0000, 10400.5282],
[75.1758, 391.5625, 169.2705, 4644.3773, 0.0000, 10408.9220],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10376.5914],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10346.8794],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10364.7474],
[75.1758, 381.1856, 645.5014, 2459.1665, 0.0000, 10302.4570],
[18.7939, 381.1856, 645.5014, 3024.6764, 0.0000, 10747.4929],
[18.7939, 381.1856, 96.8252, 6492.3097, 0.0000, 11150.9107],
[18.7939, 381.1856, 96.8252, 6492.3097, 0.0000, 11125.2946],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11191.9956],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11145.7486],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11090.0768],
[132.5972, 114.3557, 864.3802, 4223.9548, 0.0000, 11113.8733],
[132.5972, 114.3557, 864.3802, 4223.9548, 0.0000, 11456.3281],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21983.7333],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 22120.6165],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21654.5327],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21429.6550],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21912.5643],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 22516.3100],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23169.0777],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23390.8080],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23743.3742],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 23210.7311],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 24290.4375],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 24335.3279],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 18317.3553],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 18023.4660],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24390.0527],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24389.6421],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24483.5953],
[0.0000, 559.9112, 0.0000, 18321.5674, 0.0000, 24486.1895],
[0.0000, 0.0000, 0.0000, 24805.3389, 0.0000, 24805.3389],
[0.0000, 0.0000, 0.0000, 24805.3389, 0.0000, 24805.3389]])
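        # Column layout of the 10-column expected-result fixtures above, as
        # inferred from the tests below (which slice holdings with [0:7] and
        # read cash from column 7): columns 0-6 = share holdings of the seven
        # stocks, 7 = cash, 8 = fee, 9 = total portfolio value. multi_res
        # appears to follow the same order with three stocks (columns 0-2 =
        # holdings, 3 = cash, 4 = fee, 5 = total value).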
def test_loop_step_pt_sb00(self):
""" test loop step PT-signal, sell first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.pt_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[2][7],
own_amounts=self.pt_res_sb00[2][0:7],
available_cash=self.pt_res_sb00[2][7],
available_amounts=self.pt_res_sb00[2][0:7],
op=self.pt_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[2][7] + c_g + c_s
amounts = self.pt_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[30][7],
own_amounts=self.pt_res_sb00[30][0:7],
available_cash=self.pt_res_sb00[30][7],
available_amounts=self.pt_res_sb00[30][0:7],
op=self.pt_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[30][7] + c_g + c_s
amounts = self.pt_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[59][7] + 10000,
own_amounts=self.pt_res_sb00[59][0:7],
available_cash=self.pt_res_sb00[59][7] + 10000,
available_amounts=self.pt_res_sb00[59][0:7],
op=self.pt_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.pt_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[95][7],
own_amounts=self.pt_res_sb00[95][0:7],
available_cash=self.pt_res_sb00[95][7],
available_amounts=self.pt_res_sb00[95][0:7],
op=self.pt_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
        cash = self.pt_res_sb00[95][7] + c_g + c_s
        amounts = self.pt_res_sb00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[97][0:7]))
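    # The six test_loop_step_* methods repeat the same step-then-verify
    # pattern. A possible refactoring is sketched below as an unused helper
    # (an illustration only; the explicit calls are kept in each test so
    # that every scenario stays self-documenting):
    def _step_and_check(self, signal_type, signals, res, day,
                        maximize_cash_usage, extra_cash=0):
        """Apply the signal of row `day` to the expected state of row
        `day - 1` (plus optional extra cash) and verify the outcome
        against row `day` of the expected-result fixture `res`."""
        c_g, c_s, a_p, a_s, fee = qt.core._loop_step(
                signal_type=signal_type,
                own_cash=res[day - 1][7] + extra_cash,
                own_amounts=res[day - 1][0:7],
                available_cash=res[day - 1][7] + extra_cash,
                available_amounts=res[day - 1][0:7],
                op=signals[day],
                prices=self.prices[day],
                rate=self.rate,
                pt_buy_threshold=0.1,
                pt_sell_threshold=0.1,
                maximize_cash_usage=maximize_cash_usage,
                allow_sell_short=False,
                moq_buy=0,
                moq_sell=0,
                print_log=True)
        cash = res[day - 1][7] + extra_cash + c_g + c_s
        amounts = res[day - 1][0:7] + a_p + a_s
        self.assertAlmostEqual(cash, res[day][7], 2)
        self.assertTrue(np.allclose(amounts, res[day][0:7]))
    # e.g. the "day 4" block above would become:
    #     self._step_and_check(0, self.pt_signals, self.pt_res_sb00,
    #                          day=3, maximize_cash_usage=True)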
def test_loop_step_pt_bs00(self):
""" test loop step PT-signal, buy first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.pt_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[2][7],
own_amounts=self.pt_res_bs00[2][0:7],
available_cash=self.pt_res_bs00[2][7],
available_amounts=self.pt_res_bs00[2][0:7],
op=self.pt_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[2][7] + c_g + c_s
amounts = self.pt_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[30][7],
own_amounts=self.pt_res_bs00[30][0:7],
available_cash=self.pt_res_bs00[30][7],
available_amounts=self.pt_res_bs00[30][0:7],
op=self.pt_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[30][7] + c_g + c_s
amounts = self.pt_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[59][7] + 10000,
own_amounts=self.pt_res_bs00[59][0:7],
available_cash=self.pt_res_bs00[59][7] + 10000,
available_amounts=self.pt_res_bs00[59][0:7],
op=self.pt_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.pt_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[95][7],
own_amounts=self.pt_res_bs00[95][0:7],
available_cash=self.pt_res_bs00[95][7],
available_amounts=self.pt_res_bs00[95][0:7],
op=self.pt_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
        cash = self.pt_res_bs00[95][7] + c_g + c_s
        amounts = self.pt_res_bs00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[97][0:7]))
def test_loop_step_ps_sb00(self):
""" test loop step PS-signal, sell first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.ps_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[2][7],
own_amounts=self.ps_res_sb00[2][0:7],
available_cash=self.ps_res_sb00[2][7],
available_amounts=self.ps_res_sb00[2][0:7],
op=self.ps_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[2][7] + c_g + c_s
amounts = self.ps_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[30][7],
own_amounts=self.ps_res_sb00[30][0:7],
available_cash=self.ps_res_sb00[30][7],
available_amounts=self.ps_res_sb00[30][0:7],
op=self.ps_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[30][7] + c_g + c_s
amounts = self.ps_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[59][7] + 10000,
own_amounts=self.ps_res_sb00[59][0:7],
available_cash=self.ps_res_sb00[59][7] + 10000,
available_amounts=self.ps_res_sb00[59][0:7],
op=self.ps_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.ps_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[95][7],
own_amounts=self.ps_res_sb00[95][0:7],
available_cash=self.ps_res_sb00[95][7],
available_amounts=self.ps_res_sb00[95][0:7],
op=self.ps_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
        cash = self.ps_res_sb00[95][7] + c_g + c_s
        amounts = self.ps_res_sb00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[97][0:7]))
def test_loop_step_ps_bs00(self):
""" test loop step PS-signal, buy first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.ps_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[2][7],
                                                     own_amounts=self.ps_res_bs00[2][0:7],
available_cash=self.ps_res_bs00[2][7],
available_amounts=self.ps_res_bs00[2][0:7],
op=self.ps_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[2][7] + c_g + c_s
amounts = self.ps_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[30][7],
                                                     own_amounts=self.ps_res_bs00[30][0:7],
available_cash=self.ps_res_bs00[30][7],
available_amounts=self.ps_res_bs00[30][0:7],
op=self.ps_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[30][7] + c_g + c_s
amounts = self.ps_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[59][7] + 10000,
own_amounts=self.ps_res_bs00[59][0:7],
available_cash=self.ps_res_bs00[59][7] + 10000,
available_amounts=self.ps_res_bs00[59][0:7],
op=self.ps_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.ps_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[95][7],
own_amounts=self.ps_res_bs00[95][0:7],
available_cash=self.ps_res_bs00[95][7],
available_amounts=self.ps_res_bs00[95][0:7],
op=self.ps_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
        cash = self.ps_res_bs00[95][7] + c_g + c_s
        amounts = self.ps_res_bs00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[97][0:7]))
def test_loop_step_vs_sb00(self):
"""test loop step of Volume Signal type of signals"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.vs_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7750)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 500., 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[2][7],
own_amounts=self.vs_res_sb00[2][0:7],
available_cash=self.vs_res_sb00[2][7],
available_amounts=self.vs_res_sb00[2][0:7],
op=self.vs_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[2][7] + c_g + c_s
amounts = self.vs_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[30][7],
own_amounts=self.vs_res_sb00[30][0:7],
available_cash=self.vs_res_sb00[30][7],
available_amounts=self.vs_res_sb00[30][0:7],
op=self.vs_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[30][7] + c_g + c_s
amounts = self.vs_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[59][7] + 10000,
own_amounts=self.vs_res_sb00[59][0:7],
available_cash=self.vs_res_sb00[59][7] + 10000,
available_amounts=self.vs_res_sb00[59][0:7],
op=self.vs_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.vs_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[95][7],
own_amounts=self.vs_res_sb00[95][0:7],
available_cash=self.vs_res_sb00[95][7],
available_amounts=self.vs_res_sb00[95][0:7],
op=self.vs_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
        cash = self.vs_res_sb00[95][7] + c_g + c_s
        amounts = self.vs_res_sb00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[97][0:7]))
def test_loop_step_vs_bs00(self):
"""test loop step of Volume Signal type of signals"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.vs_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7750)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 500., 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[2][7],
own_amounts=self.vs_res_bs00[2][0:7],
available_cash=self.vs_res_bs00[2][7],
available_amounts=self.vs_res_bs00[2][0:7],
op=self.vs_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[2][7] + c_g + c_s
amounts = self.vs_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[30][7],
own_amounts=self.vs_res_bs00[30][0:7],
available_cash=self.vs_res_bs00[30][7],
available_amounts=self.vs_res_bs00[30][0:7],
op=self.vs_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[30][7] + c_g + c_s
amounts = self.vs_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[59][7] + 10000,
own_amounts=self.vs_res_bs00[59][0:7],
available_cash=self.vs_res_bs00[59][7] + 10000,
available_amounts=self.vs_res_bs00[59][0:7],
op=self.vs_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.vs_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[95][7],
own_amounts=self.vs_res_bs00[95][0:7],
available_cash=self.vs_res_bs00[95][7],
available_amounts=self.vs_res_bs00[95][0:7],
op=self.vs_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[95][7] + c_g + c_s
amounts = self.vs_res_bs00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[97][0:7]))
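# A note on the accounting identity exercised in the blocks above (a sketch,
# not part of the tested API): judging by the variable names and the
# arithmetic in the assertions, _loop_step returns the cash gained from
# selling (c_g), the cash spent on buying (c_s), and the amounts purchased
# (a_p) and sold (a_s), so holdings roll forward as
#     cash[t+1]    = cash[t]    + c_g + c_s
#     amounts[t+1] = amounts[t] + a_p + a_s
# For instance, the first block above starts with cash = 10000 and asserts
# 7750 afterwards, i.e. c_g + c_s == -2250 for that step.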
def test_loop_pt(self):
""" Test looping of PT proportion target signals, with
stock delivery delay = 0 days
cash delivery delay = 0 day
buy-sell sequence = sell first
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 0 days \n'
'cash delivery delay = 0 day \n'
'buy-sell sequence = sell first')
res = apply_loop(op_type=0,
op_list=self.pt_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
# print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res, self.pt_res_bs00, 2))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=0,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
# print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_pt_with_delay(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 1 day
use_sell_cash = False
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize_cash = False (buy and sell at the same time)')
res = apply_loop(
op_type=0,
op_list=self.pt_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
cash_delivery_period=1,
stock_delivery_period=2,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.pt_res_bs21[i]))
print()
self.assertTrue(np.allclose(res, self.pt_res_bs21, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_pt_with_delay_use_cash(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 0 day
use sell cash = True (sell stock first to use cash when possible
(not possible when cash delivery period != 0))
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 0 day \n'
'maximize cash usage = True \n'
'applicable because cash delivery period == 0')
res = apply_loop(
op_type=0,
op_list=self.pt_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
inflation_rate=0,
max_cash_usage=True,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.pt_res_sb20[i]))
print()
self.assertTrue(np.allclose(res, self.pt_res_sb20, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=1,
stock_delivery_period=2,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_ps(self):
""" Test looping of PS Proportion Signal type of signals
"""
res = apply_loop(op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res, self.ps_res_bs00, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_ps_with_delay(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 1 day
use_sell_cash = False
"""
print('Test looping of PS proportion signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize_cash = False (buy and sell at the same time)')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
cash_delivery_period=1,
stock_delivery_period=2,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.ps_res_bs21[i]))
print()
self.assertTrue(np.allclose(res, self.ps_res_bs21, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_ps_with_delay_use_cash(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 0 day
use sell cash = True (sell stock first to use cash when possible
(not possible when cash delivery period != 0))
"""
print('Test looping of PS proportion signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 0 day \n'
'maximize cash usage = True \n'
'applicable because cash delivery period == 0')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
inflation_rate=0,
max_cash_usage=True,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.ps_res_sb20[i]))
print()
self.assertTrue(np.allclose(res, self.ps_res_sb20, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=1,
stock_delivery_period=2,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_vs(self):
""" Test looping of VS Volume Signal type of signals
"""
res = apply_loop(op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res, self.vs_res_bs00, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_vs_with_delay(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 1 day
use_sell_cash = False
"""
print('Test looping of VS volume signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize_cash = False (buy and sell at the same time)')
res = apply_loop(
op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
cash_delivery_period=1,
stock_delivery_period=2,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.vs_res_bs21[i]))
print()
self.assertTrue(np.allclose(res, self.vs_res_bs21, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_vs_with_delay_use_cash(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 0 day
use sell cash = True (sell stock first to use cash when possible
(not possible when cash delivery period != 0))
"""
print('Test looping of VS volume signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 0 day \n'
'maximize cash usage = True \n'
'applicable because cash delivery period == 0')
res = apply_loop(
op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
inflation_rate=0,
max_cash_usage=True,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.vs_res_sb20[i]))
print()
self.assertTrue(np.allclose(res, self.vs_res_sb20, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=1,
stock_delivery_period=2,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_multiple_signal(self):
""" Test looping of PS Proportion Signal type of signals
"""
res = apply_loop(op_type=1,
op_list=self.multi_signal_hp,
history_list=self.multi_history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
max_cash_usage=True,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.multi_res[i]))
print()
self.assertTrue(np.allclose(res, self.multi_res, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=1,
op_list=self.multi_signal_hp,
history_list=self.multi_history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=0,
stock_delivery_period=2,
max_cash_usage=False,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
class TestStrategy(unittest.TestCase):
""" test all properties and methods of strategy base class"""
def setUp(self) -> None:
pass
class TestLSStrategy(RollingTiming):
"""用于test测试的简单多空蒙板生成策略。基于RollingTiming滚动择时方法生成
该策略有两个参数,N与Price
N用于计算OHLC价格平均值的N日简单移动平均,判断,当移动平均值大于等于Price时,状态为看多,否则为看空
"""
def __init__(self):
super().__init__(stg_name='test_LS',
stg_text='test long/short strategy',
par_count=2,
par_types='discr, conti',
par_bounds_or_enums=([1, 5], [2, 10]),
data_types='close, open, high, low',
data_freq='d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
n, price = params
h = hist_data.T
avg = (h[0] + h[1] + h[2] + h[3]) / 4
ma = sma(avg, n)
if ma[-1] < price:
return 0
else:
return 1
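# A minimal worked example of the TestLSStrategy logic above (illustrative
# only, with made-up numbers): with N = 2 and Price = 10, daily OHLC averages
# ending in [10.1, 10.3] give a 2-day SMA whose last value is
# (10.1 + 10.3) / 2 = 10.2 >= 10, so _realize returns 1 (bullish); had the
# SMA ended below 10 it would return 0.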
class TestSelStrategy(SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='high, low, close',
data_freq='d',
sample_freq='10d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
avg = np.nanmean(hist_data, axis=(1, 2))
dif = (hist_data[:, :, 2] - np.roll(hist_data[:, :, 2], 1, 1))
dif_no_nan = np.array([arr[~np.isnan(arr)][-1] for arr in dif])
difper = dif_no_nan / avg
large2 = difper.argsort()[1:]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
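# How the top-2 selection above works, on made-up numbers (illustrative
# only): for three shares with difper = [0.01, -0.02, 0.03],
# difper.argsort() is [1, 0, 2] (ascending), so argsort()[1:] keeps the
# indices of the two largest values, [0, 2], and chosen becomes
# [0.5, 0.0, 0.5] -- equal weights on the two selected shares.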
class TestSelStrategyDiffTime(SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
# TODO: This strategy is not working, find out why and improve
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='close, low, open',
data_freq='d',
sample_freq='w',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
avg = hist_data.mean(axis=1).squeeze()
difper = (hist_data[:, :, 0] - np.roll(hist_data[:, :, 0], 1))[:, -1] / avg
large2 = difper.argsort()[0:2]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSigStrategy(SimpleTiming):
"""用于Test测试的简单信号生成策略,基于SimpleTiming策略生成
策略有三个参数,第一个参数为ratio,另外两个参数为price1以及price2
ratio是k线形状比例的阈值,定义为abs((C-O)/(H-L))。当这个比值小于ratio阈值时,判断该K线为十字交叉(其实还有丁字等多种情形,但这里做了
简化处理。
信号生成的规则如下:
1,当某个K线出现十字交叉,且昨收与今收之差大于price1时,买入信号
2,当某个K线出现十字交叉,且昨收与今收之差小于price2时,卖出信号
"""
def __init__(self):
super().__init__(stg_name='test_SIG',
stg_text='test signal creation strategy',
par_count=3,
par_types='conti, conti, conti',
par_bounds_or_enums=([2, 10], [0, 3], [0, 3]),
data_types='close, open, high, low',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
r, price1, price2 = params
h = hist_data.T
ratio = np.abs((h[0] - h[1]) / (h[3] - h[2]))
diff = h[0] - np.roll(h[0], 1)
sig = np.where((ratio < r) & (diff > price1),
1,
np.where((ratio < r) & (diff < price2), -1, 0))
return sig
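# A quick numeric sketch of the signal rule above (illustrative only, with
# toy values r = 0.2, price1 = 0.5, price2 = -0.5): a bar with C = 10.05,
# O = 10.00, H = 10.50, L = 10.00 has abs((C - O) / (H - L)) = 0.1 < r, so
# it counts as a doji cross; if today's close is 0.6 above yesterday's
# (diff > price1) the bar maps to +1 (buy), if 0.6 below (diff < price2)
# it maps to -1 (sell), and otherwise to 0.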
class MyStg(qt.RollingTiming):
"""自定义双均线择时策略策略"""
def __init__(self):
"""这个均线择时策略只有三个参数:
- SMA 慢速均线,所选择的股票
- FMA 快速均线
- M 边界值
策略的其他说明
"""
"""
Key strategy parameters that must be initialized:
"""
super().__init__(
pars=(20, 100, 0.01),
par_count=3,
par_types=['discr', 'discr', 'conti'],
par_bounds_or_enums=[(10, 250), (10, 250), (0.0, 0.5)],
stg_name='CUSTOM ROLLING TIMING STRATEGY',
stg_text='Customized Rolling Timing Strategy for Testing',
data_types='close',
window_length=100,
)
print(f'=====================\n====================\n'
f'custom strategy initialized, \npars: {self.pars}\npar_count:{self.par_count}\npar_types:'
f'{self.par_types}\n'
f'{self.info()}')
# the concrete implementation of the strategy is written in its _realize() method
# this method always takes two arguments: hist_price, the historical data of the given portfolio, and params, the concrete strategy parameters
def _realize(self, hist_price, params):
"""策略的具体实现代码:
s:短均线计算日期;l:长均线计算日期;m:均线边界宽度;hesitate:均线跨越类型"""
f, s, m = params
# temporary workaround: slice the input data inside the strategy implementation; eventually the data should be sliced beforehand, outside the implementation layer, so that the input already matches the data_types parameter
h = hist_price.T
# compute the current values of the slow and fast moving averages
s_ma = qt.sma(h[0], s)[-1]
f_ma = qt.sma(h[0], f)[-1]
# compute the stop boundary around the slow MA; when the fast MA is inside this boundary, close positions and emit no buy/sell signal
s_ma_u = s_ma * (1 + m)
s_ma_l = s_ma * (1 - m)
# generate long/short/empty flags at different levels according to the wait-and-see mode
if f_ma > s_ma_u:  # hold a long position when the fast MA is above the slow MA's stop range
return 1
elif s_ma_l < f_ma < s_ma_u:  # close positions when the fast MA is inside the stop boundary
return 0
else:  # f_ma < s_ma_l: hold a short position when the fast MA is below the slow MA's stop range
return -1
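# A minimal usage sketch for the custom strategy above (not executed here;
# the parameter values are made up): plug it into an Operator and set its
# parameters before generating signals, e.g.
#
#     stg = MyStg()
#     op = qt.Operator(strategies=[stg])
#     op.set_parameter(op.strategy_ids[0], pars=(23, 166, 0.02))
#
# Operator(strategies=...) and set_parameter(...) are exercised the same way
# in the tests below.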
class TestOperator(unittest.TestCase):
"""全面测试Operator对象的所有功能。包括:
1, Strategy 参数的设置
2, 历史数据的获取与分配提取
3, 策略优化参数的批量设置和优化空间的获取
4, 策略输出值的正确性验证
5, 策略结果的混合结果确认
"""
def setUp(self):
"""prepare data for Operator test"""
print('start testing Operator object\n')
# build up test data: a 4-type, 3-share, 50-day matrix of prices that contains nan values
# on some days for some shares
# for share1:
data_rows = 50
share1_close = [10.04, 10, 10, 9.99, 9.97, 9.99, 10.03, 10.03, 10.06, 10.06, 10.11,
10.09, 10.07, 10.06, 10.09, 10.03, 10.03, 10.06, 10.08, 10, 9.99,
10.03, 10.03, 10.06, 10.03, 9.97, 9.94, 9.83, 9.77, 9.84, 9.91, 9.93,
9.96, 9.91, 9.91, 9.88, 9.91, 9.64, 9.56, 9.57, 9.55, 9.57, 9.61, 9.61,
9.55, 9.57, 9.63, 9.64, 9.65, 9.62]
share1_open = [10.02, 10, 9.98, 9.97, 9.99, 10.01, 10.04, 10.06, 10.06, 10.11,
10.11, 10.07, 10.06, 10.09, 10.03, 10.02, 10.06, 10.08, 9.99, 10,
10.03, 10.02, 10.06, 10.03, 9.97, 9.94, 9.83, 9.78, 9.77, 9.91, 9.92,
9.97, 9.91, 9.9, 9.88, 9.91, 9.63, 9.64, 9.57, 9.55, 9.58, 9.61, 9.62,
9.55, 9.57, 9.61, 9.63, 9.64, 9.61, 9.56]
share1_high = [10.07, 10, 10, 10, 10.03, 10.03, 10.04, 10.09, 10.1, 10.14, 10.11, 10.1,
10.09, 10.09, 10.1, 10.05, 10.07, 10.09, 10.1, 10, 10.04, 10.04, 10.06,
10.09, 10.05, 9.97, 9.96, 9.86, 9.77, 9.92, 9.94, 9.97, 9.97, 9.92, 9.92,
9.92, 9.93, 9.64, 9.58, 9.6, 9.58, 9.62, 9.62, 9.64, 9.59, 9.62, 9.63,
9.7, 9.66, 9.64]
share1_low = [9.99, 10, 9.97, 9.97, 9.97, 9.98, 9.99, 10.03, 10.03, 10.04, 10.11, 10.07,
10.05, 10.03, 10.03, 10.01, 9.99, 10.03, 9.95, 10, 9.95, 10, 10.01, 9.99,
9.96, 9.89, 9.83, 9.77, 9.77, 9.8, 9.9, 9.91, 9.89, 9.89, 9.87, 9.85, 9.6,
9.64, 9.53, 9.55, 9.54, 9.55, 9.58, 9.54, 9.53, 9.53, 9.63, 9.64, 9.59, 9.56]
# for share2:
share2_close = [9.68, 9.87, 9.86, 9.87, 9.79, 9.82, 9.8, 9.66, 9.62, 9.58, 9.69, 9.78, 9.75,
9.96, 9.9, 10.04, 10.06, 10.08, 10.24, 10.24, 10.24, 9.86, 10.13, 10.12,
10.1, 10.25, 10.24, 10.22, 10.75, 10.64, 10.56, 10.6, 10.42, 10.25, 10.24,
10.49, 10.57, 10.63, 10.48, 10.37, 10.96, 11.02, np.nan, np.nan, 10.88, 10.87, 11.01,
11.01, 11.58, 11.8]
share2_open = [9.88, 9.88, 9.89, 9.75, 9.74, 9.8, 9.62, 9.65, 9.58, 9.67, 9.81, 9.8, 10,
9.95, 10.1, 10.06, 10.14, 9.9, 10.2, 10.29, 9.86, 9.48, 10.01, 10.24, 10.26,
10.24, 10.12, 10.65, 10.64, 10.56, 10.42, 10.43, 10.29, 10.3, 10.44, 10.6,
10.67, 10.46, 10.39, 10.9, 11.01, 11.01, np.nan, np.nan, 10.82, 11.02, 10.96,
11.55, 11.74, 11.8]
share2_high = [9.91, 10.04, 9.93, 10.04, 9.84, 9.88, 9.99, 9.7, 9.67, 9.71, 9.85, 9.9, 10,
10.2, 10.11, 10.18, 10.21, 10.26, 10.38, 10.47, 10.42, 10.07, 10.24, 10.27,
10.38, 10.43, 10.39, 10.65, 10.84, 10.65, 10.73, 10.63, 10.51, 10.35, 10.46,
10.63, 10.74, 10.76, 10.54, 11.02, 11.12, 11.17, np.nan, np.nan, 10.92, 11.15,
11.11, 11.55, 11.95, 11.93]
share2_low = [9.63, 9.84, 9.81, 9.74, 9.67, 9.72, 9.57, 9.54, 9.51, 9.47, 9.68, 9.63, 9.75,
9.65, 9.9, 9.93, 10.03, 9.8, 10.14, 10.09, 9.78, 9.21, 9.11, 9.68, 10.05,
10.12, 9.89, 9.89, 10.59, 10.43, 10.34, 10.32, 10.21, 10.2, 10.18, 10.36,
10.51, 10.41, 10.32, 10.37, 10.87, 10.95, np.nan, np.nan, 10.65, 10.71, 10.75,
10.91, 11.31, 11.58]
# for share3:
share3_close = [6.64, 7.26, 7.03, 6.87, np.nan, 6.64, 6.85, 6.7, 6.39, 6.22, 5.92, 5.91, 6.11,
5.91, 6.23, 6.28, 6.28, 6.27, np.nan, 5.56, 5.67, 5.16, 5.69, 6.32, 6.14, 6.25,
5.79, 5.26, 5.05, 5.45, 6.06, 6.21, 5.69, 5.46, 6.02, 6.69, 7.43, 7.72, 8.16,
7.83, 8.7, 8.71, 8.88, 8.54, 8.87, 8.87, 8.18, 7.8, 7.97, 8.25]
share3_open = [7.26, 7, 6.88, 6.91, np.nan, 6.81, 6.63, 6.45, 6.16, 6.24, 5.96, 5.97, 5.96,
6.2, 6.35, 6.11, 6.37, 5.58, np.nan, 5.65, 5.19, 5.42, 6.3, 6.15, 6.05, 5.89,
5.22, 5.2, 5.07, 6.04, 6.12, 5.85, 5.67, 6.02, 6.04, 7.07, 7.64, 7.99, 7.59,
8.73, 8.72, 8.97, 8.58, 8.71, 8.77, 8.4, 7.95, 7.76, 8.25, 7.51]
share3_high = [7.41, 7.31, 7.14, 7, np.nan, 6.82, 6.96, 6.85, 6.5, 6.34, 6.04, 6.02, 6.12, 6.38,
6.43, 6.46, 6.43, 6.27, np.nan, 6.01, 5.67, 5.67, 6.35, 6.32, 6.43, 6.36, 5.79,
5.47, 5.65, 6.04, 6.14, 6.23, 5.83, 6.25, 6.27, 7.12, 7.82, 8.14, 8.27, 8.92,
8.76, 9.15, 8.9, 9.01, 9.16, 9, 8.27, 7.99, 8.33, 8.25]
share3_low = [6.53, 6.87, 6.83, 6.7, np.nan, 6.63, 6.57, 6.41, 6.15, 6.07, 5.89, 5.82, 5.73, 5.81,
6.1, 6.06, 6.16, 5.57, np.nan, 5.51, 5.19, 5.12, 5.69, 6.01, 5.97, 5.86, 5.18, 5.19,
4.96, 5.45, 5.84, 5.85, 5.28, 5.42, 6.02, 6.69, 7.28, 7.64, 7.25, 7.83, 8.41, 8.66,
8.53, 8.54, 8.73, 8.27, 7.95, 7.67, 7.8, 7.51]
# for sel_finance test
shares_eps = np.array([[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, 0.2, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[0.1, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.3, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.15, np.nan, np.nan],
[np.nan, 0.1, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.2, np.nan, np.nan],
[np.nan, 0.5, np.nan],
[0.4, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[0.9, np.nan, np.nan],
[np.nan, np.nan, 0.1]])
self.date_indices = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14', '2016-07-15', '2016-07-18',
'2016-07-19', '2016-07-20', '2016-07-21', '2016-07-22',
'2016-07-25', '2016-07-26', '2016-07-27', '2016-07-28',
'2016-07-29', '2016-08-01', '2016-08-02', '2016-08-03',
'2016-08-04', '2016-08-05', '2016-08-08', '2016-08-09',
'2016-08-10', '2016-08-11', '2016-08-12', '2016-08-15',
'2016-08-16', '2016-08-17', '2016-08-18', '2016-08-19',
'2016-08-22', '2016-08-23', '2016-08-24', '2016-08-25',
'2016-08-26', '2016-08-29', '2016-08-30', '2016-08-31',
'2016-09-01', '2016-09-02', '2016-09-05', '2016-09-06',
'2016-09-07', '2016-09-08']
self.shares = ['000010', '000030', '000039']
self.types = ['close', 'open', 'high', 'low']
self.sel_finance_types = ['eps']
self.test_data_3D = np.zeros((3, data_rows, 4))
self.test_data_2D = np.zeros((data_rows, 3))
self.test_data_2D2 = np.zeros((data_rows, 4))
self.test_data_sel_finance = np.empty((3, data_rows, 1))
# Build up 3D data
self.test_data_3D[0, :, 0] = share1_close
self.test_data_3D[0, :, 1] = share1_open
self.test_data_3D[0, :, 2] = share1_high
self.test_data_3D[0, :, 3] = share1_low
self.test_data_3D[1, :, 0] = share2_close
self.test_data_3D[1, :, 1] = share2_open
self.test_data_3D[1, :, 2] = share2_high
self.test_data_3D[1, :, 3] = share2_low
self.test_data_3D[2, :, 0] = share3_close
self.test_data_3D[2, :, 1] = share3_open
self.test_data_3D[2, :, 2] = share3_high
self.test_data_3D[2, :, 3] = share3_low
self.test_data_sel_finance[:, :, 0] = shares_eps.T
self.hp1 = qt.HistoryPanel(values=self.test_data_3D,
levels=self.shares,
columns=self.types,
rows=self.date_indices)
print(f'in test Operator, history panel is created for timing test')
self.hp1.info()
self.hp2 = qt.HistoryPanel(values=self.test_data_sel_finance,
levels=self.shares,
columns=self.sel_finance_types,
rows=self.date_indices)
print(f'in test_Operator, history panel is created for selection finance test:')
self.hp2.info()
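# HistoryPanel axis convention used above (a note on the data layout): values
# are laid out as (levels, rows, columns) = (shares, dates, data types), so
# e.g. self.test_data_3D[1, :, 2] is the 'high' series of share '000030'.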
self.op = qt.Operator(strategies='dma', signal_type='PS')
self.op2 = qt.Operator(strategies='dma, macd, trix')
def test_init(self):
""" test initialization of Operator class"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.signal_type, 'pt')
self.assertIsInstance(op.strategies, list)
self.assertEqual(len(op.strategies), 0)
op = qt.Operator('dma')
self.assertIsInstance(op, qt.Operator)
self.assertIsInstance(op.strategies, list)
self.assertIsInstance(op.strategies[0], TimingDMA)
op = qt.Operator('dma, macd')
self.assertIsInstance(op, qt.Operator)
op = qt.Operator(['dma', 'macd'])
self.assertIsInstance(op, qt.Operator)
def test_repr(self):
""" test basic representation of Opeartor class"""
op = qt.Operator()
self.assertEqual(op.__repr__(), 'Operator()')
op = qt.Operator('macd, dma, trix, random, avg_low')
self.assertEqual(op.__repr__(), 'Operator(macd, dma, trix, random, avg_low)')
self.assertEqual(op['dma'].__repr__(), 'Q-TIMING(DMA)')
self.assertEqual(op['macd'].__repr__(), 'R-TIMING(MACD)')
self.assertEqual(op['trix'].__repr__(), 'R-TIMING(TRIX)')
self.assertEqual(op['random'].__repr__(), 'SELECT(RANDOM)')
self.assertEqual(op['avg_low'].__repr__(), 'FACTOR(AVG LOW)')
def test_info(self):
"""Test information output of Operator"""
print(f'test printing information of operator object')
self.op.info()
def test_get_strategy_by_id(self):
""" test get_strategy_by_id()"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
self.assertEqual(op.strategy_ids, ['macd', 'dma', 'trix'])
self.assertIs(op.get_strategy_by_id('macd'), op.strategies[0])
self.assertIs(op.get_strategy_by_id(1), op.strategies[1])
self.assertIs(op.get_strategy_by_id('trix'), op.strategies[2])
def test_get_items(self):
""" test method __getitem__(), it should be the same as geting strategies by id"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
self.assertEqual(op.strategy_ids, ['macd', 'dma', 'trix'])
self.assertIs(op['macd'], op.strategies[0])
self.assertIs(op['trix'], op.strategies[2])
self.assertIs(op[1], op.strategies[1])
self.assertIs(op[3], op.strategies[2])
def test_get_strategies_by_price_type(self):
""" test get_strategies_by_price_type"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategies_by_price_type('close')
stg_open = op.get_strategies_by_price_type('open')
stg_high = op.get_strategies_by_price_type('high')
self.assertIsInstance(stg_close, list)
self.assertIsInstance(stg_open, list)
self.assertIsInstance(stg_high, list)
self.assertEqual(stg_close, [op.strategies[1]])
self.assertEqual(stg_open, [op.strategies[0], op.strategies[2]])
self.assertEqual(stg_high, [])
stg_wrong = op.get_strategies_by_price_type(123)
self.assertIsInstance(stg_wrong, list)
self.assertEqual(stg_wrong, [])
def test_get_strategy_count_by_price_type(self):
""" test get_strategy_count_by_price_type"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategy_count_by_price_type('close')
stg_open = op.get_strategy_count_by_price_type('open')
stg_high = op.get_strategy_count_by_price_type('high')
self.assertIsInstance(stg_close, int)
self.assertIsInstance(stg_open, int)
self.assertIsInstance(stg_high, int)
self.assertEqual(stg_close, 1)
self.assertEqual(stg_open, 2)
self.assertEqual(stg_high, 0)
stg_wrong = op.get_strategy_count_by_price_type(123)
self.assertIsInstance(stg_wrong, int)
self.assertEqual(stg_wrong, 0)
def test_get_strategy_names_by_price_type(self):
""" test get_strategy_names_by_price_type"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategy_names_by_price_type('close')
stg_open = op.get_strategy_names_by_price_type('open')
stg_high = op.get_strategy_names_by_price_type('high')
self.assertIsInstance(stg_close, list)
self.assertIsInstance(stg_open, list)
self.assertIsInstance(stg_high, list)
self.assertEqual(stg_close, ['DMA'])
self.assertEqual(stg_open, ['MACD', 'TRIX'])
self.assertEqual(stg_high, [])
stg_wrong = op.get_strategy_names_by_price_type(123)
self.assertIsInstance(stg_wrong, list)
self.assertEqual(stg_wrong, [])
def test_get_strategy_id_by_price_type(self):
""" test get_strategy_IDs_by_price_type"""
print('-----Test get strategy IDs by price type------\n')
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategy_id_by_price_type('close')
stg_open = op.get_strategy_id_by_price_type('open')
stg_high = op.get_strategy_id_by_price_type('high')
self.assertIsInstance(stg_close, list)
self.assertIsInstance(stg_open, list)
self.assertIsInstance(stg_high, list)
self.assertEqual(stg_close, ['dma'])
self.assertEqual(stg_open, ['macd', 'trix'])
self.assertEqual(stg_high, [])
op.add_strategies('dma, macd')
op.set_parameter('dma_1', price_type='open')
op.set_parameter('macd', price_type='open')
op.set_parameter('macd_1', price_type='high')
op.set_parameter('trix', price_type='close')
print(f'Operator strategy id:\n'
f'{op.strategies} on memory pos:\n'
f'{[id(stg) for stg in op.strategies]}')
stg_close = op.get_strategy_id_by_price_type('close')
stg_open = op.get_strategy_id_by_price_type('open')
stg_high = op.get_strategy_id_by_price_type('high')
stg_all = op.get_strategy_id_by_price_type()
print(f'All IDs of strategies:\n'
f'{stg_all}\n'
f'All price types of strategies:\n'
f'{[stg.price_type for stg in op.strategies]}')
self.assertEqual(stg_close, ['dma', 'trix'])
self.assertEqual(stg_open, ['macd', 'dma_1'])
self.assertEqual(stg_high, ['macd_1'])
stg_wrong = op.get_strategy_id_by_price_type(123)
self.assertIsInstance(stg_wrong, list)
self.assertEqual(stg_wrong, [])
def test_property_strategies(self):
""" test property strategies"""
print(f'created a new simple Operator with only one strategy: DMA')
op = qt.Operator('dma')
strategies = op.strategies
self.assertIsInstance(strategies, list)
op.info()
print(f'created the second simple Operator with three strategies')
self.assertIsInstance(strategies[0], TimingDMA)
op = qt.Operator('dma, macd, cdl')
strategies = op.strategies
op.info()
self.assertIsInstance(strategies, list)
self.assertIsInstance(strategies[0], TimingDMA)
self.assertIsInstance(strategies[1], TimingMACD)
self.assertIsInstance(strategies[2], TimingCDL)
def test_property_strategy_count(self):
""" test Property strategy_count, and the method get_strategy_count_by_price_type()"""
self.assertEqual(self.op.strategy_count, 1)
self.assertEqual(self.op2.strategy_count, 3)
self.assertEqual(self.op.get_strategy_count_by_price_type(), 1)
self.assertEqual(self.op2.get_strategy_count_by_price_type(), 3)
self.assertEqual(self.op.get_strategy_count_by_price_type('close'), 1)
self.assertEqual(self.op.get_strategy_count_by_price_type('high'), 0)
self.assertEqual(self.op2.get_strategy_count_by_price_type('close'), 3)
self.assertEqual(self.op2.get_strategy_count_by_price_type('open'), 0)
def test_property_strategy_names(self):
""" test property strategy_ids"""
op = qt.Operator('dma')
self.assertIsInstance(op.strategy_ids, list)
names = op.strategy_ids[0]
print(f'names are {names}')
self.assertEqual(names, 'dma')
op = qt.Operator('dma, macd, trix, cdl')
self.assertIsInstance(op.strategy_ids, list)
self.assertEqual(op.strategy_ids[0], 'dma')
self.assertEqual(op.strategy_ids[1], 'macd')
self.assertEqual(op.strategy_ids[2], 'trix')
self.assertEqual(op.strategy_ids[3], 'cdl')
op = qt.Operator('dma, macd, trix, dma, dma')
self.assertIsInstance(op.strategy_ids, list)
self.assertEqual(op.strategy_ids[0], 'dma')
self.assertEqual(op.strategy_ids[1], 'macd')
self.assertEqual(op.strategy_ids[2], 'trix')
self.assertEqual(op.strategy_ids[3], 'dma_1')
self.assertEqual(op.strategy_ids[4], 'dma_2')
def test_property_strategy_blenders(self):
""" test property strategy blenders including property setter,
and test the method get_blender()"""
print(f'------- Test property strategy blenders ---------')
op = qt.Operator()
self.assertIsInstance(op.strategy_blenders, dict)
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'pt')
# test adding blender to empty operator
op.strategy_blenders = '1 + 2'
op.signal_type = 'proportion signal'
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'ps')
op.add_strategy('dma')
op.strategy_blenders = '1+2'
self.assertEqual(op.strategy_blenders, {'close': ['+', '2', '1']})
op.clear_strategies()
self.assertEqual(op.strategy_blenders, {})
op.add_strategies('dma, trix, macd, dma')
op.set_parameter('dma', price_type='open')
op.set_parameter('trix', price_type='high')
op.set_blender('open', '1+2')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '2', '1'])
self.assertEqual(blender_close, None)
self.assertEqual(blender_high, None)
op.set_blender('open', '1+2+3')
op.set_blender('abc', '1+2+3')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
blender_abc = op.get_blender('abc')
self.assertEqual(op.strategy_blenders, {'open': ['+', '3', '+', '2', '1']})
self.assertEqual(blender_open, ['+', '3', '+', '2', '1'])
self.assertEqual(blender_close, None)
self.assertEqual(blender_high, None)
self.assertEqual(blender_abc, None)
op.set_blender('open', 123)
blender_open = op.get_blender('open')
self.assertEqual(blender_open, [])
op.set_blender(None, '1+1')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(op.bt_price_types, ['close', 'high', 'open'])
self.assertEqual(op.get_blender(), {'close': ['+', '1', '1'],
'open': ['+', '1', '1'],
'high': ['+', '1', '1']})
self.assertEqual(blender_open, ['+', '1', '1'])
self.assertEqual(blender_close, ['+', '1', '1'])
self.assertEqual(blender_high, ['+', '1', '1'])
op.set_blender(None, ['1+1', '3+4'])
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '3'])
self.assertEqual(blender_close, ['+', '1', '1'])
self.assertEqual(blender_high, ['+', '4', '3'])
self.assertEqual(op.view_blender('open'), '3+4')
self.assertEqual(op.view_blender('close'), '1+1')
self.assertEqual(op.view_blender('high'), '3+4')
op.strategy_blenders = (['1+2', '2*3', '1+4'])
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '1'])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
self.assertEqual(op.view_blender('open'), '1+4')
self.assertEqual(op.view_blender('close'), '1+2')
self.assertEqual(op.view_blender('high'), '2*3')
# test error inputs:
# wrong type of price_type
self.assertRaises(TypeError, op.set_blender, 1, '1+3')
# price_type not found, no change is made
op.set_blender('volume', '1+3')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '1'])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
# price_type not valid, no change is made
op.set_blender('closee', '1+2')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '1'])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
# wrong type of blender, set to empty list
op.set_blender('open', 55)
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, [])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
# wrong type of blender, set to empty list
op.set_blender('close', ['1+2'])
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, [])
self.assertEqual(blender_close, [])
self.assertEqual(blender_high, ['*', '3', '2'])
# can't parse blender, set to empty list
op.set_blender('high', 'a+bc')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, [])
self.assertEqual(blender_close, [])
self.assertEqual(blender_high, [])
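# A note on the blender token lists asserted above: a blender string such as
# '1+2*3' is stored operator-first with operands reversed, e.g.
# ['+', '*', '3', '2', '1']. A minimal evaluator sketch (illustrative only,
# not part of the tested API), assuming only '+' and '*' tokens and digit
# operands that index into a list of strategy outputs:
#
#     def eval_blender(tokens, values):
#         stack = []
#         for tk in reversed(tokens):  # operands are pushed before operators
#             if tk in '+*':
#                 a, b = stack.pop(), stack.pop()
#                 stack.append(a + b if tk == '+' else a * b)
#             else:
#                 stack.append(values[int(tk)])
#         return stack[0]
#
# With tokens = ['+', '*', '3', '2', '1'] this evaluates
# values[1] + values[2] * values[3], matching the '1+2*3' expression it was
# parsed from.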
def test_property_signal_type(self):
""" test property signal_type"""
op = qt.Operator()
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.signal_type, 'pt')
op = qt.Operator(signal_type='ps')
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.signal_type, 'ps')
op = qt.Operator(signal_type='PS')
self.assertEqual(op.signal_type, 'ps')
op = qt.Operator(signal_type='proportion signal')
self.assertEqual(op.signal_type, 'ps')
print(f'"pt" will be the default type if wrong value is given')
op = qt.Operator(signal_type='wrong value')
self.assertEqual(op.signal_type, 'pt')
print(f'test signal_type.setter')
op.signal_type = 'ps'
self.assertEqual(op.signal_type, 'ps')
print(f'test error raising')
self.assertRaises(TypeError, setattr, op, 'signal_type', 123)
self.assertRaises(ValueError, setattr, op, 'signal_type', 'wrong value')
def test_property_op_data_types(self):
""" test property op_data_types"""
op = qt.Operator()
self.assertIsInstance(op.op_data_types, list)
self.assertEqual(op.op_data_types, [])
op = qt.Operator('macd, dma, trix')
dt = op.op_data_types
self.assertEqual(dt[0], 'close')
op = qt.Operator('macd, cdl')
dt = op.op_data_types
self.assertEqual(dt[0], 'close')
self.assertEqual(dt[1], 'high')
self.assertEqual(dt[2], 'low')
self.assertEqual(dt[3], 'open')
self.assertEqual(dt, ['close', 'high', 'low', 'open'])
op.add_strategy('dma')
dt = op.op_data_types
self.assertEqual(dt[0], 'close')
self.assertEqual(dt[1], 'high')
self.assertEqual(dt[2], 'low')
self.assertEqual(dt[3], 'open')
self.assertEqual(dt, ['close', 'high', 'low', 'open'])
def test_property_op_data_type_count(self):
""" test property op_data_type_count"""
op = qt.Operator()
self.assertIsInstance(op.op_data_type_count, int)
self.assertEqual(op.op_data_type_count, 0)
op = qt.Operator('macd, dma, trix')
dtn = op.op_data_type_count
self.assertEqual(dtn, 1)
op = qt.Operator('macd, cdl')
dtn = op.op_data_type_count
self.assertEqual(dtn, 4)
op.add_strategy('dma')
dtn = op.op_data_type_count
self.assertEqual(dtn, 4)
def test_property_op_data_freq(self):
""" test property op_data_freq"""
op = qt.Operator()
self.assertIsInstance(op.op_data_freq, str)
self.assertEqual(len(op.op_data_freq), 0)
self.assertEqual(op.op_data_freq, '')
op = qt.Operator('macd, dma, trix')
dtf = op.op_data_freq
self.assertIsInstance(dtf, str)
self.assertEqual(dtf[0], 'd')
op.set_parameter('macd', data_freq='m')
dtf = op.op_data_freq
self.assertIsInstance(dtf, list)
self.assertEqual(len(dtf), 2)
self.assertEqual(dtf[0], 'd')
self.assertEqual(dtf[1], 'm')
def test_property_bt_price_types(self):
""" test property bt_price_types"""
print('------test property bt_price_types-------')
op = qt.Operator()
self.assertIsInstance(op.bt_price_types, list)
self.assertEqual(len(op.bt_price_types), 0)
self.assertEqual(op.bt_price_types, [])
op = qt.Operator('macd, dma, trix')
btp = op.bt_price_types
self.assertIsInstance(btp, list)
self.assertEqual(btp[0], 'close')
op.set_parameter('macd', price_type='open')
btp = op.bt_price_types
btpc = op.bt_price_type_count
print(f'price_types are \n{btp}')
self.assertIsInstance(btp, list)
self.assertEqual(len(btp), 2)
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'open')
self.assertEqual(btpc, 2)
op.add_strategies(['dma', 'macd'])
op.set_parameter('dma_1', price_type='high')
btp = op.bt_price_types
btpc = op.bt_price_type_count
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'high')
self.assertEqual(btp[2], 'open')
self.assertEqual(btpc, 3)
op.remove_strategy('dma_1')
btp = op.bt_price_types
btpc = op.bt_price_type_count
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'open')
self.assertEqual(btpc, 2)
op.remove_strategy('macd_1')
btp = op.bt_price_types
btpc = op.bt_price_type_count
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'open')
self.assertEqual(btpc, 2)
def test_property_op_data_type_list(self):
""" test property op_data_type_list"""
op = qt.Operator()
self.assertIsInstance(op.op_data_type_list, list)
self.assertEqual(len(op.op_data_type_list), 0)
self.assertEqual(op.op_data_type_list, [])
op = qt.Operator('macd, dma, trix, cdl')
ohd = op.op_data_type_list
print(f'ohd is {ohd}')
self.assertIsInstance(ohd, list)
self.assertEqual(ohd[0], ['close'])
op.set_parameter('macd', data_types='open, close')
ohd = op.op_data_type_list
print(f'ohd is {ohd}')
self.assertIsInstance(ohd, list)
self.assertEqual(len(ohd), 4)
self.assertEqual(ohd[0], ['open', 'close'])
self.assertEqual(ohd[1], ['close'])
self.assertEqual(ohd[2], ['close'])
self.assertEqual(ohd[3], ['open', 'high', 'low', 'close'])
def test_property_op_history_data(self):
""" Test this important function to get operation history data that shall be used in
signal generation
these data are stored in list of nd-arrays, each ndarray represents the data
that is needed for each and every strategy
"""
print(f'------- Test getting operation history data ---------')
op = qt.Operator()
self.assertIsInstance(op.strategy_blenders, dict)
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.op_history_data, {})
self.assertEqual(op.signal_type, 'pt')
def test_property_opt_space_par(self):
""" test property opt_space_par"""
print(f'-----test property opt_space_par--------:\n')
op = qt.Operator()
self.assertIsInstance(op.opt_space_par, tuple)
self.assertIsInstance(op.opt_space_par[0], list)
self.assertIsInstance(op.opt_space_par[1], list)
self.assertEqual(len(op.opt_space_par), 2)
self.assertEqual(op.opt_space_par, ([], []))
op = qt.Operator('macd, dma, trix, cdl')
osp = op.opt_space_par
print(f'before setting opt_tags opt_space_par is empty:\n'
f'osp is {osp}\n')
self.assertIsInstance(osp, tuple)
self.assertEqual(osp[0], [])
self.assertEqual(osp[1], [])
op.set_parameter('macd', opt_tag=1)
op.set_parameter('dma', opt_tag=1)
osp = op.opt_space_par
print(f'after setting opt_tags opt_space_par is not empty:\n'
f'osp is {osp}\n')
self.assertIsInstance(osp, tuple)
self.assertEqual(len(osp), 2)
self.assertIsInstance(osp[0], list)
self.assertIsInstance(osp[1], list)
self.assertEqual(len(osp[0]), 6)
self.assertEqual(len(osp[1]), 6)
self.assertEqual(osp[0], [(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
self.assertEqual(osp[1], ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
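# A worked reading of the structure asserted above: with opt_tag=1 set on
# macd and dma (three discrete parameters each), opt_space_par stacks their
# bounds and type strings into a 2-tuple, i.e.
#     ([(10, 250)] * 6, ['discr'] * 6)
# which is exactly what the two assertions check.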
def test_property_opt_tags(self):
""" test property opt_tags"""
print(f'-----test property opt_tags--------:\n')
op = qt.Operator()
self.assertIsInstance(op.opt_tags, list)
self.assertEqual(len(op.opt_tags), 0)
self.assertEqual(op.opt_tags, [])
op = qt.Operator('macd, dma, trix, cdl')
otp = op.opt_tags
print(f'before setting opt_tags, all opt_tags are zero:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, list)
self.assertEqual(otp, [0, 0, 0, 0])
op.set_parameter('macd', opt_tag=1)
op.set_parameter('dma', opt_tag=1)
otp = op.opt_tags
print(f'after setting opt_tags, the tags of macd and dma are set to 1:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, list)
self.assertEqual(len(otp), 4)
self.assertEqual(otp, [1, 1, 0, 0])
def test_property_max_window_length(self):
""" test property max_window_length"""
print(f'-----test property max window length--------:\n')
op = qt.Operator()
self.assertIsInstance(op.max_window_length, int)
self.assertEqual(op.max_window_length, 0)
op = qt.Operator('macd, dma, trix, cdl')
mwl = op.max_window_length
print(f'before setting window_length the value is 270:\n'
f'mwl is {mwl}\n')
self.assertIsInstance(mwl, int)
self.assertEqual(mwl, 270)
op.set_parameter('macd', window_length=300)
op.set_parameter('dma', window_length=350)
mwl = op.max_window_length
print(f'after setting window_length the value is new set value:\n'
f'mwl is {mwl}\n')
self.assertIsInstance(mwl, int)
self.assertEqual(mwl, 350)
def test_property_bt_price_type_count(self):
""" test property bt_price_type_count"""
print(f'-----test property bt_price_type_count--------:\n')
op = qt.Operator()
self.assertIsInstance(op.bt_price_type_count, int)
self.assertEqual(op.bt_price_type_count, 0)
op = qt.Operator('macd, dma, trix, cdl')
otp = op.bt_price_type_count
print(f'before setting price_type the price type count is 1:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, int)
self.assertEqual(otp, 1)
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='open')
otp = op.bt_price_type_count
print(f'after setting price_type the price type count is 2:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, int)
self.assertEqual(otp, 2)
def test_property_set(self):
""" test all property setters:
setting following properties:
- strategy_blenders
- signal_type
other properties can not be set"""
print(f'------- Test setting properties ---------')
op = qt.Operator()
self.assertIsInstance(op.strategy_blenders, dict)
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'pt')
op.strategy_blenders = '1 + 2'
op.signal_type = 'proportion signal'
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'ps')
op = qt.Operator('macd, dma, trix, cdl')
# TODO: modify set_parameter() so that the following usage works:
# a_to_sell.set_parameter('dma, cdl', price_type='open')
op.set_parameter('dma', price_type='open')
op.set_parameter('cdl', price_type='open')
sb = op.strategy_blenders
st = op.signal_type
self.assertIsInstance(sb, dict)
print(f'before setting: strategy_blenders={sb}')
self.assertEqual(sb, {})
op.strategy_blenders = '1+2 * 3'
sb = op.strategy_blenders
print(f'after setting strategy_blender={sb}')
self.assertEqual(sb, {'close': ['+', '*', '3', '2', '1'],
'open': ['+', '*', '3', '2', '1']})
op.strategy_blenders = ['1+2', '3-4']
sb = op.strategy_blenders
print(f'after setting strategy_blender={sb}')
self.assertEqual(sb, {'close': ['+', '2', '1'],
'open': ['-', '4', '3']})
def test_operator_ready(self):
"""test the method ready of Operator"""
op = qt.Operator()
print(f'operator is ready? "{op.ready}"')
def test_operator_add_strategy(self):
"""test adding strategies to Operator"""
op = qt.Operator('dma, all, urgent')
self.assertIsInstance(op, qt.Operator)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[1], qt.SelectingAll)
self.assertIsInstance(op.strategies[2], qt.RiconUrgent)
self.assertIsInstance(op[0], qt.TimingDMA)
self.assertIsInstance(op[1], qt.SelectingAll)
self.assertIsInstance(op[2], qt.RiconUrgent)
self.assertIsInstance(op['dma'], qt.TimingDMA)
self.assertIsInstance(op['all'], qt.SelectingAll)
self.assertIsInstance(op['urgent'], qt.RiconUrgent)
self.assertEqual(op.strategy_count, 3)
print(f'test adding strategies into existing op')
print('test adding strategy by string')
op.add_strategy('macd')
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[3], qt.TimingMACD)
self.assertEqual(op.strategy_count, 4)
op.add_strategy('random')
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[4], qt.SelectingRandom)
self.assertEqual(op.strategy_count, 5)
test_ls = TestLSStrategy()
op.add_strategy(test_ls)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[5], TestLSStrategy)
self.assertEqual(op.strategy_count, 6)
print(f'Test different instance of objects are added to operator')
op.add_strategy('dma')
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[6], qt.TimingDMA)
self.assertIsNot(op.strategies[0], op.strategies[6])
def test_operator_add_strategies(self):
""" etst adding multiple strategies to Operator"""
op = qt.Operator('dma, all, urgent')
self.assertEqual(op.strategy_count, 3)
print('test adding multiple strategies -- adding strategy by list of strings')
op.add_strategies(['dma', 'macd'])
self.assertEqual(op.strategy_count, 5)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[3], qt.TimingDMA)
self.assertIsInstance(op.strategies[4], qt.TimingMACD)
print('test adding multiple strategies -- adding strategy by comma separated strings')
op.add_strategies('dma, macd')
self.assertEqual(op.strategy_count, 7)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[5], qt.TimingDMA)
self.assertIsInstance(op.strategies[6], qt.TimingMACD)
print('test adding multiple strategies -- adding strategy by list of strategies')
op.add_strategies([qt.TimingDMA(), qt.TimingMACD()])
self.assertEqual(op.strategy_count, 9)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[7], qt.TimingDMA)
self.assertIsInstance(op.strategies[8], qt.TimingMACD)
print('test adding multiple strategies -- adding strategy by list of strategy and str')
op.add_strategies(['DMA', qt.TimingMACD()])
self.assertEqual(op.strategy_count, 11)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[9], qt.TimingDMA)
self.assertIsInstance(op.strategies[10], qt.TimingMACD)
self.assertIsNot(op.strategies[0], op.strategies[9])
self.assertIs(type(op.strategies[0]), type(op.strategies[9]))
print('test adding invalid data')
self.assertRaises(AssertionError, op.add_strategies, 123)
self.assertRaises(AssertionError, op.add_strategies, None)
def test_operator_remove_strategy(self):
""" test method remove strategy"""
op = qt.Operator('dma, all, urgent')
op.add_strategies(['dma', 'macd'])
op.add_strategies(['DMA', TestLSStrategy()])
self.assertEqual(op.strategy_count, 7)
print('test removing strategies from Operator')
op.remove_strategy('dma')
self.assertEqual(op.strategy_count, 6)
self.assertEqual(op.strategy_ids, ['all', 'urgent', 'dma_1', 'macd', 'dma_2', 'custom'])
self.assertEqual(op.strategies[0], op['all'])
self.assertEqual(op.strategies[1], op['urgent'])
self.assertEqual(op.strategies[2], op['dma_1'])
self.assertEqual(op.strategies[3], op['macd'])
self.assertEqual(op.strategies[4], op['dma_2'])
self.assertEqual(op.strategies[5], op['custom'])
op.remove_strategy('dma_1')
self.assertEqual(op.strategy_count, 5)
self.assertEqual(op.strategy_ids, ['all', 'urgent', 'macd', 'dma_2', 'custom'])
self.assertEqual(op.strategies[0], op['all'])
self.assertEqual(op.strategies[1], op['urgent'])
self.assertEqual(op.strategies[2], op['macd'])
self.assertEqual(op.strategies[3], op['dma_2'])
self.assertEqual(op.strategies[4], op['custom'])
def test_operator_clear_strategies(self):
""" test operator clear strategies"""
op = qt.Operator('dma, all, urgent')
op.add_strategies(['dma', 'macd'])
op.add_strategies(['DMA', TestLSStrategy()])
self.assertEqual(op.strategy_count, 7)
print('test removing strategies from Operator')
op.clear_strategies()
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op.add_strategy('dma', pars=(12, 123, 25))
self.assertEqual(op.strategy_count, 1)
self.assertEqual(op.strategy_ids, ['dma'])
self.assertEqual(type(op.strategies[0]), TimingDMA)
self.assertEqual(op.strategies[0].pars, (12, 123, 25))
op.clear_strategies()
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
def test_operator_prepare_data(self):
"""test processes that related to prepare data"""
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sig = TestSigStrategy()
self.op = qt.Operator(strategies=[test_ls, test_sel, test_sig])
too_early_cash = qt.CashPlan(dates='2016-01-01', amounts=10000)
early_cash = qt.CashPlan(dates='2016-07-01', amounts=10000)
on_spot_cash = qt.CashPlan(dates='2016-07-08', amounts=10000)
no_trade_cash = qt.CashPlan(dates='2016-07-08, 2016-07-30, 2016-08-11, 2016-09-03',
amounts=[10000, 10000, 10000, 10000])
# calling prepare_data before all strategy parameters are set raises an AssertionError
self.assertRaises(AssertionError,
self.op.prepare_data,
hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
late_cash = qt.CashPlan(dates='2016-12-31', amounts=10000)
multi_cash = qt.CashPlan(dates='2016-07-08, 2016-08-08', amounts=[10000, 10000])
self.op.set_parameter(stg_id='custom',
pars={'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.assertEqual(self.op.strategies[0].pars, {'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.op.set_parameter(stg_id='custom_1',
pars=())
self.assertEqual(self.op.strategies[1].pars, ()),
self.op.set_parameter(stg_id='custom_2',
pars=(0.2, 0.02, -0.02))
self.assertEqual(self.op.strategies[2].pars, (0.2, 0.02, -0.02)),
self.op.prepare_data(hist_data=self.hp1,
cash_plan=on_spot_cash)
self.assertIsInstance(self.op._op_history_data, dict)
self.assertEqual(len(self.op._op_history_data), 3)
# test if automatic strategy blenders are set
self.assertEqual(self.op.strategy_blenders,
{'close': ['+', '2', '+', '1', '0']})
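# note: no blender was set explicitly here, so prepare_data appears to fall
# back to a default blender that sums the signals of all strategies sharing a
# price type; '0+1+2' is stored in RPN form as ['+', '2', '+', '1', '0']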
tim_hist_data = self.op._op_history_data['custom']
sel_hist_data = self.op._op_history_data['custom_1']
ric_hist_data = self.op._op_history_data['custom_2']
print(f'in test_prepare_data in TestOperator:')
print('selecting history data:\n', sel_hist_data)
print('originally passed data in correct sequence:\n', self.test_data_3D[:, 3:, [2, 3, 0]])
print('difference is \n', sel_hist_data - self.test_data_3D[:, :, [2, 3, 0]])
self.assertTrue(np.allclose(sel_hist_data, self.test_data_3D[:, :, [2, 3, 0]], equal_nan=True))
self.assertTrue(np.allclose(tim_hist_data, self.test_data_3D, equal_nan=True))
self.assertTrue(np.allclose(ric_hist_data, self.test_data_3D[:, 3:, :], equal_nan=True))
# raises ValueError if an empty history panel is given
empty_hp = qt.HistoryPanel()
correct_hp = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 4)),
columns=self.types,
levels=self.shares,
rows=self.date_indices)
too_many_shares = qt.HistoryPanel(values=np.random.randint(10, size=(5, 50, 4)))
too_many_types = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 5)))
# raises Error when history panel is empty
self.assertRaises(ValueError,
self.op.prepare_data,
empty_hp,
on_spot_cash)
# raises Error when first investment date is too early
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
early_cash)
# raises Error when last investment date is too late
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
late_cash)
# raises Error when some of the investment dates are on no-trade-days
self.assertRaises(ValueError,
self.op.prepare_data,
correct_hp,
no_trade_cash)
# raises Error when number of shares in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_shares,
on_spot_cash)
# raises Error when too early cash investment date
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
too_early_cash)
# raises Error when number of d_types in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_types,
on_spot_cash)
# test the effect of data type sequence in strategy definition
def test_operator_generate(self):
""" Test signal generation process of operator objects
:return:
"""
# create three types of trading strategies from the custom strategies defined in this test module
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sel2 = TestSelStrategyDiffTime()
test_sig = TestSigStrategy()
print('--Test PT type signal generation--')
# test PT type signal generation:
# create an Operator object whose signal type is PT (proportion target signal)
# the Operator contains two strategies, an LS-Strategy and a Sel-Strategy,
# representing a timing strategy and a stock-selection strategy respectively
# each strategy generates its own PT signals, which are then blended into one signal output
self.op = qt.Operator(strategies=[test_ls, test_sel])
self.op.set_parameter(stg_id='custom',
pars={'000010': (5, 10.),
'000030': (5, 10.),
'000039': (5, 6.)})
self.op.set_parameter(stg_id=1,
pars=())
# self.a_to_sell.set_blender(blender='0+1+2')
self.op.prepare_data(hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
print('--test operator information in normal mode--')
self.op.info()
self.assertEqual(self.op.strategy_blenders,
{'close': ['+', '1', '0']})
self.op.set_blender(None, '0*1')
self.assertEqual(self.op.strategy_blenders,
{'close': ['*', '1', '0']})
print('--test operation signal created in Proportional Target (PT) Mode--')
op_list = self.op.create_signal(hist_data=self.hp1)
self.assertTrue(isinstance(op_list, HistoryPanel))
backtest_price_types = op_list.htypes
self.assertEqual(backtest_price_types[0], 'close')
self.assertEqual(op_list.shape, (3, 45, 1))
reduced_op_list = op_list.values.squeeze().T
print(f'op_list created; it is a 3-share/45-day/1-htype array. To ease comparison,\n'
f'it will be squeezed to a 2-D array and compared share by share:\n'
f'{reduced_op_list}')
target_op_values = np.array([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0]])
self.assertTrue(np.allclose(target_op_values, reduced_op_list, equal_nan=True))
print('--Test two separate signal generation for different price types--')
# test generation of two sets of PT type signals:
# add two more strategies to the Operator; they are of the same strategy types
# as before but with different parameters, and their backtest price type is "open"
# the Operator should generate two sets of trading signals, one for each of the
# "close" and "open" price types
# two new strategy objects must be created here, otherwise duplicated object
# references in op's strategies list would cause errors
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
self.op.add_strategies([test_ls, test_sel])
self.op.set_parameter(stg_id='custom_2',
price_type='open')
self.op.set_parameter(stg_id='custom_3',
price_type='open')
self.assertEqual(self.op['custom'].price_type, 'close')
self.assertEqual(self.op['custom_2'].price_type, 'open')
self.op.set_parameter(stg_id='custom_2',
pars={'000010': (5, 10.),
'000030': (5, 10.),
'000039': (5, 6.)})
self.op.set_parameter(stg_id='custom_3',
pars=())
self.op.set_blender(blender='0 or 1', price_type='open')
self.op.prepare_data(hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
print('--test how operator information is printed out--')
self.op.info()
self.assertEqual(self.op.strategy_blenders,
{'close': ['*', '1', '0'],
'open': ['or', '1', '0']})
print('--test operation signal created in Proportional Target (PT) Mode--')
op_list = self.op.create_signal(hist_data=self.hp1)
self.assertTrue(isinstance(op_list, HistoryPanel))
signal_close = op_list['close'].squeeze().T
signal_open = op_list['open'].squeeze().T
self.assertEqual(signal_close.shape, (45, 3))
self.assertEqual(signal_open.shape, (45, 3))
target_op_close = np.array([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0]])
target_op_open = np.array([[0.5, 0.5, 1.0],
[0.5, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 0.0],
[1.0, 1.0, 0.0],
[1.0, 1.0, 0.0],
[1.0, 0.5, 0.0],
[1.0, 0.5, 0.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.5, 1.0, 0.0],
[0.5, 1.0, 0.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0]])
signal_pairs = [[list(sig1), list(sig2), sig1 == sig2]
for sig1, sig2
in zip(list(target_op_close), list(signal_close))]
print(f'signals side by side:\n'
f'{signal_pairs}')
self.assertTrue(np.allclose(target_op_close, signal_close, equal_nan=True))
signal_pairs = [[list(sig1), list(sig2), sig1 == sig2]
for sig1, sig2
in zip(list(target_op_open), list(signal_open))]
print(f'signals side by side:\n'
f'{signal_pairs}')
self.assertTrue(np.allclose(target_op_open, signal_open, equal_nan=True))
print('--Test two separate signal generation for different price types--')
# more test cases to be added
def test_stg_parameter_setting(self):
""" test setting parameters of strategies
test the method set_parameters
:return:
"""
op = qt.Operator(strategies='dma, all, urgent')
print(op.strategies, '\n', [qt.TimingDMA, qt.SelectingAll, qt.RiconUrgent])
print(f'info of Timing strategy in new op: \n{op.strategies[0].info()}')
# TODO: allow set_parameters to a list of strategies or str-listed strategies
# TODO: allow set_parameters to all strategies of specific bt price type
print(f'Set up strategy parameters by strategy id')
op.set_parameter('dma',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
op.set_parameter('all',
window_length=20)
op.set_parameter('all', price_type='high')
print(f'Can also set up strategy parameters by strategy index')
op.set_parameter(2, price_type='open')
op.set_parameter(2,
opt_tag=1,
pars=(9, -0.09),
window_length=10)
self.assertEqual(op.strategies[0].pars, (5, 10, 5))
self.assertEqual(op.strategies[0].par_boes, ((5, 10), (5, 15), (10, 15)))
self.assertEqual(op.strategies[2].pars, (9, -0.09))
self.assertEqual(op.op_data_freq, 'd')
self.assertEqual(op.op_data_types, ['close', 'high', 'open'])
self.assertEqual(op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (1, 40), (-0.5, 0.5)],
['discr', 'discr', 'discr', 'discr', 'conti']))
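# opt_space_par appears to concatenate the par_boes of all strategies whose
# opt_tag is non-zero: the three 'discr' ranges come from 'dma', and the
# (1, 40) 'discr' plus (-0.5, 0.5) 'conti' ranges come from 'urgent'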
self.assertEqual(op.max_window_length, 20)
print(f'KeyError will be raised if wrong strategy id is given')
self.assertRaises(KeyError, op.set_parameter, stg_id='t-1', pars=(1, 2))
self.assertRaises(KeyError, op.set_parameter, stg_id='wrong_input', pars=(1, 2))
print(f'ValueError will be raised if invalid parameters are given')
self.assertRaises(ValueError, op.set_parameter, stg_id=0, pars=('wrong input', 'wrong input'))
# test blenders of different price types
# test setting blenders to different price types
# TODO: to allow operands like "and", "or", "not", "xor"
# a_to_sell.set_blender('close', '0 and 1 or 2')
# self.assertEqual(a_to_sell.get_blender('close'), 'str-1.2')
self.assertEqual(op.bt_price_types, ['close', 'high', 'open'])
op.set_blender('open', '0 & 1 | 2')
self.assertEqual(op.get_blender('open'), ['|', '2', '&', '1', '0'])
op.set_blender('high', '(0|1) & 2')
self.assertEqual(op.get_blender('high'), ['&', '2', '|', '1', '0'])
op.set_blender('close', '0 & 1 | 2')
self.assertEqual(op.get_blender(), {'close': ['|', '2', '&', '1', '0'],
'high': ['&', '2', '|', '1', '0'],
'open': ['|', '2', '&', '1', '0']})
self.assertEqual(op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (1, 40), (-0.5, 0.5)],
['discr', 'discr', 'discr', 'discr', 'conti']))
self.assertEqual(op.opt_tags, [1, 0, 1])
def test_signal_blend(self):
self.assertEqual(blender_parser('0 & 1'), ['&', '1', '0'])
self.assertEqual(blender_parser('0 or 1'), ['or', '1', '0'])
self.assertEqual(blender_parser('0 & 1 | 2'), ['|', '2', '&', '1', '0'])
blender = blender_parser('0 & 1 | 2')
self.assertEqual(signal_blend([1, 1, 1], blender), 1)
self.assertEqual(signal_blend([1, 0, 1], blender), 1)
self.assertEqual(signal_blend([1, 1, 0], blender), 1)
self.assertEqual(signal_blend([0, 1, 1], blender), 1)
self.assertEqual(signal_blend([0, 0, 1], blender), 1)
self.assertEqual(signal_blend([1, 0, 0], blender), 0)
self.assertEqual(signal_blend([0, 1, 0], blender), 0)
self.assertEqual(signal_blend([0, 0, 0], blender), 0)
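# The assertions above treat the blender as an operator-first RPN list. Below
# is a minimal stack-based evaluation sketch for illustration; it is an
# assumption about the technique, not qteasy's actual signal_blend
# implementation, and it covers only the binary operators used in this test
# (function tokens such as 'max(3)' are omitted). The remaining assertions
# below exercise arithmetic operators in the same way.
def _rpn_eval_sketch(signals, rpn):
    """Evaluate an operator-first RPN token list against indexed signals."""
    ops = {'+': lambda a, b: a + b,
           '-': lambda a, b: a - b,
           '*': lambda a, b: a * b,
           '/': lambda a, b: a / b,
           '&': lambda a, b: int(bool(a) and bool(b)),
           '|': lambda a, b: int(bool(a) or bool(b))}
    stack = []
    for token in rpn[::-1]:  # reverse to conventional postfix order
        if token in ops:
            right = stack.pop()
            left = stack.pop()
            stack.append(ops[token](left, right))
        else:
            stack.append(signals[int(token)])  # numeric tokens index signals
    return stack[0]
# e.g. _rpn_eval_sketch([5, 9, 1, 4], ['+', '0', '/', '3', '-', '2', '1']) == 7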
# parse: '0 & ( 1 | 2 )'
self.assertEqual(blender_parser('0 & ( 1 | 2 )'), ['&', '|', '2', '1', '0'])
blender = blender_parser('0 & ( 1 | 2 )')
self.assertEqual(signal_blend([1, 1, 1], blender), 1)
self.assertEqual(signal_blend([1, 0, 1], blender), 1)
self.assertEqual(signal_blend([1, 1, 0], blender), 1)
self.assertEqual(signal_blend([0, 1, 1], blender), 0)
self.assertEqual(signal_blend([0, 0, 1], blender), 0)
self.assertEqual(signal_blend([1, 0, 0], blender), 0)
self.assertEqual(signal_blend([0, 1, 0], blender), 0)
self.assertEqual(signal_blend([0, 0, 0], blender), 0)
# parse: '(1-2)/3 + 0'
self.assertEqual(blender_parser('(1-2)/3 + 0'), ['+', '0', '/', '3', '-', '2', '1'])
blender = blender_parser('(1-2)/3 + 0')
self.assertEqual(signal_blend([5, 9, 1, 4], blender), 7)
# parse: '(0*1/2*(3+4))+5*(6+7)-8'
self.assertEqual(blender_parser('(0*1/2*(3+4))+5*(6+7)-8'), ['-', '8', '+', '*', '+', '7', '6', '5', '*',
'+', '4', '3', '/', '2', '*', '1', '0'])
blender = blender_parser('(0*1/2*(3+4))+5*(6+7)-8')
self.assertEqual(signal_blend([1, 1, 1, 1, 1, 1, 1, 1, 1], blender), 3)
self.assertEqual(signal_blend([2, 1, 4, 3, 5, 5, 2, 2, 10], blender), 14)
# parse: '0/max(2,1,3 + 5)+4'
self.assertEqual(blender_parser('0/max(2,1,3 + 5)+4'), ['+', '4', '/', 'max(3)', '+', '5', '3', '1', '2', '0'])
blender = blender_parser('0/max(2,1,3 + 5)+4')
self.assertEqual(signal_blend([8.0, 4, 3, 5.0, 0.125, 5], blender), 0.925)
self.assertEqual(signal_blend([2, 1, 4, 3, 5, 5, 2, 2, 10], blender), 5.25)
print('speed test')
import time
st = time.time()
blender = blender_parser('0+max(1,2,(3+4)*5, max(6, (7+8)*9), 10-11) * (12+13)')
res = []
for i in range(10000):
res = signal_blend([1, 1, 2, 3, 4, 5, 3, 4, 5, 6, 7, 8, 2, 3], blender)
et = time.time()
print(f'total time for RPN processing: {et - st}, got result: {res}')
blender = blender_parser("0 + 1 * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 7)
blender = blender_parser("(0 + 1) * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 9)
blender = blender_parser("(0+1) * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 9)
blender = blender_parser("(0 + 1) * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 9)
# TODO: expressions with a leading unary minus such as -(1+2) cannot be handled yet
# self.a_to_sell.set_blender('selecting', "-(0 + 1) * 2")
# self.assertEqual(self.a_to_sell.signal_blend([1, 2, 3]), -9)
blender = blender_parser("(0-1)/2 + 3")
print(f'RPN of notation: "(0-1)/2 + 3" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([1, 2, 3, 0.0], blender), -0.33333333)
blender = blender_parser("0 + 1 / 2")
print(f'RPN of notation: "0 + 1 / 2" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([1, math.pi, 4], blender), 1.78539816)
blender = blender_parser("(0 + 1) / 2")
print(f'RPN of notation: "(0 + 1) / 2" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(signal_blend([1, 2, 3], blender), 1)
blender = blender_parser("(0 + 1 * 2) / 3")
print(f'RPN of notation: "(0 + 1 * 2) / 3" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([3, math.e, 10, 10], blender), 3.0182818284590454)
blender = blender_parser("0 / 1 * 2")
print(f'RPN of notation: "0 / 1 * 2" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(signal_blend([1, 3, 6], blender), 2)
blender = blender_parser("(0 - 1 + 2) * 4")
print(f'RPN of notation: "(0 - 1 + 2) * 4" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([1, 1, -1, np.nan, math.pi], blender), -3.141592653589793)
blender = blender_parser("0 * 1")
print(f'RPN of notation: "0 * 1" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([math.pi, math.e], blender), 8.539734222673566)
blender = blender_parser('abs(3-sqrt(2) / cos(1))')
print(f'RPN of notation: "abs(3-sqrt(2) / cos(1))" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['abs(1)', '-', '/', 'cos(1)', '1', 'sqrt(1)', '2', '3'])
blender = blender_parser('0/max(2,1,3 + 5)+4')
print(f'RPN of notation: "0/max(2,1,3 + 5)+4" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['+', '4', '/', 'max(3)', '+', '5', '3', '1', '2', '0'])
blender = blender_parser('1 + sum(1,2,3+3, sum(1, 2) + 3) *5')
print(f'RPN of notation: "1 + sum(1,2,3+3, sum(1, 2) + 3) *5" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['+', '*', '5', 'sum(4)', '+', '3', 'sum(2)', '2', '1',
'+', '3', '3', '2', '1', '1'])
blender = blender_parser('1+sum(1,2,(3+5)*4, sum(3, (4+5)*6), 7-8) * (2+3)')
print(f'RPN of notation: "1+sum(1,2,(3+5)*4, sum(3, (4+5)*6), 7-8) * (2+3)" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['+', '*', '+', '3', '2', 'sum(5)', '-', '8', '7',
'sum(2)', '*', '6', '+', '5', '4', '3', '*', '4',
'+', '5', '3', '2', '1', '1'])
# TODO: ndarray type of signals to be tested:
def test_set_opt_par(self):
""" test setting opt pars in batch"""
print(f'--------- Testing setting Opt Pars: set_opt_par -------')
op = qt.Operator('dma, random, crossline')
op.set_parameter('dma',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
self.assertEqual(op.strategies[0].pars, (5, 10, 5))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (35, 120, 10, 'buy'))
self.assertEqual(op.opt_tags, [1, 0, 0])
op.set_opt_par((5, 12, 9))
self.assertEqual(op.strategies[0].pars, (5, 12, 9))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (35, 120, 10, 'buy'))
op.set_parameter('crossline',
pars=(5, 10, 5, 'sell'),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15), ('buy', 'sell', 'none')),
window_length=10,
data_types=['close', 'open', 'high'])
self.assertEqual(op.opt_tags, [1, 0, 1])
op.set_opt_par((5, 12, 9, 8, 26, 9, 'buy'))
self.assertEqual(op.strategies[0].pars, (5, 12, 9))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (8, 26, 9, 'buy'))
op.set_opt_par((9, 200, 155, 8, 26, 9, 'buy', 5, 12, 9))
self.assertEqual(op.strategies[0].pars, (9, 200, 155))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (8, 26, 9, 'buy'))
# test set_opt_par when opt_tag is set to be 2 (enumerate type of parameters)
op.set_parameter('crossline',
pars=(5, 10, 5, 'sell'),
opt_tag=2,
par_boes=((5, 10), (5, 15), (10, 15), ('buy', 'sell', 'none')),
window_length=10,
data_types=['close', 'open', 'high'])
self.assertEqual(op.opt_tags, [1, 0, 2])
self.assertEqual(op.strategies[0].pars, (9, 200, 155))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (5, 10, 5, 'sell'))
op.set_opt_par((5, 12, 9, (8, 26, 9, 'buy')))
self.assertEqual(op.strategies[0].pars, (5, 12, 9))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (8, 26, 9, 'buy'))
# Test Errors
# Not enough values for parameter
op.set_parameter('crossline', opt_tag=1)
self.assertRaises(ValueError, op.set_opt_par, (5, 12, 9, 8))
# wrong type of input
self.assertRaises(AssertionError, op.set_opt_par, [5, 12, 9, 7, 15, 12, 'sell'])
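# From the assertions above, set_opt_par appears to distribute the flat input
# tuple across strategies in order: a strategy with opt_tag == 0 is skipped,
# one with opt_tag == 1 consumes as many values as it has parameters, and one
# with opt_tag == 2 (enumerated parameters) consumes a single element that is
# itself a complete parameter tuple; surplus trailing values seem to be
# ignored, while too few values raise a ValueError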
def test_stg_attribute_get_and_set(self):
self.stg = qt.TimingCrossline()
self.stg_type = 'R-TIMING'
self.stg_name = "CROSSLINE"
self.stg_text = 'Moving average crossline strategy, determine long/short position according to the cross ' \
'point' \
' of long and short term moving average prices '
self.pars = (35, 120, 10, 'buy')
self.par_boes = [(10, 250), (10, 250), (1, 100), ('buy', 'sell', 'none')]
self.par_count = 4
self.par_types = ['discr', 'discr', 'conti', 'enum']
self.opt_tag = 0
self.data_types = ['close']
self.data_freq = 'd'
self.sample_freq = 'd'
self.window_length = 270
self.assertEqual(self.stg.stg_type, self.stg_type)
self.assertEqual(self.stg.stg_name, self.stg_name)
self.assertEqual(self.stg.stg_text, self.stg_text)
self.assertEqual(self.stg.pars, self.pars)
self.assertEqual(self.stg.par_types, self.par_types)
self.assertEqual(self.stg.par_boes, self.par_boes)
self.assertEqual(self.stg.par_count, self.par_count)
self.assertEqual(self.stg.opt_tag, self.opt_tag)
self.assertEqual(self.stg.data_freq, self.data_freq)
self.assertEqual(self.stg.sample_freq, self.sample_freq)
self.assertEqual(self.stg.data_types, self.data_types)
self.assertEqual(self.stg.window_length, self.window_length)
self.stg.stg_name = 'NEW NAME'
self.stg.stg_text = 'NEW TEXT'
self.assertEqual(self.stg.stg_name, 'NEW NAME')
self.assertEqual(self.stg.stg_text, 'NEW TEXT')
self.stg.pars = (1, 2, 3, 4)
self.assertEqual(self.stg.pars, (1, 2, 3, 4))
self.stg.par_count = 3
self.assertEqual(self.stg.par_count, 3)
self.stg.par_boes = [(1, 10), (1, 10), (1, 10), (1, 10)]
self.assertEqual(self.stg.par_boes, [(1, 10), (1, 10), (1, 10), (1, 10)])
self.stg.par_types = ['conti', 'conti', 'discr', 'enum']
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'enum'])
self.stg.par_types = 'conti, conti, discr, conti'
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'conti'])
self.stg.data_types = 'close, open'
self.assertEqual(self.stg.data_types, ['close', 'open'])
self.stg.data_types = ['close', 'high', 'low']
self.assertEqual(self.stg.data_types, ['close', 'high', 'low'])
self.stg.data_freq = 'w'
self.assertEqual(self.stg.data_freq, 'w')
self.stg.window_length = 300
self.assertEqual(self.stg.window_length, 300)
def test_rolling_timing(self):
stg = TestLSStrategy()
stg_pars = {'000100': (5, 10),
'000200': (5, 10),
'000300': (5, 6)}
stg.set_pars(stg_pars)
history_data = self.hp1.values
output = stg.generate(hist_data=history_data)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
lsmask = np.array([[0., 0., 1.],
[0., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 0.],
[1., 1., 0.],
[1., 1., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.]])
# TODO: issue to be solved: np.nan values are converted to 0 in the lsmask, which may have unintended consequences
# TODO: the handling of nan values needs to be resolved
self.assertEqual(output.shape, lsmask.shape)
self.assertTrue(np.allclose(output, lsmask, equal_nan=True))
def test_sel_timing(self):
stg = TestSelStrategy()
stg_pars = ()
stg.set_pars(stg_pars)
history_data = self.hp1['high, low, close', :, :]
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
def test_simple_timing(self):
stg = TestSigStrategy()
stg_pars = (0.2, 0.02, -0.02)
stg.set_pars(stg_pars)
history_data = self.hp1['close, open, high, low', :, 3:50]
output = stg.generate(hist_data=history_data, shares=self.shares, dates=self.date_indices)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
sigmatrix = np.array([[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, -1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0]])
side_by_side_array = np.array([[i, out_line, sig_line]
for
i, out_line, sig_line
in zip(range(len(output)), output, sigmatrix)])
print(f'output and signal matrix lined up side by side is \n'
f'{side_by_side_array}')
self.assertEqual(sigmatrix.shape, output.shape)
self.assertTrue(np.allclose(output, sigmatrix))
def test_sel_finance(self):
"""Test selecting_finance strategy, test all built-in strategy parameters"""
stg = SelectingFinanceIndicator()
stg_pars = (False, 'even', 'greater', 0, 0, 0.67)
stg.set_pars(stg_pars)
stg.window_length = 5
stg.data_freq = 'd'
stg.sample_freq = '10d'
stg.sort_ascending = False
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg._poq = 0.67
history_data = self.hp2.values
print(f'Start to test financial selection parameter {stg_pars}')
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get minimum factor
stg_pars = (True, 'even', 'less', 1, 1, 0.67)
stg.sort_ascending = True
stg.condition = 'less'
stg.lbound = 1
stg.ubound = 1
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor in linear weight
stg_pars = (False, 'linear', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'linear'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor in proportion weight
stg_pars = (False, 'proportion', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'proportion'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
# test single factor, get max factor with even weight, threshold 0.2
stg_pars = (False, 'even', 'greater', 0.2, 0.2, 0.67)
stg.sort_ascending = False
stg.weighting = 'even'
stg.condition = 'greater'
stg.lbound = 0.2
stg.ubound = 0.2
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
def test_tokenizer(self):
self.assertListEqual(_exp_to_token('1+1'),
['1', '+', '1'])
print(_exp_to_token('1+1'))
self.assertListEqual(_exp_to_token('1 & 1'),
['1', '&', '1'])
print(_exp_to_token('1&1'))
self.assertListEqual(_exp_to_token('1 and 1'),
['1', 'and', '1'])
print(_exp_to_token('1 and 1'))
self.assertListEqual(_exp_to_token('1 or 1'),
['1', 'or', '1'])
print(_exp_to_token('1 or 1'))
self.assertListEqual(_exp_to_token('(1 - 1 + -1) * pi'),
['(', '1', '-', '1', '+', '-1', ')', '*', 'pi'])
print(_exp_to_token('(1 - 1 + -1) * pi'))
self.assertListEqual(_exp_to_token('abs(5-sqrt(2) / cos(pi))'),
['abs(', '5', '-', 'sqrt(', '2', ')', '/', 'cos(', 'pi', ')', ')'])
print(_exp_to_token('abs(5-sqrt(2) / cos(pi))'))
self.assertListEqual(_exp_to_token('sin(pi) + 2.14'),
['sin(', 'pi', ')', '+', '2.14'])
print(_exp_to_token('sin(pi) + 2.14'))
self.assertListEqual(_exp_to_token('(1-2)/3.0 + 0.0000'),
['(', '1', '-', '2', ')', '/', '3.0', '+', '0.0000'])
print(_exp_to_token('(1-2)/3.0 + 0.0000'))
self.assertListEqual(_exp_to_token('-(1. + .2) * max(1, 3, 5)'),
['-', '(', '1.', '+', '.2', ')', '*', 'max(', '1', ',', '3', ',', '5', ')'])
print(_exp_to_token('-(1. + .2) * max(1, 3, 5)'))
self.assertListEqual(_exp_to_token('(x + e * 10) / 10'),
['(', 'x', '+', 'e', '*', '10', ')', '/', '10'])
print(_exp_to_token('(x + e * 10) / 10'))
self.assertListEqual(_exp_to_token('8.2/((-.1+abs3(3,4,5))*0.12)'),
['8.2', '/', '(', '(', '-.1', '+', 'abs3(', '3', ',', '4', ',', '5', ')', ')', '*', '0.12',
')'])
print(_exp_to_token('8.2/((-.1+abs3(3,4,5))*0.12)'))
self.assertListEqual(_exp_to_token('8.2/abs3(3,4,25.34 + 5)*0.12'),
['8.2', '/', 'abs3(', '3', ',', '4', ',', '25.34', '+', '5', ')', '*', '0.12'])
print(_exp_to_token('8.2/abs3(3,4,25.34 + 5)*0.12'))
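# A rough regex-based sketch consistent with the token shapes asserted above
# (function names keep their opening parenthesis attached; a minus is merged
# into the following number only when it is unary). This is an illustration
# under stated assumptions, not the actual _exp_to_token implementation.
def _exp_to_token_sketch(exp):
    import re
    raw = re.findall(r"[A-Za-z_]\w*\(?|\d*\.\d+|\d+\.?|[-+*/&|(),]", exp)
    opens = set('+-*/&|(,')  # contexts in which a following '-' is unary
    tokens = []
    for tk in raw:
        if (tokens and tokens[-1] == '-' and (tk[0].isdigit() or tk[0] == '.')
                and (len(tokens) < 2 or tokens[-2][-1] in opens)):
            tokens[-1] = '-' + tk  # merge unary minus into the number
        else:
            tokens.append(tk)
    return tokens
# e.g. _exp_to_token_sketch('(1 - 1 + -1) * pi') ==
#      ['(', '1', '-', '1', '+', '-1', ')', '*', 'pi']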
class TestLog(unittest.TestCase):
def test_init(self):
pass
class TestConfig(unittest.TestCase):
"""测试Config对象以及QT_CONFIG变量的设置和获取值"""
def test_init(self):
pass
def test_invest(self):
pass
def test_pars_string_to_type(self):
_parse_string_kwargs('000300', 'asset_pool', _valid_qt_kwargs())
class TestHistoryPanel(unittest.TestCase):
def setUp(self):
print('start testing HistoryPanel object\n')
self.data = np.random.randint(10, size=(5, 10, 4))
self.index = pd.date_range(start='20200101', freq='d', periods=10)
self.index2 = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14']
self.index3 = '2016-07-01, 2016-07-04, 2016-07-05, 2016-07-06, 2016-07-07, ' \
'2016-07-08, 2016-07-11, 2016-07-12, 2016-07-13, 2016-07-14'
self.shares = '000100,000101,000102,000103,000104'
self.htypes = 'close,open,high,low'
self.data2 = np.random.randint(10, size=(10, 5))
self.data3 = np.random.randint(10, size=(10, 4))
self.data4 = np.random.randint(10, size=(10))
self.hp = qt.HistoryPanel(values=self.data, levels=self.shares, columns=self.htypes, rows=self.index)
self.hp2 = qt.HistoryPanel(values=self.data2, levels=self.shares, columns='close', rows=self.index)
self.hp3 = qt.HistoryPanel(values=self.data3, levels='000100', columns=self.htypes, rows=self.index2)
self.hp4 = qt.HistoryPanel(values=self.data4, levels='000100', columns='close', rows=self.index3)
self.hp5 = qt.HistoryPanel(values=self.data)
self.hp6 = qt.HistoryPanel(values=self.data, levels=self.shares, rows=self.index3)
def test_properties(self):
""" test all properties of HistoryPanel
"""
self.assertFalse(self.hp.is_empty)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.shape, (5, 10, 4))
self.assertSequenceEqual(self.hp.htypes, ['close', 'open', 'high', 'low'])
self.assertSequenceEqual(self.hp.shares, ['000100', '000101', '000102', '000103', '000104'])
self.assertSequenceEqual(list(self.hp.hdates), list(self.index))
self.assertDictEqual(self.hp.columns, {'close': 0, 'open': 1, 'high': 2, 'low': 3})
self.assertDictEqual(self.hp.levels, {'000100': 0, '000101': 1, '000102': 2, '000103': 3, '000104': 4})
row_dict = {Timestamp('2020-01-01 00:00:00', freq='D'): 0,
Timestamp('2020-01-02 00:00:00', freq='D'): 1,
Timestamp('2020-01-03 00:00:00', freq='D'): 2,
Timestamp('2020-01-04 00:00:00', freq='D'): 3,
Timestamp('2020-01-05 00:00:00', freq='D'): 4,
Timestamp('2020-01-06 00:00:00', freq='D'): 5,
Timestamp('2020-01-07 00:00:00', freq='D'): 6,
Timestamp('2020-01-08 00:00:00', freq='D'): 7,
Timestamp('2020-01-09 00:00:00', freq='D'): 8,
Timestamp('2020-01-10 00:00:00', freq='D'): 9}
self.assertDictEqual(self.hp.rows, row_dict)
def test_len(self):
""" test the function len(HistoryPanel)
:return:
"""
self.assertEqual(len(self.hp), 10)
def test_empty_history_panel(self):
"""测试空HP或者特殊HP如维度标签为纯数字的HP"""
test_hp = qt.HistoryPanel(self.data)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
self.assertEqual(test_hp.level_count, 5)
self.assertEqual(test_hp.row_count, 10)
self.assertEqual(test_hp.column_count, 4)
self.assertEqual(test_hp.shares, list(range(5)))
self.assertEqual(test_hp.hdates, list(pd.date_range(start='20200730', periods=10, freq='d')))
self.assertEqual(test_hp.htypes, list(range(4)))
self.assertTrue(np.allclose(test_hp.values, self.data))
print(f'shares: {test_hp.shares}\nhtypes: {test_hp.htypes}')
print(test_hp)
# HistoryPanel should be empty if no value is given
empty_hp = qt.HistoryPanel()
self.assertTrue(empty_hp.is_empty)
self.assertIsInstance(empty_hp, qt.HistoryPanel)
self.assertEqual(empty_hp.shape[0], 0)
self.assertEqual(empty_hp.shape[1], 0)
self.assertEqual(empty_hp.shape[2], 0)
self.assertEqual(empty_hp.level_count, 0)
self.assertEqual(empty_hp.row_count, 0)
self.assertEqual(empty_hp.column_count, 0)
# HistoryPanel should also be empty if an array with an empty axis (e.g. np.empty((5, 0, 4))) is given
empty_hp = qt.HistoryPanel(np.empty((5, 0, 4)), levels=self.shares, columns=self.htypes)
self.assertTrue(empty_hp.is_empty)
self.assertIsInstance(empty_hp, qt.HistoryPanel)
self.assertEqual(empty_hp.shape[0], 0)
self.assertEqual(empty_hp.shape[1], 0)
self.assertEqual(empty_hp.shape[2], 0)
self.assertEqual(empty_hp.level_count, 0)
self.assertEqual(empty_hp.row_count, 0)
self.assertEqual(empty_hp.column_count, 0)
def test_create_history_panel(self):
""" test the creation of a HistoryPanel object by passing all data explicitly
"""
self.assertIsInstance(self.hp, qt.HistoryPanel)
self.assertEqual(self.hp.shape[0], 5)
self.assertEqual(self.hp.shape[1], 10)
self.assertEqual(self.hp.shape[2], 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(list(self.hp.levels.keys()), self.shares.split(','))
self.assertEqual(list(self.hp.columns.keys()), self.htypes.split(','))
self.assertEqual(list(self.hp.rows.keys())[0], pd.Timestamp('20200101'))
self.assertIsInstance(self.hp2, qt.HistoryPanel)
self.assertEqual(self.hp2.shape[0], 5)
self.assertEqual(self.hp2.shape[1], 10)
self.assertEqual(self.hp2.shape[2], 1)
self.assertEqual(self.hp2.level_count, 5)
self.assertEqual(self.hp2.row_count, 10)
self.assertEqual(self.hp2.column_count, 1)
self.assertEqual(list(self.hp2.levels.keys()), self.shares.split(','))
self.assertEqual(list(self.hp2.columns.keys()), ['close'])
self.assertEqual(list(self.hp2.rows.keys())[0], pd.Timestamp('20200101'))
self.assertIsInstance(self.hp3, qt.HistoryPanel)
self.assertEqual(self.hp3.shape[0], 1)
self.assertEqual(self.hp3.shape[1], 10)
self.assertEqual(self.hp3.shape[2], 4)
self.assertEqual(self.hp3.level_count, 1)
self.assertEqual(self.hp3.row_count, 10)
self.assertEqual(self.hp3.column_count, 4)
self.assertEqual(list(self.hp3.levels.keys()), ['000100'])
self.assertEqual(list(self.hp3.columns.keys()), self.htypes.split(','))
self.assertEqual(list(self.hp3.rows.keys())[0], pd.Timestamp('2016-07-01'))
self.assertIsInstance(self.hp4, qt.HistoryPanel)
self.assertEqual(self.hp4.shape[0], 1)
self.assertEqual(self.hp4.shape[1], 10)
self.assertEqual(self.hp4.shape[2], 1)
self.assertEqual(self.hp4.level_count, 1)
self.assertEqual(self.hp4.row_count, 10)
self.assertEqual(self.hp4.column_count, 1)
self.assertEqual(list(self.hp4.levels.keys()), ['000100'])
self.assertEqual(list(self.hp4.columns.keys()), ['close'])
self.assertEqual(list(self.hp4.rows.keys())[0], pd.Timestamp('2016-07-01'))
self.hp5.info()
self.assertIsInstance(self.hp5, qt.HistoryPanel)
self.assertTrue(np.allclose(self.hp5.values, self.data))
self.assertEqual(self.hp5.shape[0], 5)
self.assertEqual(self.hp5.shape[1], 10)
self.assertEqual(self.hp5.shape[2], 4)
self.assertEqual(self.hp5.level_count, 5)
self.assertEqual(self.hp5.row_count, 10)
self.assertEqual(self.hp5.column_count, 4)
self.assertEqual(list(self.hp5.levels.keys()), [0, 1, 2, 3, 4])
self.assertEqual(list(self.hp5.columns.keys()), [0, 1, 2, 3])
self.assertEqual(list(self.hp5.rows.keys())[0], pd.Timestamp('2020-07-30'))
self.hp6.info()
self.assertIsInstance(self.hp6, qt.HistoryPanel)
self.assertTrue(np.allclose(self.hp6.values, self.data))
self.assertEqual(self.hp6.shape[0], 5)
self.assertEqual(self.hp6.shape[1], 10)
self.assertEqual(self.hp6.shape[2], 4)
self.assertEqual(self.hp6.level_count, 5)
self.assertEqual(self.hp6.row_count, 10)
self.assertEqual(self.hp6.column_count, 4)
self.assertEqual(list(self.hp6.levels.keys()), ['000100', '000101', '000102', '000103', '000104'])
self.assertEqual(list(self.hp6.columns.keys()), [0, 1, 2, 3])
self.assertEqual(list(self.hp6.rows.keys())[0], pd.Timestamp('2016-07-01'))
print('test creating HistoryPanel with very limited data')
print('test creating HistoryPanel with 2D data')
temp_data = np.random.randint(10, size=(7, 3)).astype('float')
temp_hp = qt.HistoryPanel(temp_data)
# Error testing during HistoryPanel creation
# shape does not match
self.assertRaises(AssertionError,
qt.HistoryPanel,
self.data,
levels=self.shares, columns='close', rows=self.index)
# values is not np.ndarray
self.assertRaises(TypeError,
qt.HistoryPanel,
list(self.data))
# dimension/shape does not match
self.assertRaises(AssertionError,
qt.HistoryPanel,
self.data2,
levels='000100', columns=self.htypes, rows=self.index)
# value dimension over 3
self.assertRaises(AssertionError,
qt.HistoryPanel,
np.random.randint(10, size=(5, 10, 4, 2)))
# label value not valid
self.assertRaises(ValueError,
qt.HistoryPanel,
self.data2,
levels=self.shares, columns='close',
rows='a,b,c,d,e,f,g,h,i,j')
def test_history_panel_slicing(self):
"""测试HistoryPanel的各种切片方法
包括通过标签名称切片,通过数字切片,通过逗号分隔的标签名称切片,通过冒号分隔的标签名称切片等切片方式"""
self.assertTrue(np.allclose(self.hp['close'], self.data[:, :, 0:1]))
self.assertTrue(np.allclose(self.hp['close,open'], self.data[:, :, 0:2]))
self.assertTrue(np.allclose(self.hp[['close', 'open']], self.data[:, :, 0:2]))
self.assertTrue(np.allclose(self.hp['close:high'], self.data[:, :, 0:3]))
self.assertTrue(np.allclose(self.hp['close,high'], self.data[:, :, [0, 2]]))
self.assertTrue(np.allclose(self.hp[:, '000100'], self.data[0:1, :, ]))
self.assertTrue(np.allclose(self.hp[:, '000100,000101'], self.data[0:2, :]))
self.assertTrue(np.allclose(self.hp[:, ['000100', '000101']], self.data[0:2, :]))
self.assertTrue(np.allclose(self.hp[:, '000100:000102'], self.data[0:3, :]))
self.assertTrue(np.allclose(self.hp[:, '000100,000102'], self.data[[0, 2], :]))
self.assertTrue(np.allclose(self.hp['close,open', '000100,000102'], self.data[[0, 2], :, 0:2]))
print('start testing HistoryPanel')
data = np.random.randint(10, size=(10, 5))
# index = pd.date_range(start='20200101', freq='d', periods=10)
shares = '000100,000101,000102,000103,000104'
dtypes = 'close'
df = pd.DataFrame(data)
print('=========================\nTesting HistoryPanel creation from DataFrame')
hp = qt.dataframe_to_hp(df=df, shares=shares, htypes=dtypes)
hp.info()
hp = qt.dataframe_to_hp(df=df, shares='000100', htypes='close, open, high, low, middle', column_type='htypes')
hp.info()
print('=========================\nTesting HistoryPanel creation from initialization')
data = np.random.randint(10, size=(5, 10, 4)).astype('float')
index = pd.date_range(start='20200101', freq='d', periods=10)
dtypes = 'close, open, high,low'
data[0, [5, 6, 9], [0, 1, 3]] = np.nan
data[1:4, [4, 7, 6, 2], [1, 1, 3, 0]] = np.nan
data[4:5, [2, 9, 1, 2], [0, 3, 2, 1]] = np.nan
hp = qt.HistoryPanel(data, levels=shares, columns=dtypes, rows=index)
hp.info()
print('==========================\noutput all history data of type close\n')
self.assertTrue(np.allclose(hp['close', :, :], data[:, :, 0:1], equal_nan=True))
print('==========================\noutput all history data of types close and open\n')
self.assertTrue(np.allclose(hp[[0, 1], :, :], data[:, :, 0:2], equal_nan=True))
print('==========================\noutput history data of all types for the first share\n')
self.assertTrue(np.allclose(hp[:, [0], :], data[0:1, :, :], equal_nan=True))
print('==========================\noutput all history data of all shares for htypes 0, 1 and 2\n')
self.assertTrue(np.allclose(hp[[0, 1, 2]], data[:, :, 0:3], equal_nan=True))
print('==========================\noutput all history data of types close and high\n')
self.assertTrue(np.allclose(hp[['close', 'high']], data[:, :, [0, 2]], equal_nan=True))
print('==========================\noutput all history data of htypes 0 and 1\n')
self.assertTrue(np.allclose(hp[[0, 1]], data[:, :, 0:2], equal_nan=True))
print('==========================\noutput all history data of types close and high\n')
self.assertTrue(np.allclose(hp['close,high'], data[:, :, [0, 2]], equal_nan=True))
print('==========================\noutput all history data of the three types from close to high\n')
self.assertTrue(np.allclose(hp['close:high'], data[:, :, 0:3], equal_nan=True))
print('==========================\noutput all history data of shares 0, 1 and 3\n')
self.assertTrue(np.allclose(hp[:, [0, 1, 3]], data[[0, 1, 3], :, :], equal_nan=True))
print('==========================\noutput all history data of shares 000100 and 000102\n')
self.assertTrue(np.allclose(hp[:, ['000100', '000102']], data[[0, 2], :, :], equal_nan=True))
print('==========================\noutput history data of shares 0, 1 and 2\n', hp[:, 0: 3])
self.assertTrue(np.allclose(hp[:, 0: 3], data[0:3, :, :], equal_nan=True))
print('==========================\noutput all history data of shares 000100 and 000102\n')
self.assertTrue(np.allclose(hp[:, '000100, 000102'], data[[0, 2], :, :], equal_nan=True))
print('==========================\noutput history data of days 0-7 for all shares\n')
self.assertTrue(np.allclose(hp[:, :, 0:8], data[:, 0:8, :], equal_nan=True))
print('==========================\noutput history data of days 0-7 for share 000100\n')
self.assertTrue(np.allclose(hp[:, '000100', 0:8], data[0, 0:8, :], equal_nan=True))
print('==========================\nstart testing multi-axis slicing of HistoryPanel object')
print('==========================\noutput close and open history data of shares 000100 and 000102\n',
hp['close,open', ['000100', '000102']])
print('==========================\noutput close and open history data of shares 000100 and 000102, sliced with a comma-separated string\n',
hp['close,open', '000100, 000102'])
print(f'historyPanel: hp:\n{hp}')
print(f'data is:\n{data}')
hp.htypes = 'open,high,low,close'
hp.info()
hp.shares = ['000300', '600227', '600222', '000123', '000129']
hp.info()
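# Label-based slicing as exercised above can be viewed as translating labels
# to integer positions via the axis dicts (hp.columns / hp.levels / hp.rows)
# before indexing the underlying ndarray. A minimal translation sketch,
# assuming only that each axis keeps a label-to-position dict, is given below;
# it is not qteasy's internal implementation.
def _labels_to_indices_sketch(labels, axis_dict):
    """Translate 'close,open', ['close', 'open'] or 'close' to positions."""
    if isinstance(labels, str):
        labels = [name.strip() for name in labels.split(',')]
    return [axis_dict[name] for name in labels]
# e.g. _labels_to_indices_sketch('close,high', {'close': 0, 'open': 1,
#                                               'high': 2, 'low': 3}) == [0, 2]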
def test_segment(self):
"""测试历史数据片段的获取"""
test_hp = qt.HistoryPanel(self.data,
levels=self.shares,
columns=self.htypes,
rows=self.index2)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
print(f'Test segment with None parameters')
seg1 = test_hp.segment()
seg2 = test_hp.segment('20150202')
seg3 = test_hp.segment(end_date='20201010')
self.assertIsInstance(seg1, qt.HistoryPanel)
self.assertIsInstance(seg2, qt.HistoryPanel)
self.assertIsInstance(seg3, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp.values
))
self.assertTrue(np.allclose(
seg2.values, test_hp.values
))
self.assertTrue(np.allclose(
seg3.values, test_hp.values
))
# check that htypes and shares are the same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
self.assertEqual(seg2.htypes, test_hp.htypes)
self.assertEqual(seg2.shares, test_hp.shares)
self.assertEqual(seg3.htypes, test_hp.htypes)
self.assertEqual(seg3.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
self.assertEqual(seg2.hdates, test_hp.hdates)
self.assertEqual(seg3.hdates, test_hp.hdates)
print(f'Test segment with proper dates')
seg1 = test_hp.segment()
seg2 = test_hp.segment('20160704')
seg3 = test_hp.segment(start_date='2016-07-05',
end_date='20160708')
self.assertIsInstance(seg1, qt.HistoryPanel)
self.assertIsInstance(seg2, qt.HistoryPanel)
self.assertIsInstance(seg3, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp[:, :, :]
))
self.assertTrue(np.allclose(
seg2.values, test_hp[:, :, 1:10]
))
self.assertTrue(np.allclose(
seg3.values, test_hp[:, :, 2:6]
))
# check that htypes and shares are the same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
self.assertEqual(seg2.htypes, test_hp.htypes)
self.assertEqual(seg2.shares, test_hp.shares)
self.assertEqual(seg3.htypes, test_hp.htypes)
self.assertEqual(seg3.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
self.assertEqual(seg2.hdates, test_hp.hdates[1:10])
self.assertEqual(seg3.hdates, test_hp.hdates[2:6])
print(f'Test segment with non-existing but in range dates')
seg1 = test_hp.segment()
seg2 = test_hp.segment('20160703')
seg3 = test_hp.segment(start_date='2016-07-03',
end_date='20160710')
self.assertIsInstance(seg1, qt.HistoryPanel)
self.assertIsInstance(seg2, qt.HistoryPanel)
self.assertIsInstance(seg3, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp[:, :, :]
))
self.assertTrue(np.allclose(
seg2.values, test_hp[:, :, 1:10]
))
self.assertTrue(np.allclose(
seg3.values, test_hp[:, :, 1:6]
))
# check that htypes and shares are the same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
self.assertEqual(seg2.htypes, test_hp.htypes)
self.assertEqual(seg2.shares, test_hp.shares)
self.assertEqual(seg3.htypes, test_hp.htypes)
self.assertEqual(seg3.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
self.assertEqual(seg2.hdates, test_hp.hdates[1:10])
self.assertEqual(seg3.hdates, test_hp.hdates[1:6])
print(f'Test segment with out-of-range dates')
seg1 = test_hp.segment(start_date='2016-05-03',
end_date='20160910')
self.assertIsInstance(seg1, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp[:, :, :]
))
# check that htypes and shares are the same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
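# Taken together, the cases above suggest that segment() clips the requested
# dates to the available hdates range: a non-existing in-range start date
# snaps forward to the next available row, a non-existing end date snaps
# backward to the last row on or before it, and fully out-of-range boundaries
# simply return the whole panel.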
def test_slice(self):
"""测试历史数据切片的获取"""
test_hp = qt.HistoryPanel(self.data,
levels=self.shares,
columns=self.htypes,
rows=self.index2)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
print(f'Test slice with shares')
share = '000101'
slc = test_hp.slice(shares=share)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, ['000101'])
self.assertEqual(slc.htypes, test_hp.htypes)
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp[:, '000101']))
share = '000101, 000103'
slc = test_hp.slice(shares=share)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, ['000101', '000103'])
self.assertEqual(slc.htypes, test_hp.htypes)
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp[:, '000101, 000103']))
print(f'Test slice with htypes')
htype = 'open'
slc = test_hp.slice(htypes=htype)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, test_hp.shares)
self.assertEqual(slc.htypes, ['open'])
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp['open']))
htype = 'open, close'
slc = test_hp.slice(htypes=htype)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, test_hp.shares)
self.assertEqual(slc.htypes, ['open', 'close'])
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp['open, close']))
# test that slicing with "open, close" is NOT equal to slicing with "close, open"
self.assertFalse(np.allclose(slc.values, test_hp['close, open']))
print(f'Test slicing with both htypes and shares')
share = '000103, 000101'
htype = 'high, low, close'
slc = test_hp.slice(shares=share, htypes=htype)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, ['000103', '000101'])
self.assertEqual(slc.htypes, ['high', 'low', 'close'])
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp['high, low, close', '000103, 000101']))
print(f'Test Error cases')
# duplicated input
htype = 'open, close, open'
self.assertRaises(AssertionError, test_hp.slice, htypes=htype)
def test_relabel(self):
new_shares_list = ['000001', '000002', '000003', '000004', '000005']
new_shares_str = '000001, 000002, 000003, 000004, 000005'
new_htypes_list = ['close', 'volume', 'value', 'exchange']
new_htypes_str = 'close, volume, value, exchange'
temp_hp = self.hp.copy()
temp_hp.re_label(shares=new_shares_list)
print(temp_hp.info())
print(temp_hp.htypes)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.htypes, temp_hp.htypes)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.shares, new_shares_list)
temp_hp = self.hp.copy()
temp_hp.re_label(shares=new_shares_str)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.htypes, temp_hp.htypes)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.shares, new_shares_list)
temp_hp = self.hp.copy()
temp_hp.re_label(htypes=new_htypes_list)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.shares, temp_hp.shares)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.htypes, new_htypes_list)
temp_hp = self.hp.copy()
temp_hp.re_label(htypes=new_htypes_str)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.shares, temp_hp.shares)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.htypes, new_htypes_list)
        print(f'test that errors are raised')
temp_hp = self.hp.copy()
self.assertRaises(AssertionError, temp_hp.re_label, htypes=new_shares_str)
self.assertRaises(TypeError, temp_hp.re_label, htypes=123)
self.assertRaises(AssertionError, temp_hp.re_label, htypes='wrong input!')
def test_csv_to_hp(self):
pass
def test_hdf_to_hp(self):
pass
def test_hp_join(self):
        # TODO: this needs strengthening: verify with concrete examples that
        # TODO: hp_join produces correct results, especially for different
        # TODO: shares, htypes and hdates, and for different orderings of them
print(f'join two simple HistoryPanels with same shares')
temp_hp = self.hp.join(self.hp2, same_shares=True)
self.assertIsInstance(temp_hp, qt.HistoryPanel)
def test_df_to_hp(self):
print(f'test converting DataFrame to HistoryPanel')
data = np.random.randint(10, size=(10, 5))
df1 = pd.DataFrame(data)
df2 = pd.DataFrame(data, columns=str_to_list(self.shares))
df3 = pd.DataFrame(data[:, 0:4])
df4 = pd.DataFrame(data[:, 0:4], columns=str_to_list(self.htypes))
hp = qt.dataframe_to_hp(df1, htypes='close')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, [0, 1, 2, 3, 4])
self.assertEqual(hp.htypes, ['close'])
self.assertEqual(hp.hdates, [pd.Timestamp('1970-01-01 00:00:00'),
pd.Timestamp('1970-01-01 00:00:00.000000001'),
pd.Timestamp('1970-01-01 00:00:00.000000002'),
pd.Timestamp('1970-01-01 00:00:00.000000003'),
pd.Timestamp('1970-01-01 00:00:00.000000004'),
pd.Timestamp('1970-01-01 00:00:00.000000005'),
pd.Timestamp('1970-01-01 00:00:00.000000006'),
pd.Timestamp('1970-01-01 00:00:00.000000007'),
pd.Timestamp('1970-01-01 00:00:00.000000008'),
pd.Timestamp('1970-01-01 00:00:00.000000009')])
hp = qt.dataframe_to_hp(df2, shares=self.shares, htypes='close')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, str_to_list(self.shares))
self.assertEqual(hp.htypes, ['close'])
hp = qt.dataframe_to_hp(df3, shares='000100', column_type='htypes')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, ['000100'])
self.assertEqual(hp.htypes, [0, 1, 2, 3])
hp = qt.dataframe_to_hp(df4, shares='000100', htypes=self.htypes, column_type='htypes')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, ['000100'])
self.assertEqual(hp.htypes, str_to_list(self.htypes))
hp.info()
self.assertRaises(KeyError, qt.dataframe_to_hp, df1)
def test_to_dataframe(self):
""" 测试HistoryPanel对象的to_dataframe方法
"""
print(f'START TEST == test_to_dataframe')
print(f'test converting test hp to dataframe with share == "000102":')
df_test = self.hp.to_dataframe(share='000102')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.htypes), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp[:, '000102'], values))
print(f'test DataFrame conversion with share == "000100"')
df_test = self.hp.to_dataframe(share='000100')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.htypes), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp[:, '000100'], values))
print(f'test DataFrame conversion error: type incorrect')
self.assertRaises(AssertionError, self.hp.to_dataframe, share=3.0)
print(f'test DataFrame error raising with share not found error')
self.assertRaises(KeyError, self.hp.to_dataframe, share='000300')
print(f'test DataFrame conversion with htype == "close"')
df_test = self.hp.to_dataframe(htype='close')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp['close'].T, values))
print(f'test DataFrame conversion with htype == "high"')
df_test = self.hp.to_dataframe(htype='high')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp['high'].T, values))
print(f'test DataFrame conversion with htype == "high" and dropna')
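        # poison the data: the 4th date row becomes all-NaN and the 5th all-inf
        # so that dropna (and later inf_as_na) behaviour can be verified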
v = self.hp.values.astype('float')
v[:, 3, :] = np.nan
v[:, 4, :] = np.inf
test_hp = qt.HistoryPanel(v, levels=self.shares, columns=self.htypes, rows=self.index)
df_test = test_hp.to_dataframe(htype='high', dropna=True)
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates[:3]) + list(self.hp.hdates[4:]), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
target_values = test_hp['high'].T
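        # only the all-NaN date (index 3) is dropped, leaving 9 of 10 dates;
        # the inf row survives because inf_as_na is not set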
target_values = target_values[np.where(~np.isnan(target_values))].reshape(9, 5)
self.assertTrue(np.allclose(target_values, values))
print(f'test DataFrame conversion with htype == "high", dropna and treat infs as na')
v = self.hp.values.astype('float')
v[:, 3, :] = np.nan
v[:, 4, :] = np.inf
test_hp = qt.HistoryPanel(v, levels=self.shares, columns=self.htypes, rows=self.index)
df_test = test_hp.to_dataframe(htype='high', dropna=True, inf_as_na=True)
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates[:3]) + list(self.hp.hdates[5:]), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
target_values = test_hp['high'].T
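        # with inf_as_na=True both the NaN date (index 3) and the inf date
        # (index 4) are dropped, leaving 8 of 10 dates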
target_values = target_values[np.where(~np.isnan(target_values) & ~np.isinf(target_values))].reshape(8, 5)
self.assertTrue(np.allclose(target_values, values))
print(f'test DataFrame conversion error: type incorrect')
self.assertRaises(AssertionError, self.hp.to_dataframe, htype=pd.DataFrame())
print(f'test DataFrame error raising with share not found error')
self.assertRaises(KeyError, self.hp.to_dataframe, htype='non_type')
        print(f'Raises KeyError when both or neither parameter is given')
self.assertRaises(KeyError, self.hp.to_dataframe)
self.assertRaises(KeyError, self.hp.to_dataframe, share='000100', htype='close')
def test_to_df_dict(self):
"""测试HistoryPanel公有方法to_df_dict"""
print('test convert history panel slice by share')
df_dict = self.hp.to_df_dict('share')
self.assertEqual(self.hp.shares, list(df_dict.keys()))
df_dict = self.hp.to_df_dict()
self.assertEqual(self.hp.shares, list(df_dict.keys()))
        print('test convert history panel slice by htype')
df_dict = self.hp.to_df_dict('htype')
self.assertEqual(self.hp.htypes, list(df_dict.keys()))
print('test raise assertion error')
self.assertRaises(AssertionError, self.hp.to_df_dict, by='random text')
self.assertRaises(AssertionError, self.hp.to_df_dict, by=3)
print('test empty hp')
df_dict = qt.HistoryPanel().to_df_dict('share')
self.assertEqual(df_dict, {})
def test_stack_dataframes(self):
print('test stack dataframes in a list')
df1 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [2, 3, 4, 5], 'c': [3, 4, 5, 6]})
df1.index = ['20200101', '20200102', '20200103', '20200104']
df2 = pd.DataFrame({'b': [4, 3, 2, 1], 'd': [1, 1, 1, 1], 'c': [6, 5, 4, 3]})
df2.index = ['20200101', '20200102', '20200104', '20200105']
df3 = pd.DataFrame({'a': [6, 6, 6, 6], 'd': [4, 4, 4, 4], 'b': [2, 4, 6, 8]})
df3.index = ['20200101', '20200102', '20200103', '20200106']
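        # expected panel when stacking along shares: htypes become the union of
        # all columns (a, b, c, d), hdates the union of all indices; cells not
        # covered by a given frame are NaN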
values1 = np.array([[[1., 2., 3., np.nan],
[2., 3., 4., np.nan],
[3., 4., 5., np.nan],
[4., 5., 6., np.nan],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan]],
[[np.nan, 4., 6., 1.],
[np.nan, 3., 5., 1.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 2., 4., 1.],
[np.nan, 1., 3., 1.],
[np.nan, np.nan, np.nan, np.nan]],
[[6., 2., np.nan, 4.],
[6., 4., np.nan, 4.],
[6., 6., np.nan, 4.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[6., 8., np.nan, 4.]]])
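        # expected panel when stacking along htypes: the original column labels
        # (a, b, c, d) become the shares level, again padded with NaN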
values2 = np.array([[[1., np.nan, 6.],
[2., np.nan, 6.],
[3., np.nan, 6.],
[4., np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 6.]],
[[2., 4., 2.],
[3., 3., 4.],
[4., np.nan, 6.],
[5., 2., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 8.]],
[[3., 6., np.nan],
[4., 5., np.nan],
[5., np.nan, np.nan],
[6., 4., np.nan],
[np.nan, 3., np.nan],
[np.nan, np.nan, np.nan]],
[[np.nan, 1., 4.],
[np.nan, 1., 4.],
[np.nan, np.nan, 4.],
[np.nan, 1., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 4.]]])
print(df1.rename(index=pd.to_datetime))
print(df2.rename(index=pd.to_datetime))
print(df3.rename(index=pd.to_datetime))
hp1 = stack_dataframes([df1, df2, df3], stack_along='shares',
shares=['000100', '000200', '000300'])
hp2 = stack_dataframes([df1, df2, df3], stack_along='shares',
shares='000100, 000300, 000200')
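        # shares may be given as a list or a comma-separated string; values
        # stay aligned with the order of the DataFrame list either way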
print('hp1 is:\n', hp1)
print('hp2 is:\n', hp2)
self.assertEqual(hp1.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp1.shares, ['000100', '000200', '000300'])
self.assertTrue(np.allclose(hp1.values, values1, equal_nan=True))
self.assertEqual(hp2.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp2.shares, ['000100', '000300', '000200'])
self.assertTrue(np.allclose(hp2.values, values1, equal_nan=True))
hp3 = stack_dataframes([df1, df2, df3], stack_along='htypes',
htypes=['close', 'high', 'low'])
hp4 = stack_dataframes([df1, df2, df3], stack_along='htypes',
htypes='open, close, high')
print('hp3 is:\n', hp3.values)
print('hp4 is:\n', hp4.values)
self.assertEqual(hp3.htypes, ['close', 'high', 'low'])
self.assertEqual(hp3.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp3.values, values2, equal_nan=True))
self.assertEqual(hp4.htypes, ['open', 'close', 'high'])
self.assertEqual(hp4.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp4.values, values2, equal_nan=True))
print('test stack dataframes in a dict')
df1 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [2, 3, 4, 5], 'c': [3, 4, 5, 6]})
df1.index = ['20200101', '20200102', '20200103', '20200104']
df2 = pd.DataFrame({'b': [4, 3, 2, 1], 'd': [1, 1, 1, 1], 'c': [6, 5, 4, 3]})
df2.index = ['20200101', '20200102', '20200104', '20200105']
df3 = pd.DataFrame({'a': [6, 6, 6, 6], 'd': [4, 4, 4, 4], 'b': [2, 4, 6, 8]})
df3.index = ['20200101', '20200102', '20200103', '20200106']
values1 = np.array([[[1., 2., 3., np.nan],
[2., 3., 4., np.nan],
[3., 4., 5., np.nan],
[4., 5., 6., np.nan],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan]],
[[np.nan, 4., 6., 1.],
[np.nan, 3., 5., 1.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 2., 4., 1.],
[np.nan, 1., 3., 1.],
[np.nan, np.nan, np.nan, np.nan]],
[[6., 2., np.nan, 4.],
[6., 4., np.nan, 4.],
[6., 6., np.nan, 4.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[6., 8., np.nan, 4.]]])
values2 = np.array([[[1., np.nan, 6.],
[2., np.nan, 6.],
[3., np.nan, 6.],
[4., np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 6.]],
[[2., 4., 2.],
[3., 3., 4.],
[4., np.nan, 6.],
[5., 2., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 8.]],
[[3., 6., np.nan],
[4., 5., np.nan],
[5., np.nan, np.nan],
[6., 4., np.nan],
[np.nan, 3., np.nan],
[np.nan, np.nan, np.nan]],
[[np.nan, 1., 4.],
[np.nan, 1., 4.],
[np.nan, np.nan, 4.],
[np.nan, 1., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 4.]]])
print(df1.rename(index=pd.to_datetime))
print(df2.rename(index=pd.to_datetime))
print(df3.rename(index=pd.to_datetime))
hp1 = stack_dataframes(dfs={'000001.SZ': df1, '000002.SZ': df2, '000003.SZ': df3},
stack_along='shares')
hp2 = stack_dataframes(dfs={'000001.SZ': df1, '000002.SZ': df2, '000003.SZ': df3},
stack_along='shares',
shares='000100, 000300, 000200')
print('hp1 is:\n', hp1)
print('hp2 is:\n', hp2)
self.assertEqual(hp1.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp1.shares, ['000001.SZ', '000002.SZ', '000003.SZ'])
self.assertTrue(np.allclose(hp1.values, values1, equal_nan=True))
self.assertEqual(hp2.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp2.shares, ['000100', '000300', '000200'])
self.assertTrue(np.allclose(hp2.values, values1, equal_nan=True))
hp3 = stack_dataframes(dfs={'close': df1, 'high': df2, 'low': df3},
stack_along='htypes')
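        # an explicit htypes argument overrides the dict keys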
hp4 = stack_dataframes(dfs={'close': df1, 'low': df2, 'high': df3},
stack_along='htypes',
htypes='open, close, high')
print('hp3 is:\n', hp3.values)
print('hp4 is:\n', hp4.values)
self.assertEqual(hp3.htypes, ['close', 'high', 'low'])
self.assertEqual(hp3.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp3.values, values2, equal_nan=True))
self.assertEqual(hp4.htypes, ['open', 'close', 'high'])
self.assertEqual(hp4.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp4.values, values2, equal_nan=True))
def test_to_csv(self):
pass
def test_to_hdf(self):
pass
def test_fill_na(self):
"""测试填充无效值"""
print(self.hp)
new_values = self.hp.values.astype(float)
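        # triple fancy index: assigns NaN to four individual cells at
        # (level, row, col) coordinates (0,1,1), (1,3,3), (3,0,2) and (2,2,2)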
new_values[[0, 1, 3, 2], [1, 3, 0, 2], [1, 3, 2, 2]] = np.nan
print(new_values)
temp_hp = qt.HistoryPanel(values=new_values, levels=self.hp.levels, rows=self.hp.rows, columns=self.hp.columns)
self.assertTrue(np.allclose(temp_hp.values[[0, 1, 3, 2], [1, 3, 0, 2], [1, 3, 2, 2]], np.nan, equal_nan=True))
temp_hp.fillna(2.3)
filled_values = new_values.copy()
filled_values[[0, 1, 3, 2], [1, 3, 0, 2], [1, 3, 2, 2]] = 2.3
self.assertTrue(np.allclose(temp_hp.values,
filled_values, equal_nan=True))
def test_fill_inf(self):
"""测试填充无限值"""
def test_get_history_panel(self):
# TODO: implement this test case
# test get only one line of data
pass
def test_get_price_type_raw_data(self):
shares = '000039.SZ, 600748.SH, 000040.SZ'
start = '20200101'
end = '20200131'
htypes = 'open, high, low, close'
target_price_000039 = [[9.45, 9.49, 9.12, 9.17],
[9.46, 9.56, 9.4, 9.5],
[9.7, 9.76, 9.5, 9.51],
[9.7, 9.75, 9.7, 9.72],
[9.73, 9.77, 9.7, 9.73],
[9.83, 9.85, 9.71, 9.72],
[9.85, 9.85, 9.75, 9.79],
[9.96, 9.96, 9.83, 9.86],
[9.87, 9.94, 9.77, 9.93],
[9.82, 9.9, 9.76, 9.87],
[9.8, 9.85, 9.77, 9.82],
[9.84, 9.86, 9.71, 9.72],
[9.83, 9.93, 9.81, 9.86],
[9.7, 9.87, 9.7, 9.82],
[9.83, 9.86, 9.69, 9.79],
[9.8, 9.94, 9.8, 9.86]]
target_price_600748 = [[5.68, 5.68, 5.32, 5.37],
[5.62, 5.68, 5.46, 5.65],
[5.72, 5.72, 5.61, 5.62],
[5.76, 5.77, 5.6, 5.73],
[5.78, 5.84, 5.73, 5.75],
[5.89, 5.91, 5.76, 5.77],
[6.03, 6.04, 5.87, 5.89],
[5.94, 6.07, 5.94, 6.02],
[5.96, 5.98, 5.88, 5.97],
[6.04, 6.06, 5.95, 5.96],
[5.98, 6.04, 5.96, 6.03],
[6.1, 6.11, 5.89, 5.94],
[6.02, 6.12, 6., 6.1],
[5.96, 6.05, 5.88, 6.01],
[6.03, 6.03, 5.95, 5.99],
[6.02, 6.12, 5.99, 5.99]]
target_price_000040 = [[3.63, 3.83, 3.63, 3.65],
[3.99, 4.07, 3.97, 4.03],
[4.1, 4.11, 3.93, 3.95],
[4.12, 4.13, 4.06, 4.11],
[4.13, 4.19, 4.07, 4.13],
[4.27, 4.28, 4.11, 4.12],
[4.37, 4.38, 4.25, 4.29],
[4.34, 4.5, 4.32, 4.41],
[4.28, 4.35, 4.2, 4.34],
[4.41, 4.43, 4.29, 4.31],
[4.42, 4.45, 4.36, 4.41],
[4.51, 4.56, 4.33, 4.35],
[4.35, 4.55, 4.31, 4.55],
[4.3, 4.41, 4.22, 4.36],
[4.27, 4.44, 4.23, 4.34],
[4.23, 4.27, 4.18, 4.25]]
print(f'test get price type raw data with single thread')
df_list = get_price_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, freq='d')
self.assertIsInstance(df_list, dict)
self.assertEqual(len(df_list), 3)
self.assertTrue(np.allclose(df_list['000039.SZ'].values, np.array(target_price_000039)))
self.assertTrue(np.allclose(df_list['600748.SH'].values, np.array(target_price_600748)))
self.assertTrue(np.allclose(df_list['000040.SZ'].values, np.array(target_price_000040)))
        print(f'in get price type raw data, got DataFrames: \n"000039.SZ":\n'
f'{df_list["000039.SZ"]}\n"600748.SH":\n'
f'{df_list["600748.SH"]}\n"000040.SZ":\n{df_list["000040.SZ"]}')
        print(f'test get price type raw data with multiple threads')
df_list = get_price_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, freq='d', parallel=10)
self.assertIsInstance(df_list, dict)
self.assertEqual(len(df_list), 3)
self.assertTrue(np.allclose(df_list['000039.SZ'].values, np.array(target_price_000039)))
self.assertTrue(np.allclose(df_list['600748.SH'].values, np.array(target_price_600748)))
self.assertTrue(np.allclose(df_list['000040.SZ'].values, np.array(target_price_000040)))
        print(f'in get price type raw data, got DataFrames: \n"000039.SZ":\n'
f'{df_list["000039.SZ"]}\n"600748.SH":\n'
f'{df_list["600748.SH"]}\n"000040.SZ":\n{df_list["000040.SZ"]}')
def test_get_financial_report_type_raw_data(self):
shares = '000039.SZ, 600748.SH, 000040.SZ'
start = '20160101'
end = '20201231'
htypes = 'eps,basic_eps,diluted_eps,total_revenue,revenue,total_share,' \
'cap_rese,undistr_porfit,surplus_rese,net_profit'
target_eps_000039 = [[1.41],
[0.1398],
[-0.0841],
[-0.1929],
[0.37],
[0.1357],
[0.1618],
[0.1191],
[1.11],
[0.759],
[0.3061],
[0.1409],
[0.81],
[0.4187],
[0.2554],
[0.1624],
[0.14],
[-0.0898],
[-0.1444],
[0.1291]]
target_eps_600748 = [[0.41],
[0.22],
[0.22],
[0.09],
[0.42],
[0.23],
[0.22],
[0.09],
[0.36],
[0.16],
[0.15],
[0.07],
[0.47],
[0.19],
[0.12],
[0.07],
[0.32],
[0.22],
[0.14],
[0.07]]
target_eps_000040 = [[-0.6866],
[-0.134],
[-0.189],
[-0.036],
[-0.6435],
[0.05],
[0.062],
[0.0125],
[0.8282],
[1.05],
[0.985],
[0.811],
[0.41],
[0.242],
[0.113],
[0.027],
[0.19],
[0.17],
[0.17],
[0.064]]
target_basic_eps_000039 = [[1.3980000e-01, 1.3980000e-01, 6.3591954e+10, 6.3591954e+10],
[-8.4100000e-02, -8.4100000e-02, 3.9431807e+10, 3.9431807e+10],
[-1.9290000e-01, -1.9290000e-01, 1.5852177e+10, 1.5852177e+10],
[3.7000000e-01, 3.7000000e-01, 8.5815341e+10, 8.5815341e+10],
[1.3570000e-01, 1.3430000e-01, 6.1660271e+10, 6.1660271e+10],
[1.6180000e-01, 1.6040000e-01, 4.2717729e+10, 4.2717729e+10],
[1.1910000e-01, 1.1900000e-01, 1.9099547e+10, 1.9099547e+10],
[1.1100000e+00, 1.1000000e+00, 9.3497622e+10, 9.3497622e+10],
[7.5900000e-01, 7.5610000e-01, 6.6906147e+10, 6.6906147e+10],
[3.0610000e-01, 3.0380000e-01, 4.3560398e+10, 4.3560398e+10],
[1.4090000e-01, 1.4050000e-01, 1.9253639e+10, 1.9253639e+10],
[8.1000000e-01, 8.1000000e-01, 7.6299930e+10, 7.6299930e+10],
[4.1870000e-01, 4.1710000e-01, 5.3962706e+10, 5.3962706e+10],
[2.5540000e-01, 2.5440000e-01, 3.3387152e+10, 3.3387152e+10],
[1.6240000e-01, 1.6200000e-01, 1.4675987e+10, 1.4675987e+10],
[1.4000000e-01, 1.4000000e-01, 5.1111652e+10, 5.1111652e+10],
[-8.9800000e-02, -8.9800000e-02, 3.4982614e+10, 3.4982614e+10],
[-1.4440000e-01, -1.4440000e-01, 2.3542843e+10, 2.3542843e+10],
[1.2910000e-01, 1.2860000e-01, 1.0412416e+10, 1.0412416e+10],
[7.2000000e-01, 7.1000000e-01, 5.8685804e+10, 5.8685804e+10]]
target_basic_eps_600748 = [[2.20000000e-01, 2.20000000e-01, 5.29423397e+09, 5.29423397e+09],
[2.20000000e-01, 2.20000000e-01, 4.49275653e+09, 4.49275653e+09],
[9.00000000e-02, 9.00000000e-02, 1.59067065e+09, 1.59067065e+09],
[4.20000000e-01, 4.20000000e-01, 8.86555586e+09, 8.86555586e+09],
[2.30000000e-01, 2.30000000e-01, 5.44850143e+09, 5.44850143e+09],
[2.20000000e-01, 2.20000000e-01, 4.34978927e+09, 4.34978927e+09],
[9.00000000e-02, 9.00000000e-02, 1.73793793e+09, 1.73793793e+09],
[3.60000000e-01, 3.60000000e-01, 8.66375241e+09, 8.66375241e+09],
[1.60000000e-01, 1.60000000e-01, 4.72875116e+09, 4.72875116e+09],
[1.50000000e-01, 1.50000000e-01, 3.76879016e+09, 3.76879016e+09],
[7.00000000e-02, 7.00000000e-02, 1.31785454e+09, 1.31785454e+09],
[4.70000000e-01, 4.70000000e-01, 7.23391685e+09, 7.23391685e+09],
[1.90000000e-01, 1.90000000e-01, 3.76072215e+09, 3.76072215e+09],
[1.20000000e-01, 1.20000000e-01, 2.35845364e+09, 2.35845364e+09],
[7.00000000e-02, 7.00000000e-02, 1.03831865e+09, 1.03831865e+09],
[3.20000000e-01, 3.20000000e-01, 6.48880919e+09, 6.48880919e+09],
[2.20000000e-01, 2.20000000e-01, 3.72209142e+09, 3.72209142e+09],
[1.40000000e-01, 1.40000000e-01, 2.22563924e+09, 2.22563924e+09],
[7.00000000e-02, 7.00000000e-02, 8.96647052e+08, 8.96647052e+08],
[4.80000000e-01, 4.80000000e-01, 6.61917508e+09, 6.61917508e+09]]
target_basic_eps_000040 = [[-1.34000000e-01, -1.34000000e-01, 2.50438755e+09, 2.50438755e+09],
[-1.89000000e-01, -1.89000000e-01, 1.32692347e+09, 1.32692347e+09],
[-3.60000000e-02, -3.60000000e-02, 5.59073338e+08, 5.59073338e+08],
[-6.43700000e-01, -6.43700000e-01, 6.80576162e+09, 6.80576162e+09],
[5.00000000e-02, 5.00000000e-02, 6.38891620e+09, 6.38891620e+09],
[6.20000000e-02, 6.20000000e-02, 5.23267082e+09, 5.23267082e+09],
[1.25000000e-02, 1.25000000e-02, 2.22420874e+09, 2.22420874e+09],
[8.30000000e-01, 8.30000000e-01, 8.67628947e+09, 8.67628947e+09],
[1.05000000e+00, 1.05000000e+00, 5.29431716e+09, 5.29431716e+09],
[9.85000000e-01, 9.85000000e-01, 3.56822382e+09, 3.56822382e+09],
[8.11000000e-01, 8.11000000e-01, 1.06613439e+09, 1.06613439e+09],
[4.10000000e-01, 4.10000000e-01, 8.13102532e+09, 8.13102532e+09],
[2.42000000e-01, 2.42000000e-01, 5.17971521e+09, 5.17971521e+09],
[1.13000000e-01, 1.13000000e-01, 3.21704120e+09, 3.21704120e+09],
[2.70000000e-02, 2.70000000e-02, 8.41966738e+08, 8.24272235e+08],
[1.90000000e-01, 1.90000000e-01, 3.77350171e+09, 3.77350171e+09],
[1.70000000e-01, 1.70000000e-01, 2.38643892e+09, 2.38643892e+09],
[1.70000000e-01, 1.70000000e-01, 1.29127117e+09, 1.29127117e+09],
[6.40000000e-02, 6.40000000e-02, 6.03256858e+08, 6.03256858e+08],
[1.30000000e-01, 1.30000000e-01, 1.66572918e+09, 1.66572918e+09]]
target_total_share_000039 = [[3.5950140e+09, 4.8005360e+09, 2.1573660e+10, 3.5823430e+09],
[3.5860750e+09, 4.8402300e+09, 2.0750827e+10, 3.5823430e+09],
[3.5860750e+09, 4.9053550e+09, 2.0791307e+10, 3.5823430e+09],
[3.5845040e+09, 4.8813110e+09, 2.1482857e+10, 3.5823430e+09],
[3.5831490e+09, 4.9764250e+09, 2.0926816e+10, 3.2825850e+09],
[3.5825310e+09, 4.8501270e+09, 2.1020418e+10, 3.2825850e+09],
[2.9851110e+09, 5.4241420e+09, 2.2438350e+10, 3.2825850e+09],
[2.9849890e+09, 4.1284000e+09, 2.2082769e+10, 3.2825850e+09],
[2.9849610e+09, 4.0838010e+09, 2.1045994e+10, 3.2815350e+09],
[2.9849560e+09, 4.2491510e+09, 1.9694345e+10, 3.2815350e+09],
[2.9846970e+09, 4.2351600e+09, 2.0016361e+10, 3.2815350e+09],
[2.9828890e+09, 4.2096630e+09, 1.9734494e+10, 3.2815350e+09],
[2.9813960e+09, 3.4564240e+09, 1.8562738e+10, 3.2793790e+09],
[2.9803530e+09, 3.0759650e+09, 1.8076208e+10, 3.2793790e+09],
[2.9792680e+09, 3.1376690e+09, 1.7994776e+10, 3.2793790e+09],
[2.9785770e+09, 3.1265850e+09, 1.7495053e+10, 3.2793790e+09],
[2.9783640e+09, 3.1343850e+09, 1.6740840e+10, 3.2035780e+09],
[2.9783590e+09, 3.1273880e+09, 1.6578389e+10, 3.2035780e+09],
[2.9782780e+09, 3.1169280e+09, 1.8047639e+10, 3.2035780e+09],
[2.9778200e+09, 3.1818630e+09, 1.7663145e+10, 3.2035780e+09]]
target_total_share_600748 = [[1.84456289e+09, 2.60058426e+09, 5.72443733e+09, 4.58026529e+08],
[1.84456289e+09, 2.60058426e+09, 5.72096899e+09, 4.58026529e+08],
[1.84456289e+09, 2.60058426e+09, 5.65738237e+09, 4.58026529e+08],
[1.84456289e+09, 2.60058426e+09, 5.50257806e+09, 4.58026529e+08],
[1.84456289e+09, 2.59868164e+09, 5.16741523e+09, 4.44998882e+08],
[1.84456289e+09, 2.59684471e+09, 5.14677280e+09, 4.44998882e+08],
[1.84456289e+09, 2.59684471e+09, 4.94955591e+09, 4.44998882e+08],
[1.84456289e+09, 2.59684471e+09, 4.79001451e+09, 4.44998882e+08],
[1.84456289e+09, 3.11401684e+09, 4.46326988e+09, 4.01064256e+08],
[1.84456289e+09, 3.11596723e+09, 4.45419136e+09, 4.01064256e+08],
[1.84456289e+09, 3.11596723e+09, 4.39652948e+09, 4.01064256e+08],
[1.84456289e+09, 3.18007783e+09, 4.26608403e+09, 4.01064256e+08],
[1.84456289e+09, 3.10935622e+09, 3.78417688e+09, 3.65651701e+08],
[1.84456289e+09, 3.10935622e+09, 3.65806574e+09, 3.65651701e+08],
[1.84456289e+09, 3.10935622e+09, 3.62063090e+09, 3.65651701e+08],
[1.84456289e+09, 3.10935622e+09, 3.50063915e+09, 3.65651701e+08],
[1.41889453e+09, 3.55940850e+09, 3.22272993e+09, 3.62124939e+08],
[1.41889453e+09, 3.56129650e+09, 3.11477476e+09, 3.62124939e+08],
[1.41889453e+09, 3.59632888e+09, 3.06836903e+09, 3.62124939e+08],
[1.08337087e+09, 3.37400726e+07, 3.00918704e+09, 3.62124939e+08]]
target_total_share_000040 = [[1.48687387e+09, 1.06757900e+10, 8.31900755e+08, 2.16091994e+08],
[1.48687387e+09, 1.06757900e+10, 7.50177302e+08, 2.16091994e+08],
[1.48687387e+09, 1.06757899e+10, 9.90255974e+08, 2.16123282e+08],
[1.48687387e+09, 1.06757899e+10, 1.03109866e+09, 2.16091994e+08],
[1.48687387e+09, 1.06757910e+10, 2.07704745e+09, 2.16123282e+08],
[1.48687387e+09, 1.06757910e+10, 2.09608665e+09, 2.16123282e+08],
[1.48687387e+09, 1.06803833e+10, 2.13354083e+09, 2.16123282e+08],
[1.48687387e+09, 1.06804090e+10, 2.11489364e+09, 2.16123282e+08],
[1.33717327e+09, 8.87361727e+09, 2.42939924e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 2.34220254e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 2.16390368e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 1.07961915e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 8.58866066e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 6.87024393e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 5.71554565e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 5.54241222e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361726e+09, 5.10059576e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361726e+09, 4.59351639e+08, 1.88489589e+08],
[4.69593364e+08, 2.78355875e+08, 4.13430814e+08, 1.88489589e+08],
[4.69593364e+08, 2.74235459e+08, 3.83557678e+08, 1.88489589e+08]]
target_net_profit_000039 = [[np.nan],
[2.422180e+08],
[np.nan],
[2.510113e+09],
[np.nan],
[1.102220e+09],
[np.nan],
[4.068455e+09],
[np.nan],
[1.315957e+09],
[np.nan],
[3.158415e+09],
[np.nan],
[1.066509e+09],
[np.nan],
[7.349830e+08],
[np.nan],
[-5.411600e+08],
[np.nan],
[2.271961e+09]]
target_net_profit_600748 = [[np.nan],
[4.54341757e+08],
[np.nan],
[9.14476670e+08],
[np.nan],
[5.25360283e+08],
[np.nan],
[9.24502415e+08],
[np.nan],
[4.66560302e+08],
[np.nan],
[9.15265285e+08],
[np.nan],
[2.14639674e+08],
[np.nan],
[7.45093049e+08],
[np.nan],
[2.10967312e+08],
[np.nan],
[6.04572711e+08]]
target_net_profit_000040 = [[np.nan],
[-2.82458846e+08],
[np.nan],
[-9.57130872e+08],
[np.nan],
[9.22114527e+07],
[np.nan],
[1.12643819e+09],
[np.nan],
[1.31715269e+09],
[np.nan],
[5.39940093e+08],
[np.nan],
[1.51440838e+08],
[np.nan],
[1.75339071e+08],
[np.nan],
[8.04740415e+07],
[np.nan],
[6.20445815e+07]]
print('test get financial data, in multi thread mode')
df_list = get_financial_report_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, parallel=4)
self.assertIsInstance(df_list, tuple)
self.assertEqual(len(df_list), 4)
self.assertEqual(len(df_list[0]), 3)
self.assertEqual(len(df_list[1]), 3)
self.assertEqual(len(df_list[2]), 3)
self.assertEqual(len(df_list[3]), 3)
        # check that all returned items have the correct type
        self.assertTrue(all(isinstance(item, pd.DataFrame) for subdict in df_list for item in subdict.values()))
        # check whether any of the returned data is empty
        print(all(item.empty for subdict in df_list for item in subdict.values()))
        # check that each data group is correct and consistently ordered; skip the check if the data is empty
if df_list[0]['000039.SZ'].empty:
print(f'income data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000039.SZ'].values, target_basic_eps_000039))
if df_list[0]['600748.SH'].empty:
print(f'income data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['600748.SH'].values, target_basic_eps_600748))
if df_list[0]['000040.SZ'].empty:
print(f'income data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000040.SZ'].values, target_basic_eps_000040))
if df_list[1]['000039.SZ'].empty:
print(f'indicator data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000039.SZ'].values, target_eps_000039))
if df_list[1]['600748.SH'].empty:
print(f'indicator data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['600748.SH'].values, target_eps_600748))
if df_list[1]['000040.SZ'].empty:
print(f'indicator data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000040.SZ'].values, target_eps_000040))
if df_list[2]['000039.SZ'].empty:
print(f'balance data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000039.SZ'].values, target_total_share_000039))
if df_list[2]['600748.SH'].empty:
print(f'balance data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['600748.SH'].values, target_total_share_600748))
if df_list[2]['000040.SZ'].empty:
print(f'balance data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000040.SZ'].values, target_total_share_000040))
if df_list[3]['000039.SZ'].empty:
print(f'cash flow data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000039.SZ'].values, target_net_profit_000039, equal_nan=True))
if df_list[3]['600748.SH'].empty:
print(f'cash flow data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['600748.SH'].values, target_net_profit_600748, equal_nan=True))
if df_list[3]['000040.SZ'].empty:
print(f'cash flow data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000040.SZ'].values, target_net_profit_000040, equal_nan=True))
print('test get financial data, in single thread mode')
df_list = get_financial_report_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, parallel=0)
self.assertIsInstance(df_list, tuple)
self.assertEqual(len(df_list), 4)
self.assertEqual(len(df_list[0]), 3)
self.assertEqual(len(df_list[1]), 3)
self.assertEqual(len(df_list[2]), 3)
self.assertEqual(len(df_list[3]), 3)
        # check that all returned items have the correct type
        self.assertTrue(all(isinstance(item, pd.DataFrame) for subdict in df_list for item in subdict.values()))
        # check whether any data is empty; network issues may yield empty results
        self.assertFalse(all(item.empty for subdict in df_list for item in subdict.values()))
        # check that each data group is correct and consistently ordered; skip the check if the data is empty
if df_list[0]['000039.SZ'].empty:
print(f'income data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000039.SZ'].values, target_basic_eps_000039))
if df_list[0]['600748.SH'].empty:
print(f'income data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['600748.SH'].values, target_basic_eps_600748))
if df_list[0]['000040.SZ'].empty:
print(f'income data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000040.SZ'].values, target_basic_eps_000040))
if df_list[1]['000039.SZ'].empty:
print(f'indicator data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000039.SZ'].values, target_eps_000039))
if df_list[1]['600748.SH'].empty:
print(f'indicator data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['600748.SH'].values, target_eps_600748))
if df_list[1]['000040.SZ'].empty:
print(f'indicator data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000040.SZ'].values, target_eps_000040))
if df_list[2]['000039.SZ'].empty:
print(f'balance data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000039.SZ'].values, target_total_share_000039))
if df_list[2]['600748.SH'].empty:
print(f'balance data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['600748.SH'].values, target_total_share_600748))
if df_list[2]['000040.SZ'].empty:
print(f'balance data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000040.SZ'].values, target_total_share_000040))
if df_list[3]['000039.SZ'].empty:
print(f'cash flow data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000039.SZ'].values, target_net_profit_000039, equal_nan=True))
if df_list[3]['600748.SH'].empty:
print(f'cash flow data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['600748.SH'].values, target_net_profit_600748, equal_nan=True))
if df_list[3]['000040.SZ'].empty:
print(f'cash flow data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000040.SZ'].values, target_net_profit_000040, equal_nan=True))
def test_get_composite_type_raw_data(self):
pass
class TestUtilityFuncs(unittest.TestCase):
def setUp(self):
pass
def test_time_string_format(self):
        print('Testing qt.time_str_format() function:')
t = 3.14
self.assertEqual(time_str_format(t), '3s 140.0ms')
self.assertEqual(time_str_format(t, estimation=True), '3s ')
self.assertEqual(time_str_format(t, short_form=True), '3"140')
self.assertEqual(time_str_format(t, estimation=True, short_form=True), '3"')
t = 300.14
self.assertEqual(time_str_format(t), '5min 140.0ms')
self.assertEqual(time_str_format(t, estimation=True), '5min ')
self.assertEqual(time_str_format(t, short_form=True), "5'140")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "5'")
t = 7435.0014
self.assertEqual(time_str_format(t), '2hrs 3min 55s 1.4ms')
self.assertEqual(time_str_format(t, estimation=True), '2hrs ')
self.assertEqual(time_str_format(t, short_form=True), "2H3'55\"001")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "2H")
t = 88425.0509
self.assertEqual(time_str_format(t), '1days 33min 45s 50.9ms')
self.assertEqual(time_str_format(t, estimation=True), '1days ')
self.assertEqual(time_str_format(t, short_form=True), "1D33'45\"051")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "1D")
def test_str_to_list(self):
self.assertEqual(str_to_list('a,b,c,d,e'), ['a', 'b', 'c', 'd', 'e'])
self.assertEqual(str_to_list('a, b, c '), ['a', 'b', 'c'])
self.assertEqual(str_to_list('a, b: c', sep_char=':'), ['a,b', 'c'])
self.assertEqual(str_to_list('abc'), ['abc'])
self.assertEqual(str_to_list(''), [])
self.assertRaises(AssertionError, str_to_list, 123)
def test_list_or_slice(self):
str_dict = {'close': 0, 'open': 1, 'high': 2, 'low': 3}
self.assertEqual(list_or_slice(slice(1, 2, 1), str_dict), slice(1, 2, 1))
self.assertEqual(list_or_slice('open', str_dict), [1])
self.assertEqual(list(list_or_slice('close, high, low', str_dict)), [0, 2, 3])
self.assertEqual(list(list_or_slice('close:high', str_dict)), [0, 1, 2])
self.assertEqual(list(list_or_slice(['open'], str_dict)), [1])
self.assertEqual(list(list_or_slice(['open', 'high'], str_dict)), [1, 2])
self.assertEqual(list(list_or_slice(0, str_dict)), [0])
self.assertEqual(list(list_or_slice([0, 2], str_dict)), [0, 2])
self.assertEqual(list(list_or_slice([True, False, True, False], str_dict)), [0, 2])
def test_labels_to_dict(self):
target_list = [0, 1, 10, 100]
target_dict = {'close': 0, 'open': 1, 'high': 2, 'low': 3}
target_dict2 = {'close': 0, 'open': 2, 'high': 1, 'low': 3}
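        # labels_to_dict maps each label to its position in target_list,
        # not to the list's values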
self.assertEqual(labels_to_dict('close, open, high, low', target_list), target_dict)
self.assertEqual(labels_to_dict(['close', 'open', 'high', 'low'], target_list), target_dict)
self.assertEqual(labels_to_dict('close, high, open, low', target_list), target_dict2)
self.assertEqual(labels_to_dict(['close', 'high', 'open', 'low'], target_list), target_dict2)
def test_input_to_list(self):
""" test util function input_to_list()"""
self.assertEqual(input_to_list(5, 3), [5, 5, 5])
self.assertEqual(input_to_list(5, 3, 0), [5, 5, 5])
self.assertEqual(input_to_list([5], 3, 0), [5, 0, 0])
self.assertEqual(input_to_list([5, 4], 3, 0), [5, 4, 0])
def test_regulate_date_format(self):
self.assertEqual(regulate_date_format('2019/11/06'), '20191106')
self.assertEqual(regulate_date_format('2019-11-06'), '20191106')
self.assertEqual(regulate_date_format('20191106'), '20191106')
self.assertEqual(regulate_date_format('191106'), '20061119')
self.assertEqual(regulate_date_format('830522'), '19830522')
self.assertEqual(regulate_date_format(datetime.datetime(2010, 3, 15)), '20100315')
self.assertEqual(regulate_date_format(pd.Timestamp('2010.03.15')), '20100315')
self.assertRaises(ValueError, regulate_date_format, 'abc')
self.assertRaises(ValueError, regulate_date_format, '2019/13/43')
def test_list_to_str_format(self):
self.assertEqual(list_to_str_format(['close', 'open', 'high', 'low']),
'close,open,high,low')
self.assertEqual(list_to_str_format(['letters', ' ', '123 4', 123, ' kk l']),
'letters,,1234,kkl')
self.assertEqual(list_to_str_format('a string input'),
'a,string,input')
self.assertEqual(list_to_str_format('already,a,good,string'),
'already,a,good,string')
self.assertRaises(AssertionError, list_to_str_format, 123)
def test_is_trade_day(self):
"""test if the funcion maybe_trade_day() and is_market_trade_day() works properly
"""
date_trade = '20210401'
date_holiday = '20210102'
date_weekend = '20210424'
date_seems_trade_day = '20210217'
date_too_early = '19890601'
date_too_late = '20230105'
date_christmas = '20201225'
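        # 2021-02-17 is a Wednesday but falls inside the 2021 Spring Festival
        # holiday: maybe_trade_day() (a simple weekday heuristic) accepts it,
        # while is_market_trade_day() (actual exchange calendar) rejects it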
self.assertTrue(maybe_trade_day(date_trade))
self.assertFalse(maybe_trade_day(date_holiday))
self.assertFalse(maybe_trade_day(date_weekend))
self.assertTrue(maybe_trade_day(date_seems_trade_day))
self.assertTrue(maybe_trade_day(date_too_early))
self.assertTrue(maybe_trade_day(date_too_late))
self.assertTrue(maybe_trade_day(date_christmas))
self.assertTrue(is_market_trade_day(date_trade))
self.assertFalse(is_market_trade_day(date_holiday))
self.assertFalse(is_market_trade_day(date_weekend))
self.assertFalse(is_market_trade_day(date_seems_trade_day))
self.assertFalse(is_market_trade_day(date_too_early))
self.assertFalse(is_market_trade_day(date_too_late))
self.assertTrue(is_market_trade_day(date_christmas))
self.assertFalse(is_market_trade_day(date_christmas, exchange='XHKG'))
date_trade = pd.to_datetime('20210401')
date_holiday = pd.to_datetime('20210102')
date_weekend = pd.to_datetime('20210424')
self.assertTrue(maybe_trade_day(date_trade))
self.assertFalse(maybe_trade_day(date_holiday))
self.assertFalse(maybe_trade_day(date_weekend))
def test_weekday_name(self):
""" test util func weekday_name()"""
self.assertEqual(weekday_name(0), 'Monday')
self.assertEqual(weekday_name(1), 'Tuesday')
self.assertEqual(weekday_name(2), 'Wednesday')
self.assertEqual(weekday_name(3), 'Thursday')
self.assertEqual(weekday_name(4), 'Friday')
self.assertEqual(weekday_name(5), 'Saturday')
self.assertEqual(weekday_name(6), 'Sunday')
def test_list_truncate(self):
""" test util func list_truncate()"""
        lst = [1, 2, 3, 4, 5]
        ls = list_truncate(lst, 2)
        self.assertEqual(ls[0], [1, 2])
        self.assertEqual(ls[1], [3, 4])
        self.assertEqual(ls[2], [5])
        self.assertRaises(AssertionError, list_truncate, lst, 0)
        self.assertRaises(AssertionError, list_truncate, 12, 0)
        self.assertRaises(AssertionError, list_truncate, 0, lst)
def test_maybe_trade_day(self):
""" test util function maybe_trade_day()"""
self.assertTrue(maybe_trade_day('20220104'))
self.assertTrue(maybe_trade_day('2021-12-31'))
self.assertTrue(maybe_trade_day(pd.to_datetime('2020/03/06')))
self.assertFalse(maybe_trade_day('2020-01-01'))
self.assertFalse(maybe_trade_day('2020/10/06'))
self.assertRaises(TypeError, maybe_trade_day, 'aaa')
def test_prev_trade_day(self):
"""test the function prev_trade_day()
"""
date_trade = '20210401'
date_holiday = '20210102'
prev_holiday = pd.to_datetime(date_holiday) - pd.Timedelta(2, 'd')
date_weekend = '20210424'
prev_weekend = pd.to_datetime(date_weekend) - pd.Timedelta(1, 'd')
date_seems_trade_day = '20210217'
prev_seems_trade_day = '20210217'
date_too_early = '19890601'
date_too_late = '20230105'
date_christmas = '20201225'
self.assertEqual(pd.to_datetime(prev_trade_day(date_trade)),
pd.to_datetime(date_trade))
self.assertEqual(pd.to_datetime(prev_trade_day(date_holiday)),
pd.to_datetime(prev_holiday))
self.assertEqual(pd.to_datetime(prev_trade_day(date_weekend)),
pd.to_datetime(prev_weekend))
self.assertEqual(pd.to_datetime(prev_trade_day(date_seems_trade_day)),
                         pd.to_datetime(prev_seems_trade_day))
from functools import partial
from itertools import product
from string import ascii_letters
import numpy as np
from pandas import (
Categorical,
DataFrame,
MultiIndex,
Series,
Timestamp,
date_range,
period_range,
)
from .pandas_vb_common import tm
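# groupby methods that are undefined for the given dtype; presumably consumed
# by the parameterized benchmarks to skip invalid method/dtype combinations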
method_blocklist = {
"object": {
"median",
"prod",
"sem",
"cumsum",
"sum",
"cummin",
"mean",
"max",
"skew",
"cumprod",
"cummax",
"pct_change",
"min",
"var",
"mad",
"describe",
"std",
"quantile",
},
"datetime": {
"median",
"prod",
"sem",
"cumsum",
"sum",
"mean",
"skew",
"cumprod",
"cummax",
"pct_change",
"var",
"mad",
"describe",
"std",
},
}
class ApplyDictReturn:
def setup(self):
self.labels = np.arange(1000).repeat(10)
self.data = Series(np.random.randn(len(self.labels)))
def time_groupby_apply_dict_return(self):
self.data.groupby(self.labels).apply(
lambda x: {"first": x.values[0], "last": x.values[-1]}
)
class Apply:
param_names = ["factor"]
params = [4, 5]
def setup(self, factor):
N = 10 ** factor
# two cases:
# - small groups: small data (N**4) + many labels (2000) -> average group
# size of 5 (-> larger overhead of slicing method)
# - larger groups: larger data (N**5) + fewer labels (20) -> average group
# size of 5000
labels = np.random.randint(0, 2000 if factor == 4 else 20, size=N)
labels2 = np.random.randint(0, 3, size=N)
df = DataFrame(
{
"key": labels,
"key2": labels2,
"value1": np.random.randn(N),
"value2": ["foo", "bar", "baz", "qux"] * (N // 4),
}
)
self.df = df
def time_scalar_function_multi_col(self, factor):
self.df.groupby(["key", "key2"]).apply(lambda x: 1)
def time_scalar_function_single_col(self, factor):
self.df.groupby("key").apply(lambda x: 1)
@staticmethod
def df_copy_function(g):
# ensure that the group name is available (see GH #15062)
g.name
return g.copy()
def time_copy_function_multi_col(self, factor):
self.df.groupby(["key", "key2"]).apply(self.df_copy_function)
def time_copy_overhead_single_col(self, factor):
self.df.groupby("key").apply(self.df_copy_function)
class Groups:
param_names = ["key"]
params = ["int64_small", "int64_large", "object_small", "object_large"]
def setup_cache(self):
size = 10 ** 6
data = {
"int64_small": Series(np.random.randint(0, 100, size=size)),
"int64_large": Series(np.random.randint(0, 10000, size=size)),
"object_small": Series(
tm.makeStringIndex(100).take(np.random.randint(0, 100, size=size))
),
"object_large": Series(
tm.makeStringIndex(10000).take(np.random.randint(0, 10000, size=size))
),
}
return data
def setup(self, data, key):
self.ser = data[key]
def time_series_groups(self, data, key):
self.ser.groupby(self.ser).groups
def time_series_indices(self, data, key):
self.ser.groupby(self.ser).indices
class GroupManyLabels:
params = [1, 1000]
param_names = ["ncols"]
def setup(self, ncols):
N = 1000
data = np.random.randn(N, ncols)
self.labels = np.random.randint(0, 100, size=N)
self.df = DataFrame(data)
def time_sum(self, ncols):
self.df.groupby(self.labels).sum()
class Nth:
param_names = ["dtype"]
params = ["float32", "float64", "datetime", "object"]
def setup(self, dtype):
N = 10 ** 5
# with datetimes (GH7555)
if dtype == "datetime":
values = date_range("1/1/2011", periods=N, freq="s")
elif dtype == "object":
values = ["foo"] * N
else:
values = np.arange(N).astype(dtype)
key = np.arange(N)
self.df = DataFrame({"key": key, "values": values})
self.df.iloc[1, 1] = np.nan # insert missing data
def time_frame_nth_any(self, dtype):
self.df.groupby("key").nth(0, dropna="any")
def time_groupby_nth_all(self, dtype):
self.df.groupby("key").nth(0, dropna="all")
def time_frame_nth(self, dtype):
self.df.groupby("key").nth(0)
def time_series_nth_any(self, dtype):
self.df["values"].groupby(self.df["key"]).nth(0, dropna="any")
def time_series_nth_all(self, dtype):
self.df["values"].groupby(self.df["key"]).nth(0, dropna="all")
def time_series_nth(self, dtype):
self.df["values"].groupby(self.df["key"]).nth(0)
class DateAttributes:
def setup(self):
rng = date_range("1/1/2000", "12/31/2005", freq="H")
self.year, self.month, self.day = rng.year, rng.month, rng.day
self.ts = Series(np.random.randn(len(rng)), index=rng)
def time_len_groupby_object(self):
len(self.ts.groupby([self.year, self.month, self.day]))
class Int64:
def setup(self):
arr = np.random.randint(-1 << 12, 1 << 12, (1 << 17, 5))
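        # append five times as many randomly chosen duplicate rows, then
        # shuffle, so the int64 groupby keys contain many repeated values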
i = np.random.choice(len(arr), len(arr) * 5)
arr = np.vstack((arr, arr[i]))
i = np.random.permutation(len(arr))
arr = arr[i]
self.cols = list("abcde")
self.df = | DataFrame(arr, columns=self.cols) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pytest
import os
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import numpy.testing as npt
from numpy.linalg import norm, lstsq
from numpy.random import randn
from flaky import flaky
from lifelines import CoxPHFitter, WeibullAFTFitter, KaplanMeierFitter, ExponentialFitter
from lifelines.datasets import load_regression_dataset, load_larynx, load_waltons, load_rossi
from lifelines import utils
from lifelines import exceptions
from lifelines.utils.sklearn_adapter import sklearn_adapter
from lifelines.utils.safe_exp import safe_exp
def test_format_p_values():
assert utils.format_p_value(2)(0.004) == "<0.005"
assert utils.format_p_value(3)(0.004) == "0.004"
assert utils.format_p_value(3)(0.000) == "<0.0005"
assert utils.format_p_value(3)(0.005) == "0.005"
assert utils.format_p_value(3)(0.2111) == "0.211"
assert utils.format_p_value(3)(0.2119) == "0.212"
def test_ridge_regression_with_penalty_is_less_than_without_penalty():
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1=2.0)[0]) <= norm(utils.ridge_regression(X, Y)[0])
assert norm(utils.ridge_regression(X, Y, c1=1.0, c2=1.0)[0]) <= norm(utils.ridge_regression(X, Y)[0])
def test_ridge_regression_with_extreme_c1_penalty_equals_close_to_zero_vector():
c1 = 10e8
c2 = 0.0
offset = np.ones(2)
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1, c2, offset)[0]) < 10e-4
def test_ridge_regression_with_extreme_c2_penalty_equals_close_to_offset():
c1 = 0.0
c2 = 10e8
offset = np.ones(2)
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1, c2, offset)[0] - offset) < 10e-4
def test_lstsq_returns_similar_values_to_ridge_regression():
X = randn(2, 2)
Y = randn(2)
expected = lstsq(X, Y, rcond=None)[0]
assert norm(utils.ridge_regression(X, Y)[0] - expected) < 10e-4
def test_lstsq_returns_correct_values():
X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
beta, V = utils.ridge_regression(X, y)
expected_beta = [-0.98684211, -0.07894737]
expected_v = [
[-0.03289474, -0.49342105, 0.06578947, 0.03289474, 0.49342105],
[-0.30263158, 0.46052632, -0.39473684, 0.30263158, -0.46052632],
]
assert norm(beta - expected_beta) < 10e-4
for V_row, e_v_row in zip(V, expected_v):
assert norm(V_row - e_v_row) < 1e-4
def test_unnormalize():
df = load_larynx()
m = df.mean(0)
s = df.std(0)
ndf = utils.normalize(df)
npt.assert_almost_equal(df.values, utils.unnormalize(ndf, m, s).values)
def test_normalize():
df = load_larynx()
n, d = df.shape
npt.assert_almost_equal(utils.normalize(df).mean(0).values, np.zeros(d))
npt.assert_almost_equal(utils.normalize(df).std(0).values, np.ones(d))
def test_median():
sv = pd.DataFrame(1 - np.linspace(0, 1, 1000))
assert utils.median_survival_times(sv) == 500
def test_median_accepts_series():
sv = pd.Series(1 - np.linspace(0, 1, 1000))
assert utils.median_survival_times(sv) == 500
def test_qth_survival_times_with_varying_datatype_inputs():
sf_list = [1.0, 0.75, 0.5, 0.25, 0.0]
sf_array = np.array([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_no_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_series_index = pd.Series([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_series_no_index = pd.Series([1.0, 0.75, 0.5, 0.25, 0.0])
q = 0.5
assert utils.qth_survival_times(q, sf_list) == 2
assert utils.qth_survival_times(q, sf_array) == 2
assert utils.qth_survival_times(q, sf_df_no_index) == 2
assert utils.qth_survival_times(q, sf_df_index) == 30
assert utils.qth_survival_times(q, sf_series_index) == 30
assert utils.qth_survival_times(q, sf_series_no_index) == 2
def test_qth_survival_times_multi_dim_input():
sf = np.linspace(1, 0, 50)
sf_multi_df = pd.DataFrame({"sf": sf, "sf**2": sf ** 2})
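    # sf decays linearly, so it crosses 0.5 at index 25; sf**2 decays faster
    # and crosses 0.5 earlier, at index 15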
medians = utils.qth_survival_times(0.5, sf_multi_df)
assert medians["sf"].loc[0.5] == 25
assert medians["sf**2"].loc[0.5] == 15
def test_qth_survival_time_returns_inf():
sf = pd.Series([1.0, 0.7, 0.6])
assert utils.qth_survival_time(0.5, sf) == np.inf
def test_qth_survival_time_accepts_a_model():
kmf = KaplanMeierFitter().fit([1.0, 0.7, 0.6])
assert utils.qth_survival_time(0.8, kmf) > 0
def test_qth_survival_time_with_dataframe():
sf_df_no_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_df_too_many_columns = pd.DataFrame([[1, 2], [3, 4]])
assert utils.qth_survival_time(0.5, sf_df_no_index) == 2
assert utils.qth_survival_time(0.5, sf_df_index) == 30
with pytest.raises(ValueError):
utils.qth_survival_time(0.5, sf_df_too_many_columns)
def test_qth_survival_times_with_multivariate_q():
sf = np.linspace(1, 0, 50)
sf_multi_df = pd.DataFrame({"sf": sf, "sf**2": sf ** 2})
assert_frame_equal(
utils.qth_survival_times([0.2, 0.5], sf_multi_df),
pd.DataFrame([[40, 28], [25, 15]], index=[0.2, 0.5], columns=["sf", "sf**2"]),
)
assert_frame_equal(
utils.qth_survival_times([0.2, 0.5], sf_multi_df["sf"]), pd.DataFrame([40, 25], index=[0.2, 0.5], columns=["sf"])
)
assert_frame_equal(utils.qth_survival_times(0.5, sf_multi_df), pd.DataFrame([[25, 15]], index=[0.5], columns=["sf", "sf**2"]))
assert utils.qth_survival_times(0.5, sf_multi_df["sf"]) == 25
def test_qth_survival_times_with_duplicate_q_returns_valid_index_and_shape():
sf = pd.DataFrame(np.linspace(1, 0, 50))
q = pd.Series([0.5, 0.5, 0.2, 0.0, 0.0])
actual = utils.qth_survival_times(q, sf)
assert actual.shape[0] == len(q)
assert actual.index[0] == actual.index[1]
assert_series_equal(actual.iloc[0], actual.iloc[1])
npt.assert_almost_equal(actual.index.values, q.values)
def test_datetimes_to_durations_with_different_frequencies():
# days
start_date = ["2013-10-10 0:00:00", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", "2013-10-10 0:00:00", "2013-10-15"]
T, C = utils.datetimes_to_durations(start_date, end_date)
npt.assert_almost_equal(T, np.array([3, 1, 5 + 365]))
npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))
# years
start_date = ["2013-10-10", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", "2013-10-10", "2013-10-15"]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="Y")
npt.assert_almost_equal(T, np.array([0, 0, 1]))
npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))
# hours
start_date = ["2013-10-10 17:00:00", "2013-10-09 0:00:00", "2013-10-10 23:00:00"]
end_date = ["2013-10-10 18:00:00", "2013-10-10 0:00:00", "2013-10-11 2:00:00"]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="h")
npt.assert_almost_equal(T, np.array([1, 24, 3]))
npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))
def test_datetimes_to_durations_will_handle_dates_above_fill_date():
start_date = ["2013-10-08", "2013-10-09", "2013-10-10"]
end_date = ["2013-10-10", "2013-10-12", "2013-10-15"]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="D", fill_date="2013-10-12")
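    # the third end date (2013-10-15) falls after fill_date, so that subject is
    # censored (E=0) at fill_date, giving a duration of 2 days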
npt.assert_almost_equal(C, np.array([1, 1, 0], dtype=bool))
npt.assert_almost_equal(T, np.array([2, 3, 2]))
def test_datetimes_to_durations_will_handle_dates_above_multi_fill_date():
start_date = ["2013-10-08", "2013-10-09", "2013-10-10"]
end_date = ["2013-10-10", None, None]
last_observation = ["2013-10-10", "2013-10-12", "2013-10-14"]
T, E = utils.datetimes_to_durations(start_date, end_date, freq="D", fill_date=last_observation)
npt.assert_almost_equal(E, np.array([1, 0, 0], dtype=bool))
npt.assert_almost_equal(T, np.array([2, 3, 4]))
def test_datetimes_to_durations_will_handle_end_dates_above_multi_fill_date():
start_date = ["2013-10-08", "2013-10-09", "2013-10-10"]
end_date = ["2013-10-10", None, "2013-10-20"]
last_observation = ["2013-10-10", "2013-10-12", "2013-10-14"]
T, E = utils.datetimes_to_durations(start_date, end_date, freq="D", fill_date=last_observation)
npt.assert_almost_equal(E, np.array([1, 0, 0], dtype=bool))
npt.assert_almost_equal(T, np.array([2, 3, 4]))
def test_datetimes_to_durations_censor():
start_date = ["2013-10-10", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", None, ""]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="Y")
npt.assert_almost_equal(C, np.array([1, 0, 0], dtype=bool))
def test_datetimes_to_durations_custom_censor():
start_date = ["2013-10-10", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", "NaT", ""]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="Y", na_values=["NaT", ""])
npt.assert_almost_equal(C, np.array([1, 0, 0], dtype=bool))
def test_survival_events_from_table_no_ties():
T, C = np.array([1, 2, 3, 4, 4, 5]), np.array([1, 0, 1, 1, 0, 1])
d = utils.survival_table_from_events(T, C)
T_, C_, W_ = utils.survival_events_from_table(d[["censored", "observed"]])
npt.assert_array_equal(T, T_)
npt.assert_array_equal(C, C_)
npt.assert_array_equal(W_, np.ones_like(T))
def test_survival_events_from_table_with_ties():
T, C = np.array([1, 2, 3, 4, 4, 5]), np.array([1, 0, 1, 1, 1, 1])
d = utils.survival_table_from_events(T, C)
T_, C_, W_ = utils.survival_events_from_table(d[["censored", "observed"]])
npt.assert_array_equal([1, 2, 3, 4, 5], T_)
npt.assert_array_equal([1, 0, 1, 1, 1], C_)
npt.assert_array_equal([1, 1, 1, 2, 1], W_)
def test_survival_table_from_events_with_non_trivial_censorship_column():
T = np.random.exponential(5, size=50)
    malformed_C = np.random.binomial(2, p=0.8, size=50)  # set to 2 on purpose!
proper_C = malformed_C > 0 # (proper "boolean" array)
table1 = utils.survival_table_from_events(T, malformed_C, np.zeros_like(T))
table2 = utils.survival_table_from_events(T, proper_C, np.zeros_like(T))
assert_frame_equal(table1, table2)
def test_group_survival_table_from_events_on_waltons_data():
df = load_waltons()
g, removed, observed, censored = utils.group_survival_table_from_events(df["group"], df["T"], df["E"])
assert len(g) == 2
assert all(removed.columns == ["removed:miR-137", "removed:control"])
assert all(removed.index == observed.index)
assert all(removed.index == censored.index)
def test_group_survival_table_with_weights():
df = load_waltons()
dfw = df.groupby(["T", "E", "group"]).size().reset_index().rename(columns={0: "weights"})
gw, removedw, observedw, censoredw = utils.group_survival_table_from_events(
dfw["group"], dfw["T"], dfw["E"], weights=dfw["weights"]
)
assert len(gw) == 2
assert all(removedw.columns == ["removed:miR-137", "removed:control"])
assert all(removedw.index == observedw.index)
assert all(removedw.index == censoredw.index)
g, removed, observed, censored = utils.group_survival_table_from_events(df["group"], df["T"], df["E"])
assert_frame_equal(removedw, removed)
| assert_frame_equal(observedw, observed) | pandas.testing.assert_frame_equal |
# Copyright (c) 2018-2022, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import pytest
from pandas.api import types as ptypes
import cudf
from cudf.api import types
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, True),
(pd.CategoricalDtype, True),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), True),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, True),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), True),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), True),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
# TODO: Currently creating an empty Series of list type ignores the
# provided type and instead makes a float64 Series.
(cudf.Series([[1, 2], [3, 4, 5]]), False),
# TODO: Currently creating an empty Series of struct type fails because
# it uses a numpy utility that doesn't understand StructDtype.
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_categorical_dtype(obj, expect):
assert types.is_categorical_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, True),
(int, True),
(float, True),
(complex, True),
(str, False),
(object, False),
# NumPy types.
(np.bool_, True),
(np.int_, True),
(np.float64, True),
(np.complex128, True),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), True),
(np.int_(), True),
(np.float64(), True),
(np.complex128(), True),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), True),
(np.dtype("int"), True),
(np.dtype("float"), True),
(np.dtype("complex"), True),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), True),
(np.array([], dtype=np.int_), True),
(np.array([], dtype=np.float64), True),
(np.array([], dtype=np.complex128), True),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), True),
(pd.Series(dtype="int"), True),
(pd.Series(dtype="float"), True),
(pd.Series(dtype="complex"), True),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, True),
(cudf.Decimal64Dtype, True),
(cudf.Decimal32Dtype, True),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), True),
(cudf.Decimal64Dtype(5, 2), True),
(cudf.Decimal32Dtype(5, 2), True),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), True),
(cudf.Series(dtype="int"), True),
(cudf.Series(dtype="float"), True),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), True),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), True),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), True),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_numeric_dtype(obj, expect):
assert types.is_numeric_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, True),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, True),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), True),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), True),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), True),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), True),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), True),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_integer_dtype(obj, expect):
assert types.is_integer_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), True),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), True),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_integer(obj, expect):
assert types.is_integer(obj) == expect
# TODO: Temporarily ignoring all cases of "object" until we decide what to do.
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, True),
# (object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, True),
(np.unicode_, True),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), True),
(np.unicode_(), True),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), True),
(np.dtype("unicode"), True),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
# (np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), True),
(np.array([], dtype=np.unicode_), True),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
# (np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), True),
(pd.Series(dtype="unicode"), True),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
# (pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), True),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_string_dtype(obj, expect):
assert types.is_string_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, True),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), True),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), True),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), True),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), True),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), True),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_datetime_dtype(obj, expect):
assert types.is_datetime_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, True),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), True),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), True),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_list_dtype(obj, expect):
assert types.is_list_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, True),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
# (cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), True),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
# (cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), True),
# (cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_struct_dtype(obj, expect):
# TODO: All inputs of interval types are currently disabled due to
# inconsistent behavior of is_struct_dtype for interval types that will be
# fixed as part of the array refactor.
assert types.is_struct_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, True),
(cudf.Decimal64Dtype, True),
(cudf.Decimal32Dtype, True),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), True),
(cudf.Decimal64Dtype(5, 2), True),
(cudf.Decimal32Dtype(5, 2), True),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), True),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), True),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), True),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_decimal_dtype(obj, expect):
assert types.is_decimal_dtype(obj) == expect
@pytest.mark.parametrize(
"obj",
(
# Base Python objects.
bool(),
int(),
float(),
complex(),
str(),
"",
r"",
object(),
# Base Python types.
bool,
int,
float,
complex,
str,
object,
# NumPy types.
np.bool_,
np.int_,
np.float64,
np.complex128,
np.str_,
np.unicode_,
np.datetime64,
np.timedelta64,
# NumPy scalars.
np.bool_(),
np.int_(),
np.float64(),
np.complex128(),
np.str_(),
np.unicode_(),
np.datetime64(),
np.timedelta64(),
# NumPy dtype objects.
np.dtype("bool"),
np.dtype("int"),
np.dtype("float"),
np.dtype("complex"),
np.dtype("str"),
np.dtype("unicode"),
np.dtype("datetime64"),
np.dtype("timedelta64"),
np.dtype("object"),
# NumPy arrays.
np.array([], dtype=np.bool_),
np.array([], dtype=np.int_),
np.array([], dtype=np.float64),
np.array([], dtype=np.complex128),
np.array([], dtype=np.str_),
np.array([], dtype=np.unicode_),
np.array([], dtype=np.datetime64),
np.array([], dtype=np.timedelta64),
np.array([], dtype=object),
# Pandas dtypes.
# TODO: pandas does not consider these to be categoricals.
# pd.core.dtypes.dtypes.CategoricalDtypeType,
# pd.CategoricalDtype,
# Pandas objects.
pd.Series(dtype="bool"),
pd.Series(dtype="int"),
pd.Series(dtype="float"),
pd.Series(dtype="complex"),
pd.Series(dtype="str"),
pd.Series(dtype="unicode"),
pd.Series(dtype="datetime64[s]"),
pd.Series(dtype="timedelta64[s]"),
pd.Series(dtype="category"),
        pd.Series(dtype="object"),
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 29 19:14:15 2020
@author: hp 3006tx
"""
import pandas as pd
import dash
from dash.dependencies import Input , State, Output
import dash_core_components as dcc
import dash_html_components as html
import webbrowser
import plotly.graph_objects as go
import plotly.express as px
# region country state city day month attacktype
app = dash.Dash()
app.title = 'Terrorism Data Analysis'
def load_data():
# world data
globalterror = 'global_terror.csv'
global df
    df = pd.read_csv(globalterror)
####################
# Import Libraries
####################
import os
import sys
from PIL import Image
import cv2
import numpy as np
import pandas as pd
import pytorch_lightning as pl
from pytorch_lightning.metrics import Accuracy
from pytorch_lightning import loggers
from pytorch_lightning import seed_everything
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import StratifiedKFold
from sklearn import model_selection
import albumentations as A
import timm
from omegaconf import OmegaConf
import glob
from tqdm import tqdm
from sklearn.metrics import roc_auc_score
from nnAudio.Spectrogram import CQT1992v2, CQT2010v2
from scipy import signal
####################
# Utils
####################
def get_score(y_true, y_pred):
score = roc_auc_score(y_true, y_pred)
return score
def load_pytorch_model(ckpt_name, model, ignore_suffix='model'):
state_dict = torch.load(ckpt_name, map_location='cpu')["state_dict"]
new_state_dict = {}
for k, v in state_dict.items():
name = k
if name.startswith(str(ignore_suffix)+"."):
name = name.replace(str(ignore_suffix)+".", "", 1) # remove `model.`
new_state_dict[name] = v
model.load_state_dict(new_state_dict, strict=False)
return model
class CWT(nn.Module):
def __init__(
self,
wavelet_width,
fs,
lower_freq,
upper_freq,
n_scales,
size_factor=1.0,
border_crop=0,
stride=1
):
super().__init__()
self.initial_wavelet_width = wavelet_width
self.fs = fs
self.lower_freq = lower_freq
self.upper_freq = upper_freq
self.size_factor = size_factor
self.n_scales = n_scales
self.wavelet_width = wavelet_width
self.border_crop = border_crop
self.stride = stride
wavelet_bank_real, wavelet_bank_imag = self._build_wavelet_kernel()
self.wavelet_bank_real = nn.Parameter(wavelet_bank_real, requires_grad=False)
self.wavelet_bank_imag = nn.Parameter(wavelet_bank_imag, requires_grad=False)
self.kernel_size = self.wavelet_bank_real.size(3)
def _build_wavelet_kernel(self):
s_0 = 1 / self.upper_freq
s_n = 1 / self.lower_freq
base = np.power(s_n / s_0, 1 / (self.n_scales - 1))
scales = s_0 * np.power(base, np.arange(self.n_scales))
frequencies = 1 / scales
truncation_size = scales.max() * np.sqrt(4.5 * self.initial_wavelet_width) * self.fs
one_side = int(self.size_factor * truncation_size)
kernel_size = 2 * one_side + 1
k_array = np.arange(kernel_size, dtype=np.float32) - one_side
t_array = k_array / self.fs
wavelet_bank_real = []
wavelet_bank_imag = []
for scale in scales:
norm_constant = np.sqrt(np.pi * self.wavelet_width) * scale * self.fs / 2.0
scaled_t = t_array / scale
exp_term = np.exp(-(scaled_t ** 2) / self.wavelet_width)
kernel_base = exp_term / norm_constant
kernel_real = kernel_base * np.cos(2 * np.pi * scaled_t)
kernel_imag = kernel_base * np.sin(2 * np.pi * scaled_t)
wavelet_bank_real.append(kernel_real)
wavelet_bank_imag.append(kernel_imag)
wavelet_bank_real = np.stack(wavelet_bank_real, axis=0)
wavelet_bank_imag = np.stack(wavelet_bank_imag, axis=0)
wavelet_bank_real = torch.from_numpy(wavelet_bank_real).unsqueeze(1).unsqueeze(2)
wavelet_bank_imag = torch.from_numpy(wavelet_bank_imag).unsqueeze(1).unsqueeze(2)
return wavelet_bank_real, wavelet_bank_imag
def forward(self, x):
x = x.unsqueeze(dim=0)
border_crop = self.border_crop // self.stride
start = border_crop
end = (-border_crop) if border_crop > 0 else None
# x [n_batch, n_channels, time_len]
out_reals = []
out_imags = []
in_width = x.size(2)
out_width = int(np.ceil(in_width / self.stride))
        pad_along_width = max((out_width - 1) * self.stride + self.kernel_size - in_width, 0)  # clamp at zero; np.max(x, 0) would treat 0 as an axis
padding = pad_along_width // 2 + 1
for i in range(3):
# [n_batch, 1, 1, time_len]
x_ = x[:, i, :].unsqueeze(1).unsqueeze(2)
out_real = nn.functional.conv2d(x_, self.wavelet_bank_real, stride=(1, self.stride), padding=(0, padding))
out_imag = nn.functional.conv2d(x_, self.wavelet_bank_imag, stride=(1, self.stride), padding=(0, padding))
out_real = out_real.transpose(2, 1)
out_imag = out_imag.transpose(2, 1)
out_reals.append(out_real)
out_imags.append(out_imag)
out_real = torch.cat(out_reals, axis=1)
out_imag = torch.cat(out_imags, axis=1)
out_real = out_real[:, :, :, start:end]
out_imag = out_imag[:, :, :, start:end]
scalograms = torch.sqrt(out_real ** 2 + out_imag ** 2)
return scalograms[0]
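# Illustrative usage of the CWT module above (a hedged sketch, not part of the
# original script; it assumes the 3-channel, 2048 Hz strain inputs that the
# dataset class below provides):
#
#     cwt = CWT(wavelet_width=8, fs=2048, lower_freq=20, upper_freq=1024,
#               n_scales=384, stride=8)
#     waves = torch.randn(3, 4096)    # (n_channels, time_len), i.e. 2 s of data
#     scalograms = cwt(waves)         # -> (3, n_scales, ~time_len // stride)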
####################
# Config
####################
conf_dict = {'batch_size': 8,#32,
'epoch': 30,
'height': 512,#640,
'width': 512,
'model_name': 'efficientnet_b0',
'lr': 0.001,
'drop_rate': 0.0,
'drop_path_rate': 0.0,
'data_dir': '../input/seti-breakthrough-listen',
'model_path': None,
'output_dir': './',
'seed': 2021,
'snap': 1}
conf_base = OmegaConf.create(conf_dict)
####################
# Dataset
####################
class G2NetDataset(Dataset):
def __init__(self, df, transform=None, conf=None, train=True):
self.df = df.reset_index(drop=True)
self.dir_names = df['dir'].values
self.labels = df['target'].values
self.wave_transform = [
CQT1992v2(sr=2048, fmin=20, fmax=1024, hop_length=8, bins_per_octave=8, window='flattop'),
CQT1992v2(sr=2048, fmin=20, fmax=1024, hop_length=8, bins_per_octave=8, window='blackmanharris'),
CQT1992v2(sr=2048, fmin=20, fmax=1024, hop_length=8, bins_per_octave=8, window='nuttall'),
CWT(wavelet_width=8,fs=2048,lower_freq=20,upper_freq=1024,n_scales=384,stride=8)]
#self.wave_transform = CQT1992v2(sr=2048, fmin=10, fmax=1024, hop_length=8, bins_per_octave=8, window='flattop')
#self.wave_transform = CQT1992v2(sr=2048, fmin=20, fmax=1024, hop_length=1, bins_per_octave=14, window='flattop')
#self.wave_transform = CQT2010v2(sr=2048, fmin=10, fmax=1024, hop_length=32, n_bins=32, bins_per_octave=8, window='flattop')
self.stat = [
[0.013205823003608798,0.037445450696502146],
[0.009606230606511236,0.02489221471650526], # 10000 sample
[0.009523397709568962,0.024628402379527688],
[0.0010164694150735158,0.0015815201992169022]] # 10000 sample
        # it might be worth trying other hop lengths
self.transform = transform
self.conf = conf
self.train = train
def __len__(self):
return len(self.df)
def apply_qtransform(self, waves, transform):
#print(waves.shape)
#waves = np.hstack(waves)
#print(np.max(np.abs(waves), axis=1))
#waves = waves / np.max(np.abs(waves), axis=1, keepdims=True)
#waves = waves / np.max(waves)
waves = waves / 4.6152116213830774e-20
waves = torch.from_numpy(waves).float()
image = transform(waves)
return image
def __getitem__(self, idx):
img_id = self.df.loc[idx, 'id']
file_path = os.path.join(self.dir_names[idx],"{}/{}/{}/{}.npy".format(img_id[0], img_id[1], img_id[2], img_id))
waves = np.load(file_path)
label = torch.tensor([self.labels[idx]]).float()
image1 = self.apply_qtransform(waves, self.wave_transform[0])
image1 = image1.squeeze().numpy().transpose(1,2,0)
image1 = cv2.vconcat([image1[:,:,0],image1[:,:,1],image1[:,:,2]])
image1 = (image1-self.stat[0][0])/self.stat[0][1]
image1 = cv2.resize(image1, (self.conf.width, self.conf.height), interpolation=cv2.INTER_CUBIC)
image2 = self.apply_qtransform(waves, self.wave_transform[1])
image2 = image2.squeeze().numpy().transpose(1,2,0)
image2 = cv2.vconcat([image2[:,:,0],image2[:,:,1],image2[:,:,2]])
image2 = (image2-self.stat[1][0])/self.stat[1][1]
image2 = cv2.resize(image2, (self.conf.width, self.conf.height), interpolation=cv2.INTER_CUBIC)
image3 = self.apply_qtransform(waves, self.wave_transform[2])
image3 = image3.squeeze().numpy().transpose(1,2,0)
image3 = cv2.vconcat([image3[:,:,0],image3[:,:,1],image3[:,:,2]])
image3 = (image3-self.stat[2][0])/self.stat[2][1]
image3 = cv2.resize(image3, (self.conf.width, self.conf.height), interpolation=cv2.INTER_CUBIC)
image4 = self.apply_qtransform(waves, self.wave_transform[3])
image4 = image4.squeeze().numpy().transpose(1,2,0)
image4 = cv2.vconcat([image4[:,:,0],image4[:,:,1],image4[:,:,2]])
image4 = (image4-self.stat[3][0])/self.stat[3][1]
image4 = cv2.resize(image4, (self.conf.width, self.conf.height), interpolation=cv2.INTER_CUBIC)
#if self.transform is not None:
# image = self.transform(image=image)['image']
image1 = torch.from_numpy(image1).unsqueeze(dim=0)
image2 = torch.from_numpy(image2).unsqueeze(dim=0)
image3 = torch.from_numpy(image3).unsqueeze(dim=0)
image4 = torch.from_numpy(image4).unsqueeze(dim=0)
return image1, image2, image3, image4, label
####################
# Data Module
####################
class SETIDataModule(pl.LightningDataModule):
def __init__(self, conf):
super().__init__()
self.conf = conf
# OPTIONAL, called only on 1 GPU/machine(for download or tokenize)
def prepare_data(self):
pass
# OPTIONAL, called for every GPU/machine
def setup(self, stage=None, fold=None):
if stage == 'test':
#test_df = pd.read_csv(os.path.join(self.conf.data_dir, "sample_submission.csv"))
#test_df['dir'] = os.path.join(self.conf.data_dir, "test")
#self.test_dataset = G2NetDataset(test_df, transform=None,conf=self.conf, train=False)
df = pd.read_csv(os.path.join(self.conf.data_dir, "training_labels.csv"))
df['dir'] = os.path.join(self.conf.data_dir, "train")
# cv split
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=self.conf.seed)
for n, (train_index, val_index) in enumerate(skf.split(df, df['target'])):
df.loc[val_index, 'fold'] = int(n)
df['fold'] = df['fold'].astype(int)
train_df = df[df['fold'] != fold]
self.valid_df = df[df['fold'] == fold]
self.valid_dataset = G2NetDataset(self.valid_df, transform=None,conf=self.conf, train=False)
# ====================================================
# Inference function
# ====================================================
def inference(models, test_loader):
tk0 = tqdm(enumerate(test_loader), total=len(test_loader))
raw_probs = [[] for i in range(len(models))]
probs = []
probs_flattop = []
probs_blackmanharris = []
probs_nuttall = []
probs_cwt = []
with torch.no_grad():
for i, (images) in tk0:
images1 = images[0].cuda()
images2 = images[1].cuda()
images3 = images[2].cuda()
images4 = images[3].cuda()
avg_preds = []
flattop = []
blackmanharris = []
nuttall = []
cwt = []
for mid, model in enumerate(models):
y_preds_1 = model(images1)
y_preds_2 = model(images2)
y_preds_3 = model(images3)
y_preds_4 = model(images4)
y_preds = (y_preds_1 + y_preds_2 + y_preds_3 + y_preds_4)/4
avg_preds.append(y_preds.sigmoid().to('cpu').numpy())
flattop.append(y_preds_1.sigmoid().to('cpu').numpy())
blackmanharris.append(y_preds_2.sigmoid().to('cpu').numpy())
nuttall.append(y_preds_3.sigmoid().to('cpu').numpy())
cwt.append(y_preds_4.sigmoid().to('cpu').numpy())
#raw_probs[mid].append(y_preds.sigmoid().to('cpu').numpy())
avg_preds = np.mean(avg_preds, axis=0)
flattop = np.mean(flattop, axis=0)
blackmanharris = np.mean(blackmanharris, axis=0)
nuttall = np.mean(nuttall, axis=0)
cwt = np.mean(cwt, axis=0)
probs.append(avg_preds)
probs_flattop.append(flattop)
probs_blackmanharris.append(blackmanharris)
probs_nuttall.append(nuttall)
probs_cwt.append(cwt)
#for mid in range(len(models)):
# raw_probs[mid] = np.concatenate(raw_probs[mid])
probs = np.concatenate(probs)
probs_flattop = np.concatenate(probs_flattop)
probs_blackmanharris = np.concatenate(probs_blackmanharris)
probs_nuttall = np.concatenate(probs_nuttall)
probs_cwt = np.concatenate(probs_cwt)
return probs, probs_flattop, probs_blackmanharris, probs_nuttall, probs_cwt#, raw_probs
####################
# Train
####################
def main():
conf_cli = OmegaConf.from_cli()
conf = OmegaConf.merge(conf_base, conf_cli)
print(OmegaConf.to_yaml(conf))
seed_everything(2021)
# get model path
model_path = []
for i in range(5):
target_model = glob.glob(os.path.join(conf.model_dir, f'fold{i}/ckpt/*epoch*.ckpt'))
scores = [float(os.path.splitext(os.path.basename(i))[0].split('=')[-1]) for i in target_model]
model_path.append(target_model[scores.index(max(scores))])
models = []
for ckpt in model_path:
m = timm.create_model(model_name=conf.model_name, num_classes=1, pretrained=False, in_chans=1)
m = load_pytorch_model(ckpt, m, ignore_suffix='model')
m.cuda()
m.eval()
models.append(m)
# make oof
    oof_df = pd.DataFrame()
# -*- coding: utf-8 -*-
"""
@author: hkaneko
"""
# Collection of functions used in the sample programs
import matplotlib.figure as figure
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.spatial.distance import cdist
from sklearn import metrics
## Function that draws a scatter plot of the actual vs. estimated y values and computes r2, RMSE, and MAE
# def performance_check_in_regression(y, estimated_y):
#     plt.rcParams['font.size'] = 18  # font size of the axis labels and other text
#     plt.figure(figsize=figure.figaspect(1))  # make the figure square
#     plt.scatter(y, estimated_y.iloc[:, 0], c='blue')  # actual vs. estimated plot
#     y_max = max(y.max(), estimated_y.iloc[:, 0].max())  # larger of the maxima of the actual and estimated values
#     y_min = min(y.min(), estimated_y.iloc[:, 0].min())  # smaller of the minima of the actual and estimated values
#     plt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)],
#              [y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-')  # diagonal line from the minimum - 5% to the maximum + 5%
#     plt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))  # y-axis range
#     plt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))  # x-axis range
#     plt.xlabel('actual y')  # x-axis label
#     plt.ylabel('estimated y')  # y-axis label
#     plt.show()  # draw the plot with the settings above
#
#     r2 = metrics.r2_score(y, estimated_y)  # r2
#     rmse = metrics.mean_squared_error(y, estimated_y) ** 0.5  # RMSE
#     mae = metrics.mean_absolute_error(y, estimated_y)  # MAE
#     return (r2, rmse, mae)
def k3n_error(x_1, x_2, k):
"""
k-nearest neighbor normalized error (k3n-error)
When X1 is data of X-variables and X2 is data of Z-variables
(low-dimensional data), this is k3n error in visualization (k3n-Z-error).
    When X1 is Z-variables (low-dimensional data) and X2 is data of
X-variables, this is k3n error in reconstruction (k3n-X-error).
k3n-error = k3n-Z-error + k3n-X-error
Parameters
----------
x_1: numpy.array or pandas.DataFrame
x_2: numpy.array or pandas.DataFrame
k: int
        The number of neighbors
Returns
-------
k3n_error : float
k3n-Z-error or k3n-X-error
"""
x_1 = np.array(x_1)
x_2 = np.array(x_2)
x_1_distance = cdist(x_1, x_1)
x_1_sorted_indexes = np.argsort(x_1_distance, axis=1)
x_2_distance = cdist(x_2, x_2)
for i in range(x_2.shape[0]):
_replace_zero_with_the_smallest_positive_values(x_2_distance[i, :])
identity_matrix = np.eye(len(x_1_distance), dtype=bool)
knn_distance_in_x_1 = np.sort(x_2_distance[:, x_1_sorted_indexes[:, 1:k + 1]][identity_matrix])
knn_distance_in_x_2 = np.sort(x_2_distance)[:, 1:k + 1]
sum_k3n_error = (
(knn_distance_in_x_1 - knn_distance_in_x_2) / knn_distance_in_x_2
).sum()
return sum_k3n_error / x_1.shape[0] / k
def _replace_zero_with_the_smallest_positive_values(arr):
"""
Replace zeros in array with the smallest positive values.
Parameters
----------
arr: numpy.array
"""
arr[arr == 0] = np.min(arr[arr != 0])
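# Minimal self-check of the k3n-error definition above (an illustrative sketch,
# not part of the original module; the variable names are hypothetical). The
# total k3n-error is k3n-Z-error + k3n-X-error, i.e. k3n_error called twice
# with its arguments swapped.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    x = rng.rand(50, 10)  # data of X-variables
    z = x[:, :2]          # stand-in for low-dimensional Z-variables
    k = 10
    k3n_z_error = k3n_error(x, z, k)  # error in visualization
    k3n_x_error = k3n_error(z, x, k)  # error in reconstruction
    print('k3n-error :', k3n_z_error + k3n_x_error)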
def plot_and_selection_of_hyperparameter(hyperparameter_values, metrics_values, x_label, y_label):
    # Plot the statistic (e.g., r2 or accuracy after CV) for each value of the hyperparameter (number of components, k in k-NN, etc.)
plt.rcParams['font.size'] = 18
plt.scatter(hyperparameter_values, metrics_values, c='blue')
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.show()
    # Select the hyperparameter value at which the statistic (r2, accuracy, etc. after CV) is maximized
return hyperparameter_values[metrics_values.index(max(metrics_values))]
def estimation_and_performance_check_in_regression_train_and_test(model, x_train, y_train, x_test, y_test):
    # Estimation for the training data
    estimated_y_train = model.predict(x_train) * y_train.std() + y_train.mean()  # estimate y and restore the original scale
    estimated_y_train = pd.DataFrame(estimated_y_train, index=x_train.index,
                                     columns=['estimated_y'])  # convert to a pandas DataFrame and set the row and column names
    # Plot of actual vs. estimated values for the training data
    plt.rcParams['font.size'] = 18  # font size of the axis labels and other text
    plt.figure(figsize=figure.figaspect(1))  # make the figure square
    plt.scatter(y_train, estimated_y_train.iloc[:, 0], c='blue')  # actual vs. estimated plot
    y_max = max(y_train.max(), estimated_y_train.iloc[:, 0].max())  # larger of the maxima of the actual and estimated values
    y_min = min(y_train.min(), estimated_y_train.iloc[:, 0].min())  # smaller of the minima of the actual and estimated values
    plt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)],
             [y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-')  # diagonal line from the minimum - 5% to the maximum + 5%
    plt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))  # y-axis range
    plt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))  # x-axis range
    plt.xlabel('actual y')  # x-axis label
    plt.ylabel('estimated y')  # y-axis label
    plt.show()  # draw the plot with the settings above
    # r2, RMSE, MAE for the training data
    print('r^2 for training data :', metrics.r2_score(y_train, estimated_y_train))
    print('RMSE for training data :', metrics.mean_squared_error(y_train, estimated_y_train) ** 0.5)
    print('MAE for training data :', metrics.mean_absolute_error(y_train, estimated_y_train))
    # Save the training-data results
    y_train_for_save = pd.DataFrame(y_train)  # rename the column separately because this is a Series
    y_train_for_save.columns = ['actual_y']
    y_error_train = y_train_for_save.iloc[:, 0] - estimated_y_train.iloc[:, 0]
    y_error_train = pd.DataFrame(y_error_train)  # rename the column separately because this is a Series
    y_error_train.columns = ['error_of_y(actual_y-estimated_y)']
    results_train = pd.concat([estimated_y_train, y_train_for_save, y_error_train], axis=1)
    results_train.to_csv('estimated_y_train.csv')  # save the estimates to a csv file; note that an existing file with the same name will be overwritten
    # Estimation for the test data
    estimated_y_test = model.predict(x_test) * y_train.std() + y_train.mean()  # estimate y and restore the original scale
    estimated_y_test = pd.DataFrame(estimated_y_test, index=x_test.index,
                                    columns=['estimated_y'])  # convert to a pandas DataFrame and set the row and column names
    # Plot of actual vs. estimated values for the test data
    plt.rcParams['font.size'] = 18  # font size of the axis labels and other text
    plt.figure(figsize=figure.figaspect(1))  # make the figure square
    plt.scatter(y_test, estimated_y_test.iloc[:, 0], c='blue')  # actual vs. estimated plot
    y_max = max(y_test.max(), estimated_y_test.iloc[:, 0].max())  # larger of the maxima of the actual and estimated values
    y_min = min(y_test.min(), estimated_y_test.iloc[:, 0].min())  # smaller of the minima of the actual and estimated values
    plt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)],
             [y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-')  # diagonal line from the minimum - 5% to the maximum + 5%
    plt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))  # y-axis range
    plt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))  # x-axis range
    plt.xlabel('actual y')  # x-axis label
    plt.ylabel('estimated y')  # y-axis label
    plt.show()  # draw the plot with the settings above
    # r2, RMSE, MAE for the test data
    print('r^2 for test data :', metrics.r2_score(y_test, estimated_y_test))
    print('RMSE for test data :', metrics.mean_squared_error(y_test, estimated_y_test) ** 0.5)
    print('MAE for test data :', metrics.mean_absolute_error(y_test, estimated_y_test))
    # Save the test-data results
    y_test_for_save = pd.DataFrame(y_test)  # rename the column separately because this is a Series
    y_test_for_save.columns = ['actual_y']
    y_error_test = y_test_for_save.iloc[:, 0] - estimated_y_test.iloc[:, 0]
    y_error_test = pd.DataFrame(y_error_test)  # rename the column separately because this is a Series
    y_error_test.columns = ['error_of_y(actual_y-estimated_y)']
    results_test = pd.concat([estimated_y_test, y_test_for_save, y_error_test], axis=1)
    results_test.to_csv('estimated_y_test.csv')  # save the estimates to a csv file; note that an existing file with the same name will be overwritten
def estimation_and_performance_check_in_classification_train_and_test(model, x_train, y_train, x_test, y_test):
    class_types = list(set(y_train))  # class types; these fix the order of the classes along the rows and columns of the confusion matrix
    class_types.sort(reverse=True)  # sort
    # Estimate the classes of the training data
    estimated_y_train = pd.DataFrame(model.predict(x_train), index=x_train.index, columns=[
        'estimated_class'])  # estimate the classes of the training data and convert to a pandas DataFrame with row and column names
    # Confusion matrix for the training data
    confusion_matrix_train = pd.DataFrame(
        metrics.confusion_matrix(y_train, estimated_y_train, labels=class_types), index=class_types,
        columns=class_types)  # build the confusion matrix as a pandas DataFrame whose row and column names are the class names defined above
    confusion_matrix_train.to_csv('confusion_matrix_train.csv')  # save to a csv file; note that an existing file with the same name will be overwritten
    print(confusion_matrix_train)  # display the confusion matrix
    print('Accuracy for training data :', metrics.accuracy_score(y_train, estimated_y_train), '\n')  # display the accuracy
    # Save the training-data results
    y_train_for_save = pd.DataFrame(y_train)  # rename the column separately because this is a Series
    y_train_for_save.columns = ['actual_class']
    y_error_train = y_train_for_save.iloc[:, 0] == estimated_y_train.iloc[:, 0]
    y_error_train = pd.DataFrame(y_error_train)  # rename the column separately because this is a Series
    y_error_train.columns = ['TRUE_if_estimated_class_is_correct']
    results_train = pd.concat([estimated_y_train, y_train_for_save, y_error_train], axis=1)
import pandas as pd
import json
import bids
import matplotlib.pyplot as plt
import plotje
# Download data from here: <NAME>. et al. Crowdsourced MRI quality metrics
# and expert quality annotations for training of humans and machines. Sci Data 6, 30 (2019).
# Then run make_distributions.py to summarize the data from this snapshot
summary_path = './data/summary/bold_curated'
dataset = '/home/william/datasets/es-fmri_v2/'
dfd = pd.read_csv(summary_path + qc + '_summary.csv', index_col=[0])
import pandas
import math
import csv
import random
import numpy
from sklearn import linear_model
from sklearn.model_selection import cross_val_score
# When a team has no Elo rating yet, assign it the base Elo rating
base_elo = 1600
team_elos = {}
team_stats = {}
x = []
y = []
folder = 'data'
# Initialize from each team's Miscellaneous, Opponent, and Team per-game stats csv files
def initialize_data(miscellaneous_stats, opponent_per_game_stats, team_per_game_stats):
miscellaneous_stats.drop(['Rk', 'Arena'], axis=1, inplace=True)
opponent_per_game_stats.drop(['Rk', 'G', 'MP'], axis=1, inplace=True)
team_per_game_stats.drop(['Rk', 'G', 'MP'], axis=1, inplace=True)
    team_stats = pandas.merge(miscellaneous_stats, opponent_per_game_stats, how='left', on='Team')
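# Hedged sketch (the rest of this script is truncated above): given the
# base_elo and team_elos globals, the script presumably applies the standard
# logistic Elo update, which would look like this; the k_factor value is an
# assumption.
def expected_score(rating_a, rating_b):
    # win probability of A against B under the logistic Elo model
    return 1.0 / (1 + math.pow(10, (rating_b - rating_a) / 400.0))

def update_elo(winner_rating, loser_rating, k_factor=32):
    # move both ratings by the "surprise" of the observed result
    delta = k_factor * (1 - expected_score(winner_rating, loser_rating))
    return winner_rating + delta, loser_rating - delta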
from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet
from sklearn.linear_model import LassoCV , ElasticNetCV , RidgeCV
from sklearn.pipeline import Pipeline, FeatureUnion
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import r2_score as r2
from sklearn.model_selection import cross_val_score, KFold, ShuffleSplit, GridSearchCV
from sklearn.ensemble import GradientBoostingRegressor as GBR
from xgboost import XGBRegressor as XGBR
from sklearn.tree import DecisionTreeRegressor as DTR
from sklearn.ensemble import RandomForestRegressor as RFR
from sklearn.preprocessing import PolynomialFeatures
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import PLSRegression as PLS
from sklearn.svm import SVR
from sklearn.kernel_ridge import KernelRidge
from sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler
from plotly.offline import plot, iplot, init_notebook_mode
import plotly.io as pio
init_notebook_mode(connected=True)
pio.renderers.default = "notebook_connected"
import pandas as pd
import numpy as np
import time
import pickle
import joblib
import constant
original = pd.read_csv(constant.PATH + 'life_expectancy_data_fillna.csv')
class ModelPipeline:
def pipe_model(self):
pipe_linear = Pipeline([
('scl', StandardScaler()),
('poly', PolynomialFeatures()),
('fit', LinearRegression())])
pipe_tree = Pipeline([
('scl', StandardScaler()),
('fit', DTR())])
pipe_lasso = Pipeline([
('scl', StandardScaler()),
('poly', PolynomialFeatures()),
('fit', Lasso(random_state=13))])
pipe_ridge = Pipeline([
('scl', StandardScaler()),
('poly', PolynomialFeatures()),
('fit', Ridge(random_state=13))])
pipe_pca = Pipeline([
('scl', StandardScaler()),
('pca', PCA()),
('fit', LinearRegression())])
pipe_pls = Pipeline([
('scl', StandardScaler()),
('fit', PLS())])
pipe_gbr = Pipeline([
('scl', StandardScaler()),
('fit', GBR())])
pipe_xgbr = Pipeline([
('scl', StandardScaler()),
('fit', XGBR(random_state=13))])
pipe_rfr = Pipeline([
('scl', StandardScaler()),
('fit', RFR(random_state=13))])
pipe_svr = Pipeline([
('scl', StandardScaler()),
('fit', SVR())])
pipe_KR = Pipeline([
('scl', StandardScaler()),
('fit', KernelRidge())])
return [pipe_linear, pipe_tree, pipe_lasso, pipe_ridge, pipe_pca, pipe_pls,
pipe_gbr, pipe_xgbr, pipe_rfr, pipe_svr, pipe_KR]
def grid_params(self, max_depth, split_range):
max_depth = max_depth
min_samples_split_range = split_range
grid_params_linear = [{
"poly__degree": np.arange(1, 3),
"fit__fit_intercept": [True, False]
}]
grid_params_tree = [{
}]
grid_params_lasso = [{
"poly__degree": np.arange(1, 3),
"fit__tol": np.logspace(-5, 0, 10),
"fit__alpha": np.logspace(-5, 1, 10)
}]
grid_params_ridge = [{
"poly__degree": np.arange(1, 3),
"fit__alpha": np.linspace(2, 5, 10),
"fit__solver": ["cholesky", "lsqr", "sparse_cg"],
"fit__tol": np.logspace(-5, 0, 10)
}]
grid_params_pca = [{
"pca__n_components": np.arange(2, 8)
}]
grid_params_pls = [{
"fit__n_components": np.arange(2, 8)
}]
grid_params_gbr = [{
"fit__max_features": ["sqrt", "log2"],
"fit__loss": ["ls", "lad", "huber", "quantile"],
"fit__max_depth": max_depth,
"fit__min_samples_split": min_samples_split_range
}]
grid_params_xgbr = [{
"fit__max_features": ["sqrt", "log2"],
"fit__loss": ["ls", "lad", "huber", "quantile"],
"fit__max_depth": max_depth,
"fit__min_samples_split": min_samples_split_range
}]
grid_params_rfr = [{
}]
grid_params_svr = [{
"fit__kernel": ["rbf", "linear"],
"fit__degree": [2, 3, 5],
"fit__gamma": np.logspace(-5, 1, 10)
}]
grid_params_KR = [{
"fit__kernel": ["rbf", "linear"],
"fit__gamma": np.logspace(-5, 1, 10)
}]
return [grid_params_linear, grid_params_tree, grid_params_lasso, grid_params_ridge, grid_params_pca,
grid_params_pls, grid_params_gbr, grid_params_xgbr, grid_params_rfr, grid_params_svr, grid_params_KR]
def grid_cv(self, pipe, params):
jobs = -1
cv = KFold(n_splits=5, shuffle=True, random_state=13)
grid_dict = constant.GRID_DICT
return jobs, cv, grid_dict
def model_save(self, pipe, params, jobs, cv, grid_dict):
model_rmse, model_r2, model_best_params, model_fit_times, model_res = {}, {}, {}, {}, {}
for idx, (param, model) in enumerate(zip(params, pipe)):
start_time = time.time()
search = GridSearchCV(model, param, scoring="neg_mean_squared_error",
cv=cv, n_jobs=jobs, verbose=-1)
search.fit(X_train, y_train)
y_pred = search.predict(X_test)
model_rmse[grid_dict.get(idx)] = np.sqrt(mse(y_test, y_pred))
model_r2[grid_dict.get(idx)] = r2(y_test, y_pred)
model_best_params[grid_dict.get(idx)] = search.best_params_
model_fit_times[grid_dict.get(idx)] = time.time() - start_time
joblib.dump(search, f'../models/{grid_dict.get(idx)}.pkl')
print("------- all Model Saved -------")
return model_rmse, model_r2, model_best_params, model_fit_times
    # Visualize the modeling results
def model_res_barchart(self, res_df):
fig, ax = plt.subplots(figsize=(20, 10))
sns.set(font_scale=2)
ax = sns.barplot(y="Model", x="R2", data=res_df)
return plt.show()
    # Save the modeling results as a DataFrame
def model_res_df(self, model_r2, model_rmse, model_fit_times):
output = pd.DataFrame([model_r2.keys(), model_r2.values(), model_rmse.values(), model_fit_times.values()],
index=["Model", "R2", "RMSE", "Fit_times"]).T
output.sort_values(["R2"], ascending=False, inplace=True)
output['R2'] = [float(_) for _ in output['R2']]
output['RMSE'] = [float(_) for _ in output['RMSE']]
return output
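# Hedged usage sketch (not part of the original file): model_save reads the
# module-level names X_train / y_train / X_test / y_test, so they must exist
# before the grid search runs. The target column name below is a guess:
#
#     X_train, X_test, y_train, y_test = train_test_split(
#         original.drop(columns=['LIFE_EXPECTANCY']),
#         original['LIFE_EXPECTANCY'], test_size=0.2, random_state=13)
#     mp = ModelPipeline()
#     pipes = mp.pipe_model()
#     params = mp.grid_params(max_depth=np.arange(2, 6), split_range=np.arange(2, 10, 2))
#     jobs, cv, grid_dict = mp.grid_cv(pipes, params)
#     rmse, r2_, best_params, fit_times = mp.model_save(pipes, params, jobs, cv, grid_dict)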
class Preprocessing:
    # Add feature columns
def add_feature(self, original, filename=None):
path = constant.PATH + "worldbank_"
original.columns = [cols.upper() for cols in original.columns.tolist()]
        if filename is not None:
            df = pd.read_csv(f"{path}{filename}.csv")
# -*- coding: utf-8 -*-
"""
Created on Sat May 5 00:27:52 2018
@author: sindu
About: Feature Selection on Genome Data"""
import pandas as pd
import numpy as np
import math
import operator
from sklearn import metrics
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn import linear_model
filename = 'GenomeTrainXY.txt'
data = pd.read_csv(filename, header=None).to_numpy()
testDataFile = "GenomeTestX.txt"
testData = pd.read_csv("GenomeTestX.txt", header=-1).as_matrix()
headerinfo = data[0]
classlabelinfo = list(set(headerinfo))
clbl, clblcnt = np.unique(headerinfo, return_counts=True)
classlabelcountinfo = dict(zip(clbl, clblcnt))
n_genomesize = len(headerinfo)
k_groupsize = len(clbl)
df = pd.DataFrame(data)
dftranspose = df.transpose()
fscores = pd.DataFrame()
fscorenumval = None
fscoredenom = None
fscorenumdf = pd.DataFrame()
fscoredenomdf = pd.DataFrame()
#calculate mean of all features for a specific class label
featuremeandata = df.transpose().groupby(dftranspose[:][0]).mean()
featuremeandata = featuremeandata.loc[:, 1:]
centroidData = featuremeandata.transpose().to_numpy()
#calculate variance of all features for a specific class label
featurevardata = df.transpose().groupby(dftranspose[:][0]).var()
featurevardata = featurevardata.loc[:, 1:]
#calculate average of each of the feature
featureavg = df.mean(axis=1)
featureavgdata = pd.DataFrame(featureavg).transpose()
featureavgdata = featureavgdata.loc[:, 1:]
def getfeaturemeandata(classlblval, val):
meanrowdata = pd.DataFrame()
meanrowdatabyvalue = pd.DataFrame()
meannumdata = pd.DataFrame()
for i in range(k_groupsize):
if featuremeandata.index[i] == classlblval:
meanrowdata = pd.DataFrame(featuremeandata.loc[classlblval, :]).transpose()
meannumdata = meanrowdata.values - featureavgdata.values
meanrowdatabyvalue = val*(pd.DataFrame((meannumdata)**2))
return meanrowdatabyvalue
def getfeaturevardata(classlblval, val):
varrowdata = pd.DataFrame()
varrowdatabyvalue = pd.DataFrame()
for i in range(k_groupsize):
if featurevardata.index[i] == classlblval:
varrowdata = pd.DataFrame(featurevardata.loc[classlblval, :]).transpose()
varrowdatabyvalue = pd.DataFrame(((val-1)*varrowdata))
return varrowdatabyvalue
# pick genome observations based on top 100 f-score
def pickGenome():
for key, value in classlabelcountinfo.items():
# constructing fscore numerator and denominator vector
if list(classlabelcountinfo.keys()).index(key) == 0:
fscorenumdf = getfeaturemeandata(key, value)
fscoredenomdf = getfeaturevardata(key, value)
else:
testnumdf = getfeaturemeandata(key, value)
testdenomdf = getfeaturevardata(key, value)
fscorenumdf = pd.concat([fscorenumdf, testnumdf], axis=0, ignore_index=True)
fscoredenomdf = pd.concat([fscoredenomdf, testdenomdf], axis=0, ignore_index=True)
#print(fscorenumdf)
#print(fscoredenomdf)
# calculating all the f-score numerator vector by summing mean data and dividing by k-1
fscorenumdata = ((pd.DataFrame(fscorenumdf.sum(axis=0)).transpose())/(k_groupsize - 1))
#print(fscorenumdata)
# calculating all the f-score denominator vector by summing var data and dividing by n-k
fscorevardata = ((pd.DataFrame(fscoredenomdf.sum(axis=0)).transpose())/(n_genomesize - k_groupsize))
#print(fscorevardata)
fscorenumdata.columns = range(fscorenumdata.shape[1])
fscorevardata.columns = range(fscorevardata.shape[1])
#calculating f-score
fscores = (fscorenumdata / fscorevardata).transpose()
fscores.columns = ['Genome_fscore']
#print(fscores)
fscoreSorted = fscores.sort_values(by='Genome_fscore', ascending=False)
print("========== Sorted fscores below ==============\n")
print(fscoreSorted)
top100fscoreindices = fscoreSorted.head(100).index.tolist()
top100fscoreindices = [(x + 1) for x in top100fscoreindices]
print("\n========== Top 100 fscore indices below ==============\n")
print(top100fscoreindices)
storeTop100GenomeData(top100fscoreindices)
generateTop100TestData(top100fscoreindices)
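# Minimal self-contained sketch of the same F-statistic on toy data (values are
# illustrative, not from GenomeTrainXY.txt). Per feature:
# F = [sum_k n_k*(mu_k - mu)^2 / (k-1)] / [sum_k (n_k-1)*var_k / (n-k)].
import numpy as np

def _fscore_demo(X, y):
    # X: (n_samples, n_features); y: class labels
    classes, counts = np.unique(y, return_counts=True)
    overall_mean = X.mean(axis=0)
    num = sum(n * (X[y == c].mean(axis=0) - overall_mean) ** 2
              for c, n in zip(classes, counts)) / (len(classes) - 1)
    den = sum((n - 1) * X[y == c].var(axis=0, ddof=1)
              for c, n in zip(classes, counts)) / (len(y) - len(classes))
    return num / den

_X = np.array([[1.0, 10.0], [1.2, 11.0], [5.0, 10.5], [5.2, 9.5]])
_y = np.array([0, 0, 1, 1])
print(_fscore_demo(_X, _y))  # feature 0 separates the classes far better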
# from the observations with the top 100 f-scores, create and store the training data
def storeTop100GenomeData(genomeList):
file = open("GenomeTop100TrainData.txt", "w")
r1, = data[0][:].shape
rx,cx = data.shape
for i in range(0, r1):
file.write(str(int(data[0][:][i])))
if (i < r1 - 1):
file.write(',')
file.write("\n")
for a in genomeList:
for b in range(0, cx):
file.write(str(data[a][:][b]))
if(b < cx - 1):
file.write(',')
file.write("\n")
file.close()
# from the observations with the top 100 f-scores, create and store the test data
def generateTop100TestData(genomeList):
file = open("GenomeTop100TestData.txt", "w")
rx,cx = testData.shape
for a in genomeList:
for b in range(0, cx):
file.write(str(testData[a-1][:][b]))
if(b < cx - 1):
file.write(',')
file.write("\n")
file.close()
pickGenome()
# using the train and test samples built from the 100 top-f-score observations, classify the data with various classification models
def data_classify(classifier):
if (classifier == "KNN"):
#storeData(Xtrain, ytrain, Xtest, ytest, classifier)
file1 = pd.read_csv('GenomeTop100TrainData.txt', header=None)
Xtrain = file1.loc[1:, :].transpose().to_numpy()
ytrain = file1.loc[0, :].transpose().to_numpy()
file2 = pd.read_csv('GenomeTop100TestData.txt', header=None)
Xtest = file2.transpose().to_numpy()
knneighbors = KNeighborsClassifier(n_neighbors=5)
knneighbors.fit(Xtrain, ytrain)
# calculating prediction
predictions = knneighbors.predict(Xtest)
# print(predictions)
#actual = ytest
#accuracy = metrics.accuracy_score(actual, predictions) * 100
# printing accuracy
#print("Accuracy with KNN = ", accuracy)
print('\n KNN Predictions: ', predictions)
#accuracy = calc_accuracy(testData, predictions)
#print('Accuracy with KNN = ' + repr(accuracy) + '%')
elif (classifier == "Centroid"):
file1 = pd.read_csv('GenomeTop100TrainData.txt', header=None)
Xtrain = file1.loc[1:, :].transpose().to_numpy()
ytrain = file1.loc[0, :].transpose().to_numpy()
file2 = pd.read_csv('GenomeTop100TestData.txt', header=None)
Xtest = file2.transpose().to_numpy()
centroid = NearestCentroid()
centroid.fit(Xtrain, ytrain)
# calculating prediction
predictions = centroid.predict(Xtest)
# printing accuracy
#accuracy = metrics.accuracy_score(ytest, predictions) * 100
#print("Accuracy with Centroid = ", accuracy)
print('\n Centroid predictions: ', predictions)
#accuracy = calc_accuracy(testData, predictions)
#print('Accuracy with Centroid = ' + repr(accuracy) + '%')
elif (classifier == "SVM"):
file1 = pd.read_csv('GenomeTop100TrainData.txt', header=None)
import os.path
import ast
import pickle
import pandas as pd
Run = False
if not os.path.isfile('sp.txt'):
Run = True
print('process file sp')
if __name__ == "__main__":
Run = True
print("process file sp")
# functions to extract certain information in the data
def get_genre(dataframe):
"""
This function is to get information about genres
Input: dataframe
Output: list
"""
genres = []
for row_number in range(len(dataframe)):
genre = []
for id_name in ast.literal_eval(dataframe.genres.to_list()[row_number]):
genre.append(id_name['name'])
genres.append(genre)
return genres
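# Why ast.literal_eval is used above: the metadata CSV stores lists of dicts as
# plain strings. A tiny illustration on a made-up cell value:
import ast
_cell = "[{'id': 16, 'name': 'Animation'}, {'id': 35, 'name': 'Comedy'}]"
print([d['name'] for d in ast.literal_eval(_cell)])  # ['Animation', 'Comedy']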
def get_director(dataframe):
"""
This function is to get information about directors
Input: dataframe
Output: list
"""
directors = []
for row_number in range(len(dataframe)):
director = []
for crew_info in ast.literal_eval(dataframe.crew.to_list()[row_number]):
if crew_info['job'] == 'Director':
director.append(crew_info['name'])
break
directors.append(director)
return directors
def get_cast(dataframe):
"""
This function is to get information about actors
Input: dataframe
Output: list
"""
casts = []
for row_number in range(len(dataframe)):
cast = []
for cast_info in ast.literal_eval(dataframe.cast.to_list()[row_number]):
cast.append(cast_info['name'])
casts.append(cast)
return casts
def get_year(dataframe):
"""
This function is to get information about years
Input: dataframe
Output: list
"""
years = []
for date in dataframe.release_date.to_list():
years.append(date.split('-')[0])
return years
def get_countries(dataframe):
"""
This function is to get information about countries
Full names of countries are adopted
Input: dataframe
Output: list
"""
countries = []
for row_number in range(len(dataframe)):
country = []
for country_info in ast.literal_eval(dataframe.production_countries.to_list()[row_number]):
country.append(country_info['name'])
countries.append(country)
return countries
def weighted_rating(dataframe, mean_value, quantile_value):
"""
This function is to calculate weighted ratings
Input: A dataframe you want to calculate weighted ratings for, values of mean and quantile
Output: A list of weighted ratings
"""
count_vote = dataframe['vote_count']
average_vote = dataframe['vote_average']
return (count_vote / (count_vote + mean_value) * average_vote) + (mean_value / (mean_value + count_vote) * quantile_value)
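# Usage sketch for weighted_rating with made-up numbers. This is the IMDB-style
# weighted rating WR = v/(v+m)*R + m/(v+m)*C, where m is a minimum vote count
# (the mean here) and C a prior rating (a quantile here).
import pandas as pd
_movies = pd.DataFrame({"vote_count": [10, 5000], "vote_average": [9.5, 7.8]})
print(weighted_rating(_movies, mean_value=300.0, quantile_value=6.0))
# the 10-vote film is pulled strongly toward the prior C = 6.0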
class SimpleRecommendation:
'''
This is simple recommedation algorithm.
'''
def __init__(self):
meta_data = pd.read_csv('movies-dataset/movies_metadata.csv')
credits = pd.read_csv('movies-dataset/credits.csv')
# drop some inappropriate records
meta_data = meta_data[meta_data['release_date'].notnull()]
meta_data = meta_data.drop([19730, 29503, 35587])
# merge data
credits['id'] = credits['id'].astype('int')
meta_data['id'] = meta_data['id'].astype('int')
meta_data = meta_data.merge(credits, on='id')
self.meta_data = meta_data
self.meta_data['genres'] = get_genre(self.meta_data)
print(self.meta_data['genres'])
self.meta_data['director'] = get_director(self.meta_data)
print(self.meta_data['director'])
self.meta_data['cast'] = get_cast(self.meta_data)
print(self.meta_data['cast'])
self.meta_data['year'] = get_year(self.meta_data)
print(self.meta_data['year'])
self.meta_data['countries'] = get_countries(self.meta_data)
print(self.meta_data['countries'])
col_genre = self.meta_data.apply(lambda x: pd.Series(x['genres']), axis=1).stack().reset_index(level=1, drop=True)
col_genre.name = 'genre'
col_director = self.meta_data.apply(lambda x: pd.Series(x['director']), axis=1).stack().reset_index(level=1, drop=True)
col_director.name = 'director'
col_actor = self.meta_data.apply(lambda x: pd.Series(x['cast']), axis=1).stack().reset_index(level=1, drop=True)
col_actor.name = 'actor'
col_country = self.meta_data.apply(lambda x: pd.Series(x['countries']), axis=1).stack().reset_index(level=1, drop=True)
col_country.name = 'country'
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from abc import ABCMeta, abstractmethod
from concurrent.futures import Future
from copy import copy
import datetime as dt
from typing import Iterable, List, Optional, Tuple, Union
import dateutil
import pandas as pd
from gs_quant.base import InstrumentBase, PricingKey
from gs_quant.common import AssetClass
from gs_quant.datetime import point_sort_order
from gs_quant.target.risk import RiskMeasure, RiskMeasureType, RiskMeasureUnit, \
PricingDateAndMarketDataAsOf as __PricingDateAndMarketDataAsOf
__column_sort_fns = {
'label1': point_sort_order,
'mkt_point': point_sort_order,
'point': point_sort_order
}
__risk_columns = ('date', 'time', 'marketDataType', 'assetId', 'pointClass', 'point')
__crif_columns = ('date', 'time', 'riskType', 'amountCurrency', 'qualifier', 'bucket', 'label1', 'label2')
class RiskResult:
def __init__(self, result, risk_measures: Iterable[RiskMeasure]):
self.__risk_measures = tuple(risk_measures)
self.__result = result
@property
def done(self) -> bool:
return self.__result.done()
@property
def risk_measures(self) -> Tuple[RiskMeasure]:
return self.__risk_measures
@property
def _result(self):
return self.__result
class ResultInfo(metaclass=ABCMeta):
def __init__(
self,
pricing_key: PricingKey,
unit: Optional[str] = None,
error: Optional[Union[str, dict]] = None,
calculation_time: Optional[int] = None,
queueing_time: Optional[int] = None
):
self.__pricing_key = pricing_key
self.__unit = unit
self.__error = error
self.__calculation_time = calculation_time
self.__queueing_time = queueing_time
@property
@abstractmethod
def raw_value(self):
...
@property
def pricing_key(self) -> PricingKey:
return self.__pricing_key
@property
def unit(self) -> str:
"""The units of this result"""
return self.__unit
@property
def error(self) -> Union[str, dict]:
"""Any error associated with this result"""
return self.__error
@property
def calculation_time(self) -> int:
"""The time (in milliseconds) taken to compute this result"""
return self.__calculation_time
@property
def queueing_time(self) -> int:
"""The time (in milliseconds) for which this computation was queued"""
return self.__queueing_time
@abstractmethod
def for_pricing_key(self, pricing_key: PricingKey):
...
class ErrorValue(ResultInfo):
def __init__(self, pricing_key: PricingKey, error: Union[str, dict]):
super().__init__(pricing_key, error=error)
def __repr__(self):
return self.error
@property
def raw_value(self):
return None
def for_pricing_key(self, pricing_key: PricingKey):
return self if pricing_key == self.pricing_key else None
class FloatWithInfo(float, ResultInfo):
def __new__(cls,
pricing_key: PricingKey,
value: Union[float, str],
unit: Optional[str] = None,
error: Optional[str] = None,
calculation_time: Optional[float] = None,
queueing_time: Optional[float] = None):
return float.__new__(cls, value)
def __init__(
self,
pricing_key: PricingKey,
value: Union[float, str],
unit: Optional[str] = None,
error: Optional[Union[str, dict]] = None,
calculation_time: Optional[float] = None,
queueing_time: Optional[float] = None):
float.__init__(value)
ResultInfo.__init__(
self,
pricing_key,
unit=unit,
error=error,
calculation_time=calculation_time,
queueing_time=queueing_time)
def __repr__(self):
return self.error if self.error else float.__repr__(self)
@property
def raw_value(self) -> float:
return float(self)
def for_pricing_key(self, pricing_key: PricingKey):
return self if pricing_key == self.pricing_key else None
@staticmethod
def compose(components, pricing_key: Optional[PricingKey] = None):
unit = None
error = {}
as_of = ()
dates = []
values = []
generated_pricing_key = None
for component in components:
generated_pricing_key = component.pricing_key
unit = unit or component.unit
as_of += component.pricing_key.pricing_market_data_as_of
date = component.pricing_key.pricing_market_data_as_of[0].pricing_date
dates.append(date)
values.append(component.raw_value)
if component.error:
error[date] = component.error
return SeriesWithInfo(
pricing_key or generated_pricing_key.clone(pricing_market_data_as_of=as_of),
pd.Series(index=dates, data=values).sort_index(),
unit=unit,
error=error)
class SeriesWithInfo(pd.Series, ResultInfo):
def __init__(
self,
pricing_key: PricingKey,
*args,
unit: Optional[str] = None,
error: Optional[Union[str, dict]] = None,
calculation_time: Optional[int] = None,
queueing_time: Optional[int] = None,
**kwargs
):
pd.Series.__init__(self, *args, **kwargs)
ResultInfo.__init__(
self,
pricing_key,
unit=unit,
error=error,
calculation_time=calculation_time,
queueing_time=queueing_time)
self.index.name = 'date'
def __repr__(self):
return self.error if self.error else pd.Series.__repr__(self)
@property
def raw_value(self) -> pd.Series:
return pd.Series(self)
def for_pricing_key(self, pricing_key: PricingKey):
dates = [as_of.pricing_date for as_of in pricing_key.pricing_market_data_as_of]
scalar = len(dates) == 1
error = self.error or {}
error = error.get(dates[0]) if scalar else {d: error[d] for d in dates if d in error}
if scalar:
return FloatWithInfo(pricing_key, self.loc[dates[0]], unit=self.unit, error=error)
return SeriesWithInfo(pricing_key, pd.Series(index=dates, data=self.loc[dates]), unit=self.unit, error=error)
class DataFrameWithInfo(pd.DataFrame, ResultInfo):
def __init__(
self,
pricing_key: PricingKey,
*args,
unit: Optional[str] = None,
error: Optional[Union[str, dict]] = None,
calculation_time: Optional[float] = None,
queueing_time: Optional[float] = None,
**kwargs
):
pd.DataFrame.__init__(self, *args, **kwargs)
properties = [i for i in dir(ResultInfo) if isinstance(getattr(ResultInfo, i), property)]
internal_names = properties + ['_ResultInfo__' + i for i in properties if i != 'raw_value']
self._internal_names.extend(internal_names)
self._internal_names_set.update(internal_names)
ResultInfo.__init__(
self,
pricing_key,
unit=unit,
error=error,
calculation_time=calculation_time,
queueing_time=queueing_time)
def __repr__(self):
return self.error if self.error else pd.DataFrame.__repr__(self)
@property
def raw_value(self) -> pd.DataFrame:
return pd.DataFrame(self)
import pandas as pd
import repackage
import re
from camel_tools.utils.charsets import UNICODE_PUNCT_CHARSET
import logging
from funcy import log_durations
import argparse
from pathlib import Path
repackage.up()
from data.make_dataset import recompose, puncs
project_dir = Path(__file__).resolve().parents[2]
def load_traindf(path=f'{project_dir}/data/processed/train.tsv'):
return pd.read_csv(path,delimiter='\t')
def tokenize_skiphyph(sent,puncs=puncs):
chars = []
sent = str(sent)
for char in list(sent):
if char in puncs:
chars.append(' '+char+' ')
else:
chars.append(char)
sent = ''.join(chars)
sent = re.sub(r'\s+',r' ',sent)
return sent.strip()
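# Usage sketch for tokenize_skiphyph with an explicit punctuation set (the real
# `puncs` comes from data.make_dataset): punctuation is padded with spaces and
# runs of whitespace are collapsed.
print(tokenize_skiphyph("a,b  c.", puncs={",", "."}))  # -> "a , b c ."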
@log_durations(logging.info)
def train_mle(traindf, size_ratio=1, return_dict=False):
'''Trains on aligned sentences only (same length). A Levenshtein-distance
threshold based on simple_translit could also be added to avoid first/last
name switcharoos. Returns a DataFrame, unless return_dict is set to True.'''
traindf = traindf[:int(len(traindf)*size_ratio)]
parallel_tokens = []
skipped = [0]
def get_aligned_tokens(dfrow,parallel_tokens=parallel_tokens,skipped=skipped):
target = dfrow['rom']
source = dfrow['ar']
target = tokenize_skiphyph(target)
source = tokenize_skiphyph(source)
# create a df of the tokenizes sentence
target = pd.Series(target.split(),dtype=str)
source = pd.Series(source.split(),dtype=str)
if len(target)==len(source):
parallel_tokens.append(pd.DataFrame(data={'source':source,'target':target}))
else:
skipped[0] += 1
traindf.apply(get_aligned_tokens,axis=1)
parallel_tokens = pd.concat(parallel_tokens)
"""Water network transfers maps
"""
import os
import sys
from collections import OrderedDict
import numpy as np
import geopandas as gpd
import pandas as pd
import cartopy.crs as ccrs
import cartopy.io.shapereader as shpreader
import matplotlib.pyplot as plt
import numpy as np
from shapely.geometry import LineString
from vtra.utils import *
def main(mode):
config = load_config()
if mode == 'road':
flow_file_path = os.path.join(config['paths']['output'], 'failure_results','minmax_combined_scenarios',
'single_edge_failures_transfers_national_road_10_percent_shift.csv')
elif mode == 'rail':
flow_file_path = os.path.join(config['paths']['output'], 'failure_results','minmax_combined_scenarios',
'single_edge_failures_transfers_national_rail_100_percent_shift.csv')
else:
raise ValueError("Mode must be road or rail")
region_file_path = os.path.join(config['paths']['data'], 'post_processed_networks',
'coastal_edges.shp')
region_file = gpd.read_file(region_file_path,encoding='utf-8')
flow_file = pd.read_csv(flow_file_path)
region_file = pd.merge(region_file, flow_file, how='left', on=['edge_id'])
from datetime import datetime
import numpy as np
import pytest
from pandas import (
DataFrame,
Series,
bdate_range,
notna,
)
@pytest.fixture
def series():
"""Make mocked series as fixture."""
arr = np.random.randn(100)
locs = np.arange(20, 40)
arr[locs] = np.NaN
series = Series(arr, index=bdate_range(datetime(2009, 1, 1), periods=100))
return series
@pytest.fixture
def frame():
"""Make mocked frame as fixture."""
return DataFrame(
np.random.randn(100, 10),
index=bdate_range(datetime(2009, 1, 1), periods=100),
columns=np.arange(10),
)
# create the data only once as we are not setting it
def _create_consistency_data():
def create_series():
return [
Series(dtype=object),
Series([np.nan]),
Series([np.nan, np.nan]),
Series([3.0]),
Series([np.nan, 3.0]),
Series([3.0, np.nan]),
Series([1.0, 3.0]),
Series([2.0, 2.0]),
Series([3.0, 1.0]),
Series(
[5.0, 5.0, 5.0, 5.0, np.nan, np.nan, np.nan, 5.0, 5.0, np.nan, np.nan]
),
Series(
[
np.nan,
5.0,
5.0,
5.0,
np.nan,
np.nan,
np.nan,
5.0,
5.0,
np.nan,
np.nan,
]
),
Series(
[
np.nan,
np.nan,
5.0,
5.0,
np.nan,
np.nan,
np.nan,
5.0,
5.0,
np.nan,
np.nan,
]
),
Series(
[
np.nan,
3.0,
np.nan,
3.0,
4.0,
5.0,
6.0,
np.nan,
np.nan,
7.0,
12.0,
13.0,
14.0,
15.0,
]
),
Series(
[
np.nan,
5.0,
np.nan,
2.0,
4.0,
0.0,
9.0,
np.nan,
np.nan,
3.0,
12.0,
13.0,
14.0,
15.0,
]
),
Series(
[
2.0,
3.0,
np.nan,
3.0,
4.0,
5.0,
6.0,
np.nan,
np.nan,
7.0,
12.0,
13.0,
14.0,
15.0,
]
),
Series(
[
2.0,
5.0,
np.nan,
2.0,
4.0,
0.0,
9.0,
np.nan,
np.nan,
3.0,
12.0,
13.0,
14.0,
15.0,
]
),
Series(range(10)),
Series(range(20, 0, -2)),
]
def create_dataframes():
return [
DataFrame(),
DataFrame(columns=["a"]),
DataFrame(columns=["a", "a"]),
DataFrame(columns=["a", "b"]),
DataFrame(np.arange(10).reshape((5, 2))),
DataFrame(np.arange(25).reshape((5, 5))),
DataFrame(np.arange(25).reshape((5, 5)), columns=["a", "b", 99, "d", "d"]),
] + [DataFrame(s) for s in create_series()]
"""the_pile dataset"""
import io
import os
import pandas as pd
from ekorpkit import eKonf
from ekorpkit.io.download.web import web_download
from tqdm.auto import tqdm
try:
import simdjson as json
except ImportError:
print("Installing simdjson library")
os.system("pip install -q pysimdjson")
import json as json
parser = json.Parser()
def json_parser(x):
try:
line = parser.parse(x).as_dict()
return line
except ValueError:
return x
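# Quick check of the fallback parser (example inputs are made up). pysimdjson's
# Parser.parse accepts bytes; the except clause above assumes malformed input
# raises ValueError, in which case the raw input is returned unchanged.
print(json_parser(b'{"text": "hello"}'))  # {'text': 'hello'}
print(json_parser(b'not json'))           # b'not json'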
class PileReader:
def __init__(self, filenames, subset=None, segment_separator="\n\n"):
if not isinstance(filenames, list):
filenames = [filenames]
self.filenames = filenames
self.subset = subset
self.segment_separator = segment_separator
def _read_fn(self):
import zstandard
import jsonlines
for filename in self.filenames:
print(f"iterating over {filename}")
with open(filename, "rb") as f:
cctx = zstandard.ZstdDecompressor()
reader_stream = io.BufferedReader(cctx.stream_reader(f))
reader = jsonlines.Reader(reader_stream, loads=json_parser)
for i, item in enumerate(reader):
result = dict()
if isinstance(item, str):
result["text"] = item
result["subset"] = "the_pile"
else:
text = item["text"]
if isinstance(text, list):
text = self.segment_separator.join(text)
result["text"] = text
result["subset"] = item.get("meta", {}).get(
"pile_set_name", "the_pile"
)
if self.subset is None:
yield result
else:
if self.subset == result["subset"]:
yield result
def __iter__(self):
return self._read_fn()
class ThePile:
def __init__(self, **args):
self.cfg = eKonf.to_config(args)
self.name = self.cfg.name
self.subsets = self.cfg.get("subsets", [])
if self.name == "the_pile":
self.subset = None
else:
self.subset = self.name
if self.subset not in self.subsets:
raise ValueError(f"{self.subset} not in {self.subsets}")
self._parse_split_urls()
def _parse_split_urls(self):
self.splits = {}
for split, info in self.cfg.data_souces.items():
if info.get("splits", None):
urls = [
info["url"].format(str(i).zfill(info.zfill))
for i in range(info.splits)
]
else:
urls = [info["url"]]
paths = {}
for url in urls:
path = os.path.join(self.cfg.data_dir, url.split("/")[-1])
paths[path] = url
self.splits[split] = paths
def download(self):
for split, paths in self.splits.items():
for path, url in paths.items():
print(f"Downloading {split} from {url} to {path}")
web_download(url, path, self.name)
def load(self, split="train"):
paths = list(self.splits[split].keys())
return self._generate_examples(paths)
def _generate_examples(self, paths):
pipeline = PileReader(paths, self.subset)
for result in pipeline:
if result:
yield result
def load_pile_data(split_name, **args):
pile = ThePile(**args)
ds_iter = pile.load(split_name)
documents = []
for sample in tqdm(ds_iter):
documents.append(sample)
df = pd.DataFrame(documents)
from twembeddings.build_features_matrix import format_text, find_date_created_at, build_matrix
from twembeddings.embeddings import TfIdf
from twembeddings import ClusteringAlgoSparse
from twembeddings import general_statistics, cluster_event_match
from twembeddings.eval import cluster_acc
import logging
import sklearn.cluster
import pandas as pd
import os
import re
import igraph as ig
import louvain
import csv
from scipy import sparse
import numpy as np
from sklearn.preprocessing import normalize
from sklearn.preprocessing.data import _handle_zeros_in_scale
from datetime import datetime, timedelta
import argparse
from config import PATH, DATASET, THRESHOLDS, SIMILARITIES, DAYS, WEIGHTS, WINDOW_DAYS, \
QUALITY_FUNCTION, WRITE_CLUSTERS_TEXT, WRITE_CLUSTERS_SMALL_IDS
logging.basicConfig(filename='/usr/src/app/app.log', filemode='w', format='%(name)s - %(levelname)s - %(message)s')
def cosine_similarity(x, y):
s = normalize(x) * normalize(y).T
return s
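# Sketch of cosine_similarity on toy sparse rows: `*` is matrix multiplication
# for scipy sparse matrices (not elementwise, as it would be for ndarrays),
# which is why this helper expects the sparse tf-idf matrices used below.
from scipy import sparse
_a = sparse.csr_matrix([[1.0, 0.0], [1.0, 1.0]])
_b = sparse.csr_matrix([[1.0, 0.0]])
print(cosine_similarity(_a, _b).toarray())  # [[1.], [0.70710678]]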
def find_hashtag(text):
hashtags = []
for word in re.split(r"[.()' \n]", text):
if word.startswith("#"):
hashtags.append(word.strip('&,".—/'))
if hashtags:
return hashtags
def zero_one_scale(serie):
data_range = serie.max()
scale = 1 / _handle_zeros_in_scale(data_range)
serie *= scale
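# zero_one_scale divides by the column max (max-scaling, not min-max) and
# relies on pandas' in-place `*=` to mutate the caller's Series. Toy check:
import pandas as pd
_s = pd.Series([0.0, 2.0, 4.0])
zero_one_scale(_s)
print(_s.tolist())  # [0.0, 0.5, 1.0]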
def compute_events(tweets_path, news_path, lang, binary, threshold_tweets, binary_news=False, threshold_news=0.7):
news = pd.read_csv(news_path, sep="\t", quoting=csv.QUOTE_ALL, dtype={"id": int, "label": float,
"created_at": str, "text": str, "url": str})
tweets = pd.read_csv(tweets_path, sep="\t", quoting=csv.QUOTE_ALL, dtype={"id": int, "label": float,
"created_at": str, "text": str, "url": str})
if tweets.id.min() <= news.id.max():
raise Exception("tweets.id.min() should be greater than news.id.max()")
if "pred" not in news.columns:
news_pred, news, _, X_tweets = fsd(news_path, lang, threshold=threshold_news, binary=binary_news,
window_days=WINDOW_DAYS)
news["pred"] = news_pred
news.id = news.id.astype(int)
else:
news["pred"] = news["pred"].fillna(news["id"]).astype(int)
if "pred" not in tweets.columns:
tweets_pred, tweets, _, X_news = fsd(tweets_path, lang, threshold=threshold_tweets, binary=binary,
window_days=WINDOW_DAYS)
tweets["pred"] = tweets_pred
tweets.id = tweets.id.astype(int)
# tweets.loc[(tweets.pred == -1) | (tweets.pred == -2), "pred"] = tweets["id"]
if tweets.pred.min() <= news.pred.max():
tweets["pred"] = tweets["pred"] + news.pred.max() + 3
logging.info("Total tweets: {} preds".format(tweets.pred.nunique()))
logging.info("Total news: {} preds".format(news.pred.nunique()))
news["type"] = "news"
tweets["type"] = "tweets"
return tweets, news
def louvain_macro_tfidf(tweets_path, news_path, lang, similarity, weights, binary=True,
threshold_tweets=0.7, model="ModularityVertexPartition", days=1):
"""
Apply Louvain algorithm on graph of events (an event consists in all documents in the same
fsd clusters).
:param str tweets_path: path to the tweets dataset in format "id" "label" "created_at" "text" "url"
"pred" is optional if fsd clustering is already done
:param str news_path: path to the news dataset in format "id" "label" "created_at" "text" "url"
"pred" is optional if fsd clustering is already done
:param str lang: "fr" or "en"
:param bool binary: if True, all non-zero term counts are set to 1 in tf calculation for tweets
:return: y_pred, data, params
"""
tweets, news = compute_events(tweets_path, news_path, lang, binary, threshold_tweets)
data = pd.concat([tweets, news], ignore_index=True, sort=False)
# logging.info("save data")
local_path = tweets_path.split("data/")[0]
path = local_path + "data/" + (tweets_path + "_" + news_path).replace(local_path + "data/", "")
# data.to_csv(path, sep="\t", index=False, quoting=csv.QUOTE_ALL)
args = {"dataset": path, "model": "tfidf_all_tweets", "annotation": "no", "hashtag_split": True,
"lang": lang, "text+": False, "svd": False, "tfidf_weights": False, "save": False, "binary": False}
data["date"] = data["created_at"].apply(find_date_created_at)
logging.info("build matrix")
vectorizer = TfIdf(lang=args["lang"], binary=args["binary"])
vectorizer.load_history(args["lang"])
data.text = data.text.apply(format_text,
remove_mentions=True,
unidecode=True,
lower=True,
hashtag_split=args["hashtag_split"]
)
count_matrix = vectorizer.add_new_samples(data)
X = vectorizer.compute_vectors(count_matrix, min_df=10, svd=args["svd"], n_components=100)
if weights["hashtag"] != 0:
data["hashtag"] = data.hashtag.str.split("|")
if weights["url"] != 0:
data["url"] = data.url.str.split("|")
# logging.info("save data")
# data.to_csv(path, sep="\t", index=False, quoting=csv.QUOTE_ALL)
# data = pd.read_csv(path, sep="\t", quoting=csv.QUOTE_ALL, dtype={"date": str})
X = X[data.pred.argsort()]
# logging.info("save X")
# sparse.save_npz("/usr/src/app/data/X.npz", X)
data = data.sort_values("pred").reset_index(drop=True)
gb = data.groupby(["pred", "type"])
if weights["url"] != 0 or weights["hashtag"] != 0:
macro = gb.agg({
'date': ['min', 'max', 'size'],
'hashtag': lambda tdf: tdf.explode().tolist(),
'url': lambda tdf: tdf.explode().tolist()
})
macro.columns = ["mindate", "maxdate", "size", "hashtag", "url"]
else:
macro = gb["date"].agg(mindate=np.min, maxdate=np.max, size=np.size)
macro = macro.reset_index().sort_values("pred")
macro_tweets = macro[macro.type == "tweets"]
macro_news = macro[macro.type == "news"]
m = sparse.csr_matrix((
[1 for r in range(X.shape[0])],
([i for i, row in macro.iterrows() for r in range(row["size"])],
range(X.shape[0]))
))
logging.info("tfidf_sum")
tfidf_sum = m * X
logging.info("tfidf_mean")
for i, val in enumerate(macro["size"].tolist()):
tfidf_sum.data[tfidf_sum.indptr[i]:tfidf_sum.indptr[i + 1]] /= val
mean_tweets = tfidf_sum[np.array(macro.type == "tweets")]
mean_news = tfidf_sum[np.array(macro.type == "news")]
logging.info("load mean matrices")
# sparse.save_npz("/usr/src/app/data/mean_tweets.npz", mean_tweets)
# sparse.save_npz("/usr/src/app/data/mean_news.npz", mean_news)
# mean_news = sparse.load_npz("/usr/src/app/data/mean_news.npz")
# mean_tweets = sparse.load_npz("/usr/src/app/data/mean_tweets.npz")
edges_text = []
edges_hashtags = []
edges_urls = []
logging.info("cosine similarity")
min_max = macro_tweets[["mindate", "maxdate"]].drop_duplicates().reset_index(drop=True)
total = min_max.shape[0]
for iter, row in min_max.iterrows():
if iter % 10 == 0:
logging.info(iter/total)
batch_min = (datetime.strptime(row["mindate"], "%Y%m%d") - timedelta(days=days)).strftime("%Y%m%d")
batch_max = (datetime.strptime(row["maxdate"], "%Y%m%d") + timedelta(days=days)).strftime("%Y%m%d")
bool_tweets = (macro_tweets.mindate == row["mindate"]) & (macro_tweets.maxdate == row["maxdate"])
bool_news = ((batch_min <= macro_news.maxdate) & (macro_news.maxdate <= batch_max)) | (
(batch_min <= macro_news.mindate) & (macro_news.mindate <= batch_max)) | (
(row["mindate"] >= macro_news.mindate) & (row["maxdate"] <= macro_news.maxdate))
batch_tweets = macro_tweets[bool_tweets]
batch_news = macro_news[bool_news]
if weights["text"] != 0:
sim = cosine_similarity(mean_tweets[np.array(bool_tweets)], mean_news[np.array(bool_news)])
close_events = sparse.coo_matrix(sim >= similarity)
batch = [
(batch_tweets.iloc[i]["pred"],
batch_news.iloc[j]["pred"],
sim[i, j]
) for i, j in zip(close_events.row, close_events.col)
]
edges_text.extend(batch)
if weights["hashtag"] != 0:
hashtags_tweets = batch_tweets.explode("hashtag")
hashtags_news = batch_news.explode("hashtag")
# hashtags_tweets = hashtags_tweets.drop_duplicates(["pred", "hashtag"])
# hashtags_news = hashtags_news.drop_duplicates(["pred", "hashtag"])
hashtags_tweets = hashtags_tweets.groupby(["pred", "hashtag"]).size().reset_index(name="weight")
hashtags_news = hashtags_news.groupby(["pred", "hashtag"]).size().reset_index(name="weight")
batch = hashtags_tweets.merge(hashtags_news, on="hashtag", how='inner', suffixes=("_tweets", "_news"))
# batch["weight"] = batch["weight_tweets"] + batch["weight_news"]
batch = batch.groupby(["pred_tweets", "pred_news"])["weight_tweets"].agg(['sum', 'size']).reset_index()
# batch = batch.groupby(["pred_tweets", "pred_news"]).size().reset_index(name="weight")
batch = batch[batch["size"] > 3]
edges_hashtags.extend(batch[["pred_tweets", "pred_news", "sum"]].values.tolist())
if weights["url"] != 0:
urls_tweets = batch_tweets.explode("url")
urls_news = batch_news.explode("url")
urls_tweets = urls_tweets.groupby(["pred", "url"]).size().reset_index(name="weight")
urls_news = urls_news.groupby(["pred", "url"]).size().reset_index(name="weight")
batch = urls_tweets.merge(urls_news, on="url", how='inner', suffixes=("_tweets", "_news"))
# batch["weight"] = batch["weight_tweets"] + batch["weight_news"]
batch = batch.groupby(["pred_tweets", "pred_news"])["weight_tweets"].agg(['sum', 'size']).reset_index()
# batch = batch.groupby(["pred_tweets", "pred_news"]).size().reset_index(name="weight")
# batch = batch[batch["size"] > 1]
edges_urls.extend(batch[["pred_tweets", "pred_news", "sum"]].values.tolist())
edges_hashtags = pd.DataFrame(edges_hashtags, columns=["pred_tweets", "pred_news", "weight"])
zero_one_scale(edges_hashtags["weight"])
edges_hashtags["weight"] *= weights["hashtag"]
edges_urls = pd.DataFrame(edges_urls, columns=["pred_tweets", "pred_news", "weight"])
zero_one_scale(edges_urls["weight"])
edges_urls["weight"] *= weights["url"]
edges_text = pd.DataFrame(edges_text, columns=["pred_tweets", "pred_news", "weight"])
edges_text["weight"] *= weights["text"]
edges = pd.concat([edges_text, edges_hashtags, edges_urls]).groupby(
["pred_tweets", "pred_news"])["weight"].sum().sort_values()
g = ig.Graph.TupleList([(i[0], i[1], row) for i, row in edges.iteritems()],
weights=True)
logging.info("build partition")
partition = louvain.find_partition(g, getattr(louvain, model), weights="weight")
max_pred = int(data.pred.max()) + 1
clusters = {}
preds = []
logging.info("preds")
for cluster in range(len(partition)):
for doc in g.vs.select(partition[cluster])["name"]:
clusters[doc] = cluster + max_pred
for i, line in data.iterrows():
if line["pred"] in clusters:
preds.append(clusters[line["pred"]])
else:
preds.append(line["pred"])
# for cluster in range(len(partition)):
# data.loc[data.pred.isin(g.vs.select(partition[cluster])["name"]), "pred"] = cluster + max_pred
params = {"t": threshold_tweets,
"dataset": tweets_path + " " + news_path, "algo": "louvain_macro_tfidf", "lang": lang,
"similarity": similarity, "weights_text": weights["text"], "weights_hashtag": weights["hashtag"],
"weights_url": weights["url"], "binary": binary, "model": model, "days": days, "window_days": WINDOW_DAYS,
"ts": datetime.now().strftime("%d-%m-%Y %H:%M:%S")}
# logging.info("nb pred: {}".format(data.pred.nunique()))
# logging.info("save to /usr/src/app/data/3_months_joint_events.csv")
# data[["id", "pred"]].to_csv("/usr/src/app/data/3_months_joint_events.csv", quoting=csv.QUOTE_MINIMAL, index=False)
data["pred"] = preds
return preds, data, params
def fsd(corpus, lang, threshold, binary, window_days=1):
args = {"dataset": corpus, "model": "tfidf_all_tweets", "annotation": "annotated", "hashtag_split": True,
"lang": lang, "text+": False, "svd": False, "tfidf_weights": False, "save":False, "binary": binary}
X, data = build_matrix(**args)
batch_size = 8
window = int(data.groupby("date").size().mean() // batch_size * batch_size)*window_days
clustering = ClusteringAlgoSparse(threshold=float(threshold), window_size=window,
batch_size=batch_size, intel_mkl=False)
clustering.add_vectors(X)
y_pred = clustering.incremental_clustering()
params = {"t": threshold, "dataset": corpus, "algo": "FSD", "distance": "cosine", "lang": lang,
"binary": binary, "model": model}
return y_pred, data, params, X
def DBSCAN(corpus, lang, min_samples, eps, binary):
args = {"dataset": corpus, "model": "tfidf_all_tweets", "annotation": "annotated", "hashtag_split": True,
"lang": lang, "text+": False, "svd": False, "tfidf_weights": False, "save": True, "binary": binary}
X, data = build_matrix(**args)
logging.info("starting DBSCAN...")
clustering = sklearn.cluster.DBSCAN(eps=eps, metric="cosine", min_samples=min_samples).fit(X)
y_pred = clustering.labels_
params = {"dataset": corpus, "algo": "DBSCAN", "distance": "cosine", "eps": eps, "lang": lang,
"min_samples": min_samples, "binary": binary}
return y_pred, data, params
def percent_linked(data):
"""
return the share of tweets that get linked to news and the share of news that get linked to tweets
:param data:
:return:
"""
data.pred = data.pred.astype(int)
data.id = data.id.astype(int)
tweets = data[data.type=="tweets"]
news = data[data.type=="news"]
pred_tweets = set(tweets.pred.unique())
pred_news = set(news.pred.unique())
common = pred_tweets.intersection(pred_news)
n_linked_tweets = tweets[tweets.pred.isin(common)].shape[0]
n_linked_news = news[news.pred.isin(common)].shape[0]
return n_linked_tweets/tweets.shape[0], n_linked_news/news.shape[0]
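# Toy check for percent_linked (ids/preds are made up): both tweets in cluster
# 1 share it with the news item, the tweet in cluster 2 is unlinked.
import pandas as pd
_d = pd.DataFrame({"id": [1, 2, 3, 4],
                   "pred": [1, 1, 2, 1],
                   "type": ["tweets", "tweets", "tweets", "news"]})
print(percent_linked(_d))  # (0.666..., 1.0)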
def write_ids(path, dataset, table, days, full, small):
if full:
outpath = os.path.join(path, "data", "{}_joint_events_{}_days.csv".format(dataset, days))
logging.info("write preds to {}".format(outpath))
table[["id", "pred", "type", "text"]].to_csv(outpath, index=False, quoting=csv.QUOTE_MINIMAL)
if small:
logging.info("load id mapping")
with open(os.path.join(path, "data", "id_str_mapping.csv"), "r") as f:
reader = csv.reader(f, quoting=csv.QUOTE_NONE)
id_dict = {int(row[0]): int(row[1]) for row in reader}
logging.info("convert to small ids")
mask = (table["type"] == "tweets")
table['doc_id_small'] = table["id"]
table.loc[mask, "doc_id_small"] = table[mask]["id"].apply(lambda x: id_dict[int(x)])
table["is_tweet"] = mask
outpath = os.path.join(path, "data", "{}_joint_events_{}_days_small_ids.csv".format(dataset, days))
logging.info("write preds to {}".format(outpath))
table[["id", "doc_id_small", "pred", "is_tweet"]].to_csv(outpath, index=False, quoting=csv.QUOTE_MINIMAL)
def evaluate(y_pred, data, params, path, note):
stats = general_statistics(y_pred)
p, r, f1 = cluster_event_match(data, y_pred)
params.update({"p": p, "r": r, "f1": f1})
params["note"] = note
if "news" in data.type.unique():
linked_tweets, linked_news = percent_linked(data)
params.update({"linked_tweets": linked_tweets, "linked_news": linked_news})
else:
params.pop("linked_tweets")
params.pop("linked_news")
stats.update(params)
stats = pd.DataFrame(stats, index=[0])
logging.info("\n"+ str(stats[["f1", "p", "r", "t"]]))
try:
results = pd.read_csv(path)
except FileNotFoundError:
results = pd.DataFrame()
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 22 11:05:21 2018
@author: 028375
"""
from __future__ import unicode_literals, division
import pandas as pd
import os.path
import numpy as np
def Check2(lastmonth,thismonth,collateral):
ContractID=(thismonth['ContractID'].append(lastmonth['ContractID'])).append(collateral['ContractID']).drop_duplicates()
Outputs=pd.DataFrame(ContractID).reset_index(drop=True)
cost0=lastmonth[['ContractID','期权标的','标的类型','Upfront结算货币']]
Outputs=pd.merge(Outputs,cost0,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Upfront结算货币':'期初表Upfront','期权标的':'期初表期权标的','标的类型':'期初表标的类型'})
cost1=thismonth[['ContractID','期权标的','标的类型','Upfront结算货币']]
Outputs=pd.merge(Outputs,cost1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Upfront结算货币':'期末表Upfront','期权标的':'期末表期权标的','标的类型':'期末表标的类型'})
tmp1=collateral.groupby(['ContractID'])[['期权标的','标的类型']].first().reset_index()
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'期权标的':'资金表期权标的','标的类型':'资金表标的类型'})
collateral1=collateral.groupby(['ContractID','现金流类型'])['确认金额(结算货币)'].sum().reset_index()
collateral1=collateral1.rename(columns={'现金流类型':'CashType','确认金额(结算货币)':'Amount'})
tmp1=collateral1[collateral1['CashType']=='前端支付'][['ContractID','Amount']]
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Amount':'前端支付'})
tmp1=collateral1[collateral1['CashType']=='前端期权费'][['ContractID','Amount']]
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Amount':'前端期权费'})
tmp1=collateral1[collateral1['CashType']=='展期期权费'][['ContractID','Amount']]
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Amount':'展期期权费'})
tmp1=collateral1[collateral1['CashType']=='到期结算'][['ContractID','Amount']]
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Amount':'到期结算'})
tmp1=collateral1[collateral1['CashType']=='部分赎回'][['ContractID','Amount']]
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Amount':'部分赎回'})
tmp1=collateral1[collateral1['CashType']=='全部赎回'][['ContractID','Amount']]
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Amount':'全部赎回'})
tmp1=collateral1[collateral1['CashType']=='期间结算'][['ContractID','Amount']]
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Amount':'期间结算'})
tmp1=collateral1[collateral1['CashType']=='红利支付'][['ContractID','Amount']]
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Amount':'红利支付'})
tmp1=collateral1[collateral1['CashType']=='其他'][['ContractID','Amount']]
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Amount':'其他'})
tmp1=collateral1[collateral1['CashType']=='定结期间结算'][['ContractID','Amount']]
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Amount':'定结期间结算'})
Outputs['status1']=''
flag1=np.isnan(Outputs['期初表Upfront'])
flag2=np.isnan(Outputs['期末表Upfront'])
Outputs.loc[flag1&flag2,['status1']]='新起到期'
Outputs.loc[(~flag1)&flag2,['status1']]='存续到期'
Outputs.loc[flag1&(~flag2),['status1']]='新起存续'
Outputs.loc[(~flag1)&(~flag2),['status1']]='两期存续'
Outputs['status2']=''
flag1=(Outputs['status1']=='新起到期')
flag2=(Outputs['status1']=='存续到期')
flag3=(Outputs['status1']=='新起存续')
flag4=(Outputs['status1']=='两期存续')
colflag1=np.isnan(Outputs['前端支付'])
colflag2=np.isnan(Outputs['前端期权费'])
colflag3=np.isnan(Outputs['展期期权费'])
colflag4=np.isnan(Outputs['到期结算'])
colflag5=np.isnan(Outputs['全部赎回'])
colflag6=np.isnan(Outputs['部分赎回'])
colflag7=np.isnan(Outputs['定结期间结算']) #update 0.2.3
tmp1=Outputs[['ContractID','期初表Upfront','期末表Upfront','前端支付','前端期权费','展期期权费','到期结算','部分赎回','全部赎回','定结期间结算']]
tmp1=tmp1.replace(np.nan,0.)
flag5=(tmp1['期末表Upfront']!=0)
flag6=(tmp1['期末表Upfront']-tmp1['期初表Upfront']).round(decimals=4)==0
flag7=(tmp1['期末表Upfront']-tmp1['前端支付']).round(decimals=4)==0
flag8=(tmp1['期末表Upfront']-(tmp1['前端期权费']+tmp1['展期期权费']+tmp1['部分赎回'])).round(decimals=4)==0
#flag9=(tmp1['期末表Upfront']-(tmp1['期初表Upfront']+tmp1['展期期权费']+tmp1['部分赎回'])).round(decimals=4)==0 #update 0.2.3
flag9=(tmp1['期末表Upfront']-(tmp1['期初表Upfront']+tmp1['展期期权费']+tmp1['部分赎回']+tmp1['定结期间结算'])).round(decimals=4)==0 # update 0.2.3: added 定结期间结算 (scheduled interim settlement)
# status '新起到期': opened and matured within the same period
Outputs.loc[flag1,['status2']]='流水异常'
# Outputs.loc[flag1&((~colflag1)|(~colflag2))&((~colflag4)|(~colflag5)),['status2']]='流水正常' #update 0.2.3
Outputs.loc[flag1&((~colflag4)|(~colflag5)),['status2']]='流水正常' #update 0.2.3
# status '存续到期': carried over from last period and matured
Outputs.loc[flag2,['status2']]='流水异常'
Outputs.loc[flag2&((~colflag4)|(~colflag5)),['status2']]='流水正常'
# status '新起存续': newly opened and still live
Outputs.loc[flag3,['status2']]='流水异常'
Outputs.loc[flag3&flag5&((~colflag1)|(~colflag2))&colflag4&colflag5,['status2']]='流水正常'
tmp_flag=((~colflag1)&tmp1['前端支付']!=0)|((~colflag2)&tmp1['前端期权费']!=0) # upfront payment / upfront premium exists and is non-zero
Outputs.loc[flag3&(~flag5)&(colflag4&colflag5)&(~tmp_flag),['status2']]='流水正常'
# status '两期存续': live in both periods
Outputs.loc[flag4,['status2']]='流水异常'
Outputs.loc[flag4&flag6&(colflag3&colflag6&colflag4&colflag5),['status2']]='流水正常'
# Outputs.loc[flag4&(~flag6)&((~colflag3)|(~colflag6)&colflag4&colflag5),['status2']]='流水正常' #update 0.2.3
Outputs.loc[flag4&(~flag6)&((~colflag3)|(~colflag6)|(~colflag7)&colflag4&colflag5),['status2']]='流水正常' #增加定结期间结算 #update 0.2.3
Outputs['status3']=''
flag10=(Outputs['status2']=='流水异常')
Outputs.loc[flag10,['status3']]='流水异常,未验证金额'
Outputs.loc[(~flag10)&flag1,['status3']]='无需验证金额'
Outputs.loc[(~flag10)&flag2,['status3']]='无需验证金额'
Outputs.loc[(~flag10)&flag3,['status3']]='金额异常'
Outputs.loc[(~flag10)&flag3&(flag7|flag8|(~flag5)),['status3']]='金额正常'
Outputs.loc[(~flag10)&flag4,['status3']]='金额异常'
Outputs.loc[(~flag10)&flag4&(flag6|flag9),['status3']]='金额正常'
return Outputs
def Check1(lastmonth,thismonth,collateral):
thismonth['Upfront结算货币'] = pd.to_numeric(thismonth['Upfront结算货币'], errors='coerce')
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
import plotly.express as px
# train-test split by a percentage.
# input: dataframe, label column name, split ration, and random state
# returns: x_train, x_test, y_train, y_test
def split_df(user_df: pd.DataFrame, label_name: str, split_ratio=0.8, random_value=42):
x_train = user_df.sample(frac=split_ratio, random_state=random_value)
x_test = user_df.drop(x_train.index)
return x_train.drop(label_name, axis=1), x_test.drop(label_name, axis=1), pd.DataFrame(
x_train[label_name]), pd.DataFrame(x_test[label_name])
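# Usage sketch for split_df on a tiny made-up frame:
import pandas as pd
_df = pd.DataFrame({"feature": range(10), "target": [1, -1] * 5})
_xtr, _xte, _ytr, _yte = split_df(_df, "target", split_ratio=0.8)
print(len(_xtr), len(_xte))  # 8 2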
# import data and preprocess it
def preprocessing(file_name: str):
# data import
heart_df = pd.read_csv(file_name)
# converting target to 1 and -1
new_label = []
for x in heart_df['target']:
if x == 1:
new_label.append(1)
else:
new_label.append(-1)
heart_df['target'] = new_label
# heart_df = heart_df.rename(columns={'target': 'label'})
# hot encoding of relevant features
dummy_features_list = ['sex', 'cp', 'fbs', 'restecg', 'exang', 'slope', 'ca', 'thal']
non_dummy_features_list = ['age', 'trestbps', 'chol', 'thalach', 'oldpeak', 'target']
new_heart_df = pd.DataFrame(heart_df[non_dummy_features_list])
for feature in dummy_features_list:
new_heart_df = new_heart_df.join(pd.get_dummies(heart_df[feature], prefix=feature))
return heart_df
# Create as arrays of stump tree in a given size
def create_stump_forest(forest_size: int, random_state_local: int):
stump_forest = []
for i in range(0, forest_size, 1):
stump_forest.append(DecisionTreeClassifier(criterion='gini', max_depth=1, random_state=random_state_local))
return stump_forest
# update weight of each row and randomly generate a new weighted data frame
# input: x/y data, predictions list, current stump weight
# return: new weighted x and y data frames
def create_new_weighted_data(x: pd.DataFrame, y: pd.DataFrame, predictions: np.ndarray, stump_weight: list):
# initiate weights
sample_weight = 1/len(x)
new_weights = []
# calculate new weights based on correct and incorrect decisions
for i in range(0, len(predictions), 1):
if predictions[i] == 1:
new_weights.append(sample_weight*np.exp(-np.sum(stump_weight)))
else:
new_weights.append(sample_weight*np.exp(np.sum(stump_weight)))
# normalize weights
sum_of_new_weights = sum(new_weights)
new_normalized_weights = new_weights/sum_of_new_weights
# build cumulative distribution weights for random row sampling
distribution_weights = []
accumulator = 0
for new_normalized_weight in new_normalized_weights:
accumulator += new_normalized_weight
distribution_weights.append(accumulator)
# based on the row weights, randomly pick new data
new_x = pd.DataFrame(columns=x.columns)
new_y = pd.DataFrame(columns=y.columns)
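# Sketch of the AdaBoost-style reweighting implemented above, on made-up
# values: rows the code treats as correct (prediction == 1) are down-weighted
# by exp(-sum(alpha)), the rest are up-weighted by exp(+sum(alpha)), and the
# weights are then renormalized.
import numpy as np
_alpha = [0.42]                    # stump weights accumulated so far
_preds = np.array([1, 1, -1, 1])
_w = np.where(_preds == 1, np.exp(-np.sum(_alpha)), np.exp(np.sum(_alpha)))
print(_w / _w.sum())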
# -*- coding: utf-8 -*-
# Arithmetc tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.core import ops
from pandas.errors import NullFrequencyError
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = pd.timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
# TODO: parametrize over scalar datetime types?
result = Timestamp('2011-01-01') + idx
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with int-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser + Series([2, 3, 4])
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="GH#19123 integer "
"interpreted as "
"nanoseconds",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_radd_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
# Import libraries
import os
import sys
import anemoi as an
import pandas as pd
import numpy as np
import pyodbc
from datetime import datetime
import requests
import collections
import json
import urllib3
def return_between_date_query_string(start_date, end_date):
if start_date != None and end_date != None:
start_end_str = '''AND [TimeStampLocal] >= '%s' AND [TimeStampLocal] < '%s' ''' %(start_date, end_date)
elif start_date != None and end_date == None:
start_end_str = '''AND [TimeStampLocal] >= '%s' ''' %(start_date)
elif start_date == None and end_date != None:
start_end_str = '''AND [TimeStampLocal] < '%s' ''' %(end_date)
else:
start_end_str = ''
return start_end_str
def sql_or_string_from_mvs_ids(mvs_ids):
or_string = ' OR '.join(['mvs_id = {}'.format(mvs_id) for mvs_id in mvs_ids])
return or_string
def sql_list_from_mvs_ids(mvs_ids):
if not isinstance(mvs_ids, list):
mvs_ids = [mvs_ids]
mvs_ids_list = ','.join([f"({mvs_id}_1)" for mvs_id in mvs_ids])
return mvs_ids_list
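# Quick check of the SQL helper formats (sensor ids are made up):
print(sql_or_string_from_mvs_ids([101, 102]))  # mvs_id = 101 OR mvs_id = 102
print(sql_list_from_mvs_ids([101, 102]))       # (101_1),(102_1)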
def rename_mvs_id_column(col, names, types):
name = names[int(col.split('_')[0])]
data_type = types[col.split('_')[1]]
return f'{name}_{data_type}'
# Define DataBase class
class M2D2(object):
'''Class to connect to RAG M2D2 PRD database
'''
def __init__(self):
'''Data structure for connecting to and downloading data from M2D2. Convention is::
import anemoi as an
m2d2 = an.io.database.M2D2()
:Parameters:
:Returns:
out: an.M2D2 object connected to M2D2
'''
self.database = 'M2D2'
server = '10.1.15.53' # PRD
#server = 'SDHQRAGDBDEV01\RAGSQLDBSTG' #STG
db = 'M2D2_DB_BE'
conn_str = 'DRIVER={SQL Server}; SERVER=%s; DATABASE=%s; Trusted_Connection=yes' %(server, db)
self.conn_str = conn_str #Assign connection string
try:
self.conn = pyodbc.connect(self.conn_str) #Apply connection string to connect to database
except:
print('Database connection error: you either don\'t have permission to the database or aren\'t signed onto the VPN')
def connection_check(self, database):
return self.database == database
def masts(self):
'''
:Returns:
out: DataFrame of all met masts with measured data in M2D2
Example::
import anemoi as an
m2d2 = an.io.database.M2D2()
m2d2.masts()
'''
if not self.connection_check('M2D2'):
raise ValueError('Need to connect to M2D2 to retrieve met masts. Use anemoi.DataBase(database="M2D2")')
sql_query_masts = '''
SELECT [Project]
,[AssetID]
,[wmm_id]
,[mvs_id]
,[Name]
,[Type]
,[StartDate]
,[StopDate]
FROM [M2D2_DB_BE].[dbo].[ViewProjectAssetSensors] WITH (NOLOCK)
'''
sql_query_coordinates='''
SELECT [wmm_id]
,[WMM_Latitude]
,[WMM_Longitude]
,[WMM_Elevation]
FROM [M2D2_DB_BE].[dbo].[ViewWindDataSet]'''
masts = pd.read_sql(sql_query_masts, self.conn, parse_dates=['StartDate', 'StopDate'])
coordinates = pd.read_sql(sql_query_coordinates, self.conn)
masts = masts.merge(coordinates, left_on='wmm_id', right_on='wmm_id')
masts.set_index(['Project', 'wmm_id', 'WMM_Latitude', 'WMM_Longitude', 'Type'], inplace=True)
masts.sort_index(inplace=True)
return masts
def mvs_ids(self):
masts = self.masts()
mvs_ids = masts.mvs_id.values.tolist()
return mvs_ids
def valid_signal_labels(self):
signal_type_query = '''
SELECT [MDVT_ID]
,[MDVT_Name]
FROM [M2D2_DB_BE].[dbo].[MDataValueType]'''
signal_types = pd.read_sql(signal_type_query, self.conn, index_col='MDVT_Name').MDVT_ID
return signal_types
def column_labels_for_masts(self):
masts = self.masts()
mvs_ids = masts.mvs_id.unique().tolist()
or_string = ' OR '.join(['mvs_id = {}'.format(mvs_id) for mvs_id in mvs_ids])
column_label_sql_query = '''
SELECT [column_id]
,[label]
FROM [M2D2_DB_BE].[dbo].[ViewWindogMetaData]
WITH (NOLOCK)
WHERE {}'''.format(or_string)
column_labels = pd.read_sql(column_label_sql_query, self.conn)
column_labels = column_labels.set_index('column_id')
return column_labels
def column_labels_for_data_from_mvs_ids(self, data):
masts = self.masts()
names_map = pd.Series(index=masts.mvs_id.values, data=masts.Name.values).to_dict()
types = self.valid_signal_labels()
types.loc['FLAG'] = 'Flag'
types_map = pd.Series(index=types.values.astype(str), data=types.index.values).to_dict()
data = data.rename(lambda x: rename_mvs_id_column(x, names=names_map, types=types_map), axis=1)
return data
def column_labels_for_wmm_id(self, wmm_id):
masts = self.masts()
mvs_ids = masts.loc[pd.IndexSlice[:,wmm_id],:].mvs_id.unique().tolist()
        or_string = sql_or_string_from_mvs_ids(mvs_ids)
column_label_sql_query = '''
SELECT [column_id]
,[label]
FROM [M2D2_DB_BE].[dbo].[ViewWindogMetaData]
WITH (NOLOCK)
WHERE {}'''.format(or_string)
column_labels = pd.read_sql(column_label_sql_query, self.conn)
column_labels = column_labels.set_index('column_id')
return column_labels
def data_from_sensors_mvs_ids(self, mvs_ids, signal_type='AVG'):
'''Download sensor data from M2D2
:Parameters:
mvs_ids: int or list
Virtual sensor IDs (mvs_ids) in M2D2, can be singular
signal_type: str, default 'AVG' - NOT SUPPORTED AT THIS TIME
Signal type for download
For example: 'AVG', 'SD', 'MIN', 'MAX', 'GUST'
:Returns:
out: DataFrame with signal data from virtual sensor
'''
if not isinstance(mvs_ids, list):
mvs_ids = [mvs_ids]
valid_mvs_ids = self.mvs_ids()
assert all([mvs_id in valid_mvs_ids for mvs_id in mvs_ids]), f'One of the following is not a valid mvs_id: {mvs_ids}'
mvs_ids_list = sql_list_from_mvs_ids(mvs_ids)
        sql_query = f"""
SET NOCOUNT ON
DECLARE @ColumnListID NVARCHAR(4000)
,@startDate DATETIME2
,@endDate DATETIME2
SET @ColumnListID= '{mvs_ids_list}'
SET @startDate = NULL
SET @endDate = NULL
EXECUTE [dbo].[proc_DataExport_GetDataByColumnList]
@ColumnListID
,@startDate
,@endDate
"""
data = pd.read_sql(sql_query, self.conn, index_col='CorrectedTimestamp')
data.index.name = 'stamp'
data.columns.name = 'sensor'
data = self.column_labels_for_data_from_mvs_ids(data)
return data
def data_from_mast_wmm_id(self, wmm_id):
'''Download data from all sensors on a mast from M2D2
:Parameters:
wmm_id: int
Mast ID (wmm_id) in M2D2
:Returns:
out: DataFrame with signal data from each virtual sensor on the mast
'''
masts = self.masts()
wmm_ids = masts.index.get_level_values('wmm_id').sort_values().unique().tolist()
assert wmm_id in wmm_ids, f'the following is not a valid wmm_id: {wmm_id}'
mvs_ids = masts.loc[pd.IndexSlice[:,wmm_id],:].mvs_id.values.tolist()
data = self.data_from_sensors_mvs_ids(mvs_ids)
return data
def metadata_from_mast_wmm_id(self, wmm_id):
'''Download mast metadata from M2D2
:Parameters:
wmm_id: int
Mast ID (wmm_id) in M2D2
:Returns:
out: DataFrame with mast metadata
'''
        sql_query = '''
SELECT [WMM_Latitude]
,[WMM_Longitude]
,[WMM_Elevation]
FROM [M2D2_DB_BE].[dbo].[ViewWindDataSet]
WHERE wmm_id = {}
'''.format(wmm_id)
mast_metadata = pd.read_sql(sql_query, self.conn)
return mast_metadata
def mast_from_wmm_id(self, wmm_id):
'''Download an.MetMast from M2D2
:Parameters:
wmm_id: int
Mast ID (wmm_id) in M2D2
:Returns:
out: an.MetMast with data and metadata from M2D2
'''
print(f'Downloading Mast {wmm_id} from M2D2')
data = self.data_from_mast_wmm_id(wmm_id=wmm_id)
metadata = self.metadata_from_mast_wmm_id(wmm_id=wmm_id)
mast = an.MetMast(data=data,
name=wmm_id,
lat=metadata.WMM_Latitude[0],
lon=metadata.WMM_Longitude[0],
elev=metadata.WMM_Elevation[0])
return mast
def masts_from_project(self, project):
'''Download an.MetMasts from M2D2 for a given project
:Parameters:
        project : str
            Project name in M2D2
:Returns:
out: List of an.MetMasts with data and metadata from M2D2 for a given project
'''
masts = self.masts()
projects = masts.index.get_level_values('Project').unique().tolist()
        assert project in projects, f'Project {project} not found in M2D2'
wmm_ids = masts.loc[project,:].index.get_level_values('wmm_id').sort_values().unique().tolist()
masts = [self.mast_from_wmm_id(wmm_id) for wmm_id in wmm_ids]
return masts
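# Hedged usage sketch for the M2D2 class above; the wmm_id and mvs_ids are
# hypothetical and a live VPN/database connection is assumed.
def _example_m2d2_usage():
    m2d2 = M2D2()
    masts = m2d2.masts()                               # metadata for every mast
    data = m2d2.data_from_sensors_mvs_ids([101, 102])  # 10-minute sensor data
    mast = m2d2.mast_from_wmm_id(5)                    # an.MetMast with data and metadata
    return masts, data, mast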
# Define Turbine class
class Turbine(object):
'''Class to connect to EDF Wind Turbine database
'''
def __init__(self):
        '''Data structure for connecting to and downloading data from the Turbine database. Convention is:
import anemoi as an
turb_db = an.io.database.Turbine()
:Parameters:
:Returns:
out: an.Turbine object connected to Turbine database
'''
self.database = 'Turbine'
server = '10.1.15.53'
db = 'Turbine_DB_BE'
conn_str = 'DRIVER={SQL Server}; SERVER=%s; DATABASE=%s; Trusted_Connection=yes' %(server, db)
self.conn_str = conn_str #Assign connection string
try:
self.conn = pyodbc.connect(self.conn_str) #Apply connection string to connect to database
        except pyodbc.Error:
print('Database connection error: you either don\'t have permission to the database or aren\'t signed onto the VPN')
def is_connected(self, database):
return self.database == database
def metadata(self):
'''Get turbine model metadata'''
assert self.is_connected('Turbine'), 'Trying to query the Turbine DB without being connected.'
sql_query_turbines = '''
SELECT [TUR_Manufacturer]
,[TUR_RatedOutputkW]
,[TPC_MaxOutput]
,[TUR_RotorDia]
,[TUR_Model]
,[AllHubHeights]
,[TPC_DocumentDate]
,[TUR_ID]
,[IECClass]
,[TPG_ID]
,[TPG_Name]
,[TPC_ID]
,[TVR_VersionName]
,[TPC_dbalevel]
,[TPC_TIScenario]
,[TPC_BinType]
,[TTC_ID]
,[TRPMC_ID]
,[P_ID]
,[P_Name]
FROM [Turbine_DB_BE].[NodeEstimate].[AllPowerCurves]
WHERE TPC_Type = 'Manufacturer General Spec'
'''
turbines = pd.read_sql(sql_query_turbines, self.conn)
return turbines
    def power_curve_from_tpc_id(self, tpc_id):
        '''Get a turbine power curve from its TPC ID'''
        assert self.is_connected('Turbine'), 'Trying to query the Turbine DB without being connected.'
        sql_query_power_curve = '''
        SELECT TPCD_AirDensity,
        TPCD_WindSpeedBin,
        TPCD_OutputKW
        FROM TPCDETAILS
        WHERE TPC_id = {} AND TPCD_IsDeleted = 0;
        '''.format(tpc_id)
        power_curve = pd.read_sql(sql_query_power_curve, self.conn)
        return power_curve
    def thrust_curve_from_ttc_id(self, ttc_id):
        '''Get a turbine thrust curve from its TTC ID'''
        assert self.is_connected('Turbine'), 'Trying to query the Turbine DB without being connected.'
        sql_query_thrust_curve = '''
        SELECT TTCD_AirDensity,
        TTCD_WindSpeedBin,
        TTCD_ThrustValue
        FROM TTCDETAILS
        WHERE TTC_id = {} AND TTCD_IsDeleted = 0;
        '''.format(ttc_id)
        thrust_curve = pd.read_sql(sql_query_thrust_curve, self.conn)
        return thrust_curve
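# Hedged usage sketch for the Turbine class above: pull the power-curve
# catalogue, then fetch the curves for one (hypothetically chosen) row.
def _example_turbine_usage():
    turb_db = Turbine()
    meta = turb_db.metadata()
    first = meta.iloc[0]
    power_curve = turb_db.power_curve_from_tpc_id(first.TPC_ID)
    thrust_curve = turb_db.thrust_curve_from_ttc_id(first.TTC_ID)
    return power_curve, thrust_curve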
# Define Padre class
class Padre(object):
'''Class to connect to PRE Padre database
'''
def __init__(self, database='PADREScada', conn_str=None, conn=None, domino=False):
'''Data structure with both database name and connection string.
:Parameters:
database: string, default None
Name of the padre database to connect to
conn_str: string, default None
SQL connection string needed to connect to the database
conn: object, default None
SQL connection object to database
'''
        self.database = database
        if conn_str is None:
            if self.database == 'PADREScada':
                server = '10.1.106.44'
                db = 'PADREScada'
            elif self.database == 'PadrePI':
                server = '10.1.106.44'
                db = 'PADREScada'
            conn_str = 'DRIVER={SQL Server}; SERVER=%s; DATABASE=%s; Trusted_Connection=yes' %(server, db)
        self.conn_str = conn_str
        if conn is not None:
            self.conn = conn
        else:
            try:
                self.conn = pyodbc.connect(self.conn_str)
            except pyodbc.Error:
                print('Database connection error: you either don\'t have permission to the database or aren\'t signed onto the VPN')
def is_connected(self, database):
return self.database == database
def assets(self, project=None, turbines_only=False):
'''Returns:
DataFrame of all turbines within Padre
'''
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve turbines. Use anemoi.DataBase(database="Padre")')
sql_query_assets = '''
SELECT [AssetKey]
,Projects.[ProjectName]
,[AssetType]
,[AssetName]
,Turbines.[Latitude]
,Turbines.[Longitude]
,[elevation_mt]
FROM [PADREScada].[dbo].[Asset] as Turbines
WITH (NOLOCK)
INNER JOIN [PADREScada].[dbo].[Project] as Projects on Turbines.ProjectKey = Projects.ProjectKey
'''
assets = pd.read_sql(sql_query_assets, self.conn)
assets.set_index(['ProjectName', 'AssetName'], inplace=True)
assets.sort_index(axis=0, inplace=True)
if turbines_only:
assets = assets.loc[assets.AssetType == 'Turbine', :]
assets.drop('AssetType', axis=1, inplace=True)
if project is not None:
assets = assets.loc[project, :]
return assets
def operational_projects(self):
'''Returns:
List of all projects within Padre
'''
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve projects. Use anemoi.DataBase(database="Padre")')
padre_project_query = """
SELECT [ProjectKey]
,[ProjectName]
,[State]
,[NamePlateCapacity]
,[NumGenerators]
,[latitude]
,[longitude]
,[DateCOD]
FROM [PADREScada].[dbo].[Project]
WHERE technology = 'Wind'"""
projects = pd.read_sql(padre_project_query, self.conn)
projects.set_index('ProjectName', inplace=True)
return projects
def turbine_categorizations(self, category_type='EDF'):
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve turbines. Use anemoi.DataBase(database="Padre")')
        padre_category_query = """
SELECT [CategoryKey]
,[StringName]
FROM [PADREScada].[dbo].[Categories]
WHERE CategoryType = '%s'""" %category_type
        categories = pd.read_sql(padre_category_query, self.conn)
categories.set_index('CategoryKey', inplace=True)
return categories
def QCd_turbine_data(self, asset_key):
if not self.is_connected('PADREScada'):
            raise ValueError('Need to connect to Padre to retrieve turbine data. Use anemoi.DataBase(database="Padre")')
turbine_data_query = '''
SELECT [TimeStampLocal]
,[Average_Nacelle_Wdspd]
,[Average_Active_Power]
,[Average_Ambient_Temperature]
,[IEC Category]
,[EDF Category]
,[Expected Power (kW)]
,[Expected Energy (kWh)]
,[EnergyDelta (kWh)]
,[EnergyDelta (MWh)]
FROM [PADREScada].[dbo].[vw_10mDataBI]
WITH (NOLOCK)
WHERE [assetkey] = %i''' %asset_key
turbine_data = pd.read_sql(turbine_data_query, self.conn)
turbine_data['TimeStampLocal'] = pd.to_datetime(turbine_data['TimeStampLocal'], format='%Y-%m-%d %H:%M:%S')
turbine_data.set_index('TimeStampLocal', inplace=True)
turbine_data.sort_index(axis=0, inplace=True)
turbine_data = turbine_data.groupby(turbine_data.index).first()
return turbine_data
def raw_turbine_data(self, asset_key, start_date=None, end_date=None):
if not self.is_connected('PADREScada'):
            raise ValueError('Need to connect to Padre to retrieve turbine data. Use anemoi.DataBase(database="Padre")')
turbine_data_query = '''
SELECT
[TimeStampLocal]
,[Average_Nacelle_Wdspd]
,[Average_Active_Power]
,[Average_Nacelle_Direction]
,[Average_Blade_Pitch]
,[Minimum_Blade_Pitch]
,[Maximum_Blade_Pitch]
,[Average_Rotor_Speed]
,[Minimum_Rotor_Speed]
,[Maximum_Rotor_Speed]
,[Average_Ambient_Temperature]
,coalesce([IECStringKey_Manual]
,[IECStringKey_FF]
,[IECStringKey_Default]) IECKey
,coalesce([EDFStringKey_Manual]
,[EDFStringKey_FF]
,[EDFStringKey_Default]) EDFKey
,coalesce([State_and_Fault_Manual]
,[State_and_Fault_FF]
,[State_and_Fault]) State_and_Fault
FROM [PADREScada].[dbo].[WTGCalcData10m]
WITH (NOLOCK)
WHERE [assetkey] = {} {}'''.format(asset_key, return_between_date_query_string(start_date, end_date))
turbine_data = pd.read_sql(turbine_data_query, self.conn)
        turbine_data['TimeStampLocal'] = pd.to_datetime(turbine_data['TimeStampLocal'], format='%Y-%m-%d %H:%M:%S')
        turbine_data.set_index('TimeStampLocal', inplace=True)
        turbine_data.sort_index(axis=0, inplace=True)
        return turbine_data
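# Hedged usage sketch for the Padre class above; the asset key and date
# window are hypothetical.
def _example_padre_usage():
    padre = Padre()
    projects = padre.operational_projects()
    turbines = padre.assets(turbines_only=True)
    qcd = padre.QCd_turbine_data(1234)
    raw = padre.raw_turbine_data(1234, start_date='2018-01-01', end_date='2018-02-01')
    return projects, turbines, qcd, raw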
from datetime import datetime, timedelta
import warnings
import operator
from textwrap import dedent
import numpy as np
from pandas._libs import (lib, index as libindex, tslib as libts,
algos as libalgos, join as libjoin,
Timedelta)
from pandas._libs.lib import is_datetime_array
from pandas.compat import range, u, set_function_name
from pandas.compat.numpy import function as nv
from pandas import compat
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import ExtensionArray
from pandas.core.dtypes.generic import (
ABCSeries, ABCDataFrame,
ABCMultiIndex,
ABCPeriodIndex, ABCTimedeltaIndex,
ABCDateOffset)
from pandas.core.dtypes.missing import isna, array_equivalent
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_object,
_ensure_categorical,
_ensure_platform_int,
is_integer,
is_float,
is_dtype_equal,
is_dtype_union_equal,
is_object_dtype,
is_categorical,
is_categorical_dtype,
is_interval_dtype,
is_period_dtype,
is_bool,
is_bool_dtype,
is_signed_integer_dtype,
is_unsigned_integer_dtype,
is_integer_dtype, is_float_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_timedelta64_dtype,
is_hashable,
needs_i8_conversion,
is_iterator, is_list_like,
is_scalar)
from pandas.core.base import PandasObject, IndexOpsMixin
import pandas.core.common as com
from pandas.core import ops
from pandas.util._decorators import (
Appender, Substitution, cache_readonly, deprecate_kwarg)
from pandas.core.indexes.frozen import FrozenList
import pandas.core.dtypes.concat as _concat
import pandas.core.missing as missing
import pandas.core.algorithms as algos
import pandas.core.sorting as sorting
from pandas.io.formats.printing import (
pprint_thing, default_pprint, format_object_summary, format_object_attrs)
from pandas.core.ops import make_invalid_op
from pandas.core.strings import StringMethods
__all__ = ['Index']
_unsortable_types = frozenset(('mixed', 'mixed-integer'))
_index_doc_kwargs = dict(klass='Index', inplace='',
target_klass='Index',
unique='Index', duplicated='np.ndarray')
_index_shared_docs = dict()
def _try_get_item(x):
try:
return x.item()
except AttributeError:
return x
def _make_comparison_op(op, cls):
def cmp_method(self, other):
if isinstance(other, (np.ndarray, Index, ABCSeries)):
if other.ndim > 0 and len(self) != len(other):
raise ValueError('Lengths must match to compare')
# we may need to directly compare underlying
# representations
if needs_i8_conversion(self) and needs_i8_conversion(other):
return self._evaluate_compare(other, op)
if is_object_dtype(self) and self.nlevels == 1:
# don't pass MultiIndex
with np.errstate(all='ignore'):
result = ops._comp_method_OBJECT_ARRAY(op, self.values, other)
else:
# numpy will show a DeprecationWarning on invalid elementwise
# comparisons, this will raise in the future
with warnings.catch_warnings(record=True):
with np.errstate(all='ignore'):
result = op(self.values, np.asarray(other))
# technically we could support bool dtyped Index
# for now just return the indexing array directly
if is_bool_dtype(result):
return result
try:
return Index(result)
except TypeError:
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(cmp_method, name, cls)
def _make_arithmetic_op(op, cls):
def index_arithmetic_method(self, other):
if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
elif isinstance(other, ABCTimedeltaIndex):
# Defer to subclass implementation
return NotImplemented
other = self._validate_for_numeric_binop(other, op)
# handle time-based others
if isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)):
return self._evaluate_with_timedelta_like(other, op)
elif isinstance(other, (datetime, np.datetime64)):
return self._evaluate_with_datetime_like(other, op)
values = self.values
with np.errstate(all='ignore'):
result = op(values, other)
result = missing.dispatch_missing(op, values, other, result)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
if op is divmod:
result = (Index(result[0], **attrs), Index(result[1], **attrs))
else:
result = Index(result, **attrs)
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(index_arithmetic_method, name, cls)
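# Hedged sketch of the factory pattern used by _make_comparison_op and
# _make_arithmetic_op above: build one closure per operator and bind it to
# the class under its dunder name via set_function_name. The Toy class is
# purely illustrative.
def _example_bind_ops():
    class Toy(object):
        def __init__(self, x):
            self.x = x

    def _make_op(op):
        def method(self, other):
            return op(self.x, getattr(other, 'x', other))
        return set_function_name(method, '__{name}__'.format(name=op.__name__), Toy)

    for op in (operator.eq, operator.add):
        setattr(Toy, '__{name}__'.format(name=op.__name__), _make_op(op))
    return Toy(1) + Toy(2), Toy(1) == Toy(1)  # -> (3, True)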
class InvalidIndexError(Exception):
pass
_o_dtype = np.dtype(object)
_Identity = object
def _new_Index(cls, d):
""" This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__
"""
# required for backward compat, because PI can't be instantiated with
# ordinals through __new__ GH #13277
if issubclass(cls, ABCPeriodIndex):
from pandas.core.indexes.period import _new_PeriodIndex
return _new_PeriodIndex(cls, **d)
return cls.__new__(cls, **d)
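# Hedged sketch of the unpickling path wired through _new_Index: __reduce__
# (defined on Index below) returns (_new_Index, (cls, d)), so a pickle round
# trip reconstructs through the hook above rather than a bare __new__.
def _example_index_pickle_roundtrip():
    import pickle
    idx = Index([1, 2, 3], name='demo')
    restored = pickle.loads(pickle.dumps(idx))
    return restored.equals(idx) and restored.name == idx.name  # True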
class Index(IndexOpsMixin, PandasObject):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
If dtype is None, we find the dtype that best fits the data.
If an actual dtype is provided, we coerce to that dtype if it's safe.
Otherwise, an error will be raised.
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible
Notes
-----
An Index instance can **only** contain hashable objects
Examples
--------
>>> pd.Index([1, 2, 3])
Int64Index([1, 2, 3], dtype='int64')
>>> pd.Index(list('abc'))
Index(['a', 'b', 'c'], dtype='object')
See Also
---------
RangeIndex : Index implementing a monotonic integer range
CategoricalIndex : Index of :class:`Categorical` s.
MultiIndex : A multi-level, or hierarchical, Index
IntervalIndex : an Index of :class:`Interval` s.
DatetimeIndex, TimedeltaIndex, PeriodIndex
Int64Index, UInt64Index, Float64Index
"""
# To hand over control to subclasses
_join_precedence = 1
# Cython methods
_left_indexer_unique = libjoin.left_join_indexer_unique_object
_left_indexer = libjoin.left_join_indexer_object
_inner_indexer = libjoin.inner_join_indexer_object
_outer_indexer = libjoin.outer_join_indexer_object
_typ = 'index'
_data = None
_id = None
name = None
asi8 = None
_comparables = ['name']
_attributes = ['name']
_is_numeric_dtype = False
_can_hold_na = True
# would we like our indexing holder to defer to us
_defer_to_indexing = False
# prioritize current class for _shallow_copy_with_infer,
# used to infer integers as datetime-likes
_infer_as_myclass = False
_engine_type = libindex.ObjectEngine
_accessors = set(['str'])
str = CachedAccessor("str", StringMethods)
def __new__(cls, data=None, dtype=None, copy=False, name=None,
fastpath=False, tupleize_cols=True, **kwargs):
if name is None and hasattr(data, 'name'):
name = data.name
if fastpath:
return cls._simple_new(data, name)
from .range import RangeIndex
# range
if isinstance(data, RangeIndex):
return RangeIndex(start=data, copy=copy, dtype=dtype, name=name)
elif isinstance(data, range):
return RangeIndex.from_range(data, copy=copy, dtype=dtype,
name=name)
# categorical
if is_categorical_dtype(data) or is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(data, dtype=dtype, copy=copy, name=name,
**kwargs)
# interval
if is_interval_dtype(data) or is_interval_dtype(dtype):
from .interval import IntervalIndex
closed = kwargs.get('closed', None)
return IntervalIndex(data, dtype=dtype, name=name, copy=copy,
closed=closed)
# index-like
elif isinstance(data, (np.ndarray, Index, ABCSeries)):
if (is_datetime64_any_dtype(data) or
(dtype is not None and is_datetime64_any_dtype(dtype)) or
'tz' in kwargs):
from pandas.core.indexes.datetimes import DatetimeIndex
result = DatetimeIndex(data, copy=copy, name=name,
dtype=dtype, **kwargs)
if dtype is not None and is_dtype_equal(_o_dtype, dtype):
return Index(result.to_pydatetime(), dtype=_o_dtype)
else:
return result
elif (is_timedelta64_dtype(data) or
(dtype is not None and is_timedelta64_dtype(dtype))):
from pandas.core.indexes.timedeltas import TimedeltaIndex
result = TimedeltaIndex(data, copy=copy, name=name, **kwargs)
if dtype is not None and _o_dtype == dtype:
return Index(result.to_pytimedelta(), dtype=_o_dtype)
else:
return result
if dtype is not None:
try:
# we need to avoid having numpy coerce
# things that look like ints/floats to ints unless
# they are actually ints, e.g. '0' and 0.0
# should not be coerced
# GH 11836
if is_integer_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'integer':
try:
data = np.array(data, copy=copy, dtype=dtype)
except OverflowError:
# gh-15823: a more user-friendly error message
raise OverflowError(
"the elements provided in the data cannot "
"all be casted to the dtype {dtype}"
.format(dtype=dtype))
elif inferred in ['floating', 'mixed-integer-float']:
if isna(data).any():
raise ValueError('cannot convert float '
'NaN to integer')
# If we are actually all equal to integers,
# then coerce to integer.
try:
return cls._try_convert_to_int_index(
data, copy, name, dtype)
except ValueError:
pass
# Return an actual float index.
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype,
name=name)
elif inferred == 'string':
pass
else:
data = data.astype(dtype)
elif is_float_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'string':
pass
else:
data = data.astype(dtype)
else:
data = np.array(data, dtype=dtype, copy=copy)
except (TypeError, ValueError) as e:
msg = str(e)
if 'cannot convert float' in msg:
raise
# maybe coerce to a sub-class
from pandas.core.indexes.period import (
PeriodIndex, IncompatibleFrequency)
if isinstance(data, PeriodIndex):
return PeriodIndex(data, copy=copy, name=name, **kwargs)
if is_signed_integer_dtype(data.dtype):
from .numeric import Int64Index
return Int64Index(data, copy=copy, dtype=dtype, name=name)
elif is_unsigned_integer_dtype(data.dtype):
from .numeric import UInt64Index
return UInt64Index(data, copy=copy, dtype=dtype, name=name)
elif is_float_dtype(data.dtype):
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype, name=name)
elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data):
subarr = data.astype('object')
else:
subarr = com._asarray_tuplesafe(data, dtype=object)
# _asarray_tuplesafe does not always copy underlying data,
# so need to make sure that this happens
if copy:
subarr = subarr.copy()
if dtype is None:
inferred = lib.infer_dtype(subarr)
if inferred == 'integer':
try:
return cls._try_convert_to_int_index(
subarr, copy, name, dtype)
except ValueError:
pass
return Index(subarr, copy=copy,
dtype=object, name=name)
elif inferred in ['floating', 'mixed-integer-float']:
from .numeric import Float64Index
return Float64Index(subarr, copy=copy, name=name)
elif inferred == 'interval':
from .interval import IntervalIndex
return IntervalIndex(subarr, name=name, copy=copy)
elif inferred == 'boolean':
# don't support boolean explicitly ATM
pass
elif inferred != 'string':
if inferred.startswith('datetime'):
if (lib.is_datetime_with_singletz_array(subarr) or
'tz' in kwargs):
# only when subarr has the same tz
from pandas.core.indexes.datetimes import (
DatetimeIndex)
try:
return DatetimeIndex(subarr, copy=copy,
name=name, **kwargs)
except libts.OutOfBoundsDatetime:
pass
elif inferred.startswith('timedelta'):
from pandas.core.indexes.timedeltas import (
TimedeltaIndex)
return TimedeltaIndex(subarr, copy=copy, name=name,
**kwargs)
elif inferred == 'period':
try:
return PeriodIndex(subarr, name=name, **kwargs)
except IncompatibleFrequency:
pass
return cls._simple_new(subarr, name)
elif hasattr(data, '__array__'):
return Index(np.asarray(data), dtype=dtype, copy=copy, name=name,
**kwargs)
elif data is None or is_scalar(data):
cls._scalar_data_error(data)
else:
if tupleize_cols and is_list_like(data) and data:
if is_iterator(data):
data = list(data)
# we must be all tuples, otherwise don't construct
# 10697
if all(isinstance(e, tuple) for e in data):
from .multi import MultiIndex
return MultiIndex.from_tuples(
data, names=name or kwargs.get('names'))
# other iterable of some kind
subarr = com._asarray_tuplesafe(data, dtype=object)
return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs)
"""
NOTE for new Index creation:
- _simple_new: It returns new Index with the same type as the caller.
All metadata (such as name) must be provided by caller's responsibility.
Using _shallow_copy is recommended because it fills these metadata
otherwise specified.
- _shallow_copy: It returns new Index with the same type (using
_simple_new), but fills caller's metadata otherwise specified. Passed
kwargs will overwrite corresponding metadata.
- _shallow_copy_with_infer: It returns new Index inferring its type
from passed values. It fills caller's metadata otherwise specified as the
same as _shallow_copy.
See each method's docstring.
"""
@classmethod
def _simple_new(cls, values, name=None, dtype=None, **kwargs):
"""
        we require that we have a dtype compat for the values
if we are passed a non-dtype compat, then coerce using the constructor
Must be careful not to recurse.
"""
if not hasattr(values, 'dtype'):
if (values is None or not len(values)) and dtype is not None:
values = np.empty(0, dtype=dtype)
else:
values = np.array(values, copy=False)
if is_object_dtype(values):
values = cls(values, name=name, dtype=dtype,
**kwargs)._ndarray_values
result = object.__new__(cls)
result._data = values
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result, k, v)
return result._reset_identity()
_index_shared_docs['_shallow_copy'] = """
create a new Index with the same class as the caller, don't copy the
data, use the same object attributes with passed in attributes taking
precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, **kwargs):
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
if not len(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype
return self._simple_new(values, **attributes)
def _shallow_copy_with_infer(self, values=None, **kwargs):
"""
create a new Index inferring the class with passed value, don't copy
the data, use the same object attributes with passed in attributes
taking precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
attributes['copy'] = False
if not len(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype
if self._infer_as_myclass:
try:
return self._constructor(values, **attributes)
except (TypeError, ValueError):
pass
return Index(values, **attributes)
def _deepcopy_if_needed(self, orig, copy=False):
"""
.. versionadded:: 0.19.0
Make a copy of self if data coincides (in memory) with orig.
Subclasses should override this if self._base is not an ndarray.
Parameters
----------
orig : ndarray
other ndarray to compare self._data against
copy : boolean, default False
when False, do not run any check, just return self
Returns
-------
A copy of self if needed, otherwise self : Index
"""
if copy:
# Retrieve the "base objects", i.e. the original memory allocations
if not isinstance(orig, np.ndarray):
# orig is a DatetimeIndex
orig = orig.values
orig = orig if orig.base is None else orig.base
new = self._data if self._data.base is None else self._data.base
if orig is new:
return self.copy(deep=True)
return self
def _update_inplace(self, result, **kwargs):
# guard when called from IndexOpsMixin
raise TypeError("Index can't be updated inplace")
def _sort_levels_monotonic(self):
""" compat with MultiIndex """
return self
_index_shared_docs['_get_grouper_for_level'] = """
Get index grouper corresponding to an index level
Parameters
----------
mapper: Group mapping function or None
Function mapping index values to groups
level : int or None
Index level
Returns
-------
grouper : Index
Index of values to group on
labels : ndarray of int or None
Array of locations in level_index
uniques : Index or None
Index of unique values for level
"""
@Appender(_index_shared_docs['_get_grouper_for_level'])
def _get_grouper_for_level(self, mapper, level=None):
assert level is None or level == 0
if mapper is None:
grouper = self
else:
grouper = self.map(mapper)
return grouper, None, None
def is_(self, other):
"""
More flexible, faster check like ``is`` but that works through views
Note: this is *not* the same as ``Index.identical()``, which checks
that metadata is also the same.
Parameters
----------
other : object
other object to compare against.
Returns
-------
True if both have same underlying data, False otherwise : bool
"""
# use something other than None to be clearer
return self._id is getattr(
other, '_id', Ellipsis) and self._id is not None
def _reset_identity(self):
"""Initializes or resets ``_id`` attribute with new object"""
self._id = _Identity()
return self
# ndarray compat
def __len__(self):
"""
return the length of the Index
"""
return len(self._data)
def __array__(self, dtype=None):
""" the array interface, return my values """
return self._data.view(np.ndarray)
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc
"""
if is_bool_dtype(result):
return result
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
return Index(result, **attrs)
@cache_readonly
def dtype(self):
""" return the dtype object of the underlying data """
return self._data.dtype
@cache_readonly
def dtype_str(self):
""" return the dtype str of the underlying data """
return str(self.dtype)
@property
def values(self):
""" return the underlying data as an ndarray """
return self._data.view(np.ndarray)
@property
def _values(self):
# type: () -> Union[ExtensionArray, Index]
# TODO(EA): remove index types as they become extension arrays
"""The best array representation.
This is an ndarray, ExtensionArray, or Index subclass. This differs
from ``_ndarray_values``, which always returns an ndarray.
Both ``_values`` and ``_ndarray_values`` are consistent between
``Series`` and ``Index``.
It may differ from the public '.values' method.
index | values | _values | _ndarray_values |
        ----------------- | --------------- | ----------- | --------------- |
CategoricalIndex | Categorical | Categorical | codes |
DatetimeIndex[tz] | ndarray[M8ns] | DTI[tz] | ndarray[M8ns] |
For the following, the ``._values`` is currently ``ndarray[object]``,
but will soon be an ``ExtensionArray``
index | values | _values | _ndarray_values |
----------------- | --------------- | ------------ | --------------- |
PeriodIndex | ndarray[object] | ndarray[obj] | ndarray[int] |
IntervalIndex | ndarray[object] | ndarray[obj] | ndarray[object] |
See Also
--------
values
_ndarray_values
"""
return self.values
def get_values(self):
"""
Return `Index` data as an `numpy.ndarray`.
Returns
-------
numpy.ndarray
A one-dimensional numpy array of the `Index` values.
See Also
--------
Index.values : The attribute that get_values wraps.
Examples
--------
Getting the `Index` values of a `DataFrame`:
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
... index=['a', 'b', 'c'], columns=['A', 'B', 'C'])
>>> df
A B C
a 1 2 3
b 4 5 6
c 7 8 9
>>> df.index.get_values()
array(['a', 'b', 'c'], dtype=object)
Standalone `Index` values:
>>> idx = pd.Index(['1', '2', '3'])
>>> idx.get_values()
array(['1', '2', '3'], dtype=object)
`MultiIndex` arrays also have only one dimension:
>>> midx = pd.MultiIndex.from_arrays([[1, 2, 3], ['a', 'b', 'c']],
... names=('number', 'letter'))
>>> midx.get_values()
array([(1, 'a'), (2, 'b'), (3, 'c')], dtype=object)
>>> midx.get_values().ndim
1
"""
return self.values
@Appender(IndexOpsMixin.memory_usage.__doc__)
def memory_usage(self, deep=False):
result = super(Index, self).memory_usage(deep=deep)
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
# ops compat
@deprecate_kwarg(old_arg_name='n', new_arg_name='repeats')
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of an Index.
Returns a new index where each element of the current index
is repeated consecutively a given number of times.
Parameters
----------
repeats : int
The number of repetitions for each element.
**kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
pandas.Index
Newly created Index with repeated elements.
See Also
--------
Series.repeat : Equivalent function for Series
numpy.repeat : Underlying implementation
Examples
--------
>>> idx = pd.Index([1, 2, 3])
>>> idx
Int64Index([1, 2, 3], dtype='int64')
>>> idx.repeat(2)
Int64Index([1, 1, 2, 2, 3, 3], dtype='int64')
>>> idx.repeat(3)
Int64Index([1, 1, 1, 2, 2, 2, 3, 3, 3], dtype='int64')
"""
nv.validate_repeat(args, kwargs)
return self._shallow_copy(self._values.repeat(repeats))
_index_shared_docs['where'] = """
.. versionadded:: 0.19.0
Return an Index of same shape as self and whose corresponding
entries are from self where cond is True and otherwise are from
other.
Parameters
----------
cond : boolean array-like with the same length as self
other : scalar, or array-like
"""
@Appender(_index_shared_docs['where'])
def where(self, cond, other=None):
if other is None:
other = self._na_value
dtype = self.dtype
values = self.values
if is_bool(other) or is_bool_dtype(other):
# bools force casting
values = values.astype(object)
dtype = None
values = np.where(cond, values, other)
if self._is_numeric_dtype and np.any(isna(values)):
# We can't coerce to the numeric dtype of "self" (unless
# it's float) if there are NaN values in our output.
dtype = None
return self._shallow_copy_with_infer(values, dtype=dtype)
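    # Illustrative (hedged) example of the cond/other semantics above:
    # Index([1, 2, 3]).where([True, False, True]) keeps the first and last
    # entries and fills the masked slot with the NA value, so a numeric index
    # comes back as Float64Index([1.0, nan, 3.0]).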
def ravel(self, order='C'):
"""
return an ndarray of the flattened values of the underlying data
See also
--------
numpy.ndarray.ravel
"""
return self._ndarray_values.ravel(order=order)
# construction helpers
@classmethod
def _try_convert_to_int_index(cls, data, copy, name, dtype):
"""
Attempt to convert an array of data into an integer index.
Parameters
----------
data : The data to convert.
copy : Whether to copy the data or not.
name : The name of the index returned.
Returns
-------
int_index : data converted to either an Int64Index or a
UInt64Index
Raises
------
ValueError if the conversion was not successful.
"""
from .numeric import Int64Index, UInt64Index
if not is_unsigned_integer_dtype(dtype):
# skip int64 conversion attempt if uint-like dtype is passed, as
            # this could return Int64Index when UInt64Index is what's desired
try:
res = data.astype('i8', copy=False)
if (res == data).all():
return Int64Index(res, copy=copy, name=name)
except (OverflowError, TypeError, ValueError):
pass
# Conversion to int64 failed (possibly due to overflow) or was skipped,
# so let's try now with uint64.
try:
res = data.astype('u8', copy=False)
if (res == data).all():
return UInt64Index(res, copy=copy, name=name)
except (OverflowError, TypeError, ValueError):
pass
raise ValueError
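    # Illustrative (hedged) behavior of the fallback above: data that fits
    # int64 yields an Int64Index, while data that only fits uint64 (e.g. a
    # value of 2**63) falls through to the UInt64Index attempt.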
@classmethod
def _scalar_data_error(cls, data):
raise TypeError('{0}(...) must be called with a collection of some '
'kind, {1} was passed'.format(cls.__name__,
repr(data)))
@classmethod
def _string_data_error(cls, data):
raise TypeError('String dtype not supported, you may need '
'to explicitly cast to a numeric type')
@classmethod
def _coerce_to_ndarray(cls, data):
"""coerces data to ndarray, raises on scalar data. Converts other
iterables to list first and then to array. Does not touch ndarrays.
"""
if not isinstance(data, (np.ndarray, Index)):
if data is None or is_scalar(data):
cls._scalar_data_error(data)
# other iterable of some kind
if not isinstance(data, (ABCSeries, list, tuple)):
data = list(data)
data = np.asarray(data)
return data
def _get_attributes_dict(self):
""" return an attributes dict for my class """
return {k: getattr(self, k, None) for k in self._attributes}
def view(self, cls=None):
# we need to see if we are subclassing an
# index type here
if cls is not None and not hasattr(cls, '_typ'):
result = self._data.view(cls)
else:
result = self._shallow_copy()
if isinstance(result, Index):
result._id = self._id
return result
def _coerce_scalar_to_index(self, item):
"""
we need to coerce a scalar to a compat for our index type
Parameters
----------
item : scalar item to coerce
"""
dtype = self.dtype
if self._is_numeric_dtype and isna(item):
# We can't coerce to the numeric dtype of "self" (unless
# it's float) if there are NaN values in our output.
dtype = None
return Index([item], dtype=dtype, **self._get_attributes_dict())
_index_shared_docs['copy'] = """
Make a copy of this object. Name and dtype sets those attributes on
the new object.
Parameters
----------
name : string, optional
deep : boolean, default False
dtype : numpy dtype or pandas type
Returns
-------
copy : Index
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
"""
@Appender(_index_shared_docs['copy'])
def copy(self, name=None, deep=False, dtype=None, **kwargs):
if deep:
new_index = self._shallow_copy(self._data.copy())
else:
new_index = self._shallow_copy()
names = kwargs.get('names')
names = self._validate_names(name=name, names=names, deep=deep)
new_index = new_index.set_names(names)
if dtype:
new_index = new_index.astype(dtype)
return new_index
def __copy__(self, **kwargs):
return self.copy(**kwargs)
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
return self.copy(deep=True)
def _validate_names(self, name=None, names=None, deep=False):
"""
Handles the quirks of having a singular 'name' parameter for general
Index and plural 'names' parameter for MultiIndex.
"""
from copy import deepcopy
if names is not None and name is not None:
raise TypeError("Can only provide one of `names` and `name`")
elif names is None and name is None:
return deepcopy(self.names) if deep else self.names
elif names is not None:
if not is_list_like(names):
raise TypeError("Must pass list-like as `names`.")
return names
else:
if not is_list_like(name):
return [name]
return name
def __unicode__(self):
"""
Return a string representation for this object.
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
klass = self.__class__.__name__
data = self._format_data()
attrs = self._format_attrs()
space = self._format_space()
prepr = (u(",%s") %
space).join(u("%s=%s") % (k, v) for k, v in attrs)
# no data provided, just attributes
if data is None:
data = ''
res = u("%s(%s%s)") % (klass, data, prepr)
return res
def _format_space(self):
# using space here controls if the attributes
# are line separated or not (the default)
# max_seq_items = get_option('display.max_seq_items')
# if len(self) > max_seq_items:
# space = "\n%s" % (' ' * (len(klass) + 1))
return " "
@property
def _formatter_func(self):
"""
Return the formatter function
"""
return default_pprint
def _format_data(self, name=None):
"""
Return the formatted data as a unicode string
"""
# do we want to justify (only do so for non-objects)
is_justify = not (self.inferred_type in ('string', 'unicode') or
(self.inferred_type == 'categorical' and
is_object_dtype(self.categories)))
return format_object_summary(self, self._formatter_func,
is_justify=is_justify, name=name)
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
return format_object_attrs(self)
def to_series(self, index=None, name=None):
"""
Create a Series with both index and values equal to the index keys
useful with map for returning an indexer based on an index
Parameters
----------
index : Index, optional
index of resulting Series. If None, defaults to original index
name : string, optional
name of resulting Series. If None, defaults to name of original
index
Returns
-------
Series : dtype will be based on the type of the Index values.
"""
from pandas import Series
if index is None:
index = self._shallow_copy()
if name is None:
name = self.name
return Series(self._to_embed(), index=index, name=name)
def to_frame(self, index=True):
"""
Create a DataFrame with a column containing the Index.
.. versionadded:: 0.21.0
Parameters
----------
index : boolean, default True
Set the index of the returned DataFrame as the original Index.
Returns
-------
DataFrame
DataFrame containing the original Index data.
See Also
--------
Index.to_series : Convert an Index to a Series.
Series.to_frame : Convert Series to DataFrame.
Examples
--------
>>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal')
>>> idx.to_frame()
animal
animal
Ant Ant
Bear Bear
Cow Cow
By default, the original Index is reused. To enforce a new Index:
>>> idx.to_frame(index=False)
animal
0 Ant
1 Bear
2 Cow
"""
from pandas import DataFrame
result = DataFrame(self._shallow_copy(), columns=[self.name or 0])
if index:
result.index = self
return result
def _to_embed(self, keep_tz=False, dtype=None):
"""
*this is an internal non-public method*
return an array repr of this object, potentially casting to object
"""
if dtype is not None:
return self.astype(dtype)._to_embed(keep_tz=keep_tz)
return self.values.copy()
_index_shared_docs['astype'] = """
Create an Index with values cast to dtypes. The class of a new Index
is determined by dtype. When conversion is impossible, a ValueError
exception is raised.
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and internal requirements on dtype are
satisfied, the original data is used to create a new Index
or the original Index is returned.
.. versionadded:: 0.19.0
"""
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
if is_dtype_equal(self.dtype, dtype):
return self.copy() if copy else self
elif is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(self.values, name=self.name, dtype=dtype,
copy=copy)
try:
return Index(self.values.astype(dtype, copy=copy), name=self.name,
dtype=dtype)
except (TypeError, ValueError):
msg = 'Cannot cast {name} to dtype {dtype}'
raise TypeError(msg.format(name=type(self).__name__, dtype=dtype))
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self
def _assert_can_do_setop(self, other):
if not is_list_like(other):
raise TypeError('Input must be Index or array-like')
return True
def _convert_can_do_setop(self, other):
if not isinstance(other, Index):
other = Index(other, name=self.name)
result_name = self.name
else:
result_name = self.name if self.name == other.name else None
return other, result_name
def _convert_for_op(self, value):
""" Convert value to be insertable to ndarray """
return value
def _assert_can_do_op(self, value):
""" Check value is valid for scalar op """
if not is_scalar(value):
msg = "'value' must be a scalar, passed: {0}"
raise TypeError(msg.format(type(value).__name__))
@property
def nlevels(self):
return 1
def _get_names(self):
return FrozenList((self.name, ))
def _set_names(self, values, level=None):
"""
Set new names on index. Each name has to be a hashable type.
Parameters
----------
values : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
Raises
------
TypeError if each name is not hashable.
"""
if not is_list_like(values):
raise ValueError('Names must be a list-like')
if len(values) != 1:
raise ValueError('Length of new names must be 1, got %d' %
len(values))
# GH 20527
# All items in 'name' need to be hashable:
for name in values:
if not is_hashable(name):
raise TypeError('{}.name must be a hashable type'
.format(self.__class__.__name__))
self.name = values[0]
names = property(fset=_set_names, fget=_get_names)
def set_names(self, names, level=None, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
names : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
Examples
--------
>>> Index([1, 2, 3, 4]).set_names('foo')
Int64Index([1, 2, 3, 4], dtype='int64', name='foo')
>>> Index([1, 2, 3, 4]).set_names(['foo'])
Int64Index([1, 2, 3, 4], dtype='int64', name='foo')
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_names(['baz', 'quz'])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'quz'])
>>> idx.set_names('baz', level=0)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'bar'])
"""
from .multi import MultiIndex
if level is not None and not isinstance(self, MultiIndex):
raise ValueError('Level must be None for non-MultiIndex')
if level is not None and not is_list_like(level) and is_list_like(
names):
raise TypeError("Names must be a string")
if not is_list_like(names) and level is None and self.nlevels > 1:
raise TypeError("Must pass list-like as `names`.")
if not is_list_like(names):
names = [names]
if level is not None and not is_list_like(level):
level = [level]
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._set_names(names, level=level)
if not inplace:
return idx
def rename(self, name, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
name : str or list
name to set
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
"""
return self.set_names([name], inplace=inplace)
@property
def _has_complex_internals(self):
# to disable groupby tricks in MultiIndex
return False
def _summary(self, name=None):
"""
Return a summarized representation
Parameters
----------
name : str
name to use in the summary representation
Returns
-------
String with a summarized representation of the index
"""
if len(self) > 0:
head = self[0]
if (hasattr(head, 'format') and
not isinstance(head, compat.string_types)):
head = head.format()
tail = self[-1]
if (hasattr(tail, 'format') and
not isinstance(tail, compat.string_types)):
tail = tail.format()
index_summary = ', %s to %s' % (pprint_thing(head),
pprint_thing(tail))
else:
index_summary = ''
if name is None:
name = type(self).__name__
return '%s: %s entries%s' % (name, len(self), index_summary)
def summary(self, name=None):
"""
Return a summarized representation
.. deprecated:: 0.23.0
"""
warnings.warn("'summary' is deprecated and will be removed in a "
"future version.", FutureWarning, stacklevel=2)
return self._summary(name)
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.values
_na_value = np.nan
"""The expected NA value to use with this index."""
# introspection
@property
def is_monotonic(self):
""" alias for is_monotonic_increasing (deprecated) """
return self.is_monotonic_increasing
@property
def is_monotonic_increasing(self):
"""
return if the index is monotonic increasing (only equal or
increasing) values.
Examples
--------
>>> Index([1, 2, 3]).is_monotonic_increasing
True
>>> Index([1, 2, 2]).is_monotonic_increasing
True
>>> Index([1, 3, 2]).is_monotonic_increasing
False
"""
return self._engine.is_monotonic_increasing
@property
def is_monotonic_decreasing(self):
"""
return if the index is monotonic decreasing (only equal or
decreasing) values.
Examples
--------
>>> Index([3, 2, 1]).is_monotonic_decreasing
True
>>> Index([3, 2, 2]).is_monotonic_decreasing
True
>>> Index([3, 1, 2]).is_monotonic_decreasing
False
"""
return self._engine.is_monotonic_decreasing
@property
def _is_strictly_monotonic_increasing(self):
"""return if the index is strictly monotonic increasing
(only increasing) values
Examples
--------
>>> Index([1, 2, 3])._is_strictly_monotonic_increasing
True
>>> Index([1, 2, 2])._is_strictly_monotonic_increasing
False
>>> Index([1, 3, 2])._is_strictly_monotonic_increasing
False
"""
return self.is_unique and self.is_monotonic_increasing
@property
def _is_strictly_monotonic_decreasing(self):
"""return if the index is strictly monotonic decreasing
(only decreasing) values
Examples
--------
>>> Index([3, 2, 1])._is_strictly_monotonic_decreasing
True
>>> Index([3, 2, 2])._is_strictly_monotonic_decreasing
False
>>> Index([3, 1, 2])._is_strictly_monotonic_decreasing
False
"""
return self.is_unique and self.is_monotonic_decreasing
def is_lexsorted_for_tuple(self, tup):
return True
@cache_readonly
def is_unique(self):
""" return if the index has unique values """
return self._engine.is_unique
@property
def has_duplicates(self):
return not self.is_unique
def is_boolean(self):
return self.inferred_type in ['boolean']
def is_integer(self):
return self.inferred_type in ['integer']
def is_floating(self):
return self.inferred_type in ['floating', 'mixed-integer-float']
def is_numeric(self):
return self.inferred_type in ['integer', 'floating']
def is_object(self):
return is_object_dtype(self.dtype)
def is_categorical(self):
"""
Check if the Index holds categorical data.
Returns
-------
boolean
True if the Index is categorical.
See Also
--------
CategoricalIndex : Index for categorical data.
Examples
--------
>>> idx = pd.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.is_categorical()
True
>>> idx = pd.Index([1, 3, 5, 7])
>>> idx.is_categorical()
False
>>> s = pd.Series(["Peter", "Victor", "Elisabeth", "Mar"])
>>> s
0 Peter
1 Victor
2 Elisabeth
3 Mar
dtype: object
>>> s.index.is_categorical()
False
"""
return self.inferred_type in ['categorical']
def is_interval(self):
return self.inferred_type in ['interval']
def is_mixed(self):
return self.inferred_type in ['mixed']
def holds_integer(self):
return self.inferred_type in ['integer', 'mixed-integer']
_index_shared_docs['_convert_scalar_indexer'] = """
Convert a scalar indexer.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
if kind == 'iloc':
return self._validate_indexer('positional', key, kind)
        if len(self) and not isinstance(self, ABCMultiIndex):
# we can raise here if we are definitive that this
            # is positional indexing (e.g. .ix with a float)
# or label indexing if we are using a type able
# to be represented in the index
if kind in ['getitem', 'ix'] and is_float(key):
if not self.is_floating():
return self._invalid_indexer('label', key)
elif kind in ['loc'] and is_float(key):
# we want to raise KeyError on string/mixed here
# technically we *could* raise a TypeError
# on anything but mixed though
if self.inferred_type not in ['floating',
'mixed-integer-float',
'string',
'unicode',
'mixed']:
return self._invalid_indexer('label', key)
elif kind in ['loc'] and is_integer(key):
if not self.holds_integer():
return self._invalid_indexer('label', key)
return key
_index_shared_docs['_convert_slice_indexer'] = """
Convert a slice indexer.
By definition, these are labels unless 'iloc' is passed in.
Floats are not allowed as the start, step, or stop of the slice.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
@Appender(_index_shared_docs['_convert_slice_indexer'])
def _convert_slice_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# if we are not a slice, then we are done
if not isinstance(key, slice):
return key
# validate iloc
if kind == 'iloc':
return slice(self._validate_indexer('slice', key.start, kind),
self._validate_indexer('slice', key.stop, kind),
self._validate_indexer('slice', key.step, kind))
# potentially cast the bounds to integers
start, stop, step = key.start, key.stop, key.step
# figure out if this is a positional indexer
def is_int(v):
return v is None or is_integer(v)
is_null_slicer = start is None and stop is None
is_index_slice = is_int(start) and is_int(stop)
is_positional = is_index_slice and not self.is_integer()
if kind == 'getitem':
"""
called from the getitem slicers, validate that we are in fact
integers
"""
if self.is_integer() or is_index_slice:
return slice(self._validate_indexer('slice', key.start, kind),
self._validate_indexer('slice', key.stop, kind),
self._validate_indexer('slice', key.step, kind))
# convert the slice to an indexer here
# if we are mixed and have integers
try:
if is_positional and self.is_mixed():
# TODO: i, j are not used anywhere
if start is not None:
i = self.get_loc(start) # noqa
if stop is not None:
j = self.get_loc(stop) # noqa
is_positional = False
except KeyError:
if self.inferred_type == 'mixed-integer-float':
raise
if is_null_slicer:
indexer = key
elif is_positional:
indexer = key
else:
try:
indexer = self.slice_indexer(start, stop, step, kind=kind)
except Exception:
if is_index_slice:
if self.is_integer():
raise
else:
indexer = key
else:
raise
return indexer
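    # Illustrative (hedged) consequence of the logic above, seen through a
    # Series with a string index: given s = pd.Series(range(5),
    # index=list('abcde')), s[1:3] resolves positionally (rows 1-2) while
    # s['b':'c'] resolves through labels and includes both endpoints.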
def _convert_listlike_indexer(self, keyarr, kind=None):
"""
Parameters
----------
keyarr : list-like
Indexer to convert.
Returns
-------
tuple (indexer, keyarr)
indexer is an ndarray or None if cannot convert
keyarr are tuple-safe keys
"""
if isinstance(keyarr, Index):
keyarr = self._convert_index_indexer(keyarr)
else:
keyarr = self._convert_arr_indexer(keyarr)
indexer = self._convert_list_indexer(keyarr, kind=kind)
return indexer, keyarr
_index_shared_docs['_convert_arr_indexer'] = """
Convert an array-like indexer to the appropriate dtype.
Parameters
----------
keyarr : array-like
Indexer to convert.
Returns
-------
converted_keyarr : array-like
"""
@Appender(_index_shared_docs['_convert_arr_indexer'])
def _convert_arr_indexer(self, keyarr):
keyarr = com._asarray_tuplesafe(keyarr)
return keyarr
_index_shared_docs['_convert_index_indexer'] = """
Convert an Index indexer to the appropriate dtype.
Parameters
----------
keyarr : Index (or sub-class)
Indexer to convert.
Returns
-------
converted_keyarr : Index (or sub-class)
"""
@Appender(_index_shared_docs['_convert_index_indexer'])
def _convert_index_indexer(self, keyarr):
return keyarr
_index_shared_docs['_convert_list_indexer'] = """
Convert a list-like indexer to the appropriate dtype.
Parameters
----------
keyarr : Index (or sub-class)
Indexer to convert.
kind : iloc, ix, loc, optional
Returns
-------
positional indexer or None
"""
@Appender(_index_shared_docs['_convert_list_indexer'])
def _convert_list_indexer(self, keyarr, kind=None):
if (kind in [None, 'iloc', 'ix'] and
is_integer_dtype(keyarr) and not self.is_floating() and
not isinstance(keyarr, ABCPeriodIndex)):
if self.inferred_type == 'mixed-integer':
indexer = self.get_indexer(keyarr)
if (indexer >= 0).all():
return indexer
# missing values are flagged as -1 by get_indexer and negative
# indices are already converted to positive indices in the
# above if-statement, so the negative flags are changed to
# values outside the range of indices so as to trigger an
# IndexError in maybe_convert_indices
indexer[indexer < 0] = len(self)
from pandas.core.indexing import maybe_convert_indices
return maybe_convert_indices(indexer, len(self))
elif not self.inferred_type == 'integer':
keyarr = np.where(keyarr < 0, len(self) + keyarr, keyarr)
return keyarr
return None
def _invalid_indexer(self, form, key):
""" consistent invalid indexer message """
raise TypeError("cannot do {form} indexing on {klass} with these "
"indexers [{key}] of {kind}".format(
form=form, klass=type(self), key=key,
kind=type(key)))
def get_duplicates(self):
"""
Extract duplicated index elements.
Returns a sorted list of index elements which appear more than once in
the index.
.. deprecated:: 0.23.0
Use idx[idx.duplicated()].unique() instead
Returns
-------
array-like
List of duplicated indexes.
See Also
--------
Index.duplicated : Return boolean array denoting duplicates.
Index.drop_duplicates : Return Index with duplicates removed.
Examples
--------
Works on different Index of types.
>>> pd.Index([1, 2, 2, 3, 3, 3, 4]).get_duplicates()
[2, 3]
>>> pd.Index([1., 2., 2., 3., 3., 3., 4.]).get_duplicates()
[2.0, 3.0]
>>> pd.Index(['a', 'b', 'b', 'c', 'c', 'c', 'd']).get_duplicates()
['b', 'c']
Note that for a DatetimeIndex, it does not return a list but a new
DatetimeIndex:
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03',
... '2018-01-03', '2018-01-04', '2018-01-04'],
... format='%Y-%m-%d')
>>> pd.Index(dates).get_duplicates()
DatetimeIndex(['2018-01-03', '2018-01-04'],
dtype='datetime64[ns]', freq=None)
Sorts duplicated elements even when indexes are unordered.
>>> pd.Index([1, 2, 3, 2, 3, 4, 3]).get_duplicates()
[2, 3]
Return empty array-like structure when all elements are unique.
>>> pd.Index([1, 2, 3, 4]).get_duplicates()
[]
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03'],
... format='%Y-%m-%d')
>>> pd.Index(dates).get_duplicates()
DatetimeIndex([], dtype='datetime64[ns]', freq=None)
"""
warnings.warn("'get_duplicates' is deprecated and will be removed in "
"a future release. You can use "
"idx[idx.duplicated()].unique() instead",
FutureWarning, stacklevel=2)
return self[self.duplicated()].unique()
def _cleanup(self):
self._engine.clear_mapping()
@cache_readonly
def _constructor(self):
return type(self)
@cache_readonly
def _engine(self):
# property, for now, slow to look up
return self._engine_type(lambda: self._ndarray_values, len(self))
def _validate_index_level(self, level):
"""
Validate index level.
For a single-level Index, getting the level number is a no-op, but some
verification must be done, as for a MultiIndex.
"""
if isinstance(level, int):
if level < 0 and level != -1:
raise IndexError("Too many levels: Index has only 1 level,"
" %d is not a valid level number" % (level, ))
elif level > 0:
raise IndexError("Too many levels:"
" Index has only 1 level, not %d" %
(level + 1))
elif level != self.name:
raise KeyError('Level %s must be same as name (%s)' %
(level, self.name))
def _get_level_number(self, level):
self._validate_index_level(level)
return 0
@cache_readonly
def inferred_type(self):
""" return a string of the type inferred from the values """
return lib.infer_dtype(self)
def _is_memory_usage_qualified(self):
""" return a boolean if we need a qualified .info display """
return self.is_object()
def is_type_compatible(self, kind):
return kind == self.inferred_type
@cache_readonly
def is_all_dates(self):
if self._data is None:
return False
return is_datetime_array(_ensure_object(self.values))
def __reduce__(self):
d = dict(data=self._data)
d.update(self._get_attributes_dict())
return _new_Index, (self.__class__, d), None
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
self._data = state.pop('data')
for k, v in compat.iteritems(state):
setattr(self, k, v)
elif isinstance(state, tuple):
if len(state) == 2:
nd_state, own_state = state
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
self.name = own_state[0]
else: # pragma: no cover
data = np.empty(state)
np.ndarray.__setstate__(data, state)
self._data = data
self._reset_identity()
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def __nonzero__(self):
raise ValueError("The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
.format(self.__class__.__name__))
__bool__ = __nonzero__
_index_shared_docs['__contains__'] = """
return a boolean if this key is IN the index
Parameters
----------
key : object
Returns
-------
boolean
"""
@Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs)
def __contains__(self, key):
hash(key)
try:
return key in self._engine
except (OverflowError, TypeError, ValueError):
return False
_index_shared_docs['contains'] = """
return a boolean if this key is IN the index
Parameters
----------
key : object
Returns
-------
boolean
"""
@Appender(_index_shared_docs['contains'] % _index_doc_kwargs)
def contains(self, key):
hash(key)
try:
return key in self._engine
except (TypeError, ValueError):
return False
def __hash__(self):
raise TypeError("unhashable type: %r" % type(self).__name__)
def __setitem__(self, key, value):
raise TypeError("Index does not support mutable operations")
def __getitem__(self, key):
"""
Override numpy.ndarray's __getitem__ method to work as desired.
This function adds lists and Series as valid boolean indexers
(ndarrays only supports ndarray with dtype=bool).
If resulting ndim != 1, plain ndarray is returned instead of
corresponding `Index` subclass.
"""
# There's no custom logic to be implemented in __getslice__, so it's
# not overloaded intentionally.
getitem = self._data.__getitem__
promote = self._shallow_copy
if is_scalar(key):
return getitem(key)
if isinstance(key, slice):
# This case is separated from the conditional above to avoid
# pessimization of basic indexing.
return promote(getitem(key))
if com.is_bool_indexer(key):
key = np.asarray(key)
key = com._values_from_object(key)
result = getitem(key)
if not is_scalar(result):
return promote(result)
else:
return result
def _can_hold_identifiers_and_holds_name(self, name):
"""
Faster check for ``name in self`` when we know `name` is a Python
identifier (e.g. in NDFrame.__getattr__, which hits this to support
. key lookup). For indexes that can't hold identifiers (everything
but object & categorical) we just return False.
https://github.com/pandas-dev/pandas/issues/19764
"""
if self.is_object() or self.is_categorical():
return name in self
return False
def append(self, other):
"""
Append a collection of Index objects together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
to_concat = [self]
if isinstance(other, (list, tuple)):
to_concat = to_concat + list(other)
else:
to_concat.append(other)
for obj in to_concat:
if not isinstance(obj, Index):
raise TypeError('all inputs must be Index')
names = {obj.name for obj in to_concat}
name = None if len(names) > 1 else self.name
return self._concat(to_concat, name)
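# Usage sketch (illustrative): appending resets the name when the inputs
# disagree on it.
#   >>> pd.Index([1, 2], name='x').append(pd.Index([3, 4], name='x'))
#   Int64Index([1, 2, 3, 4], dtype='int64', name='x')
#   >>> pd.Index([1, 2], name='x').append(pd.Index([3, 4], name='y'))
#   Int64Index([1, 2, 3, 4], dtype='int64')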
def _concat(self, to_concat, name):
typs = _concat.get_dtype_kinds(to_concat)
if len(typs) == 1:
return self._concat_same_dtype(to_concat, name=name)
return _concat._concat_index_asobject(to_concat, name=name)
def _concat_same_dtype(self, to_concat, name):
"""
Concatenate to_concat which has the same class
"""
# must be overridden in specific classes
return _concat._concat_index_asobject(to_concat, name)
_index_shared_docs['take'] = """
return a new %(klass)s of the values selected by the indices
For internal compatibility with numpy arrays.
Parameters
----------
indices : list
Indices to be taken
axis : int, optional
The axis over which to select values, always 0.
allow_fill : bool, default True
fill_value : scalar, default None
If allow_fill=True and fill_value is not None, indices specified by
-1 are regarded as NA. If the Index doesn't hold NA, a ValueError
is raised.
See also
--------
numpy.ndarray.take
"""
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
if kwargs:
nv.validate_take(tuple(), kwargs)
indices = _ensure_platform_int(indices)
if self._can_hold_na:
taken = self._assert_take_fillable(self.values, indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=self._na_value)
else:
if allow_fill and fill_value is not None:
msg = 'Unable to fill values because {0} cannot contain NA'
raise ValueError(msg.format(self.__class__.__name__))
taken = self.values.take(indices)
return self._shallow_copy(taken)
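# Usage sketch (illustrative): with allow_fill=True and a fill_value,
# -1 entries in `indices` become missing values.
#   >>> pd.Index([1., 2., 3.]).take([0, -1], allow_fill=True,
#   ...                             fill_value=np.nan)
#   Float64Index([1.0, nan], dtype='float64')
#   >>> pd.Index([1, 2, 3]).take([2, 0])
#   Int64Index([3, 1], dtype='int64')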
def _assert_take_fillable(self, values, indices, allow_fill=True,
fill_value=None, na_value=np.nan):
""" Internal method to handle NA filling of take """
indices = _ensure_platform_int(indices)
# only fill if we are passing a non-None fill_value
if allow_fill and fill_value is not None:
if (indices < -1).any():
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
raise ValueError(msg)
taken = algos.take(values,
indices,
allow_fill=allow_fill,
fill_value=na_value)
else:
taken = values.take(indices)
return taken
@cache_readonly
def _isnan(self):
""" return if each value is nan"""
if self._can_hold_na:
return isna(self)
else:
# shouldn't reach to this condition by checking hasnans beforehand
values = np.empty(len(self), dtype=np.bool_)
values.fill(False)
return values
@cache_readonly
def _nan_idxs(self):
if self._can_hold_na:
w, = self._isnan.nonzero()
return w
else:
return np.array([], dtype=np.int64)
@cache_readonly
def hasnans(self):
""" return if I have any nans; enables various perf speedups """
if self._can_hold_na:
return self._isnan.any()
else:
return False
def isna(self):
"""
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
NA values, such as ``None``, :attr:`numpy.NaN` or :attr:`pd.NaT`, get
mapped to ``True`` values.
Everything else gets mapped to ``False`` values. Values such as
empty strings `''` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
.. versionadded:: 0.20.0
Returns
-------
numpy.ndarray
A boolean array of whether my values are NA
See Also
--------
pandas.Index.notna : boolean inverse of isna.
pandas.Index.dropna : omit entries with missing values.
pandas.isna : top-level isna.
Series.isna : detect missing values in Series object.
Examples
--------
Show which entries in a pandas.Index are NA. The result is an
array.
>>> idx = pd.Index([5.2, 6.0, np.NaN])
>>> idx
Float64Index([5.2, 6.0, nan], dtype='float64')
>>> idx.isna()
array([False, False, True], dtype=bool)
Empty strings are not considered NA values. None is considered an NA
value.
>>> idx = pd.Index(['black', '', 'red', None])
>>> idx
Index(['black', '', 'red', None], dtype='object')
>>> idx.isna()
array([False, False, False, True], dtype=bool)
For datetimes, `NaT` (Not a Time) is considered as an NA value.
>>> idx = pd.DatetimeIndex([pd.Timestamp('1940-04-25'),
... pd.Timestamp(''), None, pd.NaT])
>>> idx
DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'],
dtype='datetime64[ns]', freq=None)
>>> idx.isna()
array([False, True, True, True], dtype=bool)
"""
return self._isnan
isnull = isna
def notna(self):
"""
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to ``True``. Values such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
NA values, such as None or :attr:`numpy.NaN`, get mapped to ``False``
values.
.. versionadded:: 0.20.0
Returns
-------
numpy.ndarray
Boolean array to indicate which entries are not NA.
See also
--------
Index.notnull : alias of notna
Index.isna: inverse of notna
pandas.notna : top-level notna
Examples
--------
Show which entries in an Index are not NA. The result is an
array.
>>> idx = pd.Index([5.2, 6.0, np.NaN])
>>> idx
Float64Index([5.2, 6.0, nan], dtype='float64')
>>> idx.notna()
array([ True, True, False])
Empty strings are not considered NA values. None is considered an NA
value.
>>> idx = pd.Index(['black', '', 'red', None])
>>> idx
Index(['black', '', 'red', None], dtype='object')
>>> idx.notna()
array([ True, True, True, False])
"""
return ~self.isna()
notnull = notna
def putmask(self, mask, value):
"""
return a new Index of the values set with the mask
See also
--------
numpy.ndarray.putmask
"""
values = self.values.copy()
try:
np.putmask(values, mask, self._convert_for_op(value))
return self._shallow_copy(values)
except (ValueError, TypeError) as err:
if is_object_dtype(self):
raise err
# coerces to object
return self.astype(object).putmask(mask, value)
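# Usage sketch (illustrative): putmask returns a new Index, falling back
# to object dtype if the replacement value does not fit the current one.
#   >>> pd.Index([1, 2, 3]).putmask([True, False, True], 0)
#   Int64Index([0, 2, 0], dtype='int64')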
def format(self, name=False, formatter=None, **kwargs):
"""
Render a string representation of the Index
"""
header = []
if name:
header.append(pprint_thing(self.name,
escape_chars=('\t', '\r', '\n')) if
self.name is not None else '')
if formatter is not None:
return header + list(self.map(formatter))
return self._format_with_header(header, **kwargs)
def _format_with_header(self, header, na_rep='NaN', **kwargs):
values = self.values
from pandas.io.formats.format import format_array
if is_categorical_dtype(values.dtype):
values = np.array(values)
elif is_object_dtype(values.dtype):
values = lib.maybe_convert_objects(values, safe=1)
if is_object_dtype(values.dtype):
result = [pprint_thing(x, escape_chars=('\t', '\r', '\n'))
for x in values]
# could have nans
mask = isna(values)
if mask.any():
result = np.array(result)
result[mask] = na_rep
result = result.tolist()
else:
result = _trim_front(format_array(values, None, justify='left'))
return header + result
def to_native_types(self, slicer=None, **kwargs):
"""
Format specified values of `self` and return them.
Parameters
----------
slicer : int, array-like
An indexer into `self` that specifies which values
are used in the formatting process.
kwargs : dict
Options for specifying how the values should be formatted.
These options include the following:
1) na_rep : str
The value that serves as a placeholder for NULL values
2) quoting : bool or None
Whether or not there are quoted values in `self`
3) date_format : str
The format used to represent date-like values
"""
values = self
if slicer is not None:
values = values[slicer]
return values._format_native_types(**kwargs)
def _format_native_types(self, na_rep='', quoting=None, **kwargs):
""" actually format my specific types """
mask = isna(self)
if not self.is_object() and not quoting:
values = np.asarray(self).astype(str)
else:
values = np.array(self, dtype=object, copy=True)
values[mask] = na_rep
return values
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if is_object_dtype(self) and not is_object_dtype(other):
# if other is not object, use other's logic for coercion
return other.equals(self)
try:
return array_equivalent(com._values_from_object(self),
com._values_from_object(other))
except Exception:
return False
def identical(self, other):
"""Similar to equals, but check that other comparable attributes are
also equal
"""
return (self.equals(other) and
all((getattr(self, c, None) == getattr(other, c, None)
for c in self._comparables)) and
type(self) == type(other))
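# Usage sketch (illustrative): equals only compares the elements, while
# identical also compares attributes such as the name and the exact type.
#   >>> a, b = pd.Index([1, 2], name='x'), pd.Index([1, 2], name='y')
#   >>> a.equals(b), a.identical(b)
#   (True, False)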
def asof(self, label):
"""
For a sorted index, return the most recent label up to and including
the passed label. Return NaN if not found.
See also
--------
get_loc : asof is a thin wrapper around get_loc with method='pad'
"""
try:
loc = self.get_loc(label, method='pad')
except KeyError:
return self._na_value
else:
if isinstance(loc, slice):
loc = loc.indices(len(self))[-1]
return self[loc]
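# Usage sketch (illustrative): asof pads backwards on a sorted index.
#   >>> idx = pd.Index([10, 20, 30])
#   >>> idx.asof(25)
#   20
#   >>> idx.asof(5)     # before the first label -> the index's NA value
#   nan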
def asof_locs(self, where, mask):
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
locs = self.values[mask].searchsorted(where.values, side='right')
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where < self.values[first])] = -1
return result
def sort_values(self, return_indexer=False, ascending=True):
"""
Return a sorted copy of the index.
Return a sorted copy of the index, and optionally return the indices
that sorted the index itself.
Parameters
----------
return_indexer : bool, default False
Should the indices that would sort the index be returned.
ascending : bool, default True
Should the index values be sorted in an ascending order.
Returns
-------
sorted_index : pandas.Index
Sorted copy of the index.
indexer : numpy.ndarray, optional
The indices that the index itself was sorted by.
See Also
--------
pandas.Series.sort_values : Sort values of a Series.
pandas.DataFrame.sort_values : Sort values in a DataFrame.
Examples
--------
>>> idx = pd.Index([10, 100, 1, 1000])
>>> idx
Int64Index([10, 100, 1, 1000], dtype='int64')
Sort values in ascending order (default behavior).
>>> idx.sort_values()
Int64Index([1, 10, 100, 1000], dtype='int64')
Sort values in descending order, and also get the indices `idx` was
sorted by.
>>> idx.sort_values(ascending=False, return_indexer=True)
(Int64Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2]))
"""
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
if return_indexer:
return sorted_index, _as
else:
return sorted_index
def sort(self, *args, **kwargs):
raise TypeError("cannot sort an Index object in-place, use "
"sort_values instead")
def sortlevel(self, level=None, ascending=True, sort_remaining=None):
"""
For internal compatibility with the Index API.
Sort the Index. This is for compat with MultiIndex
Parameters
----------
ascending : boolean, default True
False to sort in descending order
level, sort_remaining are compat parameters
Returns
-------
sorted_index : Index
"""
return self.sort_values(return_indexer=True, ascending=ascending)
def shift(self, periods=1, freq=None):
"""
Shift index by desired number of time frequency increments.
This method is for shifting the values of datetime-like indexes
by a specified time increment a given number of times.
Parameters
----------
periods : int, default 1
Number of periods (or increments) to shift by,
can be positive or negative.
freq : pandas.DateOffset, pandas.Timedelta or string, optional
Frequency increment to shift by.
If None, the index is shifted by its own `freq` attribute.
Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.
Returns
-------
pandas.Index
shifted index
See Also
--------
Series.shift : Shift values of Series.
Examples
--------
Put the first 5 month starts of 2011 into an index.
>>> month_starts = pd.date_range('1/1/2011', periods=5, freq='MS')
>>> month_starts
DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01', '2011-04-01',
'2011-05-01'],
dtype='datetime64[ns]', freq='MS')
Shift the index by 10 days.
>>> month_starts.shift(10, freq='D')
DatetimeIndex(['2011-01-11', '2011-02-11', '2011-03-11', '2011-04-11',
'2011-05-11'],
dtype='datetime64[ns]', freq=None)
The default value of `freq` is the `freq` attribute of the index,
which is 'MS' (month start) in this example.
>>> month_starts.shift(10)
DatetimeIndex(['2011-11-01', '2011-12-01', '2012-01-01', '2012-02-01',
'2012-03-01'],
dtype='datetime64[ns]', freq='MS')
Notes
-----
This method is only implemented for datetime-like index classes,
i.e., DatetimeIndex, PeriodIndex and TimedeltaIndex.
"""
raise NotImplementedError("Not supported for type %s" %
type(self).__name__)
def argsort(self, *args, **kwargs):
"""
Return the integer indices that would sort the index.
Parameters
----------
*args
Passed to `numpy.ndarray.argsort`.
**kwargs
Passed to `numpy.ndarray.argsort`.
Returns
-------
numpy.ndarray
Integer indices that would sort the index if used as
an indexer.
See also
--------
numpy.argsort : Similar method for NumPy arrays.
Index.sort_values : Return sorted copy of Index.
Examples
--------
>>> idx = pd.Index(['b', 'a', 'd', 'c'])
>>> idx
Index(['b', 'a', 'd', 'c'], dtype='object')
>>> order = idx.argsort()
>>> order
array([1, 0, 3, 2])
>>> idx[order]
Index(['a', 'b', 'c', 'd'], dtype='object')
"""
result = self.asi8
if result is None:
result = np.array(self)
return result.argsort(*args, **kwargs)
def __add__(self, other):
return Index(np.array(self) + other)
def __radd__(self, other):
return Index(other + np.array(self))
def __iadd__(self, other):
# alias for __add__
return self + other
def __sub__(self, other):
raise TypeError("cannot perform __sub__ with this index type: "
"{typ}".format(typ=type(self).__name__))
def __and__(self, other):
return self.intersection(other)
def __or__(self, other):
return self.union(other)
def __xor__(self, other):
return self.symmetric_difference(other)
def _get_consensus_name(self, other):
"""
Given two indexes, produce a consensus name: take the one that is
not None, or None if the names differ.
Return a new object if we are resetting the name.
"""
if self.name != other.name:
if self.name is None or other.name is None:
name = self.name or other.name
else:
name = None
if self.name != name:
return self._shallow_copy(name=name)
return self
def union(self, other):
"""
Form the union of two Index objects, sorting if possible.
Parameters
----------
other : Index or array-like
Returns
-------
union : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.union(idx2)
Int64Index([1, 2, 3, 4, 5, 6], dtype='int64')
"""
self._assert_can_do_setop(other)
other = _ensure_index(other)
if len(other) == 0 or self.equals(other):
return self._get_consensus_name(other)
if len(self) == 0:
return other._get_consensus_name(self)
# TODO: is_dtype_union_equal is a hack around
# 1. buggy set ops with duplicates (GH #13432)
# 2. CategoricalIndex lacking setops (GH #10186)
# Once those are fixed, this workaround can be removed
if not is_dtype_union_equal(self.dtype, other.dtype):
this = self.astype('O')
other = other.astype('O')
return this.union(other)
# TODO(EA): setops-refactor, clean all this up
if is_period_dtype(self) or is_datetime64tz_dtype(self):
lvals = self._ndarray_values
else:
lvals = self._values
if is_period_dtype(other) or is_datetime64tz_dtype(other):
rvals = other._ndarray_values
else:
rvals = other._values
if self.is_monotonic and other.is_monotonic:
try:
result = self._outer_indexer(lvals, rvals)[0]
except TypeError:
# incomparable objects
result = list(lvals)
# worth making this faster? a very unusual case
value_set = set(lvals)
result.extend([x for x in rvals if x not in value_set])
else:
indexer = self.get_indexer(other)
indexer, = (indexer == -1).nonzero()
if len(indexer) > 0:
other_diff = algos.take_nd(rvals, indexer,
allow_fill=False)
result = _concat._concat_compat((lvals, other_diff))
try:
lvals[0] < other_diff[0]
except TypeError as e:
warnings.warn("%s, sort order is undefined for "
"incomparable objects" % e, RuntimeWarning,
stacklevel=3)
else:
types = frozenset((self.inferred_type,
other.inferred_type))
if not types & _unsortable_types:
result.sort()
else:
result = lvals
try:
result = np.sort(result)
except TypeError as e:
warnings.warn("%s, sort order is undefined for "
"incomparable objects" % e, RuntimeWarning,
stacklevel=3)
# for subclasses
return self._wrap_union_result(other, result)
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
return self.__class__(result, name=name)
def intersection(self, other):
"""
Form the intersection of two Index objects.
This returns a new Index with elements common to the index and `other`,
preserving the order of the calling index.
Parameters
----------
other : Index or array-like
Returns
-------
intersection : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.intersection(idx2)
Int64Index([3, 4], dtype='int64')
"""
self._assert_can_do_setop(other)
other = _ensure_index(other)
if self.equals(other):
return self._get_consensus_name(other)
if not is_dtype_equal(self.dtype, other.dtype):
this = self.astype('O')
other = other.astype('O')
return this.intersection(other)
# TODO(EA): setops-refactor, clean all this up
if is_period_dtype(self):
lvals = self._ndarray_values
else:
lvals = self._values
if is_period_dtype(other):
rvals = other._ndarray_values
else:
rvals = other._values
if self.is_monotonic and other.is_monotonic:
try:
result = self._inner_indexer(lvals, rvals)[0]
return self._wrap_union_result(other, result)
except TypeError:
pass
try:
indexer = Index(rvals).get_indexer(lvals)
indexer = indexer.take((indexer != -1).nonzero()[0])
except Exception:
# duplicates
indexer = algos.unique1d(
Index(rvals).get_indexer_non_unique(lvals)[0])
indexer = indexer[indexer != -1]
taken = other.take(indexer)
if self.name != other.name:
taken.name = None
return taken
def difference(self, other):
"""
Return a new Index with elements from the index that are not in
`other`.
This is the set difference of two Index objects.
It's sorted if sorting is possible.
Parameters
----------
other : Index or array-like
Returns
-------
difference : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.difference(idx2)
Int64Index([1, 2], dtype='int64')
"""
self._assert_can_do_setop(other)
if self.equals(other):
return self._shallow_copy([])
other, result_name = self._convert_can_do_setop(other)
this = self._get_unique_index()
indexer = this.get_indexer(other)
indexer = indexer.take((indexer != -1).nonzero()[0])
label_diff = np.setdiff1d(np.arange(this.size), indexer,
assume_unique=True)
the_diff = this.values.take(label_diff)
try:
the_diff = sorting.safe_sort(the_diff)
except TypeError:
pass
return this._shallow_copy(the_diff, name=result_name, freq=None)
def symmetric_difference(self, other, result_name=None):
"""
Compute the symmetric difference of two Index objects.
It's sorted if sorting is possible.
Parameters
----------
other : Index or array-like
result_name : str
Returns
-------
symmetric_difference : Index
Notes
-----
``symmetric_difference`` contains elements that appear in either
``idx1`` or ``idx2`` but not both. Equivalent to the Index created by
``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates
dropped.
Examples
--------
>>> idx1 = Index([1, 2, 3, 4])
>>> idx2 = Index([2, 3, 4, 5])
>>> idx1.symmetric_difference(idx2)
Int64Index([1, 5], dtype='int64')
You can also use the ``^`` operator:
>>> idx1 ^ idx2
Int64Index([1, 5], dtype='int64')
"""
self._assert_can_do_setop(other)
other, result_name_update = self._convert_can_do_setop(other)
if result_name is None:
result_name = result_name_update
this = self._get_unique_index()
other = other._get_unique_index()
indexer = this.get_indexer(other)
# {this} minus {other}
common_indexer = indexer.take((indexer != -1).nonzero()[0])
left_indexer = np.setdiff1d(np.arange(this.size), common_indexer,
assume_unique=True)
left_diff = this.values.take(left_indexer)
# {other} minus {this}
right_indexer = (indexer == -1).nonzero()[0]
right_diff = other.values.take(right_indexer)
the_diff = _concat._concat_compat([left_diff, right_diff])
try:
the_diff = sorting.safe_sort(the_diff)
except TypeError:
pass
attribs = self._get_attributes_dict()
attribs['name'] = result_name
if 'freq' in attribs:
attribs['freq'] = None
return self._shallow_copy_with_infer(the_diff, **attribs)
def _get_unique_index(self, dropna=False):
"""
Returns an index containing unique values.
Parameters
----------
dropna : bool
If True, NaN values are dropped.
Returns
-------
uniques : index
"""
if self.is_unique and not dropna:
return self
values = self.values
if not self.is_unique:
values = self.unique()
if dropna:
try:
if self.hasnans:
values = values[~isna(values)]
except NotImplementedError:
pass
return self._shallow_copy(values)
_index_shared_docs['get_loc'] = """
Get integer location, slice or boolean mask for requested label.
Parameters
----------
key : label
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
tolerance : optional
Maximum distance from index value for inexact matches. The value of
the index at the matching location must satisfy the equation
``abs(index[loc] - key) <= tolerance``.
Tolerance may be a scalar
value, which applies the same tolerance to all values, or
list-like, which applies variable tolerance per element. List-like
includes list, tuple, array, Series, and must be the same size as
the index and its dtype must exactly match the index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
loc : int if unique index, slice if monotonic index, else mask
Examples
--------
>>> unique_index = pd.Index(list('abc'))
>>> unique_index.get_loc('b')
1
>>> monotonic_index = pd.Index(list('abbc'))
>>> monotonic_index.get_loc('b')
slice(1, 3, None)
>>> non_monotonic_index = pd.Index(list('abcb'))
>>> non_monotonic_index.get_loc('b')
array([False, True, False, True], dtype=bool)
"""
@Appender(_index_shared_docs['get_loc'])
def get_loc(self, key, method=None, tolerance=None):
if method is None:
if tolerance is not None:
raise ValueError('tolerance argument only valid if using pad, '
'backfill or nearest lookups')
try:
return self._engine.get_loc(key)
except KeyError:
return self._engine.get_loc(self._maybe_cast_indexer(key))
indexer = self.get_indexer([key], method=method, tolerance=tolerance)
if indexer.ndim > 1 or indexer.size > 1:
raise TypeError('get_loc requires scalar valued input')
loc = indexer.item()
if loc == -1:
raise KeyError(key)
return loc
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
# if we have something that is Index-like, then
# use this, e.g. DatetimeIndex
s = getattr(series, '_values', None)
if isinstance(s, (ExtensionArray, Index)) and is_scalar(key):
# GH 20825
# Unify Index and ExtensionArray treatment
# First try to convert the key to a location
# If that fails, see if key is an integer, and
# try that
try:
iloc = self.get_loc(key)
return s[iloc]
except KeyError:
if is_integer(key):
return s[key]
s = com._values_from_object(series)
k = com._values_from_object(key)
k = self._convert_scalar_indexer(k, kind='getitem')
try:
return self._engine.get_value(s, k,
tz=getattr(series.dtype, 'tz', None))
except KeyError as e1:
if len(self) > 0 and self.inferred_type in ['integer', 'boolean']:
raise
try:
return libindex.get_value_box(s, key)
except IndexError:
raise
except TypeError:
# generator/iterator-like
if is_iterator(key):
raise InvalidIndexError(key)
else:
raise e1
except Exception: # pragma: no cover
raise e1
except TypeError:
# python 3
if is_scalar(key): # pragma: no cover
raise IndexError(key)
raise InvalidIndexError(key)
def set_value(self, arr, key, value):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
self._engine.set_value(com._values_from_object(arr),
com._values_from_object(key), value)
def _get_level_values(self, level):
"""
Return an Index of values for requested level, equal to the length
of the index.
Parameters
----------
level : int or str
``level`` is either the integer position of the level in the
MultiIndex, or the name of the level.
Returns
-------
values : Index
``self``, as there is only one level in the Index.
See also
--------
pandas.MultiIndex.get_level_values : get values for a level of a
MultiIndex
"""
self._validate_index_level(level)
return self
get_level_values = _get_level_values
def droplevel(self, level=0):
"""
Return index with requested level(s) removed. If resulting index has
only 1 level left, the result will be of Index type, not MultiIndex.
.. versionadded:: 0.23.1 (support for non-MultiIndex)
Parameters
----------
level : int, str, or list-like, default 0
If a string is given, must be the name of a level
If list-like, elements must be names or indexes of levels.
Returns
-------
index : Index or MultiIndex
"""
if not isinstance(level, (tuple, list)):
level = [level]
levnums = sorted(self._get_level_number(lev) for lev in level)[::-1]
if len(level) == 0:
return self
if len(level) >= self.nlevels:
raise ValueError("Cannot remove {} levels from an index with {} "
"levels: at least one level must be "
"left.".format(len(level), self.nlevels))
# The two checks above guarantee that here self is a MultiIndex
new_levels = list(self.levels)
new_labels = list(self.labels)
new_names = list(self.names)
for i in levnums:
new_levels.pop(i)
new_labels.pop(i)
new_names.pop(i)
if len(new_levels) == 1:
# set nan if needed
mask = new_labels[0] == -1
result = new_levels[0].take(new_labels[0])
if mask.any():
result = result.putmask(mask, np.nan)
result.name = new_names[0]
return result
else:
from .multi import MultiIndex
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
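# Usage sketch (illustrative): dropping down to one level returns a plain
# Index, not a MultiIndex.
#   >>> mi = pd.MultiIndex.from_arrays([[1, 2], ['a', 'b']],
#   ...                                names=['n', 'c'])
#   >>> mi.droplevel('n')
#   Index(['a', 'b'], dtype='object', name='c')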
_index_shared_docs['get_indexer'] = """
Compute indexer and mask for new index given the current index. The
indexer should then be used as an input to ndarray.take to align the
current data to the new index.
Parameters
----------
target : %(target_klass)s
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
limit : int, optional
Maximum number of consecutive labels in ``target`` to match for
inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
indexer : ndarray of int
Integers from 0 to n - 1 indicating that the index at these
positions matches the corresponding target values. Missing values
in the target are marked by -1.
Examples
--------
>>> indexer = index.get_indexer(new_index)
>>> new_values = cur_values.take(indexer)
"""
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
method = missing.clean_reindex_fill_method(method)
target = _ensure_index(target)
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance, target)
# Treat boolean labels passed to a numeric index as not found. Without
# this fix False and True would be treated as 0 and 1 respectively.
# (GH #16877)
if target.is_boolean() and self.is_numeric():
return _ensure_platform_int(np.repeat(-1, target.size))
pself, ptarget = self._maybe_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer(ptarget, method=method, limit=limit,
tolerance=tolerance)
if not is_dtype_equal(self.dtype, target.dtype):
this = self.astype(object)
target = target.astype(object)
return this.get_indexer(target, method=method, limit=limit,
tolerance=tolerance)
if not self.is_unique:
raise InvalidIndexError('Reindexing only valid with uniquely'
' valued Index objects')
if method == 'pad' or method == 'backfill':
indexer = self._get_fill_indexer(target, method, limit, tolerance)
elif method == 'nearest':
indexer = self._get_nearest_indexer(target, limit, tolerance)
else:
if tolerance is not None:
raise ValueError('tolerance argument only valid if doing pad, '
'backfill or nearest reindexing')
if limit is not None:
raise ValueError('limit argument only valid if doing pad, '
'backfill or nearest reindexing')
indexer = self._engine.get_indexer(target._ndarray_values)
return _ensure_platform_int(indexer)
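# Usage sketch (illustrative): inexact matching with method='nearest',
# optionally bounded by a tolerance.
#   >>> idx = pd.Index([10, 20, 30])
#   >>> idx.get_indexer([10, 24], method='nearest')
#   array([0, 1])
#   >>> idx.get_indexer([41], method='nearest', tolerance=5)
#   array([-1])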
def _convert_tolerance(self, tolerance, target):
# override this method on subclasses
tolerance = np.asarray(tolerance)
if target.size != tolerance.size and tolerance.size > 1:
raise ValueError('list-like tolerance size must match '
'target index size')
return tolerance
def _get_fill_indexer(self, target, method, limit=None, tolerance=None):
if self.is_monotonic_increasing and target.is_monotonic_increasing:
method = (self._engine.get_pad_indexer if method == 'pad' else
self._engine.get_backfill_indexer)
indexer = method(target._ndarray_values, limit)
else:
indexer = self._get_fill_indexer_searchsorted(target, method,
limit)
if tolerance is not None:
indexer = self._filter_indexer_tolerance(target._ndarray_values,
indexer,
tolerance)
return indexer
def _get_fill_indexer_searchsorted(self, target, method, limit=None):
"""
Fallback pad/backfill get_indexer that works for monotonic decreasing
indexes and non-monotonic targets
"""
if limit is not None:
raise ValueError('limit argument for %r method only well-defined '
'if index and target are monotonic' % method)
side = 'left' if method == 'pad' else 'right'
# find exact matches first (this simplifies the algorithm)
indexer = self.get_indexer(target)
nonexact = (indexer == -1)
indexer[nonexact] = self._searchsorted_monotonic(target[nonexact],
side)
if side == 'left':
# searchsorted returns "indices into a sorted array such that,
# if the corresponding elements in v were inserted before the
# indices, the order of a would be preserved".
# Thus, we need to subtract 1 to find values to the left.
indexer[nonexact] -= 1
# This also mapped not found values (values of 0 from
# np.searchsorted) to -1, which conveniently is also our
# sentinel for missing values
else:
# Mark indices to the right of the largest value as not found
indexer[indexer == len(self)] = -1
return indexer
def _get_nearest_indexer(self, target, limit, tolerance):
"""
Get the indexer for the nearest index labels; requires an index with
values that can be subtracted from each other (e.g., not strings or
tuples).
"""
left_indexer = self.get_indexer(target, 'pad', limit=limit)
right_indexer = self.get_indexer(target, 'backfill', limit=limit)
target = np.asarray(target)
left_distances = abs(self.values[left_indexer] - target)
right_distances = abs(self.values[right_indexer] - target)
op = operator.lt if self.is_monotonic_increasing else operator.le
indexer = np.where(op(left_distances, right_distances) |
(right_indexer == -1), left_indexer, right_indexer)
if tolerance is not None:
indexer = self._filter_indexer_tolerance(target, indexer,
tolerance)
return indexer
def _filter_indexer_tolerance(self, target, indexer, tolerance):
distance = abs(self.values[indexer] - target)
indexer = np.where(distance <= tolerance, indexer, -1)
return indexer
_index_shared_docs['get_indexer_non_unique'] = """
Compute indexer and mask for new index given the current index. The
indexer should then be used as an input to ndarray.take to align the
current data to the new index.
Parameters
----------
target : %(target_klass)s
Returns
-------
indexer : ndarray of int
Integers from 0 to n - 1 indicating that the index at these
positions matches the corresponding target values. Missing values
in the target are marked by -1.
missing : ndarray of int
An indexer into the target of the values not found.
These correspond to the -1 in the indexer array
"""
@Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = _ensure_index(target)
if is_categorical(target):
target = target.astype(target.dtype.categories.dtype)
pself, ptarget = self._maybe_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer_non_unique(ptarget)
if self.is_all_dates:
self = Index(self.asi8)
tgt_values = target.asi8
else:
tgt_values = target._ndarray_values
indexer, missing = self._engine.get_indexer_non_unique(tgt_values)
return _ensure_platform_int(indexer), missing
def get_indexer_for(self, target, **kwargs):
"""
guaranteed return of an indexer even when non-unique.
This dispatches to get_indexer or get_indexer_non_unique, as appropriate
"""
if self.is_unique:
return self.get_indexer(target, **kwargs)
indexer, _ = self.get_indexer_non_unique(target, **kwargs)
return indexer
def _maybe_promote(self, other):
# A hack, but it works
from pandas.core.indexes.datetimes import DatetimeIndex
if self.inferred_type == 'date' and isinstance(other, DatetimeIndex):
return DatetimeIndex(self), other
elif self.inferred_type == 'boolean':
if not is_object_dtype(self.dtype):
return self.astype('object'), other.astype('object')
return self, other
def groupby(self, values):
"""
Group the index labels by a given array of values.
Parameters
----------
values : array
Values used to determine the groups.
Returns
-------
groups : dict
{group name -> group labels}
"""
# TODO: if we are a MultiIndex, we can do better
# than converting to tuples
from .multi import MultiIndex
if isinstance(values, MultiIndex):
values = values.values
values = _ensure_categorical(values)
result = values._reverse_indexer()
# map to the label
result = {k: self.take(v) for k, v in compat.iteritems(result)}
return result
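# Usage sketch (illustrative): the grouping values become the dict keys
# and the grouped index labels the values.
#   >>> pd.Index(['a', 'b', 'c']).groupby(np.array([1, 2, 1]))
#   {1: Index(['a', 'c'], dtype='object'), 2: Index(['b'], dtype='object')}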
def map(self, mapper, na_action=None):
"""
Map values using input correspondence (a dict, Series, or function).
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
na_action : {None, 'ignore'}
If 'ignore', propagate NA values, without passing them to the
mapping correspondence.
Returns
-------
applied : Union[Index, MultiIndex], inferred
The output of the mapping function applied to the index.
If the function returns a tuple with more than one element
a MultiIndex will be returned.
"""
from .multi import MultiIndex
new_values = super(Index, self)._map_values(
mapper, na_action=na_action)
attributes = self._get_attributes_dict()
# we can return a MultiIndex
if new_values.size and isinstance(new_values[0], tuple):
if isinstance(self, MultiIndex):
names = self.names
elif attributes.get('name'):
names = [attributes.get('name')] * len(new_values[0])
else:
names = None
return MultiIndex.from_tuples(new_values,
names=names)
attributes['copy'] = False
if not new_values.size:
# empty
attributes['dtype'] = self.dtype
return Index(new_values, **attributes)
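# Usage sketch (illustrative): a scalar-returning mapper keeps an Index,
# while a tuple-returning mapper produces a MultiIndex.
#   >>> pd.Index([1, 2, 3]).map(lambda x: x * 2)
#   Int64Index([2, 4, 6], dtype='int64')
#   >>> pd.Index([1, 2]).map(lambda x: (x, x * 2))    # -> MultiIndex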
def isin(self, values, level=None):
"""
Return a boolean array where the index values are in `values`.
Compute boolean array of whether each index value is found in the
passed set of values. The length of the returned boolean array matches
the length of the index.
Parameters
----------
values : set or list-like
Sought values.
.. versionadded:: 0.18.1
Support for values as a set.
level : str or int, optional
Name or position of the index level to use (if the index is a
`MultiIndex`).
Returns
-------
is_contained : ndarray
NumPy array of boolean values.
See also
--------
Series.isin : Same for Series.
DataFrame.isin : Same method for DataFrames.
Notes
-----
In the case of `MultiIndex` you must either specify `values` as a
list-like object containing tuples that are the same length as the
number of levels, or specify `level`. Otherwise it will raise a
``ValueError``.
If `level` is specified:
- if it is the name of one *and only one* index level, use that level;
- otherwise it should be a number indicating level position.
Examples
--------
>>> idx = pd.Index([1,2,3])
>>> idx
Int64Index([1, 2, 3], dtype='int64')
Check whether each index value is in a list of values.
>>> idx.isin([1, 4])
array([ True, False, False])
>>> midx = pd.MultiIndex.from_arrays([[1,2,3],
... ['red', 'blue', 'green']],
... names=('number', 'color'))
>>> midx
MultiIndex(levels=[[1, 2, 3], ['blue', 'green', 'red']],
labels=[[0, 1, 2], [2, 0, 1]],
names=['number', 'color'])
Check whether the strings in the 'color' level of the MultiIndex
are in a list of colors.
>>> midx.isin(['red', 'orange', 'yellow'], level='color')
array([ True, False, False])
To check across the levels of a MultiIndex, pass a list of tuples:
>>> midx.isin([(1, 'red'), (3, 'red')])
array([ True, False, False])
For a DatetimeIndex, string values in `values` are converted to
Timestamps.
>>> dates = ['2000-03-11', '2000-03-12', '2000-03-13']
>>> dti = pd.to_datetime(dates)
>>> dti
DatetimeIndex(['2000-03-11', '2000-03-12', '2000-03-13'],
dtype='datetime64[ns]', freq=None)
>>> dti.isin(['2000-03-11'])
array([ True, False, False])
"""
if level is not None:
self._validate_index_level(level)
return algos.isin(self, values)
def _can_reindex(self, indexer):
"""
*this is an internal non-public method*
Check if we are allowing reindexing with this particular indexer
Parameters
----------
indexer : an integer indexer
Raises
------
ValueError if it's a duplicate axis
"""
# trying to reindex on an axis with duplicates
if not self.is_unique and len(indexer):
raise ValueError("cannot reindex from a duplicate axis")
def reindex(self, target, method=None, level=None, limit=None,
tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
preserve_names = not hasattr(target, 'name')
# GH7774: preserve dtype/tz if target is empty and not an Index.
target = _ensure_has_len(target) # target may be an iterator
if not isinstance(target, Index) and len(target) == 0:
attrs = self._get_attributes_dict()
attrs.pop('freq', None) # don't preserve freq
target = self._simple_new(None, dtype=self.dtype, **attrs)
else:
target = _ensure_index(target)
if level is not None:
if method is not None:
raise TypeError('Fill method not supported if level passed')
_, indexer, _ = self._join_level(target, level, how='right',
return_indexers=True)
else:
if self.equals(target):
indexer = None
else:
if self.is_unique:
indexer = self.get_indexer(target, method=method,
limit=limit,
tolerance=tolerance)
else:
if method is not None or limit is not None:
raise ValueError("cannot reindex a non-unique index "
"with a method or limit")
indexer, missing = self.get_indexer_non_unique(target)
if preserve_names and target.nlevels == 1 and target.name != self.name:
target = target.copy()
target.name = self.name
return target, indexer
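# Usage sketch (illustrative): labels absent from the index come back with
# a -1 in the returned indexer.
#   >>> pd.Index(['a', 'b', 'c']).reindex(['b', 'd'])
#   (Index(['b', 'd'], dtype='object'), array([ 1, -1]))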
def _reindex_non_unique(self, target):
"""
*this is an internal non-public method*
Create a new index with target's values (move/add/delete values as
necessary) use with non-unique Index and a possibly non-unique target
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
target = _ensure_index(target)
indexer, missing = self.get_indexer_non_unique(target)
check = indexer != -1
new_labels = self.take(indexer[check])
new_indexer = None
if len(missing):
length = np.arange(len(indexer))
missing = _ensure_platform_int(missing)
missing_labels = target.take(missing)
missing_indexer = _ensure_int64(length[~check])
cur_labels = self.take(indexer[check]).values
cur_indexer = _ensure_int64(length[check])
new_labels = np.empty(tuple([len(indexer)]), dtype=object)
new_labels[cur_indexer] = cur_labels
new_labels[missing_indexer] = missing_labels
# a unique indexer
if target.is_unique:
# see GH5553, make sure we use the right indexer
new_indexer = np.arange(len(indexer))
new_indexer[cur_indexer] = np.arange(len(cur_labels))
new_indexer[missing_indexer] = -1
# we have a non_unique selector, need to use the original
# indexer here
else:
# need to retake to have the same size as the indexer
indexer[~check] = 0
# reset the new indexer to account for the new size
new_indexer = np.arange(len(self.take(indexer)))
new_indexer[~check] = -1
new_index = self._shallow_copy_with_infer(new_labels, freq=None)
return new_index, indexer, new_indexer
_index_shared_docs['join'] = """
*this is an internal non-public method*
Compute join_index and indexers to conform data
structures to the new index.
Parameters
----------
other : Index
how : {'left', 'right', 'inner', 'outer'}
level : int or level name, default None
return_indexers : boolean, default False
sort : boolean, default False
Sort the join keys lexicographically in the result Index. If False,
the order of the join keys depends on the join type (how keyword)
.. versionadded:: 0.20.0
Returns
-------
join_index, (left_indexer, right_indexer)
"""
@Appender(_index_shared_docs['join'])
def join(self, other, how='left', level=None, return_indexers=False,
sort=False):
from .multi import MultiIndex
self_is_mi = isinstance(self, MultiIndex)
other_is_mi = isinstance(other, MultiIndex)
# try to figure out the join level
# GH3662
if level is None and (self_is_mi or other_is_mi):
# have the same levels/names so a simple join
if self.names == other.names:
pass
else:
return self._join_multi(other, how=how,
return_indexers=return_indexers)
# join on the level
if level is not None and (self_is_mi or other_is_mi):
return self._join_level(other, level, how=how,
return_indexers=return_indexers)
other = _ensure_index(other)
if len(other) == 0 and how in ('left', 'outer'):
join_index = self._shallow_copy()
if return_indexers:
rindexer = np.repeat(-1, len(join_index))
return join_index, None, rindexer
else:
return join_index
if len(self) == 0 and how in ('right', 'outer'):
join_index = other._shallow_copy()
if return_indexers:
lindexer = np.repeat(-1, len(join_index))
return join_index, lindexer, None
else:
return join_index
if self._join_precedence < other._join_precedence:
how = {'right': 'left', 'left': 'right'}.get(how, how)
result = other.join(self, how=how, level=level,
return_indexers=return_indexers)
if return_indexers:
x, y, z = result
result = x, z, y
return result
if not is_dtype_equal(self.dtype, other.dtype):
this = self.astype('O')
other = other.astype('O')
return this.join(other, how=how, return_indexers=return_indexers)
_validate_join_method(how)
if not self.is_unique and not other.is_unique:
return self._join_non_unique(other, how=how,
return_indexers=return_indexers)
elif not self.is_unique or not other.is_unique:
if self.is_monotonic and other.is_monotonic:
return self._join_monotonic(other, how=how,
return_indexers=return_indexers)
else:
return self._join_non_unique(other, how=how,
return_indexers=return_indexers)
elif self.is_monotonic and other.is_monotonic:
try:
return self._join_monotonic(other, how=how,
return_indexers=return_indexers)
except TypeError:
pass
if how == 'left':
join_index = self
elif how == 'right':
join_index = other
elif how == 'inner':
join_index = self.intersection(other)
elif how == 'outer':
join_index = self.union(other)
if sort:
join_index = join_index.sort_values()
if return_indexers:
if join_index is self:
lindexer = None
else:
lindexer = self.get_indexer(join_index)
if join_index is other:
rindexer = None
else:
rindexer = other.get_indexer(join_index)
return join_index, lindexer, rindexer
else:
return join_index
def _join_multi(self, other, how, return_indexers=True):
from .multi import MultiIndex
self_is_mi = isinstance(self, MultiIndex)
other_is_mi = isinstance(other, MultiIndex)
# figure out join names
self_names = com._not_none(*self.names)
other_names = com._not_none(*other.names)
overlap = list(set(self_names) & set(other_names))
# need at least 1 in common, but not more than 1
if not len(overlap):
raise ValueError("cannot join with no level specified and no "
"overlapping names")
if len(overlap) > 1:
raise NotImplementedError("merging with more than one level "
"overlap on a multi-index is not "
"implemented")
jl = overlap[0]
# make the indices into mi's that match
if not (self_is_mi and other_is_mi):
flip_order = False
if self_is_mi:
self, other = other, self
flip_order = True
# flip if join method is right or left
how = {'right': 'left', 'left': 'right'}.get(how, how)
level = other.names.index(jl)
result = self._join_level(other, level, how=how,
return_indexers=return_indexers)
if flip_order:
if isinstance(result, tuple):
return result[0], result[2], result[1]
return result
# 2 multi-indexes
raise NotImplementedError("merging with both multi-indexes is not "
"implemented")
def _join_non_unique(self, other, how='left', return_indexers=False):
from pandas.core.reshape.merge import _get_join_indexers
left_idx, right_idx = _get_join_indexers([self._ndarray_values],
[other._ndarray_values],
how=how,
sort=True)
left_idx = _ensure_platform_int(left_idx)
right_idx = _ensure_platform_int(right_idx)
join_index = np.asarray(self._ndarray_values.take(left_idx))
mask = left_idx == -1
np.putmask(join_index, mask, other._ndarray_values.take(right_idx))
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
return join_index, left_idx, right_idx
else:
return join_index
def _join_level(self, other, level, how='left', return_indexers=False,
keep_order=True):
"""
The join method *only* affects the level of the resulting
MultiIndex. Otherwise it just exactly aligns the Index data to the
labels of the level in the MultiIndex. If `keep_order` == True, the
order of the data indexed by the MultiIndex will not be changed;
otherwise, it will tie out with `other`.
"""
from .multi import MultiIndex
def _get_leaf_sorter(labels):
"""
returns sorter for the innermost level while preserving the
order of higher levels
"""
if labels[0].size == 0:
return np.empty(0, dtype='int64')
if len(labels) == 1:
lab = _ensure_int64(labels[0])
sorter, _ = libalgos.groupsort_indexer(lab, 1 + lab.max())
return sorter
# find indexers of beginning of each set of
# same-key labels w.r.t all but last level
tic = labels[0][:-1] != labels[0][1:]
for lab in labels[1:-1]:
tic |= lab[:-1] != lab[1:]
starts = np.hstack(([True], tic, [True])).nonzero()[0]
lab = _ensure_int64(labels[-1])
return lib.get_level_sorter(lab, _ensure_int64(starts))
if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):
raise TypeError('Join on level between two MultiIndex objects '
'is ambiguous')
left, right = self, other
flip_order = not isinstance(self, MultiIndex)
if flip_order:
left, right = right, left
how = {'right': 'left', 'left': 'right'}.get(how, how)
level = left._get_level_number(level)
old_level = left.levels[level]
if not right.is_unique:
raise NotImplementedError('Index._join_level on non-unique index '
'is not implemented')
new_level, left_lev_indexer, right_lev_indexer = \
old_level.join(right, how=how, return_indexers=True)
if left_lev_indexer is None:
if keep_order or len(left) == 0:
left_indexer = None
join_index = left
else: # sort the leaves
left_indexer = _get_leaf_sorter(left.labels[:level + 1])
join_index = left[left_indexer]
else:
left_lev_indexer = _ensure_int64(left_lev_indexer)
rev_indexer = lib.get_reverse_indexer(left_lev_indexer,
len(old_level))
new_lev_labels = algos.take_nd(rev_indexer, left.labels[level],
allow_fill=False)
new_labels = list(left.labels)
new_labels[level] = new_lev_labels
new_levels = list(left.levels)
new_levels[level] = new_level
if keep_order: # just drop missing values. o.w. keep order
left_indexer = np.arange(len(left), dtype=np.intp)
mask = new_lev_labels != -1
if not mask.all():
new_labels = [lab[mask] for lab in new_labels]
left_indexer = left_indexer[mask]
else: # tie out the order with other
if level == 0: # outer most level, take the fast route
ngroups = 1 + new_lev_labels.max()
left_indexer, counts = libalgos.groupsort_indexer(
new_lev_labels, ngroups)
# missing values are placed first; drop them!
left_indexer = left_indexer[counts[0]:]
new_labels = [lab[left_indexer] for lab in new_labels]
else: # sort the leaves
mask = new_lev_labels != -1
mask_all = mask.all()
if not mask_all:
new_labels = [lab[mask] for lab in new_labels]
left_indexer = _get_leaf_sorter(new_labels[:level + 1])
new_labels = [lab[left_indexer] for lab in new_labels]
# left_indexers are w.r.t masked frame.
# reverse to original frame!
if not mask_all:
left_indexer = mask.nonzero()[0][left_indexer]
join_index = MultiIndex(levels=new_levels, labels=new_labels,
names=left.names, verify_integrity=False)
if right_lev_indexer is not None:
right_indexer = algos.take_nd(right_lev_indexer,
join_index.labels[level],
allow_fill=False)
else:
right_indexer = join_index.labels[level]
if flip_order:
left_indexer, right_indexer = right_indexer, left_indexer
if return_indexers:
left_indexer = (None if left_indexer is None
else _ensure_platform_int(left_indexer))
right_indexer = (None if right_indexer is None
else _ensure_platform_int(right_indexer))
return join_index, left_indexer, right_indexer
else:
return join_index
def _join_monotonic(self, other, how='left', return_indexers=False):
if self.equals(other):
ret_index = other if how == 'right' else self
if return_indexers:
return ret_index, None, None
else:
return ret_index
sv = self._ndarray_values
ov = other._ndarray_values
if self.is_unique and other.is_unique:
# We can perform much better than the general case
if how == 'left':
join_index = self
lidx = None
ridx = self._left_indexer_unique(sv, ov)
elif how == 'right':
join_index = other
lidx = self._left_indexer_unique(ov, sv)
ridx = None
elif how == 'inner':
join_index, lidx, ridx = self._inner_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
elif how == 'outer':
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
else:
if how == 'left':
join_index, lidx, ridx = self._left_indexer(sv, ov)
elif how == 'right':
join_index, ridx, lidx = self._left_indexer(ov, sv)
elif how == 'inner':
join_index, lidx, ridx = self._inner_indexer(sv, ov)
elif how == 'outer':
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
lidx = None if lidx is None else _ensure_platform_int(lidx)
ridx = None if ridx is None else _ensure_platform_int(ridx)
return join_index, lidx, ridx
else:
return join_index
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
return Index(joined, name=name)
def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
# this is for partial string indexing,
# overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex
raise NotImplementedError
def slice_indexer(self, start=None, end=None, step=None, kind=None):
"""
For an ordered or unique index, compute the slice indexer for input
labels and step.
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the end
step : int, default None
kind : string, default None
Returns
-------
indexer : slice
Raises
------
KeyError : If key does not exist, or key is not unique and index is
not ordered.
Notes
-----
This function assumes that the data is sorted, so use at your own peril
Examples
--------
This is a method on all index types. For example you can do:
>>> idx = pd.Index(list('abcd'))
>>> idx.slice_indexer(start='b', end='c')
slice(1, 3)
>>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')])
>>> idx.slice_indexer(start='b', end=('c', 'g'))
slice(1, 3)
"""
start_slice, end_slice = self.slice_locs(start, end, step=step,
kind=kind)
# return a slice
if not is_scalar(start_slice):
raise AssertionError("Start slice bound is non-scalar")
if not is_scalar(end_slice):
raise AssertionError("End slice bound is non-scalar")
return slice(start_slice, end_slice, step)
def _maybe_cast_indexer(self, key):
"""
If we have a float key and are not a floating index,
then try to cast to an int if equivalent.
"""
if is_float(key) and not self.is_floating():
try:
ckey = int(key)
if ckey == key:
key = ckey
except (OverflowError, ValueError, TypeError):
pass
return key
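# Sketch of the cast above on an assumed toy index:
#   idx = pd.Index([1, 2, 3])
#   idx._maybe_cast_indexer(2.0)   # -> 2   (float equal to an int is cast)
#   idx._maybe_cast_indexer(2.5)   # -> 2.5 (unchanged; fails lookup later)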
def _validate_indexer(self, form, key, kind):
"""
If we are a positional indexer, validate that we have
appropriately typed bounds (the key must be an integer).
"""
assert kind in ['ix', 'loc', 'getitem', 'iloc']
if key is None:
pass
elif is_integer(key):
pass
elif kind in ['iloc', 'getitem']:
self._invalid_indexer(form, key)
return key
_index_shared_docs['_maybe_cast_slice_bound'] = """
This function should be overloaded in subclasses that allow non-trivial
casting on label-slice bounds, e.g. datetime-like indices allowing
strings containing formatted datetimes.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
Returns
-------
label : object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
@Appender(_index_shared_docs['_maybe_cast_slice_bound'])
def _maybe_cast_slice_bound(self, label, side, kind):
assert kind in ['ix', 'loc', 'getitem', None]
# We are a plain index here (sub-class override this method if they
# wish to have special treatment for floats/ints, e.g. Float64Index and
# datetimelike Indexes
# reject them
if is_float(label):
if not (kind in ['ix'] and (self.holds_integer() or
self.is_floating())):
self._invalid_indexer('slice', label)
# we are trying to find integer bounds on a non-integer based index
# this is rejected (generally .loc gets you here)
elif is_integer(label):
self._invalid_indexer('slice', label)
return label
def _searchsorted_monotonic(self, label, side='left'):
if self.is_monotonic_increasing:
return self.searchsorted(label, side=side)
elif self.is_monotonic_decreasing:
# np.searchsorted expects ascending sort order, have to reverse
# everything for it to work (element ordering, search side and
# resulting value).
pos = self[::-1].searchsorted(label, side='right' if side == 'left'
else 'left')
return len(self) - pos
raise ValueError('index must be monotonic increasing or decreasing')
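# Sketch of the decreasing-order branch above (toy index, assumed):
#   idx = pd.Index([9, 7, 5, 3])                 # monotonic decreasing
#   idx._searchsorted_monotonic(5, side='left')  # -> 2
#   # via len(idx) - idx[::-1].searchsorted(5, side='right') == 4 - 2 == 2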
def _get_loc_only_exact_matches(self, key):
"""
This is overridden on subclasses (namely, IntervalIndex) to control
get_slice_bound.
"""
return self.get_loc(key)
def get_slice_bound(self, label, side, kind):
"""
Calculate slice bound that corresponds to given label.
Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
"""
assert kind in ['ix', 'loc', 'getitem', None]
if side not in ('left', 'right'):
raise ValueError("Invalid value for side kwarg,"
" must be either 'left' or 'right': %s" %
(side, ))
original_label = label
# For datetime indices label may be a string that has to be converted
# to datetime boundary according to its resolution.
label = self._maybe_cast_slice_bound(label, side, kind)
# we need to look up the label
try:
slc = self._get_loc_only_exact_matches(label)
except KeyError as err:
try:
return self._searchsorted_monotonic(label, side)
except ValueError:
# raise the original KeyError
raise err
if isinstance(slc, np.ndarray):
# get_loc may return a boolean array or an array of indices, which
# is OK as long as they are representable by a slice.
if is_bool_dtype(slc):
slc = lib.maybe_booleans_to_slice(slc.view('u1'))
else:
slc = lib.maybe_indices_to_slice(slc.astype('i8'), len(self))
if isinstance(slc, np.ndarray):
raise KeyError("Cannot get %s slice bound for non-unique "
"label: %r" % (side, original_label))
if isinstance(slc, slice):
if side == 'left':
return slc.start
else:
return slc.stop
else:
if side == 'right':
return slc + 1
else:
return slc
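# Usage sketch (toy index; `kind` values per the assert above):
#   idx = pd.Index(list('aabbcc'))
#   idx.get_slice_bound('b', side='left', kind='loc')   # -> 2
#   idx.get_slice_bound('b', side='right', kind='loc')  # -> 4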
def slice_locs(self, start=None, end=None, step=None, kind=None):
"""
Compute slice locations for input labels.
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the end
step : int, defaults None
If None, defaults to 1
kind : {'ix', 'loc', 'getitem'} or None
Returns
-------
start, end : int
Notes
-----
This method only works if the index is monotonic or unique.
Examples
--------
>>> idx = pd.Index(list('abcd'))
>>> idx.slice_locs(start='b', end='c')
(1, 3)
See Also
--------
Index.get_loc : Get location for a single label
"""
inc = (step is None or step >= 0)
if not inc:
# If it's a reverse slice, temporarily swap bounds.
start, end = end, start
start_slice = None
if start is not None:
start_slice = self.get_slice_bound(start, 'left', kind)
if start_slice is None:
start_slice = 0
end_slice = None
if end is not None:
end_slice = self.get_slice_bound(end, 'right', kind)
if end_slice is None:
end_slice = len(self)
if not inc:
# Bounds at this moment are swapped, swap them back and shift by 1.
#
# slice_locs('B', 'A', step=-1): s='B', e='A'
#
# s='A' e='B'
# AFTER SWAP: | |
# v ------------------> V
# -----------------------------------
# | | |A|A|A|A| | | | | |B|B| | | | |
# -----------------------------------
# ^ <------------------ ^
# SHOULD BE: | |
# end=s-1 start=e-1
#
end_slice, start_slice = start_slice - 1, end_slice - 1
# i == -1 triggers ``len(self) + i`` selection that points to the
# last element, not before-the-first one, subtracting len(self)
# compensates that.
if end_slice == -1:
end_slice -= len(self)
if start_slice == -1:
start_slice -= len(self)
return start_slice, end_slice
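# Sketch of the reverse-slice bookkeeping above (toy index, assumed):
#   idx = pd.Index(list('abcd'))
#   idx.slice_locs('c', 'a', step=-1)   # -> (2, -5)
#   # slice(2, -5, -1) selects positions 2, 1, 0 -> ['c', 'b', 'a'];
#   # -5 (= -1 - len(idx)) encodes "one before the first element".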
def delete(self, loc):
"""
Make new Index with passed location(-s) deleted
Returns
-------
new_index : Index
"""
return self._shallow_copy(np.delete(self._data, loc))
def insert(self, loc, item):
"""
Make new Index inserting new item at location. Follows
Python list.append semantics for negative values
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
"""
if is_scalar(item) and isna(item):
# GH 18295
item = self._na_value
_self = np.asarray(self)
item = self._coerce_scalar_to_index(item)._ndarray_values
idx = np.concatenate((_self[:loc], item, _self[loc:]))
return self._shallow_copy_with_infer(idx)
def drop(self, labels, errors='raise'):
"""
Make new Index with passed list of labels deleted
Parameters
----------
labels : array-like
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
Returns
-------
dropped : Index
Raises
------
KeyError
If none of the labels are found in the selected axis
"""
arr_dtype = 'object' if self.dtype == 'object' else None
labels = com._index_labels_to_array(labels, dtype=arr_dtype)
indexer = self.get_indexer(labels)
mask = indexer == -1
if mask.any():
if errors != 'ignore':
raise KeyError(
'labels %s not contained in axis' % labels[mask])
indexer = indexer[~mask]
return self.delete(indexer)
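# Usage sketch (toy labels, assumed):
#   idx = pd.Index(['a', 'b', 'c'])
#   idx.drop(['b'])                        # -> Index(['a', 'c'])
#   idx.drop(['b', 'z'], errors='ignore')  # -> Index(['a', 'c']), 'z' skipped
#   idx.drop(['z'])                        # -> raises KeyError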
_index_shared_docs['index_unique'] = (
"""
Return unique values in the index. Uniques are returned in order
of appearance; this does NOT sort.
Parameters
----------
level : int or str, optional, default None
Only return values from specified level (for MultiIndex)
.. versionadded:: 0.23.0
Returns
-------
Index without duplicates
See Also
--------
unique
Series.unique
""")
@Appender(_index_shared_docs['index_unique'] % _index_doc_kwargs)
def unique(self, level=None):
if level is not None:
self._validate_index_level(level)
result = super(Index, self).unique()
return self._shallow_copy(result)
def drop_duplicates(self, keep='first'):
"""
Return Index with duplicate values removed.
Parameters
----------
keep : {'first', 'last', ``False``}, default 'first'
- 'first' : Drop duplicates except for the first occurrence.
- 'last' : Drop duplicates except for the last occurrence.
- ``False`` : Drop all duplicates.
Returns
-------
deduplicated : Index
See Also
--------
Series.drop_duplicates : equivalent method on Series
DataFrame.drop_duplicates : equivalent method on DataFrame
Index.duplicated : related method on Index, indicating duplicate
Index values.
Examples
--------
Generate a pandas.Index with duplicate values.
>>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'])
The `keep` parameter controls which duplicate values are removed.
The value 'first' keeps the first occurrence for each
set of duplicated entries. The default value of keep is 'first'.
>>> idx.drop_duplicates(keep='first')
Index(['lama', 'cow', 'beetle', 'hippo'], dtype='object')
The value 'last' keeps the last occurrence for each set of duplicated
entries.
>>> idx.drop_duplicates(keep='last')
Index(['cow', 'beetle', 'lama', 'hippo'], dtype='object')
The value ``False`` discards all sets of duplicated entries.
>>> idx.drop_duplicates(keep=False)
Index(['cow', 'beetle', 'hippo'], dtype='object')
"""
return super(Index, self).drop_duplicates(keep=keep)
def duplicated(self, keep='first'):
"""
Indicate duplicate index values.
Duplicated values are indicated as ``True`` values in the resulting
array. Either all duplicates, all except the first, or all except the
last occurrence of duplicates can be indicated.
Parameters
----------
keep : {'first', 'last', False}, default 'first'
The value or values in a set of duplicates to mark as missing.
- 'first' : Mark duplicates as ``True`` except for the first
occurrence.
- 'last' : Mark duplicates as ``True`` except for the last
occurrence.
- ``False`` : Mark all duplicates as ``True``.
Examples
--------
By default, for each set of duplicated values, the first occurrence is
set to False and all others to True:
>>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama'])
>>> idx.duplicated()
array([False, False, True, False, True])
which is equivalent to
>>> idx.duplicated(keep='first')
array([False, False, True, False, True])
By using 'last', the last occurrence of each set of duplicated values
is set to False and all others to True:
>>> idx.duplicated(keep='last')
array([ True, False, True, False, False])
By setting keep to ``False``, all duplicates are True:
>>> idx.duplicated(keep=False)
array([ True, False, True, False, True])
Returns
-------
numpy.ndarray
See Also
--------
pandas.Series.duplicated : Equivalent method on pandas.Series
pandas.DataFrame.duplicated : Equivalent method on pandas.DataFrame
pandas.Index.drop_duplicates : Remove duplicate values from Index
"""
return super(Index, self).duplicated(keep=keep)
_index_shared_docs['fillna'] = """
Fill NA/NaN values with the specified value
Parameters
----------
value : scalar
Scalar value to use to fill holes (e.g. 0).
This value cannot be a list-like.
downcast : dict, default is None
a dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible)
Returns
-------
filled : %(klass)s
"""
@Appender(_index_shared_docs['fillna'])
def fillna(self, value=None, downcast=None):
self._assert_can_do_op(value)
if self.hasnans:
result = self.putmask(self._isnan, value)
if downcast is None:
# no need to care about metadata other than name,
# because the index can't have a freq if it holds NaNs
return Index(result, name=self.name)
return self._shallow_copy()
_index_shared_docs['dropna'] = """
Return Index without NA/NaN values
Parameters
----------
how : {'any', 'all'}, default 'any'
If the Index is a MultiIndex, drop the value when any or all levels
are NaN.
Returns
-------
valid : Index
"""
@Appender(_index_shared_docs['dropna'])
def dropna(self, how='any'):
if how not in ('any', 'all'):
raise ValueError("invalid how option: {0}".format(how))
if self.hasnans:
return self._shallow_copy(self.values[~self._isnan])
return self._shallow_copy()
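# Usage sketch for the two methods above (toy float index, assumed):
#   idx = pd.Index([1.0, np.nan, 3.0])
#   idx.fillna(0)   # -> Float64Index([1.0, 0.0, 3.0])
#   idx.dropna()    # -> Float64Index([1.0, 3.0])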
def _evaluate_with_timedelta_like(self, other, op):
# Timedelta knows how to operate with np.array, so dispatch to that
# operation and then wrap the results
other = Timedelta(other)
values = self.values
with np.errstate(all='ignore'):
result = op(values, other)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
if op == divmod:
return Index(result[0], **attrs), Index(result[1], **attrs)
return Index(result, **attrs)
def _evaluate_with_datetime_like(self, other, op):
raise TypeError("can only perform ops with datetime like values")
def _evaluate_compare(self, other, op):
raise com.AbstractMethodError(self)
@classmethod
def _add_comparison_methods(cls):
""" add in comparison methods """
cls.__eq__ = _make_comparison_op(operator.eq, cls)
cls.__ne__ = _make_comparison_op(operator.ne, cls)
cls.__lt__ = _make_comparison_op(operator.lt, cls)
cls.__gt__ = _make_comparison_op(operator.gt, cls)
cls.__le__ = _make_comparison_op(operator.le, cls)
cls.__ge__ = _make_comparison_op(operator.ge, cls)
@classmethod
def _add_numeric_methods_add_sub_disabled(cls):
""" add in the numeric add/sub methods to disable """
cls.__add__ = make_invalid_op('__add__')
cls.__radd__ = make_invalid_op('__radd__')
cls.__iadd__ = make_invalid_op('__iadd__')
cls.__sub__ = make_invalid_op('__sub__')
cls.__rsub__ = make_invalid_op('__rsub__')
cls.__isub__ = make_invalid_op('__isub__')
@classmethod
def _add_numeric_methods_disabled(cls):
""" add in numeric methods to disable other than add/sub """
cls.__pow__ = make_invalid_op('__pow__')
cls.__rpow__ = make_invalid_op('__rpow__')
cls.__mul__ = make_invalid_op('__mul__')
cls.__rmul__ = make_invalid_op('__rmul__')
cls.__floordiv__ = make_invalid_op('__floordiv__')
cls.__rfloordiv__ = make_invalid_op('__rfloordiv__')
cls.__truediv__ = make_invalid_op('__truediv__')
cls.__rtruediv__ = make_invalid_op('__rtruediv__')
if not compat.PY3:
cls.__div__ = make_invalid_op('__div__')
cls.__rdiv__ = make_invalid_op('__rdiv__')
cls.__mod__ = make_invalid_op('__mod__')
cls.__divmod__ = make_invalid_op('__divmod__')
cls.__neg__ = | make_invalid_op('__neg__') | pandas.core.ops.make_invalid_op |
from pandas import DataFrame, read_excel, ExcelFile, read_csv, concat, Series, \
notnull
from pathlib import Path
from re import match
from typing import Optional, List, Union, Callable
from survey import Survey
from survey.attributes import PositiveMeasureAttribute
from survey.mixins.data_types.categorical_mixin import CategoricalMixin
from survey.attributes import RespondentAttribute, SingleCategoryAttribute
from survey.attributes import CountAttribute
from survey.questions import Question
from survey.questions import SingleChoiceQuestion, FreeTextQuestion, \
LikertQuestion, MultiChoiceQuestion
from survey.questions import CountQuestion
from survey.questions import PositiveMeasureQuestion
from survey.questions import RankedChoiceQuestion
from survey.respondents import Respondent
from survey.surveys.metadata.attribute_metadata import AttributeMetadata
from survey.surveys.metadata.question_metadata import QuestionMetadata
from survey.surveys.survey_creators.choices import get_choices, \
get_likert_choices, get_multi_choices
class SurveyCreator(object):
def __init__(self,
survey_name: str,
survey_data_fn: Union[str, Path],
metadata_fn: Union[str, Path],
survey_id_col: Optional[str] = None,
survey_id: Optional = None,
pre_clean: Optional[Callable[[DataFrame], DataFrame]] = None):
"""
Create a new SurveyCreator.
:param survey_name: Name for the survey.
:param survey_data_fn: Path to the survey raw data file.
:param metadata_fn: Path to the survey metadata file.
:param survey_id_col: Optional name of the column that identifies the
survey in the metadata file.
:param survey_id: Optional value that identifies the survey in the
metadata file.
:param pre_clean: Optional method to run on the raw data file on read.
Used if there are some values in this specific raw
data file that need changing in some way.
"""
# now
self.survey_name: str = survey_name
self.survey_data_fn: Path = (
survey_data_fn if isinstance(survey_data_fn, Path)
else Path(survey_data_fn)
)
self.metadata_fn: Path = (
metadata_fn if isinstance(metadata_fn, Path)
else Path(metadata_fn)
)
self.survey_id_col: Optional[str] = survey_id_col
self.survey_id: Optional = survey_id
self.survey: Optional[Survey] = None
self.pre_clean = pre_clean
# later
self.survey_data: Optional[DataFrame] = None
self.questions_metadata: Optional[DataFrame] = None
self.attributes_metadata: Optional[DataFrame] = None
self.orders_metadata: Optional[DataFrame] = None
self.question_metadatas: Optional[List[QuestionMetadata]] = None
self.attribute_metadatas: Optional[List[AttributeMetadata]] = None
self.questions: Optional[List[Question]] = None
self.respondent_attributes: Optional[List[RespondentAttribute]] = None
self.respondents: Optional[List[Respondent]] = None
# focus vision
self.loop_mappings: Optional[DataFrame] = None
self.loop_expressions: Optional[DataFrame] = None
self.questions_metadata_original: Optional[DataFrame] = None
def run(self) -> Survey:
"""
Run all the steps to create the Survey object.
"""
self.read_survey_data()
self.read_metadata()
self.validate_metadata()
self.convert_metadata_to_objects()
self.clean_survey_data()
self.format_survey_data()
self.create_survey_components()
self.create_survey()
return self.survey
def read_survey_data(self):
"""
Read the raw survey data file and do any custom pre-cleaning.
"""
data = read_csv(self.survey_data_fn)
if self.pre_clean is not None:
data = self.pre_clean(data)
self.survey_data = data
def _filter_to_survey(self, metadata: DataFrame) -> DataFrame:
"""
Filter the given metadata to only contain metadata for the current
survey.
"""
if self.survey_id_col in metadata.columns:
metadata = metadata.loc[
(metadata[self.survey_id_col] == self.survey_id) |
(metadata[self.survey_id_col].isnull())
]
return metadata
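# Sketch of the rule above on an assumed toy frame: a row survives if it
# carries this survey's id or no id at all (i.e. metadata shared by all
# surveys):
#   meta = DataFrame({'survey': ['s1', 's2', None],
#                     'name': ['q1', 'q2', 'q3']})
#   # with survey_id_col='survey', survey_id='s1' -> rows 0 and 2 are kept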
def read_metadata(self):
"""
Read the question, attribute and order metadata from the Excel
metadata file.
"""
metadata = ExcelFile(self.metadata_fn)
# read metadata
questions_metadata = read_excel(metadata, 'questions')
attributes_metadata = read_excel(metadata, 'attributes')
orders_metadata = read_excel(metadata, 'orders')
orders_metadata['value'] = orders_metadata['value'].astype(str)
# filter to specified survey
if None not in (self.survey_id_col, self.survey_id):
questions_metadata = self._filter_to_survey(questions_metadata)
attributes_metadata = self._filter_to_survey(attributes_metadata)
orders_metadata = self._filter_to_survey(orders_metadata)
# check for clashes in question, attribute and category names
category_names = sorted(orders_metadata['category'].unique())
q_name_errors = []
for q_name in sorted(questions_metadata['name'].unique()):
if q_name in category_names:
q_name_errors.append(q_name)
if q_name_errors:
raise ValueError(
f'The following categories clash with question names. '
f'Rename questions or categories.\n{q_name_errors}'
)
a_name_errors = []
for a_name in sorted(attributes_metadata['name'].unique()):
if a_name in category_names:
a_name_errors.append(a_name)
if a_name_errors:
raise ValueError(
f'The following categories clash with attribute names. '
f'Rename attributes or categories.\n{a_name_errors}'
)
# create ordered choices for questions and attributes with shared
# choices
for meta in (attributes_metadata, questions_metadata):
for idx, row in meta.iterrows():
if | notnull(row['categories']) | pandas.notnull |
#!/home/brian/miniconda3/bin/python3.7
# encoding: utf-8
"""
Read the docs, obey PEP 8 and PEP 20 (Zen of Python, import this)
Build on: Spyder
Python ver: 3.7.3
Created on Thu Oct 17 21:14:04 2019
@author: brian
"""
# %% modules:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import plot_tree
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import MinMaxScaler
pd.set_option('display.max_rows', 50)
| pd.set_option('display.max_columns', 500) | pandas.set_option |
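# A scoped alternative to the global calls above (same option names; the
# use of a context manager here is a suggestion, not from the original):
#   with pd.option_context('display.max_rows', 50, 'display.max_columns', 500):
#       print(df)   # settings revert automatically on exit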
from datetime import datetime
import re
import unittest
import nose
from nose.tools import assert_equal
import numpy as np
from pandas.tslib import iNaT
from pandas import Series, DataFrame, date_range, DatetimeIndex, Timestamp
from pandas import compat
from pandas.compat import range, long, lrange, lmap, u
from pandas.core.common import notnull, isnull
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.core.config as cf
_multiprocess_can_split_ = True
def test_mut_exclusive():
msg = "mutually exclusive arguments: '[ab]' and '[ab]'"
with tm.assertRaisesRegexp(TypeError, msg):
com._mut_exclusive(a=1, b=2)
assert com._mut_exclusive(a=1, b=None) == 1
assert com._mut_exclusive(major=None, major_axis=None) is None
def test_is_sequence():
is_seq = com._is_sequence
assert(is_seq((1, 2)))
assert(is_seq([1, 2]))
assert(not is_seq("abcd"))
assert(not is_seq(u("abcd")))
assert(not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert(not is_seq(A()))
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
float_series = Series(np.random.randn(5))
obj_series = Series(np.random.randn(5), dtype=object)
assert(isinstance(notnull(float_series), Series))
assert(isinstance(notnull(obj_series), Series))
def test_isnull():
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert not isnull(np.inf)
assert not isnull(-np.inf)
float_series = Series(np.random.randn(5))
obj_series = Series(np.random.randn(5), dtype=object)
assert(isinstance(isnull(float_series), Series))
assert(isinstance(isnull(obj_series), Series))
# call on DataFrame
df = DataFrame(np.random.randn(10, 5))
df['foo'] = 'bar'
result = isnull(df)
expected = result.apply(isnull)
tm.assert_frame_equal(result, expected)
def test_isnull_tuples():
result = isnull((1, 2))
exp = np.array([False, False])
assert(np.array_equal(result, exp))
result = isnull([(False,)])
exp = np.array([[False]])
assert(np.array_equal(result, exp))
result = isnull([(1,), (2,)])
exp = np.array([[False], [False]])
assert(np.array_equal(result, exp))
# list of strings / unicode
result = isnull(('foo', 'bar'))
assert(not result.any())
result = isnull((u('foo'), u('bar')))
assert(not result.any())
def test_isnull_lists():
result = isnull([[False]])
exp = np.array([[False]])
assert(np.array_equal(result, exp))
result = isnull([[1], [2]])
exp = np.array([[False], [False]])
assert(np.array_equal(result, exp))
# list of strings / unicode
result = isnull(['foo', 'bar'])
assert(not result.any())
result = isnull([u('foo'), u('bar')])
assert(not result.any())
def test_isnull_datetime():
assert (not isnull(datetime.now()))
assert notnull(datetime.now())
idx = date_range('1/1/1990', periods=20)
assert(notnull(idx).all())
idx = np.asarray(idx)
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = isnull(idx)
assert(mask[0])
assert(not mask[1:].any())
def test_datetimeindex_from_empty_datetime64_array():
for unit in [ 'ms', 'us', 'ns' ]:
idx = DatetimeIndex(np.array([], dtype='datetime64[%s]' % unit))
assert(len(idx) == 0)
def test_nan_to_nat_conversions():
df = DataFrame(dict({
'A' : np.asarray(lrange(10),dtype='float64'),
'B' : Timestamp('20010101') }))
df.iloc[3:6,:] = np.nan
result = df.loc[4,'B'].value
assert(result == iNaT)
s = df['B'].copy()
s._data = s._data.setitem(tuple([slice(8,9)]),np.nan)
assert(isnull(s[8]))
# numpy < 1.7.0 is wrong
from distutils.version import LooseVersion
if LooseVersion(np.__version__) >= '1.7.0':
assert(s[8].value == np.datetime64('NaT').astype(np.int64))
def test_any_none():
assert(com._any_none(1, 2, 3, None))
assert(not com._any_none(1, 2, 3, 4))
def test_all_not_none():
assert(com._all_not_none(1, 2, 3, 4))
assert(not com._all_not_none(1, 2, 3, None))
assert(not com._all_not_none(None, None, None, None))
def test_repr_binary_type():
import string
letters = string.ascii_letters
btype = compat.binary_type
try:
raw = btype(letters, encoding=cf.get_option('display.encoding'))
except TypeError:
raw = btype(letters)
b = compat.text_type(compat.bytes_to_str(raw))
res = com.pprint_thing(b, quote_strings=True)
assert_equal(res, repr(b))
res = com.pprint_thing(b, quote_strings=False)
assert_equal(res, b)
def test_rands():
r = com.rands(10)
assert(len(r) == 10)
def test_adjoin():
data = [['a', 'b', 'c'],
['dd', 'ee', 'ff'],
['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = com.adjoin(2, *data)
assert(adjoined == expected)
def test_iterpairs():
data = [1, 2, 3, 4]
expected = [(1, 2),
(2, 3),
(3, 4)]
result = list(com.iterpairs(data))
assert(result == expected)
def test_split_ranges():
def _bin(x, width):
"return int(x) as a base2 string of given width"
return ''.join(str((x >> i) & 1) for i in range(width - 1, -1, -1))
def test_locs(mask):
nfalse = sum(np.array(mask) == 0)
remaining = 0
for s, e in com.split_ranges(mask):
remaining += e - s
assert 0 not in mask[s:e]
# make sure the total items covered by the ranges are a complete cover
assert remaining + nfalse == len(mask)
# exhaustively test all possible mask sequences of length 8
ncols = 8
for i in range(2 ** ncols):
cols = lmap(int, list(_bin(i, ncols))) # count up in base2
mask = [cols[i] == 1 for i in range(len(cols))]
test_locs(mask)
# base cases
test_locs([])
test_locs([0])
test_locs([1])
def test_indent():
s = 'a b c\nd e f'
result = com.indent(s, spaces=6)
assert(result == ' a b c\n d e f')
def test_banner():
ban = com.banner('hi')
assert(ban == ('%s\nhi\n%s' % ('=' * 80, '=' * 80)))
def test_map_indices_py():
data = [4, 3, 2, 1]
expected = {4: 0, 3: 1, 2: 2, 1: 3}
result = com.map_indices_py(data)
assert(result == expected)
def test_union():
a = [1, 2, 3]
b = [4, 5, 6]
union = sorted(com.union(a, b))
assert((a + b) == union)
def test_difference():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.difference(b, a))
assert([4, 5, 6] == inter)
def test_intersection():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.intersection(a, b))
assert(a == inter)
def test_groupby():
values = ['foo', 'bar', 'baz', 'baz2', 'qux', 'foo3']
expected = {'f': ['foo', 'foo3'],
'b': ['bar', 'baz', 'baz2'],
'q': ['qux']}
grouped = com.groupby(values, lambda x: x[0])
for k, v in grouped:
assert v == expected[k]
def test_is_list_like():
passes = ([], [1], (1,), (1, 2), {'a': 1}, set([1, 'a']), Series([1]),
Series([]), Series(['a']).str)
fails = (1, '2', object())
for p in passes:
assert com.is_list_like(p)
for f in fails:
assert not com.is_list_like(f)
def test_ensure_int32():
values = np.arange(10, dtype=np.int32)
result = com._ensure_int32(values)
assert(result.dtype == np.int32)
values = np.arange(10, dtype=np.int64)
result = com._ensure_int32(values)
assert(result.dtype == np.int32)
def test_ensure_platform_int():
# verify that when we create certain types of indices
# they remain the correct type under platform conversions
from pandas.core.index import Int64Index
# int64
x = Int64Index([1, 2, 3], dtype='int64')
assert(x.dtype == np.int64)
pi = com._ensure_platform_int(x)
assert(pi.dtype == np.int_)
# int32
x = Int64Index([1, 2, 3], dtype='int32')
assert(x.dtype == np.int32)
pi = com._ensure_platform_int(x)
assert(pi.dtype == np.int_)
# TODO: fix this broken test
# def test_console_encode():
# """
# On Python 2, if sys.stdin.encoding is None (IPython with zmq frontend)
# common.console_encode should encode things as utf-8.
# """
# if compat.PY3:
# raise nose.SkipTest
# with tm.stdin_encoding(encoding=None):
# result = com.console_encode(u"\u05d0")
# expected = u"\u05d0".encode('utf-8')
# assert (result == expected)
def test_is_re():
passes = re.compile('ad'),
fails = 'x', 2, 3, object()
for p in passes:
assert com.is_re(p)
for f in fails:
assert not com.is_re(f)
def test_is_recompilable():
passes = (r'a', u('x'), r'asdf', re.compile('adsf'),
u(r'\u2233\s*'), re.compile(r''))
fails = 1, [], object()
for p in passes:
assert com.is_re_compilable(p)
for f in fails:
assert not com.is_re_compilable(f)
class TestTake(unittest.TestCase):
# standard incompatible fill error
fill_error = re.compile("Incompatible type for fill_value")
_multiprocess_can_split_ = True
def test_1d_with_out(self):
def _test_dtype(dtype, can_hold_na):
data = np.random.randint(0, 2, 4).astype(dtype)
indexer = [2, 1, 0, 1]
out = np.empty(4, dtype=dtype)
com.take_1d(data, indexer, out=out)
expected = data.take(indexer)
tm.assert_almost_equal(out, expected)
indexer = [2, 1, 0, -1]
out = np.empty(4, dtype=dtype)
if can_hold_na:
com.take_1d(data, indexer, out=out)
expected = data.take(indexer)
expected[3] = np.nan
tm.assert_almost_equal(out, expected)
else:
with tm.assertRaisesRegexp(TypeError, self.fill_error):
com.take_1d(data, indexer, out=out)
# no exception o/w
data.take(indexer, out=out)
_test_dtype(np.float64, True)
_test_dtype(np.float32, True)
_test_dtype(np.uint64, False)
_test_dtype(np.uint32, False)
_test_dtype(np.uint16, False)
_test_dtype(np.uint8, False)
_test_dtype(np.int64, False)
_test_dtype(np.int32, False)
_test_dtype(np.int16, False)
_test_dtype(np.int8, False)
_test_dtype(np.object_, True)
_test_dtype(np.bool, False)
def test_1d_fill_nonna(self):
def _test_dtype(dtype, fill_value, out_dtype):
data = np.random.randint(0, 2, 4).astype(dtype)
indexer = [2, 1, 0, -1]
result = com.take_1d(data, indexer, fill_value=fill_value)
assert((result[[0, 1, 2]] == data[[2, 1, 0]]).all())
assert(result[3] == fill_value)
assert(result.dtype == out_dtype)
indexer = [2, 1, 0, 1]
result = | com.take_1d(data, indexer, fill_value=fill_value) | pandas.core.common.take_1d |
from abc import abstractmethod
from collections import OrderedDict
import os
import pickle
import re
from typing import Tuple, Union
import pandas as pd
import numpy as np
import gym
from gridworld.log import logger
from gridworld import ComponentEnv
from gridworld.utils import to_scaled, to_raw, maybe_rescale_box_space
from gridworld.agents.buildings.obs_space import make_obs_space
from gridworld.agents.buildings import defaults
from gridworld.agents.buildings import five_zone_rom_dynamics as dyn
# Below are control variables' boundary.
MAX_FLOW_RATE = [2.2, 2.2, 2.2, 2.2, 3.2] # Max flow rate for each individual zone
MIN_FLOW_RATE = [.22, .22, .22, .22, .32] # Max flow rate for each individual zone
MAX_TOTAL_FLOW_RATE = 10.0 # Total flow rate for all zones should be lower than 10 kg/sec.
MAX_DISCHARGE_TEMP = 16.0 # Max temp of air leaving chiller
MIN_DISCHARGE_TEMP = 10.0 # Min temp of air leaving chiller
DEFAULT_COMFORT_BOUNDS = (22., 28.) # Temps between these values are considered "comfortable"
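# Sketch (an assumption, not from this module) of how an action vector could
# be forced into the bounds above before being applied:
#   flow = np.clip(flow, MIN_FLOW_RATE, MAX_FLOW_RATE)
#   if flow.sum() > MAX_TOTAL_FLOW_RATE:
#       flow *= MAX_TOTAL_FLOW_RATE / flow.sum()
#   t_discharge = float(np.clip(t_discharge, MIN_DISCHARGE_TEMP, MAX_DISCHARGE_TEMP))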
def load_data(start_time: str = None, end_time: str = None) -> Tuple[pd.DataFrame, dict]:
"""Returns exogenous data dataframe, and state space model (per-zone) dict."""
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
df = pd.read_csv(os.path.join(THIS_DIR, "data/exogenous_data.csv"), index_col=0)
df.index = pd.DatetimeIndex(df.index)
start_time = | pd.Timestamp(start_time) | pandas.Timestamp |
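# Once start_time/end_time are Timestamps, the exogenous frame is naturally
# restricted by label-based time slicing (a sketch; the original body of
# load_data continues beyond this excerpt):
#   df = df.loc[start_time:end_time]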
import numpy as np
import pandas as pd
from scipy.optimize import least_squares
from scipy.optimize import OptimizeResult
from numba.typed import List
from mspt.diff.diffusion_analysis_functions import calc_msd, calc_jd_nth, lin_fit_msd_offset, lin_fit_msd_offset_iterative
from mspt.diff.diffusion_analysis_functions import fit_jdd_cumul_off, fit_jdd_cumul_off_2c, fit_msd_jdd_cumul_off_global, fit_msd_jdd_cumul_off_global_2c
from mspt.diff.diffusion_analysis_jacobians import jdd_jac, jdd_jac_2c, msd_jdd_jac, msd_jdd_jac_2c
def fit_JDD_MSD(trajectory_id, trajs_df, frame_rate=199.8, pixel_size=84.4, n_timelags_MSD=None, n_timelags_JDD=None):
dict_traj = dict()
dict_jdd_msd = dict()
for d, i in enumerate(trajectory_id):
traj = trajs_df[trajs_df['particle']==i]
ti = np.asarray(traj['frame'])
c = np.asarray(-traj['contrast'])
x = np.asarray(traj['x']) * pixel_size / 1000.0 # in microns
y = np.asarray(traj['y']) * pixel_size / 1000.0 # in microns
dict_traj[d] = (traj['x'].values,traj['y'].values,-traj['contrast'].values)
length = len(x)
med_c = np.median(c)
mean_c = np.mean(c)
center_time = np.median(ti)
### MSD fit ##########################
MSD = calc_msd(x,y)
if n_timelags_MSD is None:
res_lsq_msd = lin_fit_msd_offset_iterative(MSD[1:], # MSD[0] is 0
1./frame_rate, # time lag in seconds
max_it=10)
else:
slope, offset, SSR = lin_fit_msd_offset(MSD[1:], # MSD[0] is 0
1./frame_rate) # time lag in seconds
res_lsq_msd = [slope/4., offset/4., SSR[0], n_timelags_MSD]
# Check if first 3 MSD data points are monotonically increasing
if np.any(np.diff(MSD)[:3]<0)==False:
MSD_check = True
else:
MSD_check = False
# Truncate MSD data
MSD = MSD[1:res_lsq_msd[3]+1]
# Set number of time lags included in JDD fitting
if n_timelags_JDD is None:
n_tau_JDD = res_lsq_msd[3]
else:
n_tau_JDD = n_timelags_JDD
# Precalculate JDDs for different lag times for later use
JDDs = list()
for tau in np.arange(1,n_tau_JDD+1,1):
jdd = calc_jd_nth(x, y, n=tau)
jdd_sorted = np.empty((jdd.size+1,),dtype=np.float64)
jdd_sorted[0] = 0.
jdd_sorted[1:] = np.sort(jdd)
JDDs.append(jdd_sorted)
JDDs = List(JDDs)
######################################
### JDD fit: 1 component #############
jdd_1c_flag = False
try:
res_lsq_jdd = least_squares(fit_jdd_cumul_off,
np.array([1.0,0.005]),
jac=jdd_jac,
args=(JDDs,length,1./frame_rate,n_tau_JDD),
method = 'lm',
x_scale='jac')
except:
try: # Restrict the offset parameter if fit failed
jdd_1c_flag = True # flag trajectory if fit failed with initial boundary conditions
bounds_x0_1c = ([0.00001, -0.03],[np.inf, np.inf])
res_lsq_jdd = least_squares(fit_jdd_cumul_off,
np.array([0.5,0.005]),
jac=jdd_jac,
args=(JDDs,length,1./frame_rate,n_tau_JDD),
bounds = bounds_x0_1c,
method = 'dogbox',
x_scale='jac')
except: # Fill results dict manually if second fit failed
res_lsq_jdd = OptimizeResult( {'x' : np.full(2, np.nan),
'fun': np.array([np.nan]),
'success': False} )
######################################
### JDD fit: 2 components ############
jdd_2c_flag = False
try:
res_lsq_jdd_2c = least_squares(fit_jdd_cumul_off_2c,
np.array([0.1,1.0,0.5,0.005]),
jac=jdd_jac_2c,
args=(JDDs,length,1./frame_rate,n_tau_JDD),
method = 'lm',
x_scale='jac')
except:
try: # Restrict the offset parameter if fit failed
jdd_2c_flag = True # flag trajectory if fit failed with initial boundary conditions
bounds_x0_2c = ([0.00001, 0.00001, 0.0,-0.03],[np.inf, np.inf, 1.0,np.inf])
res_lsq_jdd_2c = least_squares(fit_jdd_cumul_off_2c,
np.array([0.1,1.0,0.5,0.005]),
jac=jdd_jac_2c,
args=(JDDs,length,1./frame_rate,n_tau_JDD),
bounds = bounds_x0_2c,
method = 'dogbox',
x_scale='jac')
except: # Fill results dict manually if second fit failed
res_lsq_jdd_2c = OptimizeResult( {'x' : np.full(4, np.nan),
'fun': np.array([np.nan]),
'success': False} )
######################################
### Global fit MSD & JDD: 1 component
msd_jdd_1c_flag = False
try:
res_lsq_msd_jdd_1c = least_squares(fit_msd_jdd_cumul_off_global,
np.array([1.0,0.004]),
jac=msd_jdd_jac,
args=(JDDs,MSD,length,1./frame_rate,n_tau_JDD),
method = 'lm',
x_scale='jac')
except:
try: # Restrict the offset parameter if fit failed
msd_jdd_1c_flag = True
bounds_x0_1c = ([0.00001, -0.03],[np.inf, np.inf])
res_lsq_msd_jdd_1c = least_squares(fit_msd_jdd_cumul_off_global,
np.array([1.0,0.004]),
jac=msd_jdd_jac,
args=(JDDs,MSD,length,1./frame_rate,n_tau_JDD),
bounds = bounds_x0_1c,
method = 'dogbox',
x_scale='jac')
except: # Fill results dict manually if second fit failed
res_lsq_msd_jdd_1c = OptimizeResult( {'x' : np.full(2, np.nan),
'fun': np.array([np.nan]),
'success': False} )
######################################
### Global fit MSD & JDD: 2 components
msd_jdd_2c_flag = False
try:
res_lsq_msd_jdd_2c = least_squares(fit_msd_jdd_cumul_off_global_2c,
np.array([0.1,1.0,0.5,0.004]),
jac=msd_jdd_jac_2c,
args=(JDDs,MSD,length,1./frame_rate,n_tau_JDD),
method = 'lm',
x_scale='jac')
except:
try: # Restrict the offset parameter if fit failed
msd_jdd_2c_flag = True
bounds_x0_2c = ([0.00001, 0.00001, 0.0,-0.03],[np.inf, np.inf, 1.0,np.inf])
res_lsq_msd_jdd_2c = least_squares(fit_msd_jdd_cumul_off_global_2c,
np.array([0.1,1.0,0.5,0.004]),
jac=msd_jdd_jac_2c,
args=(JDDs,MSD,length,1./frame_rate,n_tau_JDD),
bounds = bounds_x0_2c,
method = 'trf',
x_scale='jac')
except: # Fill results dict manually if second fit failed
res_lsq_msd_jdd_2c = OptimizeResult( {'x' : np.full(4, np.nan),
'fun': np.array([np.nan]),
'success': False} )
######################################
tmp_array = np.full((34),np.nan)
### Trajectory statistics ##############################################################################################################
tmp_array[0] = length # Trajectory length
tmp_array[1] = center_time # Center frame of trajectory
tmp_array[2] = med_c # Median contrast of trajectory
tmp_array[3] = mean_c # Mean contrast of trajectory
########################################################################################################################################
### MSD fit ############################################################################################################################
tmp_array[4] = res_lsq_msd[0] # Diffusion coefficient
tmp_array[5] = res_lsq_msd[1] # Localization uncertainty squared
if res_lsq_msd[3] == 2:
tmp_array[6] = 0 # Reduced chi squared = 0, exact solution (line through 2 datapoints)
else:
tmp_array[6] = res_lsq_msd[2]/(res_lsq_msd[3] - 2.) # Reduced chi squared
tmp_array[7] = MSD_check # True if first 3 MSD data points are monotonically increasing
########################################################################################################################################
### JDD fit: 1 component ###############################################################################################################
tmp_array[8] = res_lsq_jdd.x[0] # Diffusion coefficient
tmp_array[9] = res_lsq_jdd.x[1] # Localization uncertainty squared
tmp_array[10] = np.sum(res_lsq_jdd.fun**2)/(len(res_lsq_jdd.fun) - 2.) # Reduced chi squared
tmp_array[11] = res_lsq_jdd.success # True if fit successful
tmp_array[12] = jdd_1c_flag # True if fit with initial boundary conditions failed
########################################################################################################################################
### JDD fit: 2 components ##############################################################################################################
tmp_array[13] = res_lsq_jdd_2c.x[0] # Diffusion coefficient component 1
tmp_array[14] = res_lsq_jdd_2c.x[1] # Diffusion coefficient component 2
tmp_array[15] = res_lsq_jdd_2c.x[2] # Amplitude component 1
tmp_array[16] = 1.0 - res_lsq_jdd_2c.x[2] # Amplitude component 2
tmp_array[17] = res_lsq_jdd_2c.x[3] # Localization uncertainty squared
tmp_array[18] = np.sum(res_lsq_jdd_2c.fun**2)/(len(res_lsq_jdd_2c.fun) - 4.) # Reduced chi squared
tmp_array[19] = res_lsq_jdd_2c.success # True if fit successful
tmp_array[20] = jdd_2c_flag # True if fit with initial boundary conditions failed
########################################################################################################################################
### Global fit MSD & JDD: 1 component ##################################################################################################
tmp_array[21] = res_lsq_msd_jdd_1c.x[0] # Diffusion coefficient
tmp_array[22] = res_lsq_msd_jdd_1c.x[1] # Localization uncertainty squared
tmp_array[23] = np.sum((res_lsq_msd_jdd_1c.fun[:])**2)/float(len(x) - 2) # Reduced chi squared
tmp_array[24] = res_lsq_msd_jdd_1c.success # True if fit successful
tmp_array[25] = msd_jdd_1c_flag # True if fit with initial boundary conditions failed
########################################################################################################################################
### Global fit MSD & JDD: 2 components #################################################################################################
tmp_array[26] = res_lsq_msd_jdd_2c.x[0] # Diffusion coefficient component 1
tmp_array[27] = res_lsq_msd_jdd_2c.x[1] # Diffusion coefficient component 2
tmp_array[28] = res_lsq_msd_jdd_2c.x[2] # Amplitude component 1
tmp_array[29] = 1.0 - res_lsq_msd_jdd_2c.x[2] # Amplitude component 2
tmp_array[30] = res_lsq_msd_jdd_2c.x[3] # Localization uncertainty squared
tmp_array[31] = np.sum((res_lsq_msd_jdd_2c.fun[:])**2)/float(len(x) - 4) # Reduced chi squared
tmp_array[32] = res_lsq_msd_jdd_2c.success # True if fit successful
tmp_array[33] = msd_jdd_2c_flag # True if fit with initial boundary conditions failed
########################################################################################################################################
dict_jdd_msd[d] = tmp_array
df_jdd_msd = pd.DataFrame.from_dict(dict_jdd_msd,
orient='index',
columns=['len','center frame', 'med_c','mean_c',
'D_MSD','off_MSD', 'chi_MSD' ,'MSD_check',
'D_JDD', 'off_JDD', 'chi_JDD', 'fit_JDD_success', 'flag_JDD_c1',
'D_1_JDD_2c', 'D_2_JDD_2c', 'A_1_JDD_2c', 'A_2_JDD_2c', 'off_JDD_2c', 'chi_JDD_2c', 'fit_JDD_2c_success', 'flag_JDD_2c',
'D_MSD_JDD','off_MSD_JDD', 'chi_MSD_JDD','fit_MSD_JDD_1c_success', 'flag_MSD_JDD_1c',
'D_1_MSD_JDD_2c','D_2_MSD_JDD_2c','A_1_MSD_JDD_2c','A_2_MSD_JDD_2c', 'off_MSD_JDD_2c', 'chi_MSD_JDD_2c' , 'fit_MSD_JDD_2c_success', 'flag_MSD_JDD_2c'])
dtypes = {'len': np.uint32,
'MSD_check': np.bool_,
'fit_JDD_success': np.bool_,
'flag_JDD_c1': np.bool_,
'fit_JDD_2c_success': np.bool_,
'flag_JDD_2c': np.bool_,
'fit_MSD_JDD_1c_success': np.bool_,
'flag_MSD_JDD_1c': np.bool_,
'fit_MSD_JDD_2c_success': np.bool_,
'flag_MSD_JDD_2c': np.bool_}
df_jdd_msd = df_jdd_msd.astype(dtypes)
# Calculate effective diffusion coefficient for 2 component JDD
df_jdd_msd['Deff_JDD_2c'] = np.where( ( (df_jdd_msd['fit_JDD_2c_success']==True) &
(df_jdd_msd['D_1_JDD_2c']>0) &
(df_jdd_msd['D_2_JDD_2c']>0) &
(df_jdd_msd['A_1_JDD_2c'].between(0,1)) ),
(df_jdd_msd['A_1_JDD_2c'] * df_jdd_msd['D_1_JDD_2c'] +
df_jdd_msd['A_2_JDD_2c'] * df_jdd_msd['D_2_JDD_2c'] ),
np.nan )
# Select 1 or 2 component JDD fit based on reduced chi squared criteria
# In case of non-physical fit results, choose 1 component JDD
df_jdd_msd['Deff_JDD'] = np.where( ( (df_jdd_msd['chi_JDD_2c']<df_jdd_msd['chi_JDD']) &
(~df_jdd_msd['Deff_JDD_2c'].isna()) ),
df_jdd_msd['Deff_JDD_2c'],
df_jdd_msd['D_JDD'])
# Calculate effective diffusion coefficient for 2 component global MSD and JDD fit
df_jdd_msd['Deff_MSD_JDD_2c'] = np.where( ( (df_jdd_msd['fit_MSD_JDD_2c_success']==True) &
(df_jdd_msd['D_1_MSD_JDD_2c']>0) &
(df_jdd_msd['D_2_MSD_JDD_2c']>0) &
(df_jdd_msd['A_1_MSD_JDD_2c'].between(0,1)) ),
(df_jdd_msd['A_1_MSD_JDD_2c'] * df_jdd_msd['D_1_MSD_JDD_2c'] +
df_jdd_msd['A_2_MSD_JDD_2c'] * df_jdd_msd['D_2_MSD_JDD_2c'] ),
np.nan)
# Select 1 or 2 component global MSD and JDD fit based on reduced chi squared criteria
# In case of non-physical fit results, choose 1 component JDD
df_jdd_msd['Deff_MSD_JDD'] = np.where( ( (df_jdd_msd['chi_MSD_JDD_2c']<df_jdd_msd['chi_MSD_JDD']) &
(~df_jdd_msd['Deff_MSD_JDD_2c'].isna()) ),
df_jdd_msd['Deff_MSD_JDD_2c'],
df_jdd_msd['D_MSD_JDD'])
# Create DataFrame containing the whole trajectory information (list of x positions, y positions, and contrasts) in three columns
traj_df_temp = pd.DataFrame.from_dict(dict_traj,
orient='index',
columns=['x pos','y pos','contrast'])
# Set dtype to object as multiple values are contained in each cell
traj_df_temp = traj_df_temp.astype(object)
# Merge DataFrames horizontally
df_jdd_msd = | pd.concat([df_jdd_msd, traj_df_temp], axis=1) | pandas.concat |
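# Worked example of the effective-D rule used in the columns above
# (fit values are hypothetical):
#   A_1, D_1 = 0.3, 0.05   # fraction, um^2/s
#   A_2, D_2 = 0.7, 1.20
#   D_eff = A_1 * D_1 + A_2 * D_2   # = 0.015 + 0.840 = 0.855 um^2/s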
import numpy as np
import pytest
from pandas import (
Categorical,
CategoricalDtype,
NaT,
Timestamp,
array,
to_datetime,
)
import pandas._testing as tm
class TestAstype:
def test_astype_str_int_categories_to_nullable_int(self):
# GH#39616
dtype = CategoricalDtype([str(i) for i in range(5)])
codes = np.random.randint(5, size=20)
arr = Categorical.from_codes(codes, dtype=dtype)
res = arr.astype("Int64")
expected = array(codes, dtype="Int64")
tm.assert_extension_array_equal(res, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_astype(self, ordered):
# string
cat = Categorical(list("abbaaccc"), ordered=ordered)
result = cat.astype(object)
expected = np.array(cat)
tm.assert_numpy_array_equal(result, expected)
msg = r"Cannot cast object dtype to float64"
with pytest.raises(ValueError, match=msg):
cat.astype(float)
# numeric
cat = Categorical([0, 1, 2, 2, 1, 0, 1, 0, 2], ordered=ordered)
result = cat.astype(object)
expected = np.array(cat, dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = cat.astype(int)
expected = np.array(cat, dtype="int")
tm.assert_numpy_array_equal(result, expected)
result = cat.astype(float)
expected = np.array(cat, dtype=float)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype_ordered", [True, False])
@pytest.mark.parametrize("cat_ordered", [True, False])
def test_astype_category(self, dtype_ordered, cat_ordered):
# GH#10696/GH#18593
data = list("abcaacbab")
cat = Categorical(data, categories=list("bac"), ordered=cat_ordered)
# standard categories
dtype = CategoricalDtype(ordered=dtype_ordered)
result = cat.astype(dtype)
expected = Categorical(data, categories=cat.categories, ordered=dtype_ordered)
tm.assert_categorical_equal(result, expected)
# non-standard categories
dtype = CategoricalDtype(list("adc"), dtype_ordered)
result = cat.astype(dtype)
expected = Categorical(data, dtype=dtype)
tm.assert_categorical_equal(result, expected)
if dtype_ordered is False:
# dtype='category' can't specify ordered, so only test once
result = cat.astype("category")
expected = cat
tm.assert_categorical_equal(result, expected)
def test_astype_object_datetime_categories(self):
# GH#40754
cat = Categorical(to_datetime(["2021-03-27", NaT]))
result = cat.astype(object)
expected = np.array([ | Timestamp("2021-03-27 00:00:00") | pandas.Timestamp |
import math as math
import numpy as np
import re
import pandas as pd
import spimcube.functions as fct
def initialization(path, basename):
"""Return a dictionary with: NStepsX, NStepsY, Npixel, Matrix, tab_of_lambda, Xstep, Ystep, Xrange, Yrange."""
# create the complete file name with extension
data = path + basename + ".dat"
Lambda = path + basename + ".lambda"
init = path + basename + ".ini"
#___load 'data' file, extract the first three values and create a list without them___
Rawfile = np.fromfile(data, dtype='>i4', count=-1, sep="")
NStepsX = Rawfile[0]
NStepsY = Rawfile[1]
CCD_nb_pixels = Rawfile[2]
Data = Rawfile[3:]
#___create a 3D matrix in desired dimensions and fill it with the elements from the 'Data' binary file___
Matrix = np.reshape(Data, (NStepsY, NStepsX, CCD_nb_pixels))
#normalization
#Matrix = Matrix / np.amax(Matrix)
#___load Lambda file and convert in float___
LAMBDA = np.loadtxt(Lambda, dtype=np.str)
LAMBDAf = [0 for index in range(len(LAMBDA))]
for index in range(len(LAMBDA)):
LAMBDAf[index] = float(LAMBDA[index].replace("," , "."))
#___generate X & Y 1D array of scanned positions___
Xstep = fct.get_value("PasX", init)
Ystep = fct.get_value("PasY", init)
Xrange = np.linspace(Xstep, Xstep*NStepsX, NStepsX, dtype='>f4') # building 1D array of X position
Yrange = np.linspace(Ystep, Ystep*NStepsY, NStepsY, dtype='>f4') # and Y position in µm
return {'NStepsX':NStepsX, 'NStepsY':NStepsY ,'Npixel':CCD_nb_pixels, 'Matrix': Matrix, 'tab_of_lambda':LAMBDAf, 'Xstep':Xstep, 'Ystep':Ystep, 'Xrange':Xrange, 'Yrange':Yrange}
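# Usage sketch (path and base name are hypothetical): the spectrum recorded
# at scan position (ix, iy) is one row of the returned cube.
#   cube = initialization('D:/maps/', 'sample01')
#   spectrum = cube['Matrix'][iy, ix, :]   # length cube['Npixel']
#   wavelengths = cube['tab_of_lambda']    # same length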
def initialization_winspec32(path, basename, NStepsX=None, NStepsY=None, Xstep=None, Ystep=None, CCD_nb_pixels=1340):
"""
Return a dictionary with: NStepsX, NStepsY, Npixel, Matrix, tab_of_lambda, Xstep, Ystep, Xrange, Yrange
Use Regex for NStepsX/NStepsX/Xstep/Ystep providing that the keywords 'steps' and 'mum' are used in the filename.
"""
filename = path + basename + ".txt"
if NStepsX is None or NStepsY is None:
pattern = r"(\w*)x(\w*)" + 'steps' # specific to how we write the file name
values = re.findall(pattern, basename)
NStepsX = int(values[0][0])
NStepsY = int(values[0][1])
if Xstep is None or Ystep is None:
pattern = r"(\w*)x(\w*)" + 'mum' # specific to how we write the file name
values = re.findall(pattern, basename)
Xstep = int(values[0][0])
Ystep = int(values[0][1])
Xrange = np.linspace(Xstep, Xstep*NStepsX, NStepsX, dtype='>f4') # building 1D array of X position
Yrange = np.linspace(Ystep, Ystep*NStepsY, NStepsY, dtype='>f4') # and Y position in µm
column_1, column_2 = np.loadtxt(filename, unpack=True)
tab_of_lambda = column_1[0:CCD_nb_pixels]
Matrix = column_2.reshape(NStepsX, NStepsY, CCD_nb_pixels)
return {'NStepsX':NStepsX, 'NStepsY':NStepsY ,'Npixel':CCD_nb_pixels, 'Matrix': Matrix, 'tab_of_lambda':tab_of_lambda, 'Xstep':Xstep, 'Ystep':Ystep, 'Xrange':Xrange, 'Yrange':Yrange}
def define_space_range(Xmin, Xmax, Ymin, Ymax, RangeXY):
"""Return a dictionary with: Xpmin, Xpmax, Ypmin, Ypmax."""
#___provide pixel numbers and corresponding values for plotting image in the desired space area___
SpaceArea = [Xmin, Xmax, Ymin, Ymax] # define plot area in µm
PixelArea = [Xpmin, Xpmax, Ypmin, Ypmax] = np.zeros((4, 2), dtype='>f4') # define plot area in pixel
for i, parameter in enumerate(SpaceArea):
PixelArea[i] = fct.find_nearest(RangeXY[0 if i<2 else 1], parameter) #function defined in package.functions
return {'Xpmin':Xpmin, 'Xpmax':Xpmax, 'Ypmin':Ypmin, 'Ypmax':Ypmax}
def initialization_Bsweep(path, basename, B_init, B_final, B_step, CCD_nb_pixels=1340):
"""Return a dictionary with: Matrix, tab_of_lambda, B_range, B_step, Npixel."""
filename = path + basename + ".txt"
number_Bsweep = int(np.rint((B_final - B_init) / B_step + 1))
B_range = np.linspace(B_init, B_final, number_Bsweep, dtype='>f4') # building 1D array of B values
column_1, column_2 = np.loadtxt(filename, unpack=True)
tab_of_lambda = column_1[0:CCD_nb_pixels]
Matrix = column_2.reshape(number_Bsweep, CCD_nb_pixels)
return {'Matrix': Matrix, 'tab_of_lambda':tab_of_lambda, 'B_range':B_range, 'B_step':B_step, 'Npixel':CCD_nb_pixels}
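# Usage sketch (field values are hypothetical): each row of 'Matrix' is one
# spectrum, aligned with the matching entry of 'B_range'.
#   sweep = initialization_Bsweep(path, base, B_init=0.0, B_final=9.0, B_step=0.5)
#   for B, spectrum in zip(sweep['B_range'], sweep['Matrix']):
#       ...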
def df_from_bsweep(folder, file, B_init, B_final, step, CCD_nb_pixels=1340):
"""
Return a dataframe from a sweep in magnetic field, i.e from a file containing several spectra at different field.
"""
number_of_steps = int((B_final - B_init)/step + 1)
B_values = np.linspace(B_init, B_final, number_of_steps, dtype='>f4')
# Extract the spectra.
col_1, col_2 = np.loadtxt(folder+file+'.txt', unpack=True)
col_1 = np.reshape(col_1, (number_of_steps, CCD_nb_pixels))
col_2 = np.reshape(col_2, (number_of_steps, CCD_nb_pixels))
# Create the list of the different quantities.
wavelength = [list(x) for x in col_1]
intensity = [list(y) for y in col_2]
energy = [list(fct.nm_eV(x)) for x in wavelength]
df = | pd.DataFrame(data={'B': B_values, 'wavelength': wavelength, 'energy': energy, 'intensity': intensity}) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# dataset src: https://data.london.gov.uk/dataset/smartmeter-energy-use-data-in-london-households
# file: UKPN-LCL-smartmeter-sample (986.99 kB)
# In[2]:
# A Time series is a collection of data points indexed,
# listed or graphed in time order.
# Most commonly, a time series is a sequence taken at
# successive equally spaced points in time.
# Thus it is a sequence of discrete-time data.
# In[3]:
# Load libraries
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import pandas as pd
import numpy as np
# In[5]:
raw_data_filename = "UKPN-LCL-smartmeter-sample.csv"
raw_data_df = pd.read_csv(
raw_data_filename,
header=0
)
# In[32]:
display(raw_data_df.shape)
display(raw_data_df.head(3))
display(raw_data_df.tail(3))
display(raw_data_df.dtypes)
display(raw_data_df.columns.values)
display(raw_data_df.describe(include='all'))
display(raw_data_df.isnull().sum())
display(raw_data_df[raw_data_df['KWH/hh (per half hour) '] == 'Null'].shape) # (1, 6)
# In[47]:
raw_date_kwh_df = raw_data_df[['DateTime', 'KWH/hh (per half hour) ']].copy()
raw_date_kwh_df = raw_date_kwh_df.rename(columns={"KWH/hh (per half hour) ": "KWH_hh"})
# In[49]:
# fix row where "KWH_hh" equals 'Null'
display(raw_date_kwh_df[raw_date_kwh_df['KWH_hh'] == 'Null']) # the single offending row (index 2982)
raw_date_kwh_df = raw_date_kwh_df.drop([2982])
# In[50]:
# fix dtypes
raw_date_kwh_df.loc[:, 'DateTime'] = | pd.to_datetime(raw_date_kwh_df.loc[:, 'DateTime']) | pandas.to_datetime |
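# In[51]:
# A plausible next step (an addition, not from the original notebook):
# aggregate the half-hourly readings to daily totals.
daily_kwh = (raw_date_kwh_df
             .astype({'KWH_hh': 'float64'})
             .set_index('DateTime')
             .resample('D')['KWH_hh'].sum())
daily_kwh.head()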
import re
from pathlib import Path
from typing import List
import pandas as pd
from scipy.io import arff
from common import write_arff_file
def create_index_partitions() -> List[int]:
partitions = []
for i in range(10, 200, 10):
partitions.append(i)
for i in range(75, 126):
partitions.append(i)
return partitions
def create_partitions(dataset_path: Path):
data, a = arff.loadarff(dataset_path)
df = | pd.DataFrame(data) | pandas.DataFrame |
import ast
import argparse
import warnings
import logging
import os
import json
import boto3
import pickle
# from prettytable import PrettyTable
import subprocess
import sys
from urllib.parse import urlparse
#os.system('pip install autogluon')
# from autogluon import TabularPrediction as task
import pandas as pd # this should come after the pip install.
logging.basicConfig(level=logging.DEBUG)
logging.info(subprocess.call('ls -lR /opt/ml/input'.split()))
# with warnings.catch_warnings():
# warnings.filterwarnings('ignore', category=DeprecationWarning)
# from prettytable import PrettyTable
# import autogluon as ag
# from autogluon import TabularPrediction as task
# from autogluon.task.tabular_prediction import TabularDataset
from prettytable import PrettyTable
import autogluon as ag
from autogluon import TabularPrediction as task
from autogluon.task.tabular_prediction import TabularDataset
def __load_input_data(path: str):
"""
Load training data as dataframe
:param path:
:return: DataFrame
"""
input_data_files = os.listdir(path)
try:
input_dfs = [pd.read_csv(f'{path}/{data_file}') for data_file in input_data_files]
return task.Dataset(df=pd.concat(input_dfs))
except:
print(f'No csv data in {path}!')
return None
def format_for_print(df):
table = PrettyTable(list(df.columns))
for row in df.itertuples():
table.add_row(row[1:])
return str(table)
def du(path):
"""disk usage in human readable format (e.g. '2,1GB')"""
return subprocess.check_output(['du','-sh', path]).split()[0].decode('utf-8')
# ------------------------------------------------------------ #
# Training methods #
# ------------------------------------------------------------ #
def train(args):
# SageMaker passes num_cpus, num_gpus and other args we can use to tailor training to
# the current container environment, but here we just use simple cpu context.
model_dir = args.model_dir
# target = args.label
# presets = args.presets
# Load training and validation data
print(f'Train files: {os.listdir(args.train)}')
train_data = __load_input_data(args.train)
columns = train_data.columns.tolist()
column_dict = {"columns":columns}
with open('columns.pkl', 'wb') as f:
pickle.dump(column_dict, f)
subsample_size = int(args.train_rows) # subsample subset of data for faster demo, try setting this to much larger values
train_data = train_data.sample(n=subsample_size, random_state=0)
# predictor = task.fit(train_data = train_data, label=target,
# output_directory=model_dir,
# presets = presets)
# Train models
predictor = task.fit(
train_data=train_data,
output_directory= model_dir,
**args.fit_args,
)
# Results summary
predictor.fit_summary(verbosity=1)
# Optional test data
if args.test:
print(f'Test files: {os.listdir(args.test)}')
test_data = __load_input_data(args.test)
# Test data must be labeled for scoring
# Leaderboard on test data
print('Running model on test data and getting Leaderboard...')
leaderboard = predictor.leaderboard(dataset=test_data, silent=True)
print(format_for_print(leaderboard), end='\n\n')
# Feature importance on test data
# Note: Feature importance must be calculated on held-out (test) data.
# If calculated on training data it will be biased due to overfitting.
if args.feature_importance:
print('Feature importance:')
# Increase rows to print feature importance
| pd.set_option('display.max_rows', 500) | pandas.set_option |
# -*- coding: utf-8 -*-
"""
Created on Sun May 21 13:13:26 2017
@author: ning
"""
import pandas as pd
import os
import numpy as np
import matplotlib.pyplot as plt
import pickle
try:
function_dir = 'D:\\NING - spindle\\Spindle_by_Graphical_Features'
os.chdir(function_dir)
except:
function_dir = 'C:\\Users\\ning\\OneDrive\\python works\\Spindle_by_Graphical_Features'
os.chdir(function_dir)
import eegPipelineFunctions
try:
file_dir = 'D:\\NING - spindle\\training set\\road_trip\\'
# file_dir = 'D:\\NING - spindle\\training set\\road_trip_29_channels\\'
os.chdir(file_dir)
except:
file_dir = 'C:\\Users\\ning\\Downloads\\road_trip\\'
# file_dir = 'C:\\Users\\ning\\Downloads\\road_trip_29_channels\\'
os.chdir(file_dir)
if False:
signal_features_dict = {}
graph_features_dict = {}
for directory_1 in [f for f in os.listdir(file_dir) if ('epoch_length' in f)]:
sub_dir = file_dir + directory_1 + '\\'
epoch_length = directory_1[-3]
os.chdir(sub_dir)
df_cc, df_pli, df_plv, df_signal,df_graph = [],[],[],[],[]
for sub_fold in os.listdir(sub_dir):
sub_fold_dir = sub_dir + sub_fold + '\\'
os.chdir(sub_fold_dir)
cc_features, pli_features, plv_features, signal_features = [ | pd.read_csv(f) | pandas.read_csv |
import gzip
import pickle5 as pickle
# import pickle
from collections import defaultdict
import numpy as np
import pandas as pd
import os
from copy import deepcopy
import datetime
import neat
from tensorflow.python.framework.ops import default_session
from scipy.optimize import curve_fit
from ongoing.prescriptors.base import BasePrescriptor, PRED_CASES_COL, CASES_COL, NPI_COLUMNS, NPI_MAX_VALUES
import ongoing.prescriptors.base as base
path = '5days-results-2d-1-hidden'
num_checkpoint = 26
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
CHECKPOINTS_PREFIX = os.path.join(ROOT_DIR, 'neat-checkpoint-')
# CONFIG_FILE = os.path.join(ROOT_DIR, '{}/config-prescriptor-multiobjective'.format(path))
CONFIG_FILE = os.path.join(ROOT_DIR, '{}/config-prescriptor-{}'.format(path, num_checkpoint))
TMP_PRED_FILE_NAME = os.path.join(ROOT_DIR, 'tmp_predictions_for_prescriptions', 'preds.csv')
TMP_PRESCRIPTION_FILE = os.path.join(ROOT_DIR, 'tmp_prescription.csv')
# Number of days the prescriptors will look at in the past.
# Larger values here may make convergence slower, but give
# prescriptors more context. The number of inputs of each neat
# network will be NB_LOOKBACK_DAYS * (NPI_COLUMNS + 1) + NPI_COLUMNS.
# The '1' is for previous case data, and the final NPI_COLUMNS
# is for IP cost information.
NB_LOOKBACK_DAYS = 21
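# Illustrative sketch (assumption: the standard 12-column Oxford NPI set,
# i.e. len(NPI_COLUMNS) == 12), matching the formula above:
#   num_inputs = NB_LOOKBACK_DAYS * (len(NPI_COLUMNS) + 1) + len(NPI_COLUMNS)
#              = 21 * 13 + 12 = 285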
# Number of countries to use for training. Again, lower numbers
# here will make training faster, since there will be fewer
# input variables, but could potentially miss out on useful info.
NB_EVAL_COUNTRIES = 10
# Number of prescriptions to make per country.
# This can be set based on how many solutions in PRESCRIPTORS_FILE
# we want to run and on time constraints.
NB_PRESCRIPTIONS = 10
# Number of days to fix prescribed IPs before changing them.
# This could be a useful toggle for decision makers, who may not
# want to change policy every day. Increasing this value also
# can speed up the prescriptor, at the cost of potentially less
# interesting prescriptions.
ACTION_DURATION = 14
# Range of days the prescriptors will be evaluated on.
# To save time during training, this range may be significantly
# shorter than the maximum days a prescriptor can be evaluated on.
EVAL_START_DATE = '2020-08-01'
EVAL_END_DATE = '2020-08-02'
# Maximum number of generations to run (unlimited if None)
NB_GENERATIONS = 200
# Path to file containing neat prescriptors. Here we simply use a
# recent checkpoint of the population from train_prescriptor.py,
# but this is likely not the most complementary set of prescriptors.
# Many approaches can be taken to generate/collect more diverse sets.
# Note: this set can contain up to 10 prescriptors for evaluation.
# PRESCRIPTORS_FILE = os.path.join(ROOT_DIR, '{}/neat-checkpoint-{}'.format(path, num_checkpoint))
PRESCRIPTORS_FILE = os.path.join(ROOT_DIR, '{}/neat-checkpoint-{}_short_pickle4'.format(path, num_checkpoint))
def dominates(one, other):
"""Return true if each objective of *one* is not strictly worse than
the corresponding objective of *other* and at least one objective is
strictly better.
"""
not_equal = False
for self_wvalue, other_wvalue in zip(one, other):
if self_wvalue > other_wvalue:
not_equal = True
elif self_wvalue < other_wvalue:
return False
return not_equal
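# Example of the convention used below (objectives are negated so that
# "larger is better"):
#   dominates((-3, -5), (-4, -5))  -> True   (strictly better on one objective)
#   dominates((-3, -5), (-3, -5))  -> False  (no strict improvement anywhere)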
def sortNondominatedNSGA2(pop_arr, k, first_front_only=False):
"""Sort the first *k* *individuals* into different nondomination levels
using the "Fast Nondominated Sorting Approach" proposed by Deb et al.,
see [Deb2002]_. This algorithm has a time complexity of :math:`O(MN^2)`,
where :math:`M` is the number of objectives and :math:`N` the number of
individuals.
:param individuals: A list of individuals to select from.
:param k: The number of individuals to select.
:param first_front_only: If :obj:`True` sort only the first front and
exit.
:returns: A list of Pareto fronts (lists), the first list includes
nondominated individuals.
.. [Deb2002] Deb, Pratab, Agarwal, and Meyarivan, "A fast elitist
non-dominated sorting genetic algorithm for multi-objective
optimization: NSGA-II", 2002.
"""
if k == 0:
return []
map_fit_ind = defaultdict(list)
for ind in pop_arr:
map_fit_ind[ind.fitness_mult].append(ind)
fits = list(map_fit_ind.keys())
current_front = []
next_front = []
dominating_fits = defaultdict(int)
dominated_fits = defaultdict(list)
# Rank first Pareto front
for i, fit_i in enumerate(fits):
for fit_j in fits[i + 1:]:
if dominates(fit_i, fit_j):
dominating_fits[fit_j] += 1
dominated_fits[fit_i].append(fit_j)
elif dominates(fit_j, fit_i):
dominating_fits[fit_i] += 1
dominated_fits[fit_j].append(fit_i)
if dominating_fits[fit_i] == 0:
current_front.append(fit_i)
fronts = [[]]
for fit in current_front:
fronts[-1].extend(map_fit_ind[fit])
pareto_sorted = len(fronts[-1])
    # Rank the next front until all individuals are sorted or
    # the given number of individuals is sorted.
if not first_front_only:
N = min(len(pop_arr), k)
while pareto_sorted < N:
fronts.append([])
for fit_p in current_front:
for fit_d in dominated_fits[fit_p]:
dominating_fits[fit_d] -= 1
if dominating_fits[fit_d] == 0:
next_front.append(fit_d)
pareto_sorted += len(map_fit_ind[fit_d])
fronts[-1].extend(map_fit_ind[fit_d])
current_front = next_front
next_front = []
return fronts
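# Sketch of intended use (hypothetical population `pop_arr` whose members
# carry a `fitness_mult` tuple): the first returned front is the
# nondominated (Pareto-optimal) set.
#   fronts = sortNondominatedNSGA2(pop_arr, k=len(pop_arr))
#   pareto_front = fronts[0]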
def assignCrowdingDist(individuals):
"""Assign a crowding distance to each individual's fitness.
It is done per front.
"""
if len(individuals) == 0:
return
distances = [0.0] * len(individuals)
crowd = [(ind.fitness_mult, i) for i, ind in enumerate(individuals)]
nobj = len(individuals[0].fitness_mult)
for i in range(nobj):
crowd.sort(key=lambda element: element[0][i])
distances[crowd[0][1]] = float("inf")
distances[crowd[-1][1]] = float("inf")
if crowd[-1][0][i] == crowd[0][0][i]:
continue
norm = nobj * float(crowd[-1][0][i] - crowd[0][0][i])
for prev, cur, next in zip(crowd[:-2], crowd[1:-1], crowd[2:]):
distances[cur[1]] += (next[0][i] - prev[0][i]) / norm
# find max and min distance
max_val = -float("inf")
min_val = float("inf")
flag_plus_inf = False
flag_minus_inf = False
for dist in distances:
if dist != float("inf") and max_val < dist:
max_val = dist
pass
if dist != -float("inf") and min_val > dist:
min_val = dist
pass
if dist == float("inf"):
flag_plus_inf = True
elif dist == -float("inf"):
flag_minus_inf = True
pass
# set values equal to inf to be max + 0.5
# set values equal to -inf to be max - 0.5
# and rescale the rest
if flag_plus_inf:
max_val += 0.5
if flag_minus_inf:
min_val -= 0.5
for i in range(0, len(distances)):
if distances[i] == float("inf"):
distances[i] = 1.
elif distances[i] == -float("inf"):
distances[i] = 0.
else:
distances[i] = (distances[i] - min_val) / (max_val - min_val)
pass
pass
for i, dist in enumerate(distances):
individuals[i].crowding_dist = dist / 2
pass
pass
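# Note on the rescaling above: finite distances are mapped into [0, 1], with
# +inf boundary points pinned to 1 and any -inf sentinels to 0; the final
# division by 2 keeps crowding_dist in [0, 0.5], so it can later be added to
# an integer front rank as a tie-breaker without reordering fronts (see the
# `(num_fronts - i) + el.crowding_dist` assignment in prescribe()).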
def get_best_n_points(n, x_arr, y_arr):
# 1. fit the curve
# define the true objective function
def objective(x, a, b, c):
return a + b / (c - x)
# fit curve
popt, _ = curve_fit(objective, x_arr, y_arr)
# get coefficients
a, b, c = popt
# define a sequence of inputs between the smallest and largest known inputs
x_line = np.arange(min(x_arr), max(x_arr), 1)
# calculate the output for the range
y_line = objective(x_line, a, b, c)
# 2. find arc length
arc_len_arr = []
for pos in range(0, len(x_line) - 1):
p1 = np.array([x_line[pos], y_line[pos]])
p2 = np.array([x_line[pos + 1], y_line[pos + 1]])
arc_len_arr.append(np.linalg.norm(p2 - p1))
arc_len_arr = np.array(arc_len_arr)
# distance delta
d = sum(arc_len_arr) / (n-1)
    # cumulative sum of arc length
arc_len_arr_cum = np.cumsum(arc_len_arr)
# 3. choose ref. points
# positions of reference points
points_pos = [0]
for i in range(1, (n-1)):
dist = abs(arc_len_arr_cum - i * d)
points_pos.append(np.argmin(dist) + 1)
pass
points_pos.append(len(x_line) - 1)
ref_points = np.array([x_line[points_pos], y_line[points_pos]]).T
# 4. approximate ref. points
all_my_points = np.array([x_arr, y_arr]).T
chosen_points = []
for ref_point in ref_points:
dist = np.linalg.norm((all_my_points - ref_point), axis=1)
pos = np.argmin(dist)
chosen_points.append(pos)
pass
ref_points_pos = points_pos
return chosen_points
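# Sketch: select 5 well-spread solutions along a 2-D front approximated by
# y = a + b / (c - x); the returned positions index into the input arrays.
#   idx = get_best_n_points(5, x_arr, y_arr)
#   chosen = [(x_arr[i], y_arr[i]) for i in idx]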
class Neat(BasePrescriptor):
def __init__(self, seed=base.SEED, eval_start_date=EVAL_START_DATE, eval_end_date=EVAL_END_DATE,
nb_eval_countries=NB_EVAL_COUNTRIES, nb_lookback_days=NB_LOOKBACK_DAYS, nb_prescriptions=NB_PRESCRIPTIONS, nb_generations=NB_GENERATIONS,
action_duration=ACTION_DURATION, config_file=CONFIG_FILE, prescriptors_file=PRESCRIPTORS_FILE, hist_df=None, verbose=True):
super().__init__(seed=seed)
self.eval_start_date = pd.to_datetime(eval_start_date, format='%Y-%m-%d')
self.eval_end_date = pd.to_datetime(eval_end_date, format='%Y-%m-%d')
self.nb_eval_countries = nb_eval_countries
self.nb_lookback_days = nb_lookback_days
self.nb_prescriptions = nb_prescriptions
self.nb_generations = nb_generations
self.action_duration = action_duration
self.config_file = config_file
self.prescriptors_file = prescriptors_file
self.hist_df = hist_df
self.verbose = verbose
def fit(self, hist_df=None):
if hist_df is not None:
self.hist_df = hist_df
# As a heuristic, use the top NB_EVAL_COUNTRIES w.r.t. ConfirmedCases
# so far as the geos for evaluation.
eval_geos = list(self.hist_df.groupby('GeoID').max()['ConfirmedCases'].sort_values(
ascending=False).head(self.nb_eval_countries).index)
if self.verbose:
print("Nets will be evaluated on the following geos:", eval_geos)
# Pull out historical data for all geos
past_cases = {}
past_ips = {}
for geo in eval_geos:
geo_df = self.hist_df[self.hist_df['GeoID'] == geo]
past_cases[geo] = np.maximum(0, np.array(geo_df[CASES_COL]))
past_ips[geo] = np.array(geo_df[NPI_COLUMNS])
# Gather values for scaling network output
ip_max_values_arr = np.array([NPI_MAX_VALUES[ip] for ip in NPI_COLUMNS])
# Load configuration.
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
self.config_file)
# Create the population, which is the top-level object for a NEAT run.
p = neat.Population(config)
if self.verbose:
# Add a stdout reporter to show progress in the terminal.
p.add_reporter(neat.StdOutReporter(show_species_detail=True))
# Add statistics reporter to provide extra info about training progress.
stats = neat.StatisticsReporter()
p.add_reporter(stats)
# Add checkpointer to save population every generation and every 10 minutes.
p.add_reporter(neat.Checkpointer(generation_interval=1,
time_interval_seconds=600,
filename_prefix=CHECKPOINTS_PREFIX))
# Function that evaluates the fitness of each prescriptor model, equal costs
def eval_genomes_multy_ones(genomes, config):
# Every generation sample a different set of costs per geo,
# so that over time solutions become robust to different costs.
cost_df = base.generate_costs(self.hist_df, mode='equal')
cost_df = base.add_geo_id(cost_df)
geo_costs = {}
for geo in eval_geos:
costs = cost_df[cost_df['GeoID'] == geo]
cost_arr = np.array(costs[NPI_COLUMNS])[0]
geo_costs[geo] = cost_arr
# Evaluate each individual
for genome_id, genome in genomes:
if genome.fitness is not None:
continue
# Create net from genome
net = neat.nn.FeedForwardNetwork.create(genome, config)
# Set up dictionary to keep track of prescription
df_dict = {'CountryName': [], 'RegionName': [], 'Date': []}
for ip_col in NPI_COLUMNS:
df_dict[ip_col] = []
# Set initial data
eval_past_cases = deepcopy(past_cases)
eval_past_ips = deepcopy(past_ips)
# Compute prescribed stringency incrementally
stringency = np.zeros(config.genome_config.num_outputs)
# Make prescriptions one day at a time, feeding resulting
# predictions from the predictor back into the prescriptor.
for date in pd.date_range(self.eval_start_date, self.eval_end_date):
date_str = date.strftime("%Y-%m-%d")
# Prescribe for each geo
for geo in eval_geos:
# Prepare input data. Here we use log to place cases
# on a reasonable scale; many other approaches are possible.
X_cases = np.log(eval_past_cases[geo][-self.nb_lookback_days:] + 1)
X_ips = eval_past_ips[geo][-self.nb_lookback_days:]
X_costs = geo_costs[geo]
X = np.concatenate([X_cases.flatten(),
X_ips.flatten(),
X_costs])
# Get prescription
prescribed_ips = net.activate(X)
# Map prescription to integer outputs
prescribed_ips = (prescribed_ips * ip_max_values_arr).round()
# Add it to prescription dictionary
country_name, region_name = (geo.split(' / ') + [np.nan])[:2]
df_dict['CountryName'].append(country_name)
df_dict['RegionName'].append(region_name)
df_dict['Date'].append(date_str)
for ip_col, prescribed_ip in zip(NPI_COLUMNS, prescribed_ips):
df_dict[ip_col].append(prescribed_ip)
# Update stringency. This calculation could include division by
# the number of IPs and/or number of geos, but that would have
# no effect on the ordering of candidate solutions.
stringency += prescribed_ips
# Create dataframe from prescriptions.
pres_df = pd.DataFrame(df_dict)
# Make prediction given prescription for all countries
pred_df = self.get_predictions(self.eval_start_date.strftime("%Y-%m-%d"), date_str, pres_df)
# Update past data with new day of prescriptions and predictions
pres_df = base.add_geo_id(pres_df)
pred_df = base.add_geo_id(pred_df)
new_pres_df = pres_df[pres_df['Date'] == date_str]
new_pred_df = pred_df[pred_df['Date'] == date_str]
for geo in eval_geos:
geo_pres = new_pres_df[new_pres_df['GeoID'] == geo]
geo_pred = new_pred_df[new_pred_df['GeoID'] == geo]
# Append array of prescriptions
pres_arr = np.array([geo_pres[ip_col].values[0] for ip_col in NPI_COLUMNS]).reshape(1, -1)
eval_past_ips[geo] = np.concatenate([eval_past_ips[geo], pres_arr])
# Append predicted cases
eval_past_cases[geo] = np.append(eval_past_cases[geo],
geo_pred[PRED_CASES_COL].values[0])
# Compute fitness. There are many possibilities for computing fitness and ranking
# candidates. Here we choose to minimize the product of ip stringency and predicted
# cases. This product captures the area of the 2D objective space that dominates
# the candidate. We minimize it by including a negation. To place the fitness on
# a reasonable scale, we take means over all geos and days. Note that this fitness
# function can lead directly to the degenerate solution of all ips 0, i.e.,
# stringency zero. To achieve more interesting behavior, a different fitness
# function may be required.
new_cases = pred_df[PRED_CASES_COL].mean().mean()
fitness_mult = list(-stringency)
fitness_mult.append(-new_cases)
genome.fitness_mult = tuple(fitness_mult)
if self.verbose:
print('Evaluated Genome', genome_id)
print('New cases:', new_cases)
print('Stringency:', stringency)
print('Fitness:', genome.fitness_mult)
# Function that evaluates the fitness of each prescriptor model, equal costs
def eval_genomes_2d_ones(genomes, config):
# Every generation sample a different set of costs per geo,
# so that over time solutions become robust to different costs.
cost_df = base.generate_costs(self.hist_df, mode='equal')
cost_df = base.add_geo_id(cost_df)
geo_costs = {}
for geo in eval_geos:
costs = cost_df[cost_df['GeoID'] == geo]
cost_arr = np.array(costs[NPI_COLUMNS])[0]
geo_costs[geo] = cost_arr
# Evaluate each individual
for genome_id, genome in genomes:
# Create net from genome
net = neat.nn.FeedForwardNetwork.create(genome, config)
# Set up dictionary to keep track of prescription
df_dict = {'CountryName': [], 'RegionName': [], 'Date': []}
for ip_col in NPI_COLUMNS:
df_dict[ip_col] = []
# Set initial data
eval_past_cases = deepcopy(past_cases)
eval_past_ips = deepcopy(past_ips)
# Compute prescribed stringency incrementally
stringency = 0.
# Make prescriptions one day at a time, feeding resulting
# predictions from the predictor back into the prescriptor.
for date in pd.date_range(self.eval_start_date, self.eval_end_date):
date_str = date.strftime("%Y-%m-%d")
# Prescribe for each geo
for geo in eval_geos:
# Prepare input data. Here we use log to place cases
# on a reasonable scale; many other approaches are possible.
X_cases = np.log(eval_past_cases[geo][-self.nb_lookback_days:] + 1)
X_ips = eval_past_ips[geo][-self.nb_lookback_days:]
X_costs = geo_costs[geo]
X = np.concatenate([X_cases.flatten(),
X_ips.flatten(),
X_costs])
# Get prescription
prescribed_ips = net.activate(X)
# Map prescription to integer outputs
prescribed_ips = (prescribed_ips * ip_max_values_arr).round()
# Add it to prescription dictionary
country_name, region_name = (geo.split(' / ') + [np.nan])[:2]
df_dict['CountryName'].append(country_name)
df_dict['RegionName'].append(region_name)
df_dict['Date'].append(date_str)
for ip_col, prescribed_ip in zip(NPI_COLUMNS, prescribed_ips):
df_dict[ip_col].append(prescribed_ip)
# Update stringency. This calculation could include division by
# the number of IPs and/or number of geos, but that would have
# no effect on the ordering of candidate solutions.
stringency += np.sum(geo_costs[geo] * prescribed_ips)
# Create dataframe from prescriptions.
pres_df = pd.DataFrame(df_dict)
# Make prediction given prescription for all countries
pred_df = self.get_predictions(self.eval_start_date.strftime("%Y-%m-%d"), date_str, pres_df)
# Update past data with new day of prescriptions and predictions
pres_df = base.add_geo_id(pres_df)
pred_df = base.add_geo_id(pred_df)
new_pres_df = pres_df[pres_df['Date'] == date_str]
new_pred_df = pred_df[pred_df['Date'] == date_str]
for geo in eval_geos:
geo_pres = new_pres_df[new_pres_df['GeoID'] == geo]
geo_pred = new_pred_df[new_pred_df['GeoID'] == geo]
# Append array of prescriptions
pres_arr = np.array([geo_pres[ip_col].values[0] for ip_col in NPI_COLUMNS]).reshape(1, -1)
eval_past_ips[geo] = np.concatenate([eval_past_ips[geo], pres_arr])
# Append predicted cases
eval_past_cases[geo] = np.append(eval_past_cases[geo],
geo_pred[PRED_CASES_COL].values[0])
# Compute fitness. There are many possibilities for computing fitness and ranking
# candidates. Here we choose to minimize the product of ip stringency and predicted
# cases. This product captures the area of the 2D objective space that dominates
# the candidate. We minimize it by including a negation. To place the fitness on
# a reasonable scale, we take means over all geos and days. Note that this fitness
# function can lead directly to the degenerate solution of all ips 0, i.e.,
# stringency zero. To achieve more interesting behavior, a different fitness
# function may be required.
new_cases = pred_df[PRED_CASES_COL].mean().mean()
genome.fitness_mult = (-new_cases, -stringency)
if self.verbose:
print('Evaluated Genome', genome_id)
print('New cases:', new_cases)
print('Stringency:', stringency)
print('Fitness:', genome.fitness_mult)
# Function that evaluates the fitness of each prescriptor model, random costs
def eval_genomes_2d(genomes, config):
# Every generation sample a different set of costs per geo,
# so that over time solutions become robust to different costs.
cost_df = base.generate_costs(self.hist_df, mode='random')
cost_df = base.add_geo_id(cost_df)
geo_costs = {}
for geo in eval_geos:
costs = cost_df[cost_df['GeoID'] == geo]
cost_arr = np.array(costs[NPI_COLUMNS])[0]
geo_costs[geo] = cost_arr
# Evaluate each individual
for genome_id, genome in genomes:
# Create net from genome
net = neat.nn.FeedForwardNetwork.create(genome, config)
# Set up dictionary to keep track of prescription
df_dict = {'CountryName': [], 'RegionName': [], 'Date': []}
for ip_col in NPI_COLUMNS:
df_dict[ip_col] = []
# Set initial data
eval_past_cases = deepcopy(past_cases)
eval_past_ips = deepcopy(past_ips)
# Compute prescribed stringency incrementally
stringency = 0.
# Make prescriptions one day at a time, feeding resulting
# predictions from the predictor back into the prescriptor.
for date in pd.date_range(self.eval_start_date, self.eval_end_date):
date_str = date.strftime("%Y-%m-%d")
# Prescribe for each geo
for geo in eval_geos:
# Prepare input data. Here we use log to place cases
# on a reasonable scale; many other approaches are possible.
X_cases = np.log(eval_past_cases[geo][-self.nb_lookback_days:] + 1)
X_ips = eval_past_ips[geo][-self.nb_lookback_days:]
X_costs = geo_costs[geo]
X = np.concatenate([X_cases.flatten(),
X_ips.flatten(),
X_costs])
# Get prescription
prescribed_ips = net.activate(X)
# Map prescription to integer outputs
prescribed_ips = (prescribed_ips * ip_max_values_arr).round()
# Add it to prescription dictionary
country_name, region_name = (geo.split(' / ') + [np.nan])[:2]
df_dict['CountryName'].append(country_name)
df_dict['RegionName'].append(region_name)
df_dict['Date'].append(date_str)
for ip_col, prescribed_ip in zip(NPI_COLUMNS, prescribed_ips):
df_dict[ip_col].append(prescribed_ip)
# Update stringency. This calculation could include division by
# the number of IPs and/or number of geos, but that would have
# no effect on the ordering of candidate solutions.
stringency += np.sum(geo_costs[geo] * prescribed_ips)
# Create dataframe from prescriptions.
pres_df = pd.DataFrame(df_dict)
# Make prediction given prescription for all countries
pred_df = self.get_predictions(self.eval_start_date.strftime("%Y-%m-%d"), date_str, pres_df)
# Update past data with new day of prescriptions and predictions
pres_df = base.add_geo_id(pres_df)
pred_df = base.add_geo_id(pred_df)
new_pres_df = pres_df[pres_df['Date'] == date_str]
new_pred_df = pred_df[pred_df['Date'] == date_str]
for geo in eval_geos:
geo_pres = new_pres_df[new_pres_df['GeoID'] == geo]
geo_pred = new_pred_df[new_pred_df['GeoID'] == geo]
# Append array of prescriptions
pres_arr = np.array([geo_pres[ip_col].values[0] for ip_col in NPI_COLUMNS]).reshape(1, -1)
eval_past_ips[geo] = np.concatenate([eval_past_ips[geo], pres_arr])
# Append predicted cases
eval_past_cases[geo] = np.append(eval_past_cases[geo],
geo_pred[PRED_CASES_COL].values[0])
# Compute fitness. There are many possibilities for computing fitness and ranking
# candidates. Here we choose to minimize the product of ip stringency and predicted
# cases. This product captures the area of the 2D objective space that dominates
# the candidate. We minimize it by including a negation. To place the fitness on
# a reasonable scale, we take means over all geos and days. Note that this fitness
# function can lead directly to the degenerate solution of all ips 0, i.e.,
# stringency zero. To achieve more interesting behavior, a different fitness
# function may be required.
new_cases = pred_df[PRED_CASES_COL].mean().mean()
genome.fitness_mult = (-new_cases, -stringency)
if self.verbose:
print('Evaluated Genome', genome_id)
print('New cases:', new_cases)
print('Stringency:', stringency)
print('Fitness:', genome.fitness_mult)
# Function that evaluates the fitness of each prescriptor model
def eval_genomes(genomes, config):
# Every generation sample a different set of costs per geo,
# so that over time solutions become robust to different costs.
cost_df = base.generate_costs(self.hist_df, mode='random')
cost_df = base.add_geo_id(cost_df)
geo_costs = {}
for geo in eval_geos:
costs = cost_df[cost_df['GeoID'] == geo]
cost_arr = np.array(costs[NPI_COLUMNS])[0]
geo_costs[geo] = cost_arr
# Evaluate each individual
for genome_id, genome in genomes:
# Create net from genome
net = neat.nn.FeedForwardNetwork.create(genome, config)
# Set up dictionary to keep track of prescription
df_dict = {'CountryName': [], 'RegionName': [], 'Date': []}
for ip_col in NPI_COLUMNS:
df_dict[ip_col] = []
# Set initial data
eval_past_cases = deepcopy(past_cases)
eval_past_ips = deepcopy(past_ips)
# Compute prescribed stringency incrementally
stringency = 0.
# Make prescriptions one day at a time, feeding resulting
# predictions from the predictor back into the prescriptor.
for date in pd.date_range(self.eval_start_date, self.eval_end_date):
date_str = date.strftime("%Y-%m-%d")
# Prescribe for each geo
for geo in eval_geos:
# Prepare input data. Here we use log to place cases
# on a reasonable scale; many other approaches are possible.
X_cases = np.log(eval_past_cases[geo][-self.nb_lookback_days:] + 1)
X_ips = eval_past_ips[geo][-self.nb_lookback_days:]
X_costs = geo_costs[geo]
X = np.concatenate([X_cases.flatten(),
X_ips.flatten(),
X_costs])
# Get prescription
prescribed_ips = net.activate(X)
# Map prescription to integer outputs
prescribed_ips = (prescribed_ips * ip_max_values_arr).round()
# Add it to prescription dictionary
country_name, region_name = (geo.split(' / ') + [np.nan])[:2]
df_dict['CountryName'].append(country_name)
df_dict['RegionName'].append(region_name)
df_dict['Date'].append(date_str)
for ip_col, prescribed_ip in zip(NPI_COLUMNS, prescribed_ips):
df_dict[ip_col].append(prescribed_ip)
# Update stringency. This calculation could include division by
# the number of IPs and/or number of geos, but that would have
# no effect on the ordering of candidate solutions.
stringency += np.sum(geo_costs[geo] * prescribed_ips)
# Create dataframe from prescriptions.
pres_df = pd.DataFrame(df_dict)
# Make prediction given prescription for all countries
pred_df = self.get_predictions(self.eval_start_date.strftime("%Y-%m-%d"), date_str, pres_df)
# Update past data with new day of prescriptions and predictions
pres_df = base.add_geo_id(pres_df)
pred_df = base.add_geo_id(pred_df)
new_pres_df = pres_df[pres_df['Date'] == date_str]
new_pred_df = pred_df[pred_df['Date'] == date_str]
for geo in eval_geos:
geo_pres = new_pres_df[new_pres_df['GeoID'] == geo]
geo_pred = new_pred_df[new_pred_df['GeoID'] == geo]
# Append array of prescriptions
pres_arr = np.array([geo_pres[ip_col].values[0] for ip_col in NPI_COLUMNS]).reshape(1,-1)
eval_past_ips[geo] = np.concatenate([eval_past_ips[geo], pres_arr])
# Append predicted cases
eval_past_cases[geo] = np.append(eval_past_cases[geo],
geo_pred[PRED_CASES_COL].values[0])
# Compute fitness. There are many possibilities for computing fitness and ranking
# candidates. Here we choose to minimize the product of ip stringency and predicted
# cases. This product captures the area of the 2D objective space that dominates
# the candidate. We minimize it by including a negation. To place the fitness on
# a reasonable scale, we take means over all geos and days. Note that this fitness
# function can lead directly to the degenerate solution of all ips 0, i.e.,
# stringency zero. To achieve more interesting behavior, a different fitness
# function may be required.
new_cases = pred_df[PRED_CASES_COL].mean().mean()
genome.fitness = -(new_cases * stringency)
if self.verbose:
print('Evaluated Genome', genome_id)
print('New cases:', new_cases)
print('Stringency:', stringency)
print('Fitness:', genome.fitness)
# Run until a solution is found. Since a "solution" as defined in our config
# would have 0 fitness, this will run indefinitely and require manual stopping,
# unless evolution finds the solution that uses 0 for all ips. A different
# value can be placed in the config for automatic stopping at other thresholds.
# winner = p.run(eval_genomes, n=self.nb_generations)
winner = p.run_NSGA2(eval_genomes_2d, n=self.nb_generations, multi=True, algo='NSGA-2')
return
def prescribe(self,
start_date_str,
end_date_str,
prior_ips_df,
cost_df,
restore_from_dic=False,
):
start_date = pd.to_datetime(start_date_str, format='%Y-%m-%d')
end_date = pd.to_datetime(end_date_str, format='%Y-%m-%d')
geos = prior_ips_df['GeoID'].unique()
# Restrict it to dates before the start_date
df = self.hist_df[self.hist_df['Date'] <= start_date]
# Create past case data arrays for all geos
past_cases = {}
for geo in geos:
geo_df = df[df['GeoID'] == geo]
past_cases[geo] = np.maximum(0, np.array(geo_df[CASES_COL]))
# Create past ip data arrays for all geos
past_ips = {}
for geo in geos:
geo_df = prior_ips_df[prior_ips_df['GeoID'] == geo]
past_ips[geo] = np.array(geo_df[NPI_COLUMNS])
# Fill in any missing case data before start_date
# using predictor given past_ips_df.
# Note that the following assumes that the df returned by prepare_historical_df()
# has the same final date for all regions. This has been true so far, but relies
# on it being true for the Oxford data csv loaded by prepare_historical_df().
last_historical_data_date_str = df['Date'].max()
last_historical_data_date = pd.to_datetime(last_historical_data_date_str,
format='%Y-%m-%d')
if last_historical_data_date + pd.Timedelta(days=1) < start_date:
if self.verbose:
print("Filling in missing data...")
missing_data_start_date = last_historical_data_date + pd.Timedelta(days=1)
missing_data_start_date_str = datetime.datetime.strftime(missing_data_start_date,
format='%Y-%m-%d')
missing_data_end_date = start_date - pd.Timedelta(days=1)
missing_data_end_date_str = datetime.datetime.strftime(missing_data_end_date,
format='%Y-%m-%d')
pred_df = self.get_predictions(missing_data_start_date_str,
missing_data_end_date_str,
prior_ips_df)
pred_df = base.add_geo_id(pred_df)
for geo in geos:
geo_df = pred_df[pred_df['GeoID'] == geo].sort_values(by='Date')
pred_cases_arr = np.array(geo_df[PRED_CASES_COL])
past_cases[geo] = np.append(past_cases[geo], pred_cases_arr)
elif self.verbose:
print("No missing data.")
# Gather values for scaling network output
ip_max_values_arr = np.array([NPI_MAX_VALUES[ip] for ip in NPI_COLUMNS])
if not restore_from_dic:
# Load prescriptors
checkpoint = neat.Checkpointer.restore_checkpoint(self.prescriptors_file)
# read population and prepare data
pop = checkpoint.population
pop_arr = [pop[key] for key in pop]
# is population 2-objective?
# print('!!!!!!!!!!!!!! Problem dim {}'.format(len(pop_arr[0].fitness_mult)))
# print('!!!!!!!!!!!!!! pop_size = {}'.format(len(pop_arr)))
if len(pop_arr[0].fitness_mult) == 2:
x_arr = [el.fitness_mult[0] for el in pop_arr]
y_arr = [el.fitness_mult[1] for el in pop_arr]
chosen_points_pos = get_best_n_points(NB_PRESCRIPTIONS, x_arr, y_arr)
prescriptors_all = np.array(pop_arr)[chosen_points_pos]
pass
else:
# create new attribute for every genome
for el in pop_arr:
el.fitness_manyD = deepcopy(el.fitness_mult)
pass
prescriptors_all = pop_arr
pass
# prescriptors = list(checkpoint.population.values())[:self.nb_prescriptions]
pass
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
self.config_file)
# Load IP costs to condition prescriptions
geo_costs = {}
for geo in geos:
costs = cost_df[cost_df['GeoID'] == geo]
cost_arr = np.array(costs[NPI_COLUMNS])[0]
geo_costs[geo] = cost_arr
# Generate prescriptions
prescription_dfs = []
if restore_from_dic:
print('Using file {}'.format(self.prescriptors_file))
with gzip.open(self.prescriptors_file) as f:
prescriptors_dic = pickle.load(f)
prescriptors = [prescriptors_dic[key] for key in prescriptors_dic]
else:
if len(prescriptors_all) == NB_PRESCRIPTIONS:
prescriptors = prescriptors_all
else:
for el in pop_arr:
npi_arr = np.array(el.fitness_manyD[0:-1])
new_cases = el.fitness_manyD[-1]
el.fitness_mult = (new_cases, sum(npi_arr * cost_arr))
pass
fronts = sortNondominatedNSGA2(pop_arr, len(pop))
for front in fronts:
assignCrowdingDist(front)
num_fronts = len(fronts)
for i in range(0, num_fronts):
front = fronts[i]
for el in front:
el.fitness = (num_fronts - i) + el.crowding_dist
pass
pass
pass
tmp_chosen = []
for i in range(0, len(fronts)):
tmp_chosen.extend(fronts[i])
if len(tmp_chosen) >= NB_PRESCRIPTIONS:
break
pass
# print('!!!!!!!!!!! Choosing among {} prescriptors'.format(len(tmp_chosen)))
x_arr = [el.fitness_mult[0] for el in tmp_chosen]
y_arr = [el.fitness_mult[1] for el in tmp_chosen]
chosen_points_pos = get_best_n_points(NB_PRESCRIPTIONS, x_arr, y_arr)
prescriptors = np.array(tmp_chosen)[chosen_points_pos]
pass
pass
for prescription_idx, prescriptor in enumerate(prescriptors):
if self.verbose:
print("Generating prescription", prescription_idx, "...")
# Create net from genome
net = neat.nn.FeedForwardNetwork.create(prescriptor, config)
# Set up dictionary for keeping track of prescription
df_dict = {'CountryName': [], 'RegionName': [], 'Date': []}
for ip_col in sorted(NPI_MAX_VALUES.keys()):
df_dict[ip_col] = []
# Set initial data
eval_past_cases = deepcopy(past_cases)
eval_past_ips = deepcopy(past_ips)
# Generate prescriptions iteratively, feeding resulting
# predictions from the predictor back into the prescriptor.
action_start_date = start_date
while action_start_date <= end_date:
# Get prescription for all regions
for geo in geos:
# Prepare input data. Here we use log to place cases
# on a reasonable scale; many other approaches are possible.
X_cases = np.log(eval_past_cases[geo][-self.nb_lookback_days:] + 1)
X_ips = eval_past_ips[geo][-self.nb_lookback_days:]
X_costs = geo_costs[geo]
X = np.concatenate([X_cases.flatten(),
X_ips.flatten(),
X_costs])
# Get prescription
prescribed_ips = net.activate(X)
# Map prescription to integer outputs
prescribed_ips = (prescribed_ips * ip_max_values_arr).round()
# Add it to prescription dictionary for the full ACTION_DURATION
country_name, region_name = (geo.split(' / ') + [np.nan])[:2]
if region_name == 'nan':
region_name = np.nan
for date in pd.date_range(action_start_date, periods=self.action_duration):
if date > end_date:
break
date_str = date.strftime("%Y-%m-%d")
df_dict['CountryName'].append(country_name)
df_dict['RegionName'].append(region_name)
df_dict['Date'].append(date_str)
for ip_col, prescribed_ip in zip(NPI_COLUMNS, prescribed_ips):
df_dict[ip_col].append(prescribed_ip)
# Create dataframe from prescriptions
pres_df = | pd.DataFrame(df_dict) | pandas.DataFrame |
import requests
from bs4 import BeautifulSoup
import pandas as pd
from difflib import SequenceMatcher
desired_width = 320
pd.set_option('display.width', desired_width)
pd.set_option('display.max_columns', 10)
pd.set_option('display.max_rows', 1000)
pd.set_option('display.max_colwidth', None) #To display full URL in dataframe
from datetime import datetime
def df_column_switch(df, column1, column2):
i = list(df.columns)
a, b = i.index(column1), i.index(column2)
i[b], i[a] = i[a], i[b]
df = df[i]
return df
def create_jobsdf_greenhouse(company_name, url, save_to_excel = False):
    '''
    Scrape all job openings from a Greenhouse-hosted careers page and return
    them as a DataFrame with Company, Role, and URL columns; optionally save
    the table to an Excel file under Dataframes/.
    '''
url = str(url)
company_name = str(company_name)
page = requests.get(url)
sections = BeautifulSoup(page.text, 'html.parser').find_all('section', {'class': 'level-0'})
roles = []
role_urls = []
for section in sections:
#print(section)
for opening in section.find_all('div', {'class': 'opening'}):
#print(opening)
#print(' ')
role_title = opening.find('a').getText().strip()
role_location = opening.find('span', {'class': 'location'}).getText().strip()
partial_url = [elem.get('href') for elem in opening.find_all('a')][0]
if ((company_name == 'Optiver') or
(company_name == 'Glovo') or
(company_name == 'Graviton Research Capital')):
job_no = partial_url.split('/')[-1].split('=')[-1]
role_url = partial_url
elif company_name == 'Squarepoint Capital':
job_no = partial_url.split('/')[-1].split('=')[-1]
role_url = partial_url.split('?')[0] + '/job#' + job_no
else:
job_no = partial_url.split('/')[-1]
common_size = SequenceMatcher(None, partial_url, url).get_matching_blocks()[0].size #U #.find_longest_match(0, len(partial_url), 0, len(url))
role_url = url + partial_url[common_size : ] #U
roles.append(role_title + ' - ' + role_location + ' - ' + job_no)
role_urls.append(role_url)
jobs_df = pd.DataFrame(pd.Series(roles), columns = ['Role'])
jobs_df['URL'] = pd.DataFrame(pd.Series(role_urls))
jobs_df.insert(jobs_df.columns.get_loc('Role'), 'Company', company_name)
#print(roles)
#print(role_urls)
    if save_to_excel:
jobs_df['Date Viewed'] = datetime.now()
fname = company_name + '.xlsx'
jobs_df.to_excel('Dataframes/' + fname)
print("All jobs on the given webpage saved as a new Excel file", company_name + '.xlsx')
#print(jobs_df)
return jobs_df
def new_jobs_greenhouse(company_name, url, save_to_excel = False):
    '''
    Re-scrape the given Greenhouse careers page and compare it against the
    snapshot previously saved in Dataframes/<company_name>.xlsx to identify
    newly posted jobs.
    '''
url = str(url)
company_name = str(company_name)
latest_jobs_df = create_jobsdf_greenhouse(company_name, url)
latest_jobs_df.fillna('', inplace = True)
latest_jobs_df.pop('Company')
#print('latest jobs df')
#print(latest_jobs_df)
prev_jobs_df = | pd.read_excel('Dataframes/' + company_name + '.xlsx', index_col = [0], dtype = object) | pandas.read_excel |
import unittest
import numpy as np
import pandas as pd
from pyalink.alink import *
class TestDataFrame(unittest.TestCase):
def setUp(self):
data_null = np.array([
["007", 1, 1, 2.0, True],
[None, 2, 2, None, True],
["12", None, 4, 2.0, False],
["1312", 0, None, 1.2, None],
])
self.df_null = pd.DataFrame({
"f_string": data_null[:, 0],
"f_long": data_null[:, 1],
"f_int": data_null[:, 2],
"f_double": data_null[:, 3],
"f_boolean": data_null[:, 4]
})
data = np.array([
["a", 1, 1, 2.0, True],
["abc", 2, 2, 2.4, True],
["c", 4, 4, 2.0, False],
["a", 0, 1, 1.2, False],
])
self.df = pd.DataFrame({
"f_string": data[:, 0],
"f_long": data[:, 1],
"f_int": data[:, 2],
"f_double": data[:, 3],
"f_boolean": data[:, 4]
})
def test_memory_null(self):
from pyalink.alink.config import g_config
g_config["collect_storage_type"] = "memory"
schema = "f_string string,f_long long,f_int int,f_double double,f_boolean boolean"
op = dataframeToOperator(self.df_null, schema, op_type="batch")
col_names = op.getColNames()
col_types = op.getColTypes()
self.assertEqual(col_names[0], "f_string")
self.assertEqual(col_names[1], "f_long")
self.assertEqual(col_names[2], "f_int")
self.assertEqual(col_names[3], "f_double")
self.assertEqual(col_names[4], "f_boolean")
self.assertEqual(col_types[0], "VARCHAR")
self.assertEqual(col_types[1], "BIGINT")
self.assertEqual(col_types[2], "INT")
self.assertEqual(col_types[3], "DOUBLE")
self.assertEqual(col_types[4], "BOOLEAN")
df2 = op.collectToDataframe()
print(df2)
print(df2.dtypes)
self.assertEqual(df2['f_string'].dtype, pd.StringDtype())
self.assertEqual(df2['f_long'].dtype, pd.Int64Dtype())
self.assertEqual(df2['f_int'].dtype, | pd.Int32Dtype() | pandas.Int32Dtype |
import re
import pandas as pd
from gensim.models import KeyedVectors
from nltk.corpus import stopwords
import keras.backend as K
from keras.layers import Input, Embedding, LSTM, Lambda
from keras.models import Model
from keras.optimizers import Adadelta
from random import sample
from keras.preprocessing.sequence import pad_sequences
import itertools
import numpy as np
def train(X_train, X_val, Y_train, Y_val, embedding, l, n_hidden = 50, batch = 64, epoch = 25, g = 1.25):
inputL, inputR = Input(shape=(l,), dtype='int32'), Input(shape=(l,), dtype='int32')
embedding_layer = Embedding(len(embedding), 300, weights=[embedding], input_length=l, trainable=False)
encodedL, encodedR = embedding_layer(inputL), embedding_layer(inputR)
lstm = LSTM(n_hidden)
outputL, outputR = lstm(encodedL), lstm(encodedR)
similarity = Lambda(function = lambda x: K.exp(-K.sum(K.abs(x[0]-x[1]), axis=1, keepdims = True)), output_shape = lambda x: (x[0][0], 1))([outputL, outputR])
model = Model([inputL, inputR], [similarity])
model.compile(loss = 'binary_crossentropy', optimizer = Adadelta(clipnorm = g), metrics = ['acc', TPR, PPV])
return model.fit([X_train['L'], X_train['R']], Y_train, batch_size = batch, epochs = epoch, validation_data=([X_val['L'], X_val['R']], Y_val))
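# Note: the Lambda layer above implements the MaLSTM similarity
#   sim(L, R) = exp(-||h_L - h_R||_1), which lies in (0, 1],
# so the output can be scored directly with binary cross-entropy against the
# 0/1 duplicate labels without a final sigmoid layer.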
def preprocess(filename, vsize = -1):
df = pd.read_csv(filename)
q1, q2, is_dupl = df.q1.tolist(), df.q2.tolist(), df.is_dupl.tolist()
size = len(q1)
if vsize < 0: vsize = size // 10
val, tpq, tnq, val_q1, val_q2, val_dupl = set(sample(range(size), vsize)), [], [], [], [], []
for i in range(size):
if i in val:
val_q1.append(q1[i])
val_q2.append(q2[i])
val_dupl.append(is_dupl[i])
elif is_dupl[i] == 0:
tnq.append((q1[i], q2[i]))
else:
tpq.append((q1[i], q2[i]))
npp, nnp = len(tpq), len(tnq)
N = min(npp, nnp)
if npp > nnp:
tpq = sample(tpq, N)
else:
tnq = sample(tnq, N)
train_q1, train_q2, train_dupl = [], [], []
for i in range(N):
train_q1.append(tpq[i][0])
train_q2.append(tpq[i][1])
train_dupl.append(1)
train_q1.append(tnq[i][0])
train_q2.append(tnq[i][1])
train_dupl.append(0)
maxlen, w2id, nw = 0, dict(), 1
w2v = KeyedVectors.load_word2vec_format('GoogleWord2Vec.bin', binary=True)
vocab, stops = w2v.vocab, set(stopwords.words('english'))
for ql in [train_q1, train_q2, val_q1, val_q2]:
for i in range(len(ql)):
wl, q2id = q2wl(ql[i]), []
for w in wl:
if w in stops and w not in vocab:
continue
if w in w2id:
q2id.append(w2id[w])
else:
q2id.append(nw)
w2id[w] = nw
nw += 1
ql[i], length = q2id, len(q2id)
if length > maxlen:
maxlen = length
embedding = 1 * np.random.randn(nw, 300)
for w in w2id:
if w in vocab:
embedding[w2id[w]] = w2v.word_vec(w)
del w2id, w2v, vocab
X_train, X_val = {'L': pd.Series(train_q1), 'R': pd.Series(train_q2)}, {'L': pd.Series(val_q1), 'R': pd.Series(val_q2)}
Y_train, Y_val = pd.DataFrame({'is_dupl': train_dupl}), | pd.DataFrame({'is_dupl': val_dupl}) | pandas.DataFrame |
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from GenNet_utils.hase.config import basedir, PYTHON_PATH
os.environ['HASEDIR'] = basedir
if PYTHON_PATH is not None:
for i in PYTHON_PATH: sys.path.insert(0, i)
from GenNet_utils.hase.hdgwas.tools import HaseAnalyser
import argparse
import pandas as pd
import numpy as np
from collections import OrderedDict
if __name__ == "__main__":
os.environ['HASEDIR'] = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
parser = argparse.ArgumentParser(description='Script analyse results of HASE')
parser.add_argument("-r", required=True, help="path to hase results")
parser.add_argument("-o", "--out", type=str, required=True, help="path to save result folder")
parser.add_argument("-df", type=float, default=None,
help="degree of freedom = ( #subjects in study - #covariates - 1 )")
parser.add_argument("-N", type=int, default=None, help="file number to read")
# TODO (low) add reference panel
args = parser.parse_args()
Analyser = HaseAnalyser()
print(args)
Analyser.DF = args.df
Analyser.result_path = args.r
Analyser.file_number = args.N
results = OrderedDict()
results['RSID'] = np.array([])
results['p_value'] = np.array([])
results['t-stat'] = np.array([])
results['phenotype'] = np.array([])
results['SE'] = np.array([])
results['MAF'] = np.array([])
results['BETA'] = np.array([])
while True:
Analyser.summary()
if Analyser.results is None:
break
print('Saving data...')
if not os.path.exists(os.path.join(args.out, 'results' + '.csv')):
df = pd.DataFrame.from_dict(results)
df.to_csv(os.path.join(args.out, 'results' + '.csv'), sep=" ", index=None)
df = | pd.DataFrame.from_dict(Analyser.results) | pandas.DataFrame.from_dict |
import numpy as np
import pandas as pd
from shapely.geometry import box
import flopy
from sfrmaker.routing import find_path, make_graph
from gisutils import shp2df
from mfexport.budget_output import read_sfr_output
from .fileio import read_tables
from .routing import get_next_id_in_subset
from sfrmaker.fileio import load_modelgrid
def get_inflow_locations_from_parent_model(parent_reach_data, inset_reach_data,
inset_grid, active_area=None
):
"""Get places in an inset model SFR network where the parent SFR network crosses
the inset model boundary, using common line ID numbers from parent and inset reach datasets.
MF2005 or MF6 supported; if either dataset contains only reach numbers (is MODFLOW-6),
the reach numbers are used as segment numbers, with each segment only having one reach.
Parameters
----------
parent_reach_data : str (filepath) or DataFrame
SFR reach data for parent model. Must include columns:
line_id : int; unique identifier for hydrography line that each reach is based on
rno : int; unique identifier for each reach. Optional if iseg and ireach columns are included.
iseg : int; unique identifier for each segment. Optional if rno is included.
ireach : int; unique identifier for each reach. Optional if rno is included.
geometry : shapely.geometry object representing location of each reach
inset_reach_data : str (filepath) or DataFrame
SFR reach data for inset model. Same columns as parent_reach_data,
except a geometry column isn't needed. line_id values must correspond to
same source hydrography as those in parent_reach_data.
inset_grid : flopy.discretization.StructuredGrid instance describing model grid
Must be in same coordinate system as geometries in parent_reach_data.
Required only if active_area is None.
active_area : shapely.geometry.Polygon object
Describes the area of the inset model where SFR is applied. Used to find
inset reaches from parent model. Must be in same coordinate system as
geometries in parent_reach_data. Required only if inset_grid is None.
Returns
-------
locations : DataFrame
Columns:
parent_segment : parent model segment
parent_reach : parent model reach
parent_rno : parent model reach number
line_id : unique identifier for hydrography line that each reach is based on
"""
# spatial reference instances defining parent and inset grids
if isinstance(inset_grid, str):
grid = load_modelgrid(inset_grid)
elif isinstance(inset_grid, flopy.discretization.grid.Grid):
grid = inset_grid
else:
raise ValueError('Unrecognized input for inset_grid')
if active_area is None:
l, r, b, t = grid.extent
active_area = box(l, b, r, t)
# parent and inset reach data
if isinstance(parent_reach_data, str):
prd = shp2df(parent_reach_data)
elif isinstance(parent_reach_data, pd.DataFrame):
prd = parent_reach_data.copy()
else:
raise ValueError('Unrecognized input for parent_reach_data')
if 'rno' in prd.columns and 'iseg' not in prd.columns:
prd['iseg'] = prd['rno']
prd['ireach'] = 1
mustinclude_cols = {'line_id', 'rno', 'iseg', 'ireach', 'geometry'}
assert len(mustinclude_cols.intersection(prd.columns)) == len(mustinclude_cols)
if isinstance(inset_reach_data, str):
if inset_reach_data.endswith('.shp'):
ird = shp2df(inset_reach_data)
else:
ird = pd.read_csv(inset_reach_data)
elif isinstance(inset_reach_data, pd.DataFrame):
ird = inset_reach_data.copy()
else:
raise ValueError('Unrecognized input for inset_reach_data')
if 'rno' in ird.columns and 'iseg' not in ird.columns:
ird['iseg'] = ird['rno']
ird['ireach'] = 1
mustinclude_cols = {'line_id', 'rno', 'iseg', 'ireach'}
assert len(mustinclude_cols.intersection(ird.columns)) == len(mustinclude_cols)
graph = make_graph(ird.rno.values, ird.outreach.values, one_to_many=False)
# cull parent reach data to only lines that cross or are just upstream of inset boundary
buffered = active_area.buffer(5000, cap_style=2)
close = [g.intersects(buffered) for g in prd.geometry]
prd = prd.loc[close]
prd.index = prd.rno
boundary = active_area.exterior
inset_line_id_connections = {} # parent rno: inset line_id
for i, r in prd.iterrows():
if r.outreach not in prd.index:
continue
downstream_line = prd.loc[r.outreach, 'geometry']
upstream_line = prd.loc[prd.rno == r.outreach, 'geometry'].values[0]
intersects = r.geometry.intersects(boundary)
intersects_downstream = downstream_line.within(active_area)
# intersects_upstream = upstream_line.within(active_area)
in_inset_model = r.geometry.within(active_area)
if intersects_downstream:
if intersects:
# if not intersects_upstream: # exclude lines that originated within the model
# # lines that cross route to their counterpart in inset model
inset_line_id_connections[r.rno] = r.line_id
pass
elif not in_inset_model:
# lines that route to a line within the inset model
# route to that line's inset counterpart
inset_line_id_connections[r.rno] = prd.loc[r.outreach, 'line_id']
pass
prd = prd.loc[prd.rno.isin(inset_line_id_connections.keys())]
# parent rno lookup
parent_rno_lookup = {v: k for k, v in inset_line_id_connections.items()}
# inlet reaches in inset model
ird = ird.loc[ird.ireach == 1]
ird = ird.loc[ird.line_id.isin(inset_line_id_connections.values())]
# for each reach in ird (potential inset inlets)
# check that there isn't another inlet downstream
drop_reaches = []
for i, r in ird.iterrows():
path = find_path(graph, r.rno)
another_inlet_downstream = len(set(path[1:]).intersection(set(ird.rno))) > 0
if another_inlet_downstream:
drop_reaches.append(r.rno)
ird = ird.loc[~ird.rno.isin(drop_reaches)]
# cull parent flows to outlet reaches
iseg_ireach = zip(prd.iseg, prd.ireach)
parent_outlet_iseg_ireach = dict(zip(prd.rno, iseg_ireach))
df = ird[['line_id', 'name', 'rno', 'iseg', 'ireach']].copy()
df['parent_rno'] = [parent_rno_lookup[lid] for lid in df['line_id']]
df['parent_iseg'] = [parent_outlet_iseg_ireach[rno][0] for rno in df['parent_rno']]
df['parent_ireach'] = [parent_outlet_iseg_ireach[rno][1] for rno in df['parent_rno']]
return df.reset_index(drop=True)
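# Sketch of intended use (file names are hypothetical):
#   locations = get_inflow_locations_from_parent_model(
#       parent_reach_data='parent_sfr_reaches.shp',
#       inset_reach_data='inset_sfr_reach_data.csv',
#       inset_grid='inset_modelgrid.json')
#   # -> DataFrame relating each inset inlet (line_id, rno, iseg, ireach)
#   #    to its parent_rno / parent_iseg / parent_ireach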
def get_inflows_from_parent_model(parent_reach_data, inset_reach_data,
mf2005_parent_sfr_outputfile, mf6_parent_sfr_budget_file,
inset_grid, active_area=None):
"""Get places in an inset model SFR network where the parent SFR network crosses
the inset model boundary, using common line ID numbers from parent and inset reach datasets.
MF2005 or MF6 supported; if either dataset contains only reach numbers (is MODFLOW-6),
the reach numbers are used as segment numbers, with each segment only having one reach.
Parameters
----------
parent_reach_data : str (filepath) or DataFrame
SFR reach data for parent model. Must include columns:
line_id : int; unique identifier for hydrography line that each reach is based on
rno : int; unique identifier for each reach. Optional if iseg and ireach columns are included.
iseg : int; unique identifier for each segment. Optional if rno is included.
ireach : int; unique identifier for each reach. Optional if rno is included.
geometry : shapely.geometry object representing location of each reach
inset_reach_data : str (filepath) or DataFrame
SFR reach data for inset model. Same columns as parent_reach_data,
except a geometry column isn't needed. line_id values must correspond to
same source hydrography as those in parent_reach_data.
mf2005_parent_sfr_outputfile : str (filepath)
Modflow-2005 style SFR text file budget output.
mf6_parent_sfr_budget_file : str (filepath)
Modflow-6 style SFR binary budget output
inset_grid : flopy.discretization.StructuredGrid instance describing model grid
Must be in same coordinate system as geometries in parent_reach_data.
Required only if active_area is None.
active_area : shapely.geometry.Polygon object
Describes the area of the inset model where SFR is applied. Used to find
inset reaches from parent model. Must be in same coordinate system as
geometries in parent_reach_data. Required only if inset_grid is None.
Returns
-------
inflows : DataFrame
Columns:
parent_segment : parent model segment
parent_reach : parent model reach
parent_rno : parent model reach number
line_id : unique identifier for hydrography line that each reach is based on
"""
locations = get_inflow_locations_from_parent_model(parent_reach_data=parent_reach_data,
inset_reach_data=inset_reach_data,
inset_grid=inset_grid,
active_area=active_area)
df = read_sfr_output(mf2005_sfr_outputfile=mf2005_parent_sfr_outputfile,
mf6_sfr_stage_file=None,
mf6_sfr_budget_file=mf6_parent_sfr_budget_file,
model=None)
def add_to_perioddata(sfrdata, data, flowline_routing=None,
variable='inflow',
line_id_column=None,
rno_column=None,
period_column='per',
data_column='Q_avg',
one_inflow_per_path=False):
"""Add data to the period data table (sfrdata.period_data)
for a MODFLOW-6 style sfrpackage.
Parameters
----------
sfrdata : sfrmaker.SFRData instance
SFRData instance with reach_data table attribute. To add observations from x, y coordinates,
the reach_data table must have a geometry column with LineStrings representing each reach, or
an sfrlines_shapefile is required. Reach numbers are assumed to be in an 'rno' column.
data : DataFrame, path to csv file, or list of DataFrames or file paths
Table with information on the observation sites to be located. Must have
either reach numbers (rno_column), line_ids (line_id_column),
or x and y locations (x_column_in_data and y_column_in_data).
flowline_routing : dict
Optional dictionary of routing for source hydrography. Only needed
if locating by line_id, and SFR network is a subset of the full source
hydrography (i.e. some lines were dropped in the creation of the SFR packge,
or if the sites are inflow points corresponding to lines outside of the model perimeter).
In this case, observation points referenced to line_ids that are missing from the SFR
network are placed at the first reach corresponding to the next downstream line_id
that is represented in the SFR network. By default, None.
variable : str, optional
Modflow-6 period variable (see Modflow-6 Description of Input and Outpu), by default 'inflow'
line_id_column : str
Column in data matching observation sites to line_ids in the source hydrography data.
Either line_id_column or rno_column must be specified. By default, None
rno_column : str
Column in data matching observation sites to reach numbers in the SFR network. By default, None.
period_column : str, optional
Column with modflow stress period for each inflow value, by default 'per', by default, 'per'.
data_column : str, optional
Column with flow values, by default 'Q_avg'
one_inflow_per_path : bool, optional
Limit inflows to one per (headwater to outlet) routing path, choosing the inflow location
that is furthest downstream. By default, False.
Returns
-------
Updates the sfrdata.perioddata DataFrame.
"""
sfrd = sfrdata
# allow input via a list of tables or single table
data = read_tables(data)
# cull data to valid periods
data = data.loc[data[period_column] >= 0].copy()
# map NHDPlus COMIDs to reach numbers
if flowline_routing is not None:
assert line_id_column in data.columns, \
"Data need an id column so {} locations can be mapped to reach numbers".format(variable)
# replace ids that are not keys (those outside the network) with zeros
# (0 is the exit condition for finding paths in get_next_id_in_subset)
flowline_routing = {k: v if v in flowline_routing.keys() else 0 for k, v in flowline_routing.items()}
rno_column = 'rno'
r1 = sfrd.reach_data.loc[sfrd.reach_data.ireach == 1]
line_id_rno_mapping = dict(zip(r1['line_id'], r1['rno']))
line_ids = get_next_id_in_subset(r1.line_id, flowline_routing,
data[line_id_column])
data['line_id_in_model'] = line_ids
data[rno_column] = [line_id_rno_mapping[lid] for lid in line_ids]
else:
assert rno_column in data.columns, \
"Data to add need reach number or flowline routing information is needed."
# check for duplicate inflows in same path
if variable == 'inflow' and one_inflow_per_path:
line_ids = set(data[line_id_column])
drop = set()
dropped_line_info_file = 'dropped_inflows_locations.csv'
for lid in line_ids:
path = find_path(flowline_routing, start=lid)
duplicated = set(path[1:]).intersection(line_ids)
if len(duplicated) > 0:
drop.add(lid)
txt = ('warning: {}: {} is upstream '
'of the following line_ids:\n{}\n'
'see {} for details.').format(line_id_column,
lid, duplicated,
dropped_line_info_file
)
print(txt)
if len(drop) > 0:
data.loc[data[line_id_column].isin(drop)].to_csv(dropped_line_info_file, index=False)
data = data.loc[~data[line_id_column].isin(drop)]
# add inflows to period_data
period_data = sfrd.period_data
period_data['rno'] = data[rno_column]
period_data['per'] = data[period_column]
period_data[variable] = data[data_column]
period_data['specified_line_id'] = data[line_id_column]
other_columns = [c for c in data.columns
if c not in {rno_column, period_column, data_column, line_id_column}]
for c in other_columns:
period_data[c] = data[c]
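# Sketch of intended use (file and column names are hypothetical):
#   add_to_perioddata(sfrd, 'inflows.csv',
#                     flowline_routing=routing_dict,
#                     line_id_column='comid',
#                     period_column='per', data_column='Q_avg')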
def add_to_segment_data(sfrdata, data, flowline_routing=None,
variable='flow',
line_id_column=None,
segment_column=None,
period_column='per',
data_column='Q_avg'):
"""Like add_to_perioddata, but for MODFLOW-2005.
"""
sfrd = sfrdata
# cull data to valid periods
data = data.loc[data[period_column] >= 0].copy()
# map NHDPlus COMIDs to reach numbers
if flowline_routing is not None:
assert line_id_column in data.columns, \
"Data need an id column so {} locations can be mapped to reach numbers".format(variable)
flowline_routing = {k: v if v in flowline_routing.keys() else 0 for k, v in flowline_routing.items()}
segment_column = 'segment'
r1 = sfrd.reach_data.loc[sfrd.reach_data.ireach == 1]
line_id_iseg_mapping = dict(zip(r1['line_id'], r1['iseg']))
line_ids = get_next_id_in_subset(r1.line_id, flowline_routing,
data[line_id_column])
data[segment_column] = [line_id_iseg_mapping[lid] for lid in line_ids]
else:
assert segment_column in data.columns, \
"Data to add need segment number or flowline routing information is needed."
# check for duplicate inflows in same path
if variable == 'flow':
line_ids = set(data[line_id_column])
drop = set()
dropped_line_info_file = 'dropped_inflows_locations.csv'
for lid in line_ids:
path = find_path(flowline_routing, start=lid)
duplicated = set(path[1:]).intersection(line_ids)
if len(duplicated) > 0:
drop.add(lid)
txt = ('warning: {}: {} is upstream '
'of the following line_ids:\n{}\n'
'see {} for details.').format(line_id_column,
lid, duplicated,
dropped_line_info_file
)
print(txt)
if len(drop) > 0:
data.loc[data[line_id_column].isin(drop)].to_csv(dropped_line_info_file, index=False)
data = data.loc[~data[line_id_column].isin(drop)]
# rename columns in data to be added to same names as segment_data
data.rename(columns={period_column: 'per',
segment_column: 'nseg',
data_column: variable},
inplace=True)
# update existing segment data
sfrd.segment_data.index = pd.MultiIndex.from_tuples(zip(sfrd.segment_data.per, sfrd.segment_data.nseg),
names=['per', 'nseg'])
loc = list(zip(data.per, data.nseg))
data.index = pd.MultiIndex.from_tuples(loc, names=['per', 'nseg'])
replace = sorted(list(set(data.index).intersection(sfrd.segment_data.index)))
add = sorted(list(set(data.index).difference(sfrd.segment_data.index)))
sfrd.segment_data.loc[replace, variable] = data.loc[replace, variable]
# concat on the added data (create additional rows in segment_data table)
to_concat = [sfrd.segment_data]
period_groups = data.loc[add, ['per', 'nseg', variable]].reset_index(drop=True).groupby('per')
for per, group in period_groups:
# start with existing data (row) for that segment
df = sfrd.segment_data.loc[(slice(None, None), group.nseg), :].copy()
df['per'] = per
        df.index = pd.MultiIndex.from_tuples(zip(df.per, df.nseg), names=['per', 'nseg'])
df[variable] = group[variable].values
to_concat.append(df)
    sfrd.segment_data = pd.concat(to_concat)
# -*- coding: utf-8 -*-
#%% Import NumPy
import numpy as np
# Import the stats module from SciPy
import scipy.stats as st
# Import pandas
import pandas as pd
# Import PyMC
import pymc3 as pm
# Import the pyplot module from Matplotlib
import matplotlib.pyplot as plt
# Import the progress-bar function from tqdm
from tqdm import trange
# Configure a Japanese font
from matplotlib.font_manager import FontProperties
import sys
if sys.platform.startswith('win'):
    FontPath = 'C:\\Windows\\Fonts\\meiryo.ttc'
elif sys.platform.startswith('darwin'):
    FontPath = '/System/Library/Fonts/ヒラギノ角ゴシック W4.ttc'
elif sys.platform.startswith('linux'):
    FontPath = '/usr/share/fonts/truetype/takao-gothic/TakaoPGothic.ttf'
else:
    print('This Python code does not support the OS you are using.')
    sys.exit()
jpfont = FontProperties(fname=FontPath)
#%% Bayesian inference on the mean and variance of a normal distribution via Gibbs sampling
# Gibbs sampler for the mean and variance of a normal distribution
def gibbs_gaussian(data, iterations, mu0, tau0, nu0, lam0):
    """
    Inputs
        data: observed data
        iterations: number of iterations
        mu0: mean of the normal prior on the mean
        tau0: standard deviation of the normal prior on the mean
        nu0: shape parameter of the inverse-gamma prior on the variance
        lam0: scale parameter of the inverse-gamma prior on the variance
    Output
        runs: Monte Carlo sample
    """
n = data.size
sum_data = data.sum()
mean_data = sum_data / n
variance_data = data.var()
inv_tau02 = 1.0 / tau0**2
mu0_tau02 = mu0 * inv_tau02
a = 0.5 * (n + nu0)
c = n * variance_data + lam0
sigma2 = variance_data
runs = np.empty((iterations, 2))
for idx in trange(iterations):
variance_mu = 1.0 / (n / sigma2 + inv_tau02)
mean_mu = variance_mu * (sum_data / sigma2 + mu0_tau02)
mu = st.norm.rvs(loc=mean_mu, scale=np.sqrt(variance_mu))
b = 0.5 * (n * (mu - mean_data)**2 + c)
sigma2 = st.invgamma.rvs(a, scale=b)
runs[idx, 0] = mu
runs[idx, 1] = sigma2
return runs
# Compute posterior statistics from the Monte Carlo sample
def mcmc_stats(runs, burnin, prob, batch):
    """
    Inputs
        runs: Monte Carlo sample
        burnin: number of burn-in draws to discard
        prob: interval probability (0 < prob < 1)
        batch: number of sub-chains the draws are split into
    Output
        DataFrame of posterior statistics
    """
traces = runs[burnin:, :]
n = traces.shape[0] // batch
k = traces.shape[1]
alpha = 100 * (1.0 - prob)
post_mean = np.mean(traces, axis=0)
post_median = np.median(traces, axis=0)
post_sd = np.std(traces, axis=0)
mc_err = [pm.mcse(traces[:, i].reshape((n, batch), order='F')).item(0) \
for i in range(k)]
ci_lower = np.percentile(traces, 0.5 * alpha, axis=0)
ci_upper = np.percentile(traces, 100 - 0.5 * alpha, axis=0)
hpdi = pm.hpd(traces, 1.0 - prob)
rhat = [pm.rhat(traces[:, i].reshape((n, batch), order='F')).item(0) \
for i in range(k)]
stats = np.vstack((post_mean, post_median, post_sd, mc_err,
ci_lower, ci_upper, hpdi.T, rhat)).T
    stats_string = ['Mean', 'Median', 'Std. dev.', 'MC error',
                    'Credible interval (lower)', 'Credible interval (upper)',
                    'HPDI (lower)', 'HPDI (upper)', '$\\hat R$']
    param_string = ['Mean $\\mu$', 'Variance $\\sigma^2$']
    return pd.DataFrame(stats, index=param_string, columns=stats_string)
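#%% A minimal sketch of running the sampler and summarizing the draws.
# Hedged example: 'data' here is synthetic (drawn from a standard normal) and
# the prior hyperparameters are illustrative assumptions, not from any source.
if __name__ == '__main__':
    data = st.norm.rvs(loc=0.0, scale=1.0, size=100)
    runs = gibbs_gaussian(data, iterations=5000,
                          mu0=0.0, tau0=10.0, nu0=2.0, lam0=2.0)
    print(mcmc_stats(runs, burnin=1000, prob=0.95, batch=4))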
# encoding: utf-8
from opendatatools.common import RestAgent
from opendatatools.common import date_convert, remove_non_numerical
from bs4 import BeautifulSoup
import datetime
import json
import pandas as pd
import io
from opendatatools.futures.futures_agent import _concat_df
import zipfile
class SHExAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
headers = {
"Accept": '*/*',
'Referer': 'http://www.sse.com.cn/market/sseindex/indexlist/',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',
}
self.add_headers(headers)
def get_index_list(self):
url = 'http://query.sse.com.cn/commonSoaQuery.do'
data = {
'sqlId': 'DB_SZZSLB_ZSLB',
}
response = self.do_request(url, data)
rsp = json.loads(response)
if 'pageHelp' in rsp:
data = rsp['pageHelp']['data']
return pd.DataFrame(data)
else:
return None
def get_index_component(self, index):
url = 'http://query.sse.com.cn/commonSoaQuery.do'
data = {
'sqlId': 'DB_SZZSLB_CFGLB',
'indexCode' : index,
}
response = self.do_request(url, data)
rsp = json.loads(response)
if 'pageHelp' in rsp:
data = rsp['pageHelp']['data']
return pd.DataFrame(data)
else:
return None
def get_dividend(self, code):
url = 'http://query.sse.com.cn/commonQuery.do'
data = {
'sqlId' : 'COMMON_SSE_GP_SJTJ_FHSG_AGFH_L_NEW',
'security_code_a' : code,
}
response = self.do_request(url, data)
rsp = json.loads(response)
if 'result' in rsp:
data = rsp['result']
            return pd.DataFrame(data)
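
# A minimal usage sketch (hedged: this hits the live SSE query endpoints, so
# network access is required; '000001' is an illustrative index code, not
# taken from this module).
if __name__ == '__main__':
    agent = SHExAgent()
    print(agent.get_index_list())
    print(agent.get_index_component('000001'))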
"""
This module merges temperature, humidity, and influenza data together
"""
import pandas as pd
import ast
__author__ = '<NAME>'
__license__ = 'MIT'
__status__ = 'release'
__url__ = 'https://github.com/caominhduy/TH-Flu-Modulation'
__version__ = '1.0.0'
def merge_flu(path='data/epidemiology/processed_CDC_2008_2021.csv'):
df = pd.read_csv(path, low_memory=False)
df['week'] = df['week'].astype('int')
df['year'] = df['year'].astype('int')
cols = ['state', 'week', 'year', 'level']
df = df.reindex(columns=cols)
return df
def merge_weather():
with open('data/geodata/state_abbr.txt', 'r') as f:
contents = f.read()
state_abbr_dict = ast.literal_eval(contents)
states = list(state_abbr_dict.values())
df_temp = pd.DataFrame(columns=['week', 'temp', 'state', 'year'])
df_humid = pd.DataFrame(columns=['week', 'humid', 'state', 'year'])
for year in list(range(2008, 2020)):
y = str(year)
df = pd.read_csv('data/weather/' + y + '-temp.csv')
temps = df[states[0]]
weeks = df['week']
snames = pd.Series(states)
snames = snames.repeat(len(weeks)).reset_index(drop=True)
for s in states[1:]:
temps = temps.append(df[s]).reset_index(drop=True)
weeks = weeks.append(df['week']).reset_index(drop=True)
frames = {'week': weeks, 'temp': temps, 'state': snames}
        df2 = pd.DataFrame(frames)
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import preprocessing
matplotlib.use("Agg")
import datetime
import torch
from finrl.config import config
from finrl.marketdata.yahoodownloader import YahooDownloader
from finrl.preprocessing.preprocessors import FeatureEngineer
from finrl.preprocessing.data import data_split
from finrl.env.multi_env_stocktrading import StockTradingEnv
from finrl.lxcalgorithms.gateway import Gateway
from finrl.model.multi_models import DRLAgent
from finrl.trade.backtest import backtest_stats as BackTestStats
def getEveryDay(begin_date,end_date):
    # inclusive of both the start and end dates
date_list = []
begin_date = datetime.datetime.strptime(begin_date, "%Y-%m-%d")
end_date = datetime.datetime.strptime(end_date,"%Y-%m-%d")
while begin_date <= end_date:
date_str = begin_date.strftime("%Y-%m-%d")
date_list.append(date_str)
begin_date += datetime.timedelta(days=1)
return date_list
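
# Quick worked example (inclusive on both ends):
# getEveryDay('2021-01-30', '2021-02-02')
# -> ['2021-01-30', '2021-01-31', '2021-02-01', '2021-02-02']
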
def train_one():
"""
train an agent
"""
print("==============Start Fetching Data===========")
print('GPU is :', torch.cuda.is_available())
    start_date = config.START_DATE
    end_date = config.START_TRADE_DATE
date_list = getEveryDay(start_date,end_date)
food = Gateway()
now = datetime.datetime.now().strftime("%Y%m%d-%Hh%M")
for i in range(0,1):
df = YahooDownloader(
start_date=start_date,
end_date=end_date,
ticker_list=config.DOW_30_TICKER,
).lxc_fetch_data()
print("==============Start Feature Engineering===========")
fe = FeatureEngineer(
use_technical_indicator=True,
tech_indicator_list=config.TECHNICAL_INDICATORS_LIST,
use_turbulence=True,
user_defined_feature=False,
)
processed = fe.preprocess_data(df)
train = data_split(processed, start_date, end_date)
stock_dimension = len(train.tic.unique())
#print("train.tic.unique() is:")
#print(train.tic.unique())
print('stock_dimension is:', stock_dimension)
state_space = (
1
+ 2 * stock_dimension
+ len(config.TECHNICAL_INDICATORS_LIST) * stock_dimension
)
env_kwargs = {
"hmax": 100,
"initial_amount": 1000000,
"buy_cost_pct": 0.001,
"sell_cost_pct": 0.001,
"state_space": state_space,
"stock_dim": stock_dimension,
"tech_indicator_list": config.TECHNICAL_INDICATORS_LIST,
# "action_space": stock_dimension,
"action_space": 1,
"reward_scaling": 1e-4
}
e_train_gym = StockTradingEnv(df=train, **env_kwargs)
env_train, _ = e_train_gym.get_sb_env()
agent = DRLAgent(env=env_train)
print("==============Model Training===========")
print("start training ddpg model")
##################################################################################################a2c
print("start pre_training model")
multi_number = stock_dimension
#temp = agent.get_model(model_name="a2c", lxc_stock_number=0)
#agent.train_model(model=temp, tb_log_name="a2c", total_timesteps=1000)
#print("e_train_gym.normal_high is:", e_train_gym.normal_high)
#print("e_train_gym.normal_low is:", e_train_gym.normal_low)
#e_trade_gym.normal_high = e_train_gym.normal_high
#e_trade_gym.normal_low = e_train_gym.normal_low
print("start main_training model")
for j in range(0, multi_number):
if(j+1>food.agents_number):
model_a2c = agent.get_model(model_name="a2c", lxc_stock_number=j,all_stock_number = multi_number)
model_a2c.all_stock_number = multi_number
model_a2c.env.reset()
if(j!=0):
trained_a2c = agent.get_pre_model(model=model_a2c, tb_log_name="a2c", total_timesteps=1000,
lxcName="lxcMulti" + str(j - 1))
trained_a2c.lxc_stock_number = j
trained_a2c.all_stock_number = multi_number
trained_a2c = trained_a2c.online_learning(total_timesteps=1000, tb_log_name="a2c")
agent.save_pre_model(model=trained_a2c, tb_log_name="a2c", total_timesteps=1000,
lxcName="lxcMulti" + str(j))
else:
trained_a2c = agent.train_lxc_model(
# model=model_a2c, tb_log_name="a2c", total_timesteps=80000,lxcType=1,lxcName="lxc2"
model=model_a2c, tb_log_name="a2c", total_timesteps=1000, lxcType= None, lxcName="lxcMulti" + str(j)
)
food.agents.append(trained_a2c)
print(j,"'s model is trained done")
else:
print("here!!!")
food.agents[j].all_stock_number = multi_number
food.agents[j].env = env_train
food.agents[j] = food.agents[j].online_learning(total_timesteps=1000, tb_log_name="a2c")
env_train.reset()
food.agents_number = multi_number
# names=["date","open","high","low","close","volume","tic","day",]
# df = pd.read_csv("./" + config.DATA_SAVE_DIR + "/" + "20210315-07h382" + ".csv",index_col=0)
print('GPU is :', torch.cuda.is_available())
#########################################################################
print("==============Start Trading===========")
    start_date = config.START_TRADE_DATE
    end_date = config.END_DATE
    df_account_value = pd.DataFrame()
from movie import app
from flask import render_template,flash
from movie.forms import MovieForm
@app.route('/',methods=['GET','POST'])
def movierec():
form=MovieForm()
if form.validate_on_submit():
import pandas as pd
import numpy as np
ratings=pd.read_csv('ratings.csv')
movies=pd.read_csv('movies.csv')
movie_data=pd.merge(ratings,movies,on='movieId')
movie_data.groupby('title')['rating'].mean().sort_values(ascending=False)
movie_data.groupby('title')['rating'].count().sort_values(ascending=False)
rating_mean=pd.DataFrame(movie_data.groupby('title')['rating'].mean())
rating_mean['rating_count']=pd.DataFrame(movie_data.groupby('title')['rating'].count())
user_movie_rating=movie_data.pivot_table(index='userId',columns='title',values='rating')
x=form.moviename.data
x=x.split()
for i in range(0,len(x)):
x[i]=x[i].capitalize()
x=' '.join(x)
flag=-1
for i in range(0,len(movies)):
if x in movies.iloc[i,1]:
x=movies.iloc[i,1]
flag=1
if flag==-1:
flash('There was some error. The admin has been notified. Please try another movie. Sorry for the inconvenience!!')
else:
movie=x
movie_ratings=user_movie_rating[movie]
movies_like_movie=user_movie_rating.corrwith(movie_ratings)
            corr_movie = pd.DataFrame(movies_like_movie, columns=['Correlation'])
# -*- coding: utf-8 -*-
from unittest import TestCase
import pandas as pd
from alphaware.base import (Factor,
FactorContainer)
from alphaware.enums import (FactorType,
OutputDataFormat,
FreqType,
FactorNormType)
from alphaware.analyzer import FactorIC
from pandas.util.testing import assert_frame_equal
class TestFactorIC(TestCase):
def test_factor_ic(self):
index = pd.MultiIndex.from_product([['2014-01-30', '2014-02-28', '2014-03-31'], ['001', '002']],
names=['trade_date', 'ticker'])
data1 = pd.DataFrame(index=index, data=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
factor_test1 = Factor(data=data1, name='alpha1')
factor_test3 = Factor(data=data1, name='alpha2')
test2_property = {'type': FactorType.FWD_RETURN,
'data_format': OutputDataFormat.MULTI_INDEX_DF,
'norm_type': FactorNormType.Null,
'freq': FreqType.EOM}
data2 = pd.DataFrame(index=index, data=[3.0, 2.0, 3.0, 7.0, 8.0, 9.0])
factor_test2 = Factor(data=data2, name='fwd_return1', property_dict=test2_property)
factor_test4 = Factor(data=data2, name='fwd_return2', property_dict=test2_property)
fc = FactorContainer('2014-01-30', '2014-02-28', [factor_test1, factor_test2, factor_test3, factor_test4])
t = FactorIC()
calculate = t.predict(fc)
expected = pd.DataFrame(data=[[-1.0, -1.0, -1.0, -1.0], [1.0, 1.0, 1.0, 1.0]],
index=pd.DatetimeIndex(['2014-01-30', '2014-02-28'], freq=None),
columns=['alpha1_fwd_return1', 'alpha2_fwd_return1', 'alpha1_fwd_return2',
'alpha2_fwd_return2'])
        assert_frame_equal(calculate, expected)
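
# Standard unittest entry point so the module can be run directly
# (an assumption about how the test is invoked; pytest discovery works too).
if __name__ == '__main__':
    import unittest
    unittest.main()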
import pandas as pd
import matplotlib as mpl
import numpy as np
from sklearn import metrics
import itertools
import warnings
from dateutil.relativedelta import relativedelta
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import ticker
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter
# plt.style.use('ggplot')
sns.set_theme(style="darkgrid")
font = {'size' : 12}
mpl.rc('font', **font)
mpl.rc('figure', max_open_warning = 0)
pd.set_option('display.max_columns',None)
pd.set_option('display.max_rows',25)
# only display whole years in figures
years = mdates.YearLocator()
years_fmt = mdates.DateFormatter('%Y')
print('Functions loaded.')
################################################################################
def melt_data(df):
'''
Takes in a Zillow Housing Data File (ZHVI) as a DataFrame in wide format
and returns a melted DataFrame
'''
melted = pd.melt(df, id_vars=['RegionID', 'RegionName', 'City', 'State', 'StateName', 'Metro', 'CountyName', 'SizeRank', 'RegionType'], var_name='date')
melted['date'] = pd.to_datetime(melted['date'], infer_datetime_format=True)
melted = melted.dropna(subset=['value'])
return melted
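
# Hedged usage sketch: 'Zip_Zhvi_2bedroom.csv' is a hypothetical filename for a
# wide-format ZHVI export containing the id columns listed in melt_data above.
def _example_melt(path='Zip_Zhvi_2bedroom.csv'):
    return melt_data(pd.read_csv(path))
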
def visualize_data(df, sf_all, bedrooms):
fig, ax = plt.subplots(figsize=(15,10))
ax.set_title(f'{bedrooms}-Bedroom Home Values in San Franciso by Zip Code', size=24)
sns.lineplot(data=df, x=df.date, y=df.value, ax=ax, hue='zipcode', style='zipcode')
sns.lineplot(data=sf_all, x=sf_all.index, y=sf_all.value, ax=ax, color = 'b', label='all')
ax.set_xlabel('Year', size=20)
ax.set_ylabel('Home Value (USD)', size=20)
ax.set_xlim(pd.Timestamp('1996'), pd.Timestamp('2022-05-31'))
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(years_fmt)
ax.set_yticks(np.linspace(1e5,1.5e6,15))
ax.set_ylim((1e5, 1.5e6))
ax.get_yaxis().set_major_formatter(ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.savefig(f'images/{bedrooms}_bdrm_home_values.png')
def create_df_dict(df):
zipcodes = list(set(df.zipcode))
keys = [zipcode for zipcode in map(str,zipcodes)]
data_list = []
for key in keys:
new_df = df.copy()[df.zipcode == int(key)]
new_df.drop('zipcode', inplace=True, axis=1)
new_df.columns = ['date', 'value']
new_df.date = pd.to_datetime(new_df.date)
new_df.set_index('date', inplace=True)
new_df = new_df.asfreq('M')
data_list.append(new_df)
df_dict = dict(zip(keys, data_list))
return df_dict
def test_stationarity(df_all, diffs=0):
if diffs == 2:
dftest = adfuller(df_all.diff().diff().dropna())
elif diffs == 1:
dftest = adfuller(df_all.diff().dropna())
else:
dftest = adfuller(df_all)
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic',
'p-value', '#Lags Used',
'Number of Observations Used'])
for key, value in dftest[4].items():
dfoutput['Critical Value (%s)' %key] = value
print (dfoutput)
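
# Hedged example: ADF-test the once-differenced series for a single zip code
# ('94109' is an illustrative key into a df_dict built by create_df_dict).
# test_stationarity(df_dict['94109']['value'], diffs=1)
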
def test_stationarity_all_zips(df_dict, diffs=0):
for zipcode, df in df_dict.items():
if diffs == 2:
dftest = adfuller(df.diff().diff().dropna())
elif diffs == 1:
dftest = adfuller(df.diff().dropna())
else:
dftest = adfuller(df)
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic',
'p-value', '#Lags Used',
'Number of Observations Used'])
for key, value in dftest[4].items():
dfoutput['Critical Value (%s)' %key] = value
print(dfoutput[1])
def plot_pacf_housing(df_all, bedrooms):
pacf_fig, ax = plt.subplots(1, 2, figsize=(12, 6))
pacf_fig.suptitle(f'Partial Autocorrelations of {bedrooms}-Bedroom Time Series for Entire San Francisco Data Set', fontsize=18)
plot_pacf(df_all, ax=ax[0])
ax[0].set_title('Undifferenced PACF', size=14)
ax[0].set_xlabel('Lags', size=14)
ax[0].set_ylabel('PACF', size=14)
plot_pacf(df_all.diff().dropna(), ax=ax[1])
ax[1].set_title('Differenced PACF', size=14)
ax[1].set_xlabel('Lags', size=14)
ax[1].set_ylabel('PACF', size=14)
pacf_fig.tight_layout()
pacf_fig.subplots_adjust(top=0.9)
plt.savefig(f'images/{bedrooms}_bdrm_PACF.png')
def plot_acf_housing(df_all, bedrooms):
acf_fig, ax = plt.subplots(1, 3, figsize=(18, 6))
acf_fig.suptitle(f'Autocorrelations of {bedrooms}-Bedroom Time Series for Entire San Francisco Data Set', fontsize=18)
plot_acf(df_all, ax=ax[0])
ax[0].set_title('Undifferenced ACF', size=14)
ax[0].set_xlabel('Lags', size=14)
ax[0].set_ylabel('ACF', size=14)
plot_acf(df_all.diff().dropna(), ax=ax[1])
ax[1].set_title('Once-Differenced ACF', size=14)
ax[1].set_xlabel('Lags', size=14)
ax[1].set_ylabel('ACF', size=14)
plot_acf(df_all.diff().diff().dropna(), ax=ax[2])
ax[2].set_title('Twice-Differenced ACF', size=14)
ax[2].set_xlabel('Lags', size=14)
ax[2].set_ylabel('ACF', size=14)
acf_fig.tight_layout()
acf_fig.subplots_adjust(top=0.9)
plt.savefig(f'images/{bedrooms}_bdrm_ACF.png')
def plot_seasonal_decomposition(df_all, bedrooms):
decomp = seasonal_decompose(df_all, period=12)
dc_obs = decomp.observed
dc_trend = decomp.trend
dc_seas = decomp.seasonal
dc_resid = decomp.resid
dc_df = pd.DataFrame({"observed": dc_obs, "trend": dc_trend,
"seasonal": dc_seas, "residual": dc_resid})
start = dc_df.iloc[:, 0].index[0]
end = dc_df.iloc[:, 0].index[-1] + relativedelta(months=+15) + relativedelta(day=31)
decomp_fig, axes = plt.subplots(4, 1, figsize=(15, 15))
for i, ax in enumerate(axes):
ax.plot(dc_df.iloc[:, i])
ax.set_xlim(start, end)
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(years_fmt)
ax.set_ylabel(dc_df.iloc[:, i].name)
if i != 2:
ax.get_yaxis().set_major_formatter(ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
plt.setp(ax.xaxis.get_majorticklabels(), ha="right", rotation=45, rotation_mode="anchor")
decomp_fig.suptitle(
f'Seasonal Decomposition of {bedrooms}-Bedroom Time Series of San Francisco Home Values (Mean)', fontsize=24)
decomp_fig.tight_layout()
decomp_fig.subplots_adjust(top=0.94)
plt.savefig(f'images/{bedrooms}_bdrm_seasonal_decomp.png')
def train_test_split_housing(df_dict, split=84):
print(f'Using a {split}/{100-split} train-test split...')
cutoff = [round((split/100)*len(df)) for zipcode, df in df_dict.items()]
train_dict_list = [df_dict[i][:cutoff[count]] for count, i in enumerate(list(df_dict.keys()))]
train_dict = dict(zip(list(df_dict.keys()), train_dict_list))
test_dict_list = [df_dict[i][cutoff[count]:] for count, i in enumerate(list(df_dict.keys()))]
test_dict = dict(zip(list(df_dict.keys()), test_dict_list))
return train_dict, test_dict
def gridsearch_SARIMAX(train_dict, seas = 12, p_min=2, p_max=2, q_min=0, q_max=0, d_min=1, d_max=1,
s_p_min=2, s_p_max=2, s_q_min=0, s_q_max=0, s_d_min=1, s_d_max=1, verbose=True):
p = range(p_min, p_max+1)
q = range(q_min, q_max+1)
d = range(d_min, d_max+1)
s_p = range(s_p_min, s_p_max+1)
s_q = range(s_q_min, s_q_max+1)
s_d = range(s_d_min, s_d_max+1)
pdq = list(itertools.product(p, d, q))
seasonal_pdq = [(x[0], x[1], x[2], seas) for x in list(itertools.product(s_p, s_d, s_q))]
if verbose:
print('Parameters for SARIMAX grid search...')
for i in pdq:
for s in seasonal_pdq:
print('SARIMAX: {} x {}'.format(i, s))
zipcodes = []
param_list = []
param_seasonal_list = []
aic_list = []
for zipcode, train in train_dict.items():
for param in pdq:
for param_seasonal in seasonal_pdq:
mod = SARIMAX(train,
order=param,
seasonal_order=param_seasonal,
enforce_stationarity=False,
enforce_invertibility=False)
zipcodes.append(zipcode[-5:])
param_list.append(param)
param_seasonal_list.append(param_seasonal)
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
aic = mod.fit(maxiter=1000).aic
except Warning as e:
continue
aic_list.append(aic)
if verbose:
print(param,param_seasonal)
print(f'Zip Code {zipcode} | AIC: {aic}')
else:
print('-', end='')
print('\nCompleted.')
return zipcodes, param_list, param_seasonal_list, aic_list
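
# Minimal end-to-end sketch (hypothetical: df_dict comes from create_df_dict
# above; the narrow parameter ranges keep the search cheap and are illustrative).
def _example_gridsearch(df_dict, bedrooms=2):
    train_dict, test_dict = train_test_split_housing(df_dict, split=84)
    results = gridsearch_SARIMAX(train_dict, seas=12, p_min=0, p_max=2,
                                 q_min=0, q_max=1, verbose=False)
    return get_best_params(*results, bedrooms)
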
def get_best_params(zipcodes, param_list, param_seasonal_list, aic_list, bedrooms):
# intialize list of model params
model_data = {'zipcode': zipcodes,
'param': param_list,
'param_seasonal': param_seasonal_list,
'aic': aic_list
}
# Create model params DataFrames
sarimax_details_df = pd.DataFrame(model_data)
# print(sarimax_details_df.shape)
best_params_df = sarimax_details_df.loc[sarimax_details_df.groupby('zipcode')['aic'].idxmin()]
best_params_df.set_index('zipcode', inplace=True)
print(best_params_df)
best_params_df.to_csv(f'data/{bedrooms}_bdrm_best_params.csv')
return best_params_df
def evaluate_model(train_dict, test_dict, model_best_df):
predict_dict = {}
cat_predict_dict = train_dict.copy()
for _ in range(5):
for zipcode, df in cat_predict_dict.items():
            if cat_predict_dict[zipcode].index[-1] >= pd.to_datetime('2021-02-28'):
from seedsKmeans import SEEDS
import pandas as pd
import BaseDados as BD
import numpy as np
tamanhos= [0.05, 0.06, 0.07, 0.08, 0.09, 0.1]
resultado = []
for tam in tamanhos:
X,Y = BD.base_qualquer('D:/basedados/vinhos.csv')
Y -= 1
dados = pd.DataFrame(X, columns=np.arange(np.size(X, axis=1)))
dados['classe'] = Y
acuracia = []
recall = []
precisao = []
fscore = []
for j in range(0, 10):
        print('Size:', tam, ' - Iteration ', j)
kmeans = SEEDS(dados,3)
acuracia.append(kmeans.acuracia)
recall.append(kmeans.recall)
precisao.append(kmeans.precisao)
fscore.append(kmeans.f1)
resultado.append([np.mean(acuracia), np.std(acuracia), np.mean(recall), np.mean(precisao), np.mean(fscore)])
colunas = ['ACCURACY', 'STD', 'RECALL', 'PRECISION', 'F-SCORE']
dados = pd.DataFrame(resultado, columns=colunas)
import pandas as pd
import dash
from dash.dependencies import Input, Output, State, MATCH, ALL
import dash_core_components as dcc
import dash_html_components as html
from dash.exceptions import PreventUpdate
import dash_bootstrap_components as dbc
import dash_table
import plotly.graph_objs as go
from threading import Thread
import queue
import serial
import serial.tools.list_ports
import time
from pathlib import Path
import json
import sqlite3
from datetime import datetime
# globals... yuk
FILE_DIR = ''
APP_ID = 'serial_data'
Q = queue.Queue()
SERIAL_THREAD = None
class SerialThread(Thread):
def __init__(self, port, baud=115200):
super().__init__()
self.port = port
self._isRunning = True
self.ser_obj = serial.Serial(port=port,
baudrate=baud,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
timeout=None)
def run(self):
while self._isRunning:
try:
while self.ser_obj.in_waiting > 2:
try:
line = self.ser_obj.readline()
split_line = line.strip().decode("utf-8")
Q.put(split_line)
except:
continue
except:
continue
def stop(self):
self._isRunning = False
time.sleep(0.25)
self.ser_obj.close()
return None
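
# Minimal sketch of driving SerialThread outside the Dash app (hypothetical
# port name; the thread pushes newline-delimited JSON strings onto the
# module-level queue Q).
def _example_serial_read(port='COM3', n_lines=10):
    t = SerialThread(port)
    t.start()
    for _ in range(n_lines):
        print(json.loads(Q.get()))
    t.stop()
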
# layout
layout = dbc.Container([
dbc.Row(
dbc.Col([
dcc.Store(id=f'{APP_ID}_store'),
dcc.Interval(id=f'{APP_ID}_interval',
interval=2000,
n_intervals=0,
disabled=True),
html.H2('Serial Data Plotter'),
html.P('This tests plotting data from serial (arduino) using a background thread to collect the data and send it to a queue. '
'Data is retrieved from the queue and stored in the browser as well as written to a file')
])
),
dbc.Row([
dbc.Col(
dbc.FormGroup([
dbc.Button('COM Ports (refresh)', id=f'{APP_ID}_com_button'),
dcc.Dropdown(id=f'{APP_ID}_com_dropdown',
placeholder='Select COM port',
options=[],
multi=False),
dbc.Textarea(id=f'{APP_ID}_com_desc_label', disabled=True )
]),
width=4
),
dbc.Col(
dbc.FormGroup([
dbc.Label('Headers'),
dbc.Button('Initialize Headers', id=f'{APP_ID}_init_header_button', block=True),
dash_table.DataTable(
id=f'{APP_ID}_header_dt',
columns=[
{"name": 'Position', "id": 'pos', "type": 'numeric', 'editable': False},
{"name": 'Name', "id": 'name', "type": 'text', 'editable': False},
{"name": 'Format', "id": 'fmt', "type": 'text', "presentation": 'dropdown'}
],
data=None,
editable=True,
row_deletable=False,
dropdown={
'fmt': {
'options': [
{'label': i, 'value': i} for i in ['text', 'real', 'integer']
],
},
}
),
]),
width=4
),
]),
dbc.Row(
dbc.Col([
dbc.Toast(
children=[],
id=f'{APP_ID}_header_toast',
header="Initialize Headers",
icon="danger",
dismissable=True,
is_open=False
),
],
width="auto"
),
),
dbc.Row([
dbc.Col(
dbc.FormGroup([
dbc.Label('Filename'),
dbc.Input(placeholder='filename',
id=f'{APP_ID}_filename_input',
type='text',
value=f'data/my_data_{datetime.now().strftime("%m.%d.%Y.%H.%M.%S")}.db')
])
)
]),
dbc.ButtonGroup([
dbc.Button('Start', id=f'{APP_ID}_start_button', n_clicks=0, disabled=True, size='lg', color='secondary'),
dbc.Button('Stop', id=f'{APP_ID}_stop_button', n_clicks=0, disabled=True, size='lg', color='secondary'),
dbc.Button('Clear', id=f'{APP_ID}_clear_button', n_clicks=0, disabled=True, size='lg'),
dbc.Button('Download Data', id=f'{APP_ID}_download_button', n_clicks=0, disabled=True, size='lg'),
],
className='mt-2 mb-2'
),
html.H2('Data Readouts'),
dcc.Dropdown(
id=f'{APP_ID}_readouts_dropdown',
multi=True,
options=[],
value=None
),
dbc.CardDeck(
id=f'{APP_ID}_readouts_card_deck'
),
html.H2('Data Plots', className='mt-2 mb-1'),
dbc.ButtonGroup([
dbc.Button("Add Plot", id=f'{APP_ID}_add_figure_button'),
dbc.Button("Remove Plot", id=f'{APP_ID}_remove_figure_button'),
]),
html.Div(
id=f'{APP_ID}_figure_div'
),
])
def add_dash(app):
@app.callback(
[Output(f'{APP_ID}_header_dt', 'data'),
Output(f'{APP_ID}_header_toast', 'children'),
Output(f'{APP_ID}_header_toast', 'is_open'),
],
[Input(f'{APP_ID}_init_header_button', 'n_clicks')],
[State(f'{APP_ID}_com_dropdown', 'value')]
)
def serial_data_init_header(n_clicks, com):
if n_clicks is None or com is None:
raise PreventUpdate
baud = 115200
try:
ser_obj = serial.Serial(port=com,
baudrate=baud,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
timeout=10)
split_line = '_'
while split_line[0] != '{':
line = ser_obj.readline()
split_line = line.strip().decode("utf-8")
jdic = json.loads(split_line)
data = [{'pos': i, 'name': k} for i, k in enumerate(jdic.keys())]
for i, k in enumerate(jdic.keys()):
if isinstance(jdic[k], int):
data[i].update({'fmt': 'integer'})
elif isinstance(jdic[k], float):
data[i].update({'fmt': 'real'})
else:
data[i].update({'fmt': 'text'})
ser_obj.close()
return data, '', False
except Exception as e:
return [{}], html.P(str(e)), True
@app.callback(
Output(f'{APP_ID}_com_dropdown', 'options'),
[Input(f'{APP_ID}_com_button', 'n_clicks')]
)
def serial_data_refresh_com_ports(n_clicks):
if n_clicks is None:
raise PreventUpdate
ports = [{'label': comport.device, 'value': comport.device} for comport in serial.tools.list_ports.comports()]
return ports
@app.callback(
Output(f'{APP_ID}_com_desc_label', 'value'),
[Input(f'{APP_ID}_com_dropdown', 'value')]
)
def serial_data_com_desc(com):
if com is None:
raise PreventUpdate
ports = [comport.device for comport in serial.tools.list_ports.comports()]
idx = ports.index(com)
descs = [comport.description for comport in serial.tools.list_ports.comports()]
return descs[idx]
@app.callback(
[
Output(f'{APP_ID}_interval', 'disabled'),
Output(f'{APP_ID}_start_button', 'disabled'),
Output(f'{APP_ID}_start_button', 'color'),
Output(f'{APP_ID}_stop_button', 'disabled'),
Output(f'{APP_ID}_stop_button', 'color'),
Output(f'{APP_ID}_clear_button', 'disabled'),
Output(f'{APP_ID}_clear_button', 'color'),
Output(f'{APP_ID}_filename_input', 'disabled'),
Output(f'{APP_ID}_filename_input', 'value'),
Output(f'{APP_ID}_header_dt', 'editable'),
Output(f'{APP_ID}_store', 'clear_data'),
],
[
Input(f'{APP_ID}_start_button', 'n_clicks'),
Input(f'{APP_ID}_stop_button', 'n_clicks'),
Input(f'{APP_ID}_clear_button', 'n_clicks'),
Input(f'{APP_ID}_header_dt', 'data'),
],
[
State(f'{APP_ID}_com_dropdown', 'value'),
State(f'{APP_ID}_filename_input', 'value'),
State(f'{APP_ID}_header_dt', 'data')
]
)
def serial_data_start_stop(n_start, n_stop, n_clear, hdr_data, port, filename, data_header):
global SERIAL_THREAD
global Q
ctx = dash.callback_context
if any([n_start is None, n_stop is None, port is None, hdr_data is None, n_clear is None]):
raise PreventUpdate
if pd.DataFrame(hdr_data).empty:
raise PreventUpdate
df_hdr = pd.DataFrame(data_header).sort_values('pos')
df_hdr['name'] = df_hdr['name'].fillna(df_hdr['pos'].astype(str))
headers = df_hdr['name'].tolist()
trig = ctx.triggered[0]['prop_id'].split('.')[0]
if trig == f'{APP_ID}_header_dt':
if len(data_header[0].keys()) == 3 and ~df_hdr.isnull().values.any():
return True, False, 'success', True, 'secondary', True, 'secondary', False, filename, True, False
else:
return True, True, 'secondary', True, 'secondary', True, 'secondary', False, filename, True, False
if trig == f'{APP_ID}_start_button':
print(f'starting: {filename}')
if filename is None or filename == '':
filename = f'data/my_data_{datetime.now().strftime("%m.%d.%Y.%H.%M.%S")}.db'
if (Path(FILE_DIR) / filename).exists():
clear = False
else:
clear = True
SERIAL_THREAD = SerialThread(port, baud=115200)
SERIAL_THREAD.start()
return False, True, 'secondary', False, 'danger', True, 'secondary', True, filename, False, clear
if trig == f'{APP_ID}_stop_button':
print('stopping')
SERIAL_THREAD.stop()
with Q.mutex:
Q.queue.clear()
return True, False, 'success', True, 'secondary', False, 'warning', False, filename, True, False
if trig == f'{APP_ID}_clear_button':
print('clearing')
filename = f'data/my_data_{datetime.now().strftime("%m.%d.%Y.%H.%M.%S")}.db'
return True, False, 'success', True, 'secondary', True, 'secondary', False, filename, True, True
@app.callback(
Output(f'{APP_ID}_store', 'data'),
[Input(f'{APP_ID}_interval', 'n_intervals')],
[State(f'{APP_ID}_interval', 'disabled'),
State(f'{APP_ID}_store', 'data'),
State(f'{APP_ID}_filename_input', 'value'),
State(f'{APP_ID}_header_dt', 'data')
]
)
def serial_data_update_store(n_intervals, disabled, data, filename, data_header):
global Q
# get data from queue
if disabled is not None and not disabled:
new_data = []
while not Q.empty():
new_data_dic = json.loads(Q.get())
new_data.append(tuple((new_data_dic[c["name"]] for c in data_header if c["name"] in new_data_dic.keys())))
conn = sqlite3.connect(FILE_DIR + filename)
c = conn.cursor()
c.execute(''' SELECT count(name) FROM sqlite_master WHERE type='table' AND name='my_data' ''')
if c.fetchone()[0] == 1:
c.executemany(f'INSERT INTO my_data VALUES ({(",".join(["?"] * len(data_header)) )})', new_data)
conn.commit()
last_row_id = c.execute("SELECT COUNT() FROM my_data").fetchone()[0]
conn.close()
else:
c.execute(
f'''CREATE TABLE my_data
(''' + ', '.join([f'{hdr["name"]} {hdr["fmt"]}' for hdr in data_header])
+ ')'
)
c.executemany(f'INSERT INTO my_data VALUES ({(",".join(["?"] * len(data_header)) )})', new_data)
conn.commit()
last_row_id = c.execute("SELECT COUNT() FROM my_data").fetchone()[0]
conn.close()
return last_row_id
@app.callback(
Output(f'{APP_ID}_readouts_dropdown', 'options'),
Input(f'{APP_ID}_header_dt', 'data')
)
def serial_data_readout_options(hdr_data):
if hdr_data is None:
raise PreventUpdate
if pd.DataFrame(hdr_data).empty:
raise PreventUpdate
df_hdr = pd.DataFrame(hdr_data).sort_values('pos')
df_hdr['name'] = df_hdr['name'].fillna(df_hdr['pos'].astype(str))
headers = df_hdr['name'].tolist()
options = [{'label': c, 'value': c} for c in headers]
return options
@app.callback(
Output(f'{APP_ID}_readouts_card_deck', 'children'),
Output(f'{APP_ID}_readouts_dropdown', 'value'),
Input(f'{APP_ID}_readouts_card_deck', 'children'),
Input(f'{APP_ID}_readouts_dropdown', 'value'),
)
def serial_data_create_readouts(cards, selected):
ctx = dash.callback_context
input_id = ctx.triggered[0]["prop_id"].split(".")[0]
if input_id == f'{APP_ID}_readouts_card_deck':
selected = []
for card in cards:
selected.append(card['id']['index'])
else:
# collect selected to create cards
cards = []
if selected is not None:
for s in selected:
cards.append(
dbc.Card(
id={'type': f'{APP_ID}_readout_card', 'index': s},
children=[
dbc.CardHeader(s),
]
)
)
return cards, selected
@app.callback(
Output({'type': f'{APP_ID}_readout_card', 'index': ALL}, 'children'),
Input(f'{APP_ID}_store', 'modified_timestamp'),
State(f'{APP_ID}_filename_input', 'value'),
)
def serial_data_update_readouts(ts, filename):
if any([v is None for v in [ts]]):
raise PreventUpdate
conn = sqlite3.connect(FILE_DIR + filename)
cur = conn.cursor()
n_estimate = cur.execute("SELECT COUNT() FROM my_data").fetchone()[0]
n_int = n_estimate // 10000 + 1
query = f'SELECT * FROM my_data WHERE ROWID % {n_int} = 0'
df = pd.read_sql(query, conn)
conn.close()
card_chs = []
for ccb in dash.callback_context.outputs_list:
y = df[ccb['id']['index']].iloc[-1]
ch = [
dbc.CardHeader(ccb['id']['index']),
dbc.CardBody(
dbc.ListGroup([
dbc.ListGroupItem(html.H3(f"{y:0.3g}"), color='info'),
]),
)
]
card_chs.append(ch)
return card_chs
@app.callback(
Output(f'{APP_ID}_figure_div', 'children'),
Input(f'{APP_ID}_add_figure_button', 'n_clicks'),
Input(f'{APP_ID}_remove_figure_button', 'n_clicks'),
Input(f'{APP_ID}_header_dt', 'data'),
State(f'{APP_ID}_figure_div', 'children'),
)
def serial_data_create_figures(n_add, n_remove, header_data, figure_objs):
ctx = dash.callback_context
input_id = ctx.triggered[0]["prop_id"].split(".")[0]
        df_header = pd.DataFrame(header_data)
# -*- coding: utf-8 -*-
"""
Created on Mon 9/2/14
Using python pandas to post process csv output from Pacejka Tire model
@author: <NAME>, 2014
"""
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import pylab as py
class PacTire_panda:
'''
@class: loads, manages and plots various output from Pacejka Tire model
'''
def __init__(self,fileName_steadyState,fileName_transient="none"):
'''
Input:
filename: full filename w/ input data in csv form
'''
# first file is the steady state magic formula output
self._m_filename_SS = fileName_steadyState
# optional: compare against the transient slip output
if( fileName_transient=="none"):
self._use_transient_slip = False
else:
self._use_transient_slip = True
self._m_filename_T = fileName_transient
df_T = pd.read_csv(self._m_filename_T, header=0, sep=',') # index_col=0,
self._m_df_T = df_T
# Header Form:
# time,kappa,alpha,gamma,kappaP,alphaP,gammaP,Vx,Vy,Fx,Fy,Fz,Mx,My,Mz,Fxc,Fyc,Mzc
df = pd.read_csv(self._m_filename_SS, header=0, sep=',') # index_col=0,
self._m_df = df
# @brief plot Forces, moments, pure slip vs. kappa
def plot_kappa_FMpure(self,adams_Fx_tab_filename="none",adams_Mz_tab_filename="none"):
# force v. kappa
figF = plt.figure()
df_kappa = pd.DataFrame(self._m_df, columns = ['kappa','Fx','Fz','Fxc'])
# always create the plot, axes handle
axF = df_kappa.plot(linewidth=2.0,x='kappa',y=['Fxc','Fz'])
axF.set_xlabel(r'$\kappa $ ')
axF.set_ylabel('Force [N]')
axF.set_title(r'$\kappa *$, pure long. slip')
if( adams_Fx_tab_filename != "none"):
# load in the adams junk data, Fx vs. kappa %
dfx_adams = pd.read_table(adams_Fx_tab_filename,sep='\t',header=0)
dfx_adams['Longitudinal Slip'] = dfx_adams['Longitudinal Slip'] / 100.0
axF.plot(dfx_adams['Longitudinal Slip'],dfx_adams['Longitudinal Force'],'r--',linewidth=1.5,label="Fx Adams")
# plt.legend(loc='best')
# compare transient slip output also?
if( self._use_transient_slip):
df_ts = pd.DataFrame(self._m_df_T, columns = ['kappa','Fx','Fxc'])
axF.plot(df_ts['kappa'],df_ts['Fx'],'c--',linewidth=1.0,label='Fx transient')
axF.plot(df_ts['kappa'],df_ts['Fxc'],'k-*',linewidth=1.0,label='Fxc transient')
axF.legend(loc='best')
figM = plt.figure()
df_kappaM = pd.DataFrame(self._m_df, columns = ['kappa','Mx','My','Mz','Mzc'])
axM = df_kappaM.plot(linewidth=2.0,x='kappa',y=['Mx','My','Mzc'])
if( adams_Mz_tab_filename != "none"):
dfmz_adams = pd.read_table(adams_Mz_tab_filename,sep='\t',header=0)
dfmz_adams['longitudinal_slip'] = dfmz_adams['longitudinal_slip']/100.
axM.plot(dfmz_adams['longitudinal_slip'], dfmz_adams['aligning_moment'],'r--',linewidth=1.5,label="Mz Adams")
if( self._use_transient_slip):
df_tsM = pd.DataFrame(self._m_df_T, columns = ['kappa','Mzc','Mzx','Mzy','M_zrc','t','s'])
axM.plot(df_tsM['kappa'],df_tsM['Mzc'],'k-*',linewidth=1.0,label='Mzc transient')
axM.plot(df_tsM['kappa'], df_tsM['Mzx'],'b--',linewidth=2,label='Mz,x')
axM.plot(df_tsM['kappa'], df_tsM['Mzy'],'g--',linewidth=2,label='Mz,y')
axM.plot(df_tsM['kappa'], df_tsM['M_zrc'],'y--',linewidth=2,label='M_zrc')
axM2 = axM.twinx()
axM2.plot(df_tsM['kappa'], df_tsM['t'],'b.',linewidth=1.0,label='t trail')
axM2.plot(df_tsM['kappa'], df_tsM['s'],'c.',linewidth=1.0,label='s arm')
axM2.set_ylabel('length [m]')
axM2.legend(loc='lower right')
axM.set_xlabel(r'$\kappa $ ')
axM.set_ylabel('Moment [N-m]')
axM.legend(loc='best')
axM.set_title(r'$\kappa $, pure long. slip')
# @brief plot Forces, Moments, pure slip vs. alpha
def plot_alpha_FMpure(self,adams_Fy_tab_filename="none",adams_Mz_tab_filename="none"):
figF = plt.figure()
df_sy = pd.DataFrame(self._m_df, columns = ['alpha','Fy','Fz','Fyc'])
axF = df_sy.plot(linewidth=2.0,x='alpha', y=['Fyc','Fz'])# y=['Fy','Fyc','Fz'])
if( adams_Fy_tab_filename != "none"):
# load in adams tabular data for Fy vs. alpha [deg]
dfy_adams = pd.read_table(adams_Fy_tab_filename,sep='\t',header=0)
axF.plot(dfy_adams['slip_angle'],dfy_adams['lateral_force'],'r--',linewidth=1.5,label="Fy Adams")
# compare transient slip output
if(self._use_transient_slip):
# already have this in df_T
df_ts = pd.DataFrame(self._m_df_T, columns = ['alpha','Fyc','Fy'] )
# axF.plot(df_ts['alpha'], df_ts['Fy'],'c-*',linewidth=2.0,label="Fy Transient")
axF.plot(df_ts['alpha'], df_ts['Fyc'],'k-*',linewidth=1.0,label="Fyc Transient")
axF.set_xlabel(r'$\alpha $[deg]')
axF.set_ylabel('Force [N]')
axF.legend(loc='best')
axF.set_title(r'$\alpha $, pure lateral slip')
figM = plt.figure()
df_M = pd.DataFrame(self._m_df, columns = ['alpha','Mx','My','Mz','Mzc'])
axM = df_M.plot(linewidth=2.0,x='alpha',y=['Mx','My','Mzc']) # ,'Mz'])
if( adams_Mz_tab_filename != "none"):
dfmz_adams = pd.read_table(adams_Mz_tab_filename,sep='\t',header=0)
axM.plot(dfmz_adams['slip_angle'], dfmz_adams['aligning_moment'],'r--',linewidth=1.5,label="Mz Adams")
# also plot transient slip outputs
if(self._use_transient_slip):
# already have this in df_T
df_tsM = pd.DataFrame(self._m_df_T, columns = ['alpha','Mz','Mzc','MP_z','M_zr','t','s'] )
# axM.plot(df_tsM['alpha'], df_tsM['Mz'],'k-*',linewidth=1.0,label="Mz Transient")
axM.plot(df_tsM['alpha'], df_tsM['Mzc'],'k-*',linewidth=1.0,label="Mzc Transient")
axM.plot(df_tsM['alpha'], df_tsM['MP_z'],'g--',linewidth=2,label="MP_z")
axM.plot(df_tsM['alpha'], df_tsM['M_zr'],'y--',linewidth=2,label="M_zr")
axM2 = axM.twinx()
axM2.plot(df_tsM['alpha'], df_tsM['t'],'b.',linewidth=1.0,label='t trail')
axM2.plot(df_tsM['alpha'], df_tsM['s'],'c.',linewidth=1.0,label='s arm')
axM2.legend(loc='lower right')
axM2.set_ylabel('length [m]')
axM.set_xlabel(r'$\alpha $[deg]')
axM.set_ylabel('Moment [N-m]')
axM.legend(loc='best')
axM.set_title(r'$\alpha $, pure lateral slip')
# @brief plot combined forces, moments vs. kappa
def plot_combined_kappa(self, adams_Fx_tab_filename="none",adams_Fy_tab_filename="none",adams_Mz_tab_filename="none"):
figF = plt.figure()
df_sy = pd.DataFrame(self._m_df, columns = ['kappa','Fxc','Fyc','m_Fz'])
axF = df_sy.plot(linewidth=1.5,x='kappa',y=['Fxc','Fyc']) # ,'Fz'])
# check to see if adams data is avaiable
if( adams_Fx_tab_filename != "none"):
# load in the adams junk data, Fx vs. kappa %
dfx_adams = pd.read_table(adams_Fx_tab_filename,sep='\t',header=0)
dfx_adams['longitudinal slip'] = dfx_adams['longitudinal slip'] / 100.0
axF.plot(dfx_adams['longitudinal slip'],dfx_adams['longitudinal force'],'r--',linewidth=1.5,label="Fx Adams")
if( adams_Fy_tab_filename != "none"):
# load in adams tabular data for Fy vs. alpha [deg]
dfy_adams = pd.read_table(adams_Fy_tab_filename,sep='\t',header=0)
axF.plot(dfy_adams['slip_angle'],dfy_adams['lateral_force'],'g--',linewidth=1.5,label="Fy Adams")
# plot transient slip output?
if( self._use_transient_slip):
# Fx here
df_T = pd.DataFrame(self._m_df_T, columns = ['kappa','Fxc','Fyc'])
axF.plot(df_T['kappa'], df_T['Fxc'],'k-*',linewidth=1.0,label='Fxc transient')
# axF.plot(df_T['kappa'], df_T['Fyc'],'c--',linewidth=1.5,label='Fyc transient')
axF.set_xlabel(r'$\kappa $')
axF.set_ylabel('Force [N]')
axF.legend(loc='best')
axF.set_title(r'$\kappa $, combined slip')
figM = plt.figure()
df_M = pd.DataFrame(self._m_df, columns = ['kappa','Mx','My','Mzc'])
axM = df_M.plot(linewidth=1.5,x='kappa',y=['Mx','My','Mzc'])
# chceck for adams tabular data
if( adams_Mz_tab_filename != "none"):
dfmz_adams = pd.read_table(adams_Mz_tab_filename,sep='\t',header=0)
dfmz_adams['longitudinal_slip'] = dfmz_adams['longitudinal_slip'] / 100.0
axM.plot(dfmz_adams['longitudinal_slip'], dfmz_adams['aligning_moment'],'r--',linewidth=1.5,label="Mz Adams")
# plot transient slip output for Mz?
if( self._use_transient_slip):
# Mzc here
df_T_M = pd.DataFrame(self._m_df_T, columns = ['kappa','Mzc','Fyc','Fxc','Mzx','Mzy','M_zrc','t','s'])
axM.plot(df_T_M['kappa'], df_T_M['Mzc'],'k-*',linewidth=1.0,label='Mzc transient')
# overlay the components of the moment from the x and y forces, respectively
axM.plot(df_T_M['kappa'], df_T_M['Mzx'],'b--',linewidth=2,label='Mz,x')
axM.plot(df_T_M['kappa'], df_T_M['Mzy'],'g--',linewidth=2,label='Mz,y')
axM.plot(df_T_M['kappa'], df_T_M['M_zrc'],'y--',linewidth=2,label='M_zrc')
axM2 = axM.twinx()
axM2.plot(df_T_M['kappa'], df_T_M['t'],'b.',linewidth=1.0,label='t trail')
axM2.plot(df_T_M['kappa'], df_T_M['s'],'c.',linewidth=1.0,label='s arm')
axM2.set_ylabel('length [m]')
axM2.legend(loc='lower right')
# overlay the forces, to see what is happening on those curves when
# Mz deviates from validation data values
'''
ax2 = axM.twinx()
ax2.plot(df_T_M['kappa'], df_T_M['Fxc'],'g-.',linewidth=1.5,label='Fxc transient')
ax2.plot(df_T_M['kappa'], df_T_M['Fyc'],'c-.',linewidth=1.5,label='Fyc transient')
ax2.set_ylabel('Force [N]')
ax2.legend(loc='lower right')
'''
axM.set_xlabel(r'$\kappa $')
axM.set_ylabel('Moment [N-m]')
axM.legend(loc='upper left')
axM.set_title(r'$\kappa $, combined slip')
# @brief plot Fy combined vs. kappa and alpha
def plot_combined_alpha(self,adams_Fx_tab_filename="none",adams_Fy_tab_filename="none",adams_Mz_tab_filename="none"):
figF = plt.figure()
df_sy = pd.DataFrame(self._m_df, columns = ['alpha','Fxc','Fyc','Fz'])
axF = df_sy.plot(linewidth=1.5,x='alpha',y=['Fxc','Fyc']) # ,'Fz'])
# check to see if adams data is avaiable
if( adams_Fx_tab_filename != "none"):
# load in the adams junk data, Fx vs. kappa %
dfx_adams = pd.read_table(adams_Fx_tab_filename,sep='\t',header=0)
dfx_adams['longitudinal slip'] = dfx_adams['longitudinal slip'] / 100.0
axF.plot(dfx_adams['longitudinal slip'],dfx_adams['longitudinal force'],'r--',linewidth=1.5,label="Fx Adams")
if( adams_Fy_tab_filename != "none"):
# load in adams tabular data for Fy vs. alpha [deg]
dfy_adams = pd.read_table(adams_Fy_tab_filename,sep='\t',header=0)
axF.plot(dfy_adams['slip_angle'],dfy_adams['lateral_force'],'g--',linewidth=1.5,label="Fy Adams")
if( self._use_transient_slip):
# Fy here
df_T = pd.DataFrame(self._m_df_T, columns = ['alpha','Fyc'])
axF.plot(df_T['alpha'], df_T['Fyc'],'k-*',linewidth=1.0,label='Fyc transient')
axF.set_xlabel(r'$\alpha $[deg]')
axF.set_ylabel('Force [N]')
axF.legend(loc='best')
axF.set_title(r'$\alpha $ , combined slip')
figM = plt.figure()
df_M = pd.DataFrame(self._m_df, columns = ['alpha','Mx','My','Mzc'])
axM = df_M.plot(linewidth=2.0,x='alpha',y=['Mx','My','Mzc'])
        # check for adams tabular data
if( adams_Mz_tab_filename != "none"):
dfmz_adams = pd.read_table(adams_Mz_tab_filename,sep='\t',header=0)
axM.plot(dfmz_adams['slip_angle'], dfmz_adams['aligning_moment'],'r--',linewidth=1.5,label="Mz Adams")
# plot transient slip output
if( self._use_transient_slip):
# Fx here
df_T_M = pd.DataFrame(self._m_df_T, columns = ['alpha','Mzc'])
axM.plot(df_T_M['alpha'], df_T_M['Mzc'],'k-*',linewidth=1.0,label='Mzc transient')
axM.set_xlabel(r'$\alpha $[deg]')
axM.set_ylabel('Moment [N-m]')
axM.legend(loc='best')
axM.set_title(r'$\alpha $, combined slip')
# @brief plot what you please
# @param x_col the column name of the x-series data frame
# @param y_col_list list of col names of y-series data frames
# Usage: tire.plot_custom('Fx',['Fy','Fyc'])
# NOTE: y_col_list should be of the same units
def plot_custom(self,x_col, y_col_list,fig_title='custom plot',compare_transient=False,compare_adams_file="none"):
fig = plt.figure()
df_cols = []
df_cols = list(y_col_list)
df_cols.append(x_col)
df_sy = pd.DataFrame(self._m_df, columns = df_cols)
ax = df_sy.plot(linewidth=1.5, x=x_col, y=y_col_list)
if(self._use_transient_slip and compare_transient):
df_fxfy = pd.DataFrame(self._m_df_T, columns = df_cols)
ax.plot(df_fxfy[x_col], df_fxfy[y_col_list[0]], 'k-', linewidth=2,label='transient')
if( compare_adams_file != "none"):
            dfy_adams = pd.read_table(compare_adams_file, sep='\t', header=0)
import os
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score
##### UTILITIES #######
def generate_keywords(keywords = "../data/keywords/keywords_italy.txt"):
"""
Generate a list of keywords (Wikipedia's pages) which are used to
select the columns of the dataframe we are going to use as dataset
to train our model.
:param keywords: the path to a file containing \n separated Wikipedia's
page names.
:return: a keyword list.
"""
selected_columns = []
file_ = open(keywords, "r")
for line in file_:
if line != "Week":
selected_columns.append(line.replace("\n", "").replace("\\", ""))
return selected_columns
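
# Hedged usage sketch: restrict a pageviews dataframe to the keyword columns
# (df is assumed to have one column per Wikipedia page).
# keywords = generate_keywords()
# df = df[df.columns.intersection(keywords)]
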
def generate_features(year_a, year_b, number_a, number_b):
if not year_a.empty:
if (number_a != 2007):
first_part= year_a.copy()[41:52]
else:
first_part= year_a.copy()[48:52]
else:
first_part = pd.DataFrame()
if not year_b.empty and number_b != 2007:
second_part= year_b.copy()[0:15]
else:
second_part = pd.DataFrame()
return first_part.append(second_part)
def generate(stop_year, exclude, path_features="./../data/wikipedia_italy/new_data"):
"""
Generate a dataframe with as columns the Wikipedia's pages and as rows
the number of pageviews for each week and for each page. The dataframe
contains all the influenza season without the one specified by stop_year.
:param stop_year: The influenza seasosn which will not be inserted into
the final dataframe.
:param path_features: the path to the directory containing all the files
with the data.
    :return: a dataframe containing all influenza seasons, which can be used to
train the model.
"""
# The stop year must not be in the exclude list
assert (stop_year not in exclude)
# Generate an empty dataframe
dataset = pd.DataFrame()
# Get all features files and sort the list
file_list = os.listdir(path_features)
file_list.sort()
for i in range(0, len(file_list)-1):
# If the file's year is equal than stop_year then do anything
if int(file_list[i].replace(".csv", "")) != stop_year-1:
tmp_a = pd.read_csv(os.path.join(path_features, file_list[i]), encoding = 'utf8', delimiter=',')
else:
tmp_a = pd.DataFrame()
if int(file_list[i+1].replace(".csv", "")) != stop_year:
tmp_b = pd.read_csv(os.path.join(path_features, file_list[i+1]), encoding = 'utf8', delimiter=',')
else:
            tmp_b = pd.DataFrame()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
### Read data from strip theory reference dataset
### Folder must contain List*.txt and Data*.*.bin files
from array import array
import pandas as pd
import matplotlib.pyplot as mpl
import os.path as path # to check either .csv file exists or not on disk
Ncfd = 2 # No. of CFD strips; same as the number of modes = 1 or 2
plot_data = True # plot data
data = {'nc':[],'md':[],'U':[],'d':[],'m':[],'L':[],'H':[],'Nt':[],'Dt':[],'tf':[],'ymax':[],'time':[],'y/d':[]} # dict object to create dataframe
# open List*.txt file
ftxt = open("List%d.txt" % Ncfd, "r")
line = ftxt.readline()
print(line[:-1])
for n in range(1000):
line = ftxt.readline()
if len(line) == 0:
break
print(line[:-1])
tmp = line.split()
nc = int(tmp[0]) # case number in List file
md = int(tmp[1]) # mode number
U = float(tmp[2]) # wind/air velocity [m/s]
d = float(tmp[3]) # cable diameter [m]
m = float(tmp[4]) # cable mass per unit length [kg/m]
L = float(tmp[5]) # cable length [m]
H = float(tmp[6]) # cable tension [N]
Nt = int(tmp[7]) # number of timesteps
Dt = float(tmp[8]) # timestep length [s]
tf = float(tmp[9]) # total time [s]
ymax = float(tmp[10]) # max(y) value [m]
filename = tmp[11] # = "Data%d.%d.bin" % (Ncfd, nc) # data file name
# open Data*.*.bin file
fdat=open(filename,"rb")
float_array = array('d')
float_array.fromfile(fdat, Nt)
time = float_array.tolist()
Fx = [[] for ncfd in range(Ncfd)]
Fy = [[] for ncfd in range(Ncfd)]
y = [[] for ncfd in range(Ncfd)]
for ncfd in range(Ncfd):
float_array = array('d')
float_array.fromfile(fdat, Nt)
Fx[ncfd] = float_array.tolist()
float_array = array('d')
float_array.fromfile(fdat, Nt)
Fy[ncfd] = float_array.tolist()
float_array = array('d')
float_array.fromfile(fdat, Nt)
y[ncfd] = float_array.tolist()
fdat.close()
# plot data
if plot_data:
fig3, axs = mpl.subplots(1)
for ncfd in range(Ncfd):
axs.plot(time, [y[ncfd][nt] / d for nt in range(Nt)])
for i,j in zip(time, [y[ncfd][nt] / d for nt in range(Nt)]):
# appending required data into dict
data['time'].append(i)
data['y/d'].append(j)
data['nc'].append(tmp[0])
data['md'].append(tmp[1])
data['U'].append(tmp[2])
data['d'].append(tmp[3])
data['m'].append(tmp[4])
data['L'].append(tmp[5])
data['H'].append(tmp[6])
data['Nt'].append(tmp[7])
data['Dt'].append(tmp[8])
data['tf'].append(tmp[9])
data['ymax'].append(tmp[10])
# can plot Fx and Fy in the same way
axs.set_xlabel('time [s]')
axs.set_ylabel('y / d')
axs.set_title('md = %d, H = %gN, U = %gm/s' % (md, H, U))
mpl.show()
ftxt.close()
pd.DataFrame(data).to_csv('data'+str(Ncfd)+'.csv',index=False) # Store dataframe on disk for current case
if path.exists('data1.csv') and path.exists('data2.csv'): # check if files exist on this path or not
df1 = pd.read_csv('data1.csv') # read case 1 dataframe
df2 = pd.read_csv('data2.csv') # read case 2 dataframe
    df = pd.concat([df1, df2])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# License: BSD-3 (https://tldrlegal.com/license/bsd-3-clause-license-(revised))
# Copyright (c) 2016-2021, <NAME>; <NAME>
# Copyright (c) 2022, QuatroPe
# All rights reserved.
# =============================================================================
# DOCS
# =============================================================================
"""Data abstraction layer.
This module defines the DecisionMatrix object, which internally encompasses
the alternative matrix, weights and objectives (MIN, MAX) of the criteria.
"""
# =============================================================================
# IMPORTS
# =============================================================================
import enum
import functools
from collections import abc
import numpy as np
import pandas as pd
from pandas.io.formats import format as pd_fmt
import pyquery as pq
from .dominance import DecisionMatrixDominanceAccessor
from .plot import DecisionMatrixPlotter
from .stats import DecisionMatrixStatsAccessor
from ..utils import deprecated, doc_inherit
# =============================================================================
# CONSTANTS
# =============================================================================
class Objective(enum.Enum):
"""Representation of criteria objectives (Minimize, Maximize)."""
#: Internal representation of minimize criteria
MIN = -1
#: Internal representation of maximize criteria
MAX = 1
# INTERNALS ===============================================================
_MIN_STR = "\u25bc"
_MAX_STR = "\u25b2"
#: Another way to name the maximization criteria.
_MAX_ALIASES = frozenset(
[
MAX,
_MAX_STR,
max,
np.max,
np.nanmax,
np.amax,
"max",
"maximize",
"+",
">",
]
)
#: Another ways to name the minimization criteria.
_MIN_ALIASES = frozenset(
[
MIN,
_MIN_STR,
min,
np.min,
np.nanmin,
np.amin,
"min",
"minimize",
"<",
"-",
]
)
# CUSTOM CONSTRUCTOR ======================================================
@classmethod
def construct_from_alias(cls, alias):
"""Return the alias internal representation of the objective."""
if isinstance(alias, cls):
return alias
if isinstance(alias, str):
alias = alias.lower()
if alias in cls._MAX_ALIASES.value:
return cls.MAX
if alias in cls._MIN_ALIASES.value:
return cls.MIN
raise ValueError(f"Invalid criteria objective {alias}")
# METHODS =================================================================
def __str__(self):
"""Convert the objective to an string."""
return self.name
def to_string(self):
"""Return the printable representation of the objective."""
if self.value in Objective._MIN_ALIASES.value:
return Objective._MIN_STR.value
if self.value in Objective._MAX_ALIASES.value:
return Objective._MAX_STR.value
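
# Quick sketch of the alias resolution above (doctest-style):
#
# >>> Objective.construct_from_alias("maximize")
# <Objective.MAX: 1>
# >>> Objective.construct_from_alias("<")
# <Objective.MIN: -1>
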
# =============================================================================
# _SLICER ARRAY
# =============================================================================
class _ACArray(np.ndarray, abc.Mapping):
"""Immutable Array to provide access to the alternative and criteria \
values.
The behavior is the same as a numpy.ndarray but if the slice it receives
is a value contained in the array it uses an external function
to access the series with that criteria/alternative.
Besides this it has the typical methods of a dictionary.
"""
def __new__(cls, input_array, skc_slicer):
obj = np.asarray(input_array).view(cls)
obj._skc_slicer = skc_slicer
return obj
@doc_inherit(np.ndarray.__getitem__)
def __getitem__(self, k):
try:
if k in self:
return self._skc_slicer(k)
return super().__getitem__(k)
except IndexError:
raise IndexError(k)
def __setitem__(self, k, v):
"""Raise an AttributeError, this object are read-only."""
raise AttributeError("_SlicerArray are read-only")
@doc_inherit(abc.Mapping.items)
def items(self):
return ((e, self[e]) for e in self)
@doc_inherit(abc.Mapping.keys)
def keys(self):
return iter(self)
@doc_inherit(abc.Mapping.values)
def values(self):
return (self[e] for e in self)
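    # Minimal behavior sketch (an assumption, not in the original source):
    # positional access works like a plain ndarray, while indexing with a
    # contained label delegates to the provided slicer callable.
    #
    # >>> arr = _ACArray(np.array(["C0", "C1"]), skc_slicer=lambda k: f"series-for-{k}")
    # >>> arr[0]
    # 'C0'
    # >>> arr["C1"]
    # 'series-for-C1'
    # >>> dict(arr.items())
    # {'C0': 'series-for-C0', 'C1': 'series-for-C1'}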
# =============================================================================
# DECISION MATRIX
# =============================================================================
class DecisionMatrix:
"""Representation of all data needed in the MCDA analysis.
This object gathers everything necessary to represent a data set used
in MCDA:
    - An alternative matrix where each row is an alternative and each
      column is a different criterion.
    - An optimization objective (Minimize, Maximize) for each criterion.
    - A weight for each criterion.
    - An independent data type for each criterion.
    DecisionMatrix has two main forms of construction:
    1. Use the default constructor of the DecisionMatrix class, providing a
       :py:class:`pandas.DataFrame` where the index is the alternatives
       and the columns are the criteria; an iterable of objectives with
       the same number of elements as the dataframe has columns/criteria;
       and an iterable of weights, also with the same number of elements
       as criteria.
.. code-block:: pycon
>>> import pandas as pd
>>> from skcriteria import DecisionMatrix, mkdm
>>> data_df = pd.DataFrame(
... [[1, 2, 3], [4, 5, 6]],
... index=["A0", "A1"],
... columns=["C0", "C1", "C2"]
... )
>>> objectives = [min, max, min]
>>> weights = [1, 1, 1]
>>> dm = DecisionMatrix(data_df, objectives, weights)
>>> dm
C0[▼ 1.0] C1[▲ 1.0] C2[▲ 1.0]
A0 1 2 3
A1 4 5 6
[2 Alternatives x 3 Criteria]
2. Use the classmethod `DecisionMatrix.from_mcda_data` which requests the
data in a more natural way for this type of analysis
(the weights, the criteria / alternative names, and the data types
are optional)
>>> DecisionMatrix.from_mcda_data(
... [[1, 2, 3], [4, 5, 6]],
... [min, max, min],
... [1, 1, 1])
C0[▼ 1.0] C1[▲ 1.0] C2[▲ 1.0]
A0 1 2 3
A1 4 5 6
[2 Alternatives x 3 Criteria]
For simplicity a function is offered at the module level analogous to
``from_mcda_data`` called ``mkdm`` (make decision matrix).
Parameters
----------
    data_df: :py:class:`pandas.DataFrame`
Dataframe where the index is the alternatives and the columns
are the criteria.
    objectives: :py:class:`numpy.ndarray`
        An iterable with the optimization sense of every criterion
        (you can use any alias defined in Objective), with the same
        length as the number of columns/criteria in data_df.
weights: :py:class:`numpy.ndarray`
An iterable with the weights also with the same amount of elements
as criteria.
"""
def __init__(self, data_df, objectives, weights):
self._data_df = (
data_df.copy()
if isinstance(data_df, pd.DataFrame)
else pd.DataFrame(data_df)
)
self._objectives = np.asarray(objectives, dtype=object)
self._weights = np.asanyarray(weights, dtype=float)
if not (
len(self._data_df.columns)
== len(self._weights)
== len(self._objectives)
):
raise ValueError(
"The number of weights, and objectives must be equal to the "
"number of criteria (number of columns in data_df)"
)
# CUSTOM CONSTRUCTORS =====================================================
@classmethod
def from_mcda_data(
cls,
matrix,
objectives,
weights=None,
alternatives=None,
criteria=None,
dtypes=None,
):
"""Create a new DecisionMatrix object.
        This method receives the parts into which the matrix of
        alternatives is conceptually divided.
Parameters
----------
matrix: Iterable
The matrix of alternatives. Where every row is an alternative
and every column is a criteria.
objectives: Iterable
The array with the sense of optimality of every
criteria. You can use any alias provided by the objective class.
        weights: Iterable or None (default ``None``)
            Optional weights of the criteria. If ``None``, all the criteria
            are weighted with 1.
        alternatives: Iterable or None (default ``None``)
            Optional names of the alternatives. If ``None``,
            all the alternatives are named "A[n]", where n is the row
            number in `matrix`, starting at 0.
        criteria: Iterable or None (default ``None``)
            Optional names of the criteria. If ``None``,
            all the criteria are named "C[m]", where m is the column
            number in `matrix`, starting at 0.
        dtypes: Iterable or None (default ``None``)
            Optional types of the criteria. If ``None``, the types are
            inferred automatically by pandas.
Returns
-------
:py:class:`DecisionMatrix`
A new decision matrix.
Example
-------
>>> DecisionMatrix.from_mcda_data(
... [[1, 2, 3], [4, 5, 6]],
... [min, max, min],
... [1, 1, 1])
C0[▼ 1.0] C1[▲ 1.0] C2[▲ 1.0]
A0 1 2 3
A1 4 5 6
[2 Alternatives x 3 Criteria]
For simplicity a function is offered at the module level analogous to
``from_mcda_data`` called ``mkdm`` (make decision matrix).
Notes
-----
        This functionality generates more sensible defaults than using the
        constructor of the DecisionMatrix class, but is slower.
"""
# first we need the number of alternatives and criteria
try:
a_number, c_number = np.shape(matrix)
except ValueError:
matrix_ndim = np.ndim(matrix)
raise ValueError(
f"'matrix' must have 2 dimensions, found {matrix_ndim} instead"
)
alternatives = np.asarray(
[f"A{idx}" for idx in range(a_number)]
if alternatives is None
else alternatives
)
if len(alternatives) != a_number:
raise ValueError(f"'alternatives' must have {a_number} elements")
criteria = np.asarray(
[f"C{idx}" for idx in range(c_number)]
if criteria is None
else criteria
)
if len(criteria) != c_number:
raise ValueError(f"'criteria' must have {c_number} elements")
weights = np.asarray(np.ones(c_number) if weights is None else weights)
data_df = pd.DataFrame(matrix, index=alternatives, columns=criteria)
if dtypes is not None and len(dtypes) != c_number:
raise ValueError(f"'dtypes' must have {c_number} elements")
elif dtypes is not None:
dtypes = {c: dt for c, dt in zip(criteria, dtypes)}
data_df = data_df.astype(dtypes)
return cls(data_df=data_df, objectives=objectives, weights=weights)
# MCDA ====================================================================
    # These properties are useful for accessing the underlying data
    # interactively. Except for alternatives and criteria, all other
    # properties expose the data as dataframes or series.
@property
def alternatives(self):
"""Names of the alternatives."""
arr = self._data_df.index.to_numpy()
slicer = self._data_df.loc.__getitem__
return _ACArray(arr, slicer)
@property
def criteria(self):
"""Names of the criteria."""
arr = self._data_df.columns.to_numpy()
slicer = self._data_df.__getitem__
return _ACArray(arr, slicer)
@property
def weights(self):
"""Weights of the criteria."""
return pd.Series(
self._weights,
dtype=float,
index=self._data_df.columns,
name="Weights",
)
@property
def objectives(self):
"""Objectives of the criteria as ``Objective`` instances."""
return pd.Series(
[Objective.construct_from_alias(a) for a in self._objectives],
index=self._data_df.columns,
name="Objectives",
)
@property
def minwhere(self):
"""Mask with value True if the criterion is to be minimized."""
mask = self.objectives == Objective.MIN
mask.name = "minwhere"
return mask
@property
def maxwhere(self):
"""Mask with value True if the criterion is to be maximized."""
mask = self.objectives == Objective.MAX
mask.name = "maxwhere"
return mask
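    # Hedged example (invented data, not from the original docs): minwhere
    # and maxwhere are boolean masks aligned with the criteria, convenient
    # for selecting columns by optimization sense.
    #
    # >>> dm = DecisionMatrix.from_mcda_data(
    # ...     [[1, 2, 3], [4, 5, 6]], [min, max, min])
    # >>> dm.minwhere.tolist()
    # [True, False, True]
    # >>> dm.matrix.loc[:, dm.maxwhere].columns.tolist()
    # ['C1']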
# READ ONLY PROPERTIES ====================================================
@property
def iobjectives(self):
"""Objectives of the criteria as ``int``.
- Minimize = Objective.MIN.value
- Maximize = Objective.MAX.value
"""
return pd.Series(
[o.value for o in self.objectives],
dtype=np.int8,
index=self._data_df.columns,
)
@property
def matrix(self):
"""Alternatives matrix as pandas DataFrame.
The matrix excludes weights and objectives.
        If you want to create a DataFrame with objectives and weights, use
``DecisionMatrix.to_dataframe()``
"""
return self._data_df.copy()
@property
def dtypes(self):
"""Dtypes of the criteria."""
return self._data_df.dtypes.copy()
    # ACCESSORS (YES, WE USE CACHED PROPERTIES; IT IS THE EASIEST WAY) ========
@property
@functools.lru_cache(maxsize=None)
def plot(self):
"""Plot accessor."""
return DecisionMatrixPlotter(self)
@property
@functools.lru_cache(maxsize=None)
def stats(self):
"""Descriptive statistics accessor."""
return DecisionMatrixStatsAccessor(self)
@property
@functools.lru_cache(maxsize=None)
def dominance(self):
"""Dominance information accessor."""
return DecisionMatrixDominanceAccessor(self)
# UTILITIES ===============================================================
def copy(self, **kwargs):
"""Return a deep copy of the current DecisionMatrix.
This method is also useful for manually modifying the values of the
DecisionMatrix object.
Parameters
----------
kwargs :
The same parameters supported by ``from_mcda_data()``. The values
provided replace the existing ones in the object to be copied.
Returns
-------
:py:class:`DecisionMatrix`
A new decision matrix.
"""
dmdict = self.to_dict()
dmdict.update(kwargs)
return self.from_mcda_data(**dmdict)
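    # Illustrative sketch (an assumption consistent with the code above):
    # because copy() round-trips through to_dict()/from_mcda_data(), keyword
    # overrides replace only the chosen part and everything else survives.
    #
    # >>> dm = DecisionMatrix.from_mcda_data(
    # ...     [[1, 2, 3], [4, 5, 6]], [min, max, min], [1, 1, 1])
    # >>> dm2 = dm.copy(weights=[0.5, 0.25, 0.25])
    # >>> dm2.weights.tolist()
    # [0.5, 0.25, 0.25]
    # >>> dm.weights.tolist()  # the original is untouched
    # [1.0, 1.0, 1.0]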
def to_dataframe(self):
"""Convert the entire DecisionMatrix into a dataframe.
        The objectives and weights are added as rows before the alternatives.
Returns
-------
:py:class:`pd.DataFrame`
A Decision matrix as pandas DataFrame.
Example
-------
.. code-block:: pycon
            >>> dm = DecisionMatrix.from_mcda_data(
            ...     [[1, 2, 3], [4, 5, 6]],
            ...     [min, max, min],
            ...     [1, 1, 1])
            >>> dm
C0[▼ 1.0] C1[▲ 1.0] C2[▲ 1.0]
A0 1 2 3
A1 4 5 6
>>> dm.to_dataframe()
C0 C1 C2
objectives MIN MAX MIN
weights 1.0 1.0 1.0
A0 1 2 3
A1 4 5 6
"""
data = np.vstack((self.objectives, self.weights, self.matrix))
index = np.hstack((["objectives", "weights"], self.alternatives))
df = pd.DataFrame(data, index=index, columns=self.criteria, copy=True)
return df
def to_dict(self):
"""Return a dict representation of the data.
        All the values are represented as numpy arrays.
"""
return {
"matrix": self.matrix.to_numpy(),
"objectives": self.iobjectives.to_numpy(),
"weights": self.weights.to_numpy(),
"dtypes": self.dtypes.to_numpy(),
"alternatives": np.asarray(self.alternatives),
"criteria": np.asarray(self.criteria),
}
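    # Round-trip sketch (assumed behavior, mirroring copy() above): the dict
    # produced here feeds from_mcda_data() back unchanged.
    #
    # >>> dm = DecisionMatrix.from_mcda_data([[1, 2], [3, 4]], [min, max])
    # >>> DecisionMatrix.from_mcda_data(**dm.to_dict()).equals(dm)
    # True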
@deprecated(
reason=(
"Use 'DecisionMatrix.stats()', "
"'DecisionMatrix.stats(\"describe\")' or "
"'DecisionMatrix.stats.describe()' instead."
),
version=0.6,
)
def describe(self, **kwargs):
"""Generate descriptive statistics.
Descriptive statistics include those that summarize the central
tendency, dispersion and shape of a dataset's distribution,
excluding ``NaN`` values.
Parameters
----------
Same parameters as ``pandas.DataFrame.describe()``.
Returns
-------
``pandas.DataFrame``
Summary statistics of DecisionMatrix provided.
"""
return self._data_df.describe(**kwargs)
# CMP =====================================================================
@property
def shape(self):
"""Return a tuple with (number_of_alternatives, number_of_criteria).
dm.shape <==> np.shape(dm)
"""
return np.shape(self._data_df)
def __len__(self):
"""Return the number ot alternatives.
dm.__len__() <==> len(dm).
"""
return len(self._data_df)
def equals(self, other):
"""Return True if the decision matrix are equal.
This method calls `DecisionMatrix.aquals` whitout tolerance.
Parameters
----------
other : :py:class:`skcriteria.DecisionMatrix`
Other instance to compare.
Returns
-------
        equals : :py:class:`bool`
            True if the two decision matrices are equal.
See Also
--------
aequals, :py:func:`numpy.isclose`, :py:func:`numpy.all`,
:py:func:`numpy.any`, :py:func:`numpy.equal`,
:py:func:`numpy.allclose`.
"""
return self.aequals(other, 0, 0, False)
def aequals(self, other, rtol=1e-05, atol=1e-08, equal_nan=False):
"""Return True if the decision matrix are equal within a tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
NaNs are treated as equal if they are in the same place and if
``equal_nan=True``. Infs are treated as equal if they are in the same
place and of the same sign in both arrays.
        The comparison proceeds as follows:
- If ``other`` is the same object return ``True``.
        - If ``other`` is not an instance of 'DecisionMatrix', or has a
          different shape, 'criteria', 'alternatives' or 'objectives',
          return ``False``.
- Next check the 'weights' and the matrix itself using the provided
tolerance.
Parameters
----------
other : :py:class:`skcriteria.DecisionMatrix`
Other instance to compare.
rtol : float
The relative tolerance parameter
(see Notes in :py:func:`numpy.allclose`).
atol : float
The absolute tolerance parameter
(see Notes in :py:func:`numpy.allclose`).
equal_nan : bool
Whether to compare NaN's as equal. If True, NaN's in dm will be
considered equal to NaN's in `other` in the output array.
Returns
-------
        aequals : :py:class:`bool`
            True if the two decision matrices are equal within the given
            tolerance; False otherwise.
See Also
--------
equals, :py:func:`numpy.isclose`, :py:func:`numpy.all`,
:py:func:`numpy.any`, :py:func:`numpy.equal`,
:py:func:`numpy.allclose`.
"""
return (self is other) or (
isinstance(other, DecisionMatrix)
and np.shape(self) == np.shape(other)
and np.array_equal(self.criteria, other.criteria)
and np.array_equal(self.alternatives, other.alternatives)
and np.array_equal(self.objectives, other.objectives)
and np.allclose(
self.weights,
other.weights,
rtol=rtol,
atol=atol,
equal_nan=equal_nan,
)
and np.allclose(
self.matrix,
other.matrix,
rtol=rtol,
atol=atol,
equal_nan=equal_nan,
)
)
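    # Hedged example (invented numbers): aequals tolerates small numerical
    # noise that the exact equals() rejects.
    #
    # >>> a = DecisionMatrix.from_mcda_data([[1.0, 2.0]], [min, max])
    # >>> b = DecisionMatrix.from_mcda_data([[1.0 + 1e-9, 2.0]], [min, max])
    # >>> a.equals(b), a.aequals(b)
    # (False, True)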
# repr ====================================================================
def _get_cow_headers(self):
"""Columns names with COW (Criteria, Objective, Weight)."""
headers = []
fmt_weights = pd_fmt.format_array(self.weights, None)
for c, o, w in zip(self.criteria, self.objectives, fmt_weights):
header = f"{c}[{o.to_string()}{w}]"
headers.append(header)
return headers
def _get_axc_dimensions(self):
"""Dimension foote with AxC (Alternativs x Criteria)."""
a_number, c_number = self.shape
dimensions = f"{a_number} Alternatives x {c_number} Criteria"
return dimensions
def __repr__(self):
"""dm.__repr__() <==> repr(dm)."""
header = self._get_cow_headers()
dimensions = self._get_axc_dimensions()
max_rows = pd.get_option("display.max_rows")
min_rows = pd.get_option("display.min_rows")
max_cols = pd.get_option("display.max_columns")
max_colwidth = | pd.get_option("display.max_colwidth") | pandas.get_option |
"""Tools for generating and forecasting with ensembles of models."""
import datetime
import numpy as np
import pandas as pd
import json
from autots.models.base import PredictionObject
from autots.models.model_list import no_shared
from autots.tools.impute import fill_median
horizontal_aliases = ['horizontal', 'probabilistic']
def summarize_series(df):
"""Summarize time series data. For now just df.describe()."""
df_sum = df.describe(percentiles=[0.1, 0.25, 0.5, 0.75, 0.9])
return df_sum
def mosaic_or_horizontal(all_series: dict):
"""Take a mosaic or horizontal model and return series or models.
Args:
all_series (dict): dict of series: model (or list of models)
"""
first_value = all_series[next(iter(all_series))]
if isinstance(first_value, dict):
return "mosaic"
else:
return "horizontal"
def parse_horizontal(all_series: dict, model_id: str = None, series_id: str = None):
"""Take a mosaic or horizontal model and return series or models.
Args:
all_series (dict): dict of series: model (or list of models)
model_id (str): name of model to find series for
series_id (str): name of series to find models for
Returns:
list
"""
if model_id is None and series_id is None:
raise ValueError(
"either series_id or model_id must be specified in parse_horizontal."
)
if mosaic_or_horizontal(all_series) == 'mosaic':
if model_id is not None:
return [ser for ser, mod in all_series.items() if model_id in mod.values()]
else:
return list(set(all_series[series_id].values()))
else:
if model_id is not None:
return [ser for ser, mod in all_series.items() if mod == model_id]
else:
# list(set([mod for ser, mod in all_series.items() if ser == series_id]))
return [all_series[series_id]]
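# Hedged lookup examples (invented ids) for the horizontal case:
#
# >>> all_series = {"s1": "m1", "s2": "m2", "s3": "m1"}
# >>> parse_horizontal(all_series, model_id="m1")
# ['s1', 's3']
# >>> parse_horizontal(all_series, series_id="s2")
# ['m2']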
def BestNEnsemble(
ensemble_params,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime: dict,
prediction_interval: float = 0.9,
):
"""Generate mean forecast for ensemble of models.
Args:
ensemble_params (dict): BestN ensemble param dict
should have "model_weights": {model_id: weight} where 1 is default weight per model
forecasts (dict): {forecast_id: forecast dataframe} for all models
same for lower_forecasts, upper_forecasts
forecast_runtime (dict): dictionary of {forecast_id: timedelta of runtime}
prediction_interval (float): metadata on interval
"""
startTime = datetime.datetime.now()
forecast_keys = list(forecasts.keys())
model_weights = dict(ensemble_params.get("model_weights", {}))
ensemble_params['model_weights'] = model_weights
ensemble_params['models'] = {
k: v
for k, v in dict(ensemble_params.get('models')).items()
if k in forecast_keys
}
model_count = len(forecast_keys)
if model_count < 1:
raise ValueError("BestN failed, no component models available.")
sample_df = next(iter(forecasts.values()))
columnz = sample_df.columns
indices = sample_df.index
model_divisor = 0
ens_df = pd.DataFrame(0, index=indices, columns=columnz)
ens_df_lower = pd.DataFrame(0, index=indices, columns=columnz)
ens_df_upper = pd.DataFrame(0, index=indices, columns=columnz)
for idx, x in forecasts.items():
current_weight = float(model_weights.get(idx, 1))
ens_df = ens_df + (x * current_weight)
# also .get(idx, 0)
ens_df_lower = ens_df_lower + (lower_forecasts[idx] * current_weight)
ens_df_upper = ens_df_upper + (upper_forecasts[idx] * current_weight)
model_divisor = model_divisor + current_weight
ens_df = ens_df / model_divisor
ens_df_lower = ens_df_lower / model_divisor
ens_df_upper = ens_df_upper / model_divisor
ens_runtime = datetime.timedelta(0)
for x in forecasts_runtime.values():
ens_runtime = ens_runtime + x
ens_result = PredictionObject(
model_name="Ensemble",
forecast_length=len(ens_df.index),
forecast_index=ens_df.index,
forecast_columns=ens_df.columns,
lower_forecast=ens_df_lower,
forecast=ens_df,
upper_forecast=ens_df_upper,
prediction_interval=prediction_interval,
predict_runtime=datetime.datetime.now() - startTime,
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result
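# The core of BestNEnsemble is a weighted elementwise mean over component
# forecasts: sum(weight_i * forecast_i) / sum(weight_i), applied identically
# to the point, lower and upper forecasts. A hedged numeric sketch of that
# step alone (invented frames and weights, bypassing PredictionObject):
#
# >>> idx = pd.date_range("2021-01-01", periods=2)
# >>> f1 = pd.DataFrame({"y": [10.0, 20.0]}, index=idx)
# >>> f2 = pd.DataFrame({"y": [30.0, 40.0]}, index=idx)
# >>> w = {"m1": 3.0, "m2": 1.0}
# >>> (f1 * w["m1"] + f2 * w["m2"]) / sum(w.values())
#                y
# 2021-01-01  15.0
# 2021-01-02  25.0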
def DistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
):
"""Generate forecast for distance ensemble."""
# handle that the inputs are now dictionaries
forecasts = list(forecasts.values())
lower_forecasts = list(lower_forecasts.values())
upper_forecasts = list(upper_forecasts.values())
forecasts_runtime = list(forecasts_runtime.values())
first_model_index = forecasts_list.index(ensemble_params['FirstModel'])
second_model_index = forecasts_list.index(ensemble_params['SecondModel'])
forecast_length = forecasts[0].shape[0]
dis_frac = ensemble_params['dis_frac']
first_bit = int(np.ceil(forecast_length * dis_frac))
second_bit = int(np.floor(forecast_length * (1 - dis_frac)))
ens_df = (
forecasts[first_model_index]
.head(first_bit)
.append(forecasts[second_model_index].tail(second_bit))
)
ens_df_lower = (
lower_forecasts[first_model_index]
.head(first_bit)
.append(lower_forecasts[second_model_index].tail(second_bit))
)
ens_df_upper = (
upper_forecasts[first_model_index]
.head(first_bit)
.append(upper_forecasts[second_model_index].tail(second_bit))
)
id_list = list(ensemble_params['models'].keys())
model_indexes = [idx for idx, x in enumerate(forecasts_list) if x in id_list]
ens_runtime = datetime.timedelta(0)
for idx, x in enumerate(forecasts_runtime):
if idx in model_indexes:
ens_runtime = ens_runtime + forecasts_runtime[idx]
ens_result_obj = PredictionObject(
model_name="Ensemble",
forecast_length=len(ens_df.index),
forecast_index=ens_df.index,
forecast_columns=ens_df.columns,
lower_forecast=ens_df_lower,
forecast=ens_df,
upper_forecast=ens_df_upper,
prediction_interval=prediction_interval,
predict_runtime=datetime.timedelta(0),
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result_obj
def horizontal_classifier(df_train, known: dict, method: str = "whatever"):
"""
    Classify unknown series with the appropriate model for horizontal ensembling.
Args:
df_train (pandas.DataFrame): historical data about the series. Columns = series_ids.
known (dict): dict of series_id: classifier outcome including some but not all series in df_train.
Returns:
dict.
"""
# known = {'EXUSEU': 'xx1', 'MCOILWTICO': 'xx2', 'CSUSHPISA': 'xx3'}
columnz = df_train.columns.tolist()
X = summarize_series(df_train).transpose()
X = fill_median(X)
known_l = list(known.keys())
unknown = list(set(columnz) - set(known_l))
Xt = X.loc[known_l]
Xf = X.loc[unknown]
Y = np.array(list(known.values()))
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
clf.fit(Xt, Y)
result = clf.predict(Xf)
result_d = dict(zip(Xf.index.tolist(), result))
# since this only has estimates, overwrite with known that includes more
final = {**result_d, **known}
# temp = pd.DataFrame({'series': list(final.keys()), 'model': list(final.values())})
# temp2 = temp.merge(X, left_on='series', right_index=True)
return final
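# Design note: GaussianNB is a cheap, assumption-light choice for assigning
# a model to unseen series from their summary statistics; any sklearn
# classifier exposing fit/predict could be swapped in. A hedged sketch with
# invented series names and model ids:
#
# >>> df_train = pd.DataFrame(np.random.rand(30, 3), columns=["a", "b", "c"])
# >>> horizontal_classifier(df_train, known={"a": "m1", "b": "m2"})  # doctest: +SKIP
# {'c': 'm1', 'a': 'm1', 'b': 'm2'}   # 'c' is an estimate and may vary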
def mosaic_classifier(df_train, known):
"""CLassify unknown series with the appropriate model for mosaic ensembles."""
known.index.name = "forecast_period"
upload = pd.melt(
known,
var_name="series_id",
value_name="model_id",
ignore_index=False,
).reset_index(drop=False)
upload['forecast_period'] = upload['forecast_period'].astype(int)
missing_cols = df_train.columns[
~df_train.columns.isin(upload['series_id'].unique())
]
if not missing_cols.empty:
forecast_p = np.arange(upload['forecast_period'].max() + 1)
p_full = np.tile(forecast_p, len(missing_cols))
missing_rows = pd.DataFrame(
{
'forecast_period': p_full,
'series_id': np.repeat(missing_cols.values, len(forecast_p)),
'model_id': np.nan,
},
index=None if len(p_full) > 1 else [0],
)
upload = | pd.concat([upload, missing_rows]) | pandas.concat |
import numpy as np
import pandas as pd
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.metrics import f1_score, roc_curve, auc, precision_recall_curve, \
precision_recall_fscore_support, average_precision_score
import os
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 22})
class Meter():
def __init__(self, props, gaps, model_types):
self.props = props
self.gaps = gaps
self.model_types = model_types
self.meter = {}
for prop in self.props:
self.meter[prop] = {}
for gap in self.gaps:
self.meter[prop][gap] = {}
for model_type in self.model_types:
self.meter[prop][gap][model_type] = []
# if prop == 'ael_bulk_modulus_vrh':
# self.meter[prop][gap][model_type] = []
# prop, gap, model_type
def update(self, prop, gap, model_type, output):
self.meter[prop][gap][model_type] = output
print(prop, gap, model_type)
def metrics(self):
precision = {}
recall = {}
fscore = {}
roc_auc = {}
pr_auc = {}
roc_points = {}
pr_points = {}
for prop in self.props:
precision[prop] = {}
recall[prop] = {}
fscore[prop] = {}
roc_auc[prop] = {}
pr_auc[prop] = {}
roc_points[prop] = {}
pr_points[prop] = {}
for gap in self.gaps:
precision[prop][gap] = {}
recall[prop][gap] = {}
fscore[prop][gap] = {}
roc_auc[prop][gap] = {}
pr_auc[prop][gap] = {}
roc_points[prop][gap] = {}
pr_points[prop][gap] = {}
for model_type in self.model_types:
if model_type == 'ridge_density' and prop != 'ael_bulk_modulus_vrh':
continue
output = self.meter[prop][gap][model_type]
# get roc_auc metrics
fpr, tpr, _ = roc_curve(output[2], output[3])
precisions, recalls, _ = precision_recall_curve(output[2],
output[3])
roc_points[prop][gap][model_type] = [fpr, tpr]
pr_points[prop][gap][model_type] = [recalls, precisions]
y_pred_labeled = [1 if x >= output[0] else
0 for x in output[3]]
prfs = precision_recall_fscore_support(output[2],
y_pred_labeled)
precision[prop][gap][model_type] = prfs[0]
recall[prop][gap][model_type] = prfs[1]
fscore[prop][gap][model_type] = prfs[2]
roc_auc[prop][gap][model_type] = auc(fpr, tpr)
pr_auc[prop][gap][model_type] = average_precision_score(
output[2], output[3])
self.precision = precision
self.recall = recall
self.fscore = fscore
self.roc_auc = roc_auc
self.pr_auc = pr_auc
self.roc_points = roc_points
self.pr_points = pr_points
def plot_curve(self, curve='roc', folder='figures'):
for prop in self.props:
for gap in self.gaps:
plt.figure(figsize=(7, 7))
for model_type in self.model_types:
if model_type == 'ridge_density' and prop != 'ael_bulk_modulus_vrh':
continue
if curve == 'roc':
x, y = self.roc_points[prop][gap][model_type]
xlabel = 'false positive rate'
ylabel = 'true positive rate'
metric = self.roc_auc[prop][gap]
elif curve == 'pr':
x, y = self.pr_points[prop][gap][model_type]
xlabel = 'recall'
ylabel = 'precision'
metric = self.pr_auc[prop][gap]
else:
                        print('wrong curve type')
break
plt.plot(x, y, label=model_type)
save_dir = folder+'/' + prop + '/' + str(gap) + '/'
os.makedirs(save_dir, exist_ok=True)
fig_name = save_dir + curve + '_curve.png'
plt.legend()
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.tick_params(direction='in', top=True, right=True)
plt.savefig(fig_name, dpi=300)
print('Area under curve:', metric)
def save(self, folder='figures'):
for prop in self.props:
for gap in self.gaps:
save_dir = folder+'/' + prop + '/' + str(gap) + '/'
os.makedirs(save_dir, exist_ok=True)
columns = ['precision',
'recall',
'fscore',
'roc_auc',
'pr_auc']
df_metrics = | pd.DataFrame(columns=columns) | pandas.DataFrame |
import itertools
import numpy
import os
import random
import re
import scipy.spatial.distance as ssd
import scipy.stats
from scipy.cluster.hierarchy import dendrogram, linkage
import pandas
from matplotlib import colors
from matplotlib import pyplot as plt
import vectors
from libs import tsne
rubensteinGoodenoughData = None
def rubensteinGoodenough(wordIndexMap, embeddings):
global rubensteinGoodenoughData
if rubensteinGoodenoughData is None:
rubensteinGoodenoughData = []
rubensteinGoodenoughFilePath = 'res/RG/EN-RG-65.txt'
with open(rubensteinGoodenoughFilePath) as rgFile:
lines = rgFile.readlines()
for line in lines:
word0, word1, targetScore = tuple(line.strip().split('\t'))
targetScore = float(targetScore)
rubensteinGoodenoughData.append((word0, word1, targetScore))
scores = []
targetScores = []
for word0, word1, targetScore in rubensteinGoodenoughData:
if word0 in wordIndexMap and word1 in wordIndexMap:
targetScores.append(targetScore)
word0Index = wordIndexMap[word0]
word1Index = wordIndexMap[word1]
word0Embedding = embeddings[word0Index]
word1Embedding = embeddings[word1Index]
score = vectors.cosineSimilarity(word0Embedding, word1Embedding)
scores.append(score)
if len(scores) == 0:
return numpy.nan
pearson, pearsonDeviation = scipy.stats.pearsonr(scores, targetScores)
spearman, spearmanDeviation = scipy.stats.spearmanr(scores, targetScores)
rubensteinGoodenoughMetric = numpy.mean([pearson, spearman])
return rubensteinGoodenoughMetric
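# The metric above is the mean of the Pearson and Spearman correlations
# between model similarities and human judgements. A hedged illustration of
# that aggregation step in isolation (invented, perfectly linear scores):
#
# >>> scores = [0.9, 0.1, 0.5]
# >>> targetScores = [1.8, 0.2, 1.0]
# >>> pearson, _ = scipy.stats.pearsonr(scores, targetScores)
# >>> spearman, _ = scipy.stats.spearmanr(scores, targetScores)
# >>> round(numpy.mean([pearson, spearman]), 3)
# 1.0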
wordSimilarity353Data = None
def wordSimilarity353(wordIndexMap, embeddings):
global wordSimilarity353Data
if wordSimilarity353Data is None:
wordSimilarity353Data = []
wordSimilarity353FilePath = 'res/WordSimilarity-353/combined.csv'
data = pandas.read_csv(wordSimilarity353FilePath)
for word0, word1, score in zip(data['Word1'], data['Word2'], data['Score']):
wordSimilarity353Data.append((word0, word1, score))
scores = []
targetScores = []
for word0, word1, targetScore in wordSimilarity353Data:
if word0 in wordIndexMap and word1 in wordIndexMap:
targetScores.append(targetScore)
word0Index = wordIndexMap[word0]
word1Index = wordIndexMap[word1]
word0Embedding = embeddings[word0Index]
word1Embedding = embeddings[word1Index]
score = vectors.cosineSimilarity(word0Embedding, word1Embedding)
scores.append(score)
if len(scores) == 0:
return numpy.nan
pearson, pearsonDeviation = scipy.stats.pearsonr(scores, targetScores)
spearman, spearmanDeviation = scipy.stats.spearmanr(scores, targetScores)
metric = numpy.mean([pearson, spearman])
return metric
simLex999Data = None
def simLex999(wordIndexMap, embeddings):
global simLex999Data
if simLex999Data is None:
simLex999Data = []
simLex999FilePath = 'res/SimLex-999/SimLex-999.txt'
data = pandas.read_csv(simLex999FilePath, sep='\t')
for word0, word1, targetScore in zip(data['word1'], data['word2'], data['SimLex999']):
simLex999Data.append((word0, word1, targetScore))
targetScores = []
scores = []
for word0, word1, targetScore in simLex999Data:
if word0 in wordIndexMap and word1 in wordIndexMap:
targetScores.append(targetScore)
word0Index = wordIndexMap[word0]
word1Index = wordIndexMap[word1]
word0Embedding = embeddings[word0Index]
word1Embedding = embeddings[word1Index]
score = vectors.cosineSimilarity(word0Embedding, word1Embedding)
scores.append(score)
if len(scores) == 0:
return numpy.nan
pearson, pearsonDeviation = scipy.stats.pearsonr(scores, targetScores)
spearman, spearmanDeviation = scipy.stats.spearmanr(scores, targetScores)
simLex999Metric = numpy.mean([pearson, spearman])
return simLex999Metric
syntacticWordData = None
def syntacticWordRelations(wordIndexMap, embeddings, maxWords=10):
global syntacticWordData
if syntacticWordData is None:
syntacticWordData = []
syntWordRelFilePath = 'res/Syntactic-Word-Relations/questions-words.txt'
with open(syntWordRelFilePath, 'r') as swrFile:
lines = swrFile.readlines()
syntacticWordData = [tuple(line.lower().split(' ')) for line in lines if not line.startswith(':')]
syntacticWordData = [(word0.strip(), word1.strip(), word2.strip(), word3.strip()) for word0, word1, word2, word3 in syntacticWordData]
scores = []
for word0, word1, word2, word3 in syntacticWordData:
if word0 not in wordIndexMap or word1 not in wordIndexMap or word2 not in wordIndexMap or word3 not in wordIndexMap:
continue
word0Index = wordIndexMap[word0]
word1Index = wordIndexMap[word1]
word2Index = wordIndexMap[word2]
word3Index = wordIndexMap[word3]
word0Embedding = embeddings[word0Index]
word1Embedding = embeddings[word1Index]
word2Embedding = embeddings[word2Index]
word3Embedding = embeddings[word3Index]
similarity01 = vectors.cosineSimilarity(word0Embedding, word1Embedding)
similarity23 = vectors.cosineSimilarity(word2Embedding, word3Embedding)
score = 1
minSimilarityDelta = abs(similarity01 - similarity23)
for embedding in embeddings[:maxWords]:
similarity2N = vectors.cosineSimilarity(word2Embedding, embedding)
similarityDelta = abs(similarity01 - similarity2N)
score = not (similarityDelta < minSimilarityDelta)
if not score:
break
scores.append(score)
if len(scores) == 0:
return numpy.nan
syntacticWordRelationsMetric = float(sum(scores)) / len(scores)
return syntacticWordRelationsMetric
satQuestionsData = None
def satQuestions(wordIndexMap, embeddings):
global satQuestionsData
if satQuestionsData is None:
satQuestionsData = []
satQuestionsFilePath = 'res/SAT-Questions/SAT-package-V3.txt'
maxLineLength = 50
aCode = ord('a')
with open(satQuestionsFilePath) as satFile:
line = satFile.readline()
while line != '':
if len(line) < maxLineLength:
                    match = re.match(r'(?P<word0>[\w-]+)\s(?P<word1>[\w-]+)\s[nvar]:[nvar]', line)
if match:
stemWord0, stemWord1 = match.group('word0'), match.group('word1')
satQuestion = [stemWord0, stemWord1]
line = satFile.readline()
                        match = re.match(r'(?P<word0>[\w-]+)\s(?P<word1>[\w-]+)\s[nvar]:[nvar]', line)
while match:
choiceWord0, choiceWord1 = match.group('word0'), match.group('word1')
satQuestion.append(choiceWord0)
satQuestion.append(choiceWord1)
line = satFile.readline()
                            match = re.match(r'(?P<word0>[\w-]+)\s(?P<word1>[\w-]+)\s[nvar]:[nvar]', line)
correctChoiceIndex = ord(line.strip()) - aCode
satQuestion.append(correctChoiceIndex)
satQuestionsData.append(satQuestion)
line = satFile.readline()
scores = []
for satQuestion in satQuestionsData:
if any([word not in wordIndexMap for word in satQuestion[:-1]]):
continue
stemWord0, stemWord1 = satQuestion[:2]
stemWord0Index = wordIndexMap[stemWord0]
stemWord1Index = wordIndexMap[stemWord1]
stemWord0Embedding, stemWord1Embedding = embeddings[stemWord0Index], embeddings[stemWord1Index]
stemSimilarity = vectors.cosineSimilarity(stemWord0Embedding, stemWord1Embedding)
correctChoiceIndex = satQuestion[-1]
choiceSimilarityDeltas = []
choices = satQuestion[2:-1]
        for i in range(0, len(choices), 2):
choiceWord0, choiceWord1 = choices[i], choices[i+1]
choiceWord0Index, choiceWord1Index = wordIndexMap[choiceWord0], wordIndexMap[choiceWord1]
choiceWord0Embedding, choiceWord1Embedding = embeddings[choiceWord0Index], embeddings[choiceWord1Index]
choiceSimilarity = vectors.cosineSimilarity(choiceWord0Embedding, choiceWord1Embedding)
choiceSimilarityDelta = abs(stemSimilarity - choiceSimilarity)
choiceSimilarityDeltas.append(choiceSimilarityDelta)
choiceIndex = numpy.argmin(choiceSimilarityDeltas)
scores.append(int(choiceIndex == correctChoiceIndex))
if len(scores) == 0:
return numpy.nan
metric = float(sum(scores)) / len(scores)
return metric
def validate(wordIndexMap, embeddings):
rg = rubensteinGoodenough(wordIndexMap, embeddings)
sim353 = wordSimilarity353(wordIndexMap, embeddings)
sl999 = simLex999(wordIndexMap, embeddings)
syntRel = syntacticWordRelations(wordIndexMap, embeddings)
sat = satQuestions(wordIndexMap, embeddings)
return rg, sim353, sl999, syntRel, sat
def dump(metricsPath, epoch, customMetrics):
metrics = {
'epoch': epoch
}
for name, value in customMetrics.items():
metrics[name] = value
metrics = [metrics]
if os.path.exists(metricsPath):
with open(metricsPath, 'a') as metricsFile:
metricsHistory = | pandas.DataFrame.from_dict(metrics) | pandas.DataFrame.from_dict |
from datetime import datetime, timedelta
import pandas as pd
import argparse
# My home instition(s)
_home_insts = ['Argonne National Laboratory', 'University of Chicago']
# Read in the command-line options
parser = argparse.ArgumentParser()
parser.add_argument('--date', help='Date of proposal submission in MM-DD-YYYY', type=str, default=None)
parser.add_argument('--format', help='Format of the output', type=str, default='latex',
choices=['latex', 'bes', 'bes-plus-advisors', 'paragraph', 'nsf'])
parser.add_argument('--years', help='Years of collaborators to print', default=4, type=int)
parser.add_argument('--remove-home', action='store_true', help='Whether to remove people from home institution.')
args = parser.parse_args()
# Get the date of the proposal
if args.date is None:
# Assume the next 14 days
date = datetime.now() + timedelta(days=14)
else:
date = datetime.strptime(args.date, '%m-%d-%Y')
print(f'Getting collaborators {args.years} years before {date.date()}')
# Read in collaborator list
collabs = | pd.read_excel('collaborators.xlsx', sheet_name='Coauthors') | pandas.read_excel |
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import matplotlib.dates as dates
import numpy as np
import pandas as pd
from datetime import datetime, timedelta, time
import calendar
import seaborn as sns
from hypnospy import Wearable
from hypnospy import Experiment
import warnings
class Viewer(object):
"""
Class used for plotting sleep, activity and HR signals from the Wearable.data df.
"""
def __init__(self, input: {Wearable, Experiment}):
if input is None:
raise ValueError("Invalid value for input.")
elif type(input) is Wearable:
self.wearables = [input]
elif type(input) is Experiment:
self.wearables = input.get_all_wearables()
sns.set_context("talk", font_scale=1.3, rc={"axes.linewidth": 2, 'image.cmap': 'plasma', })
plt.rcParams['font.size'] = 18
plt.rcParams['image.cmap'] = 'plasma'
plt.rcParams['axes.linewidth'] = 2
plt.rc('font', family='serif')
@staticmethod
def __get_details(alphas, colors, edgecolors, labels, part, index,
default_alpha=1.0, default_color="black", default_edgecolor=None, default_label="label"):
alpha, color, edgecolor, label = default_alpha, default_color, default_edgecolor, default_label
if alphas is not None and part in alphas:
alpha = alphas[part]
if isinstance(alpha, list):
alpha = alpha[index]
if colors is not None and part in colors:
color = colors[part]
if isinstance(color, list):
color = color[index]
if edgecolors is not None and part in edgecolors:
edgecolor = edgecolors[part]
if isinstance(edgecolor, list):
edgecolor = edgecolor[index]
if labels is not None and part in labels:
label = labels[part]
if isinstance(label, list):
label = label[index]
return alpha, color, edgecolor, label
@staticmethod
def get_day_label(df):
s = ""
startdate = df.index[0]
enddate = df.index[-1]
if startdate.day == enddate.day:
s = "%d - %s\n %s" % (
startdate.day, calendar.month_name[startdate.month][:3], calendar.day_name[startdate.dayofweek])
else:
if startdate.month == enddate.month:
s = "%d/%d - %s\n %s/%s" % (
startdate.day, enddate.day, calendar.month_name[startdate.month][:3],
calendar.day_name[startdate.dayofweek][:3], calendar.day_name[enddate.dayofweek][:3])
else:
s = "%d - %s/%d - %s\n %s/%s" % (
startdate.day, calendar.month_name[startdate.month][:3], enddate.day,
calendar.month_name[enddate.month][:3],
calendar.day_name[startdate.dayofweek][:3], calendar.day_name[enddate.dayofweek][:3])
return s
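    # Hedged sketch (invented timestamps): the label joins day numbers,
    # month abbreviations and weekday names, collapsing duplicates when the
    # frame stays within one calendar day.
    #
    # >>> df = pd.DataFrame(index=pd.date_range("2021-03-05", periods=3, freq="H"))
    # >>> Viewer.get_day_label(df)
    # '5 - Mar\n Friday'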
def view_signals(self, signal_categories: list = ["activity", "hr", "pa_intensity", "sleep"],
other_signals: list = [], signal_as_area: list = [], resample_to: str = None,
sleep_cols: list = [], select_days: list = None, zoom: list = ["00:00:00", "23:59:59"],
alphas: dict = None, colors: dict = None, edgecolors: dict = None, labels: dict = None,
text: list = []
):
# Many days, one day per panel
for wearable in self.wearables:
Viewer.view_signals_wearable(wearable, signal_categories, other_signals, signal_as_area, resample_to,
sleep_cols, select_days, zoom, alphas, colors, edgecolors, labels, text)
@staticmethod
def view_signals_wearable(wearable: Wearable, signal_categories: list, other_signals: list, signal_as_area: list,
resample_to: str, sleep_cols: list, select_days: list, zoom: list,
alphas: dict = None, colors: dict = None, edgecolors: dict = None, labels: dict = None,
text: list = []):
# Convert zoom to datatime object:
assert len(zoom) == 2
zoom_start = datetime.strptime(zoom[0], '%H:%M:%S')
zoom_end = datetime.strptime(zoom[1], '%H:%M:%S')
textstr = 'day: validation id \n'
cols = []
for signal in signal_categories:
if signal == "activity":
cols.append(wearable.get_activity_col())
elif signal == "hr":
if wearable.get_hr_col():
cols.append(wearable.get_hr_col())
else:
raise KeyError("HR is not available for PID %s" % wearable.get_pid())
elif signal == "pa_intensity":
if hasattr(wearable, 'pa_cutoffs') and hasattr(wearable, 'pa_names'):
for pa in wearable.pa_names:
if pa in wearable.data.keys():
cols.append(pa)
else:
raise ValueError("PA Intensity levels not available for PID %s" % (wearable.get_pid()))
elif signal == "sleep":
for sleep_col in sleep_cols:
if sleep_col not in wearable.data.keys():
raise ValueError("Could not find sleep_col (%s). Aborting." % sleep_col)
cols.append(sleep_col)
elif signal == "diary" and wearable.diary_onset in wearable.data.keys() and \
wearable.diary_offset in wearable.data.keys():
cols.append(wearable.diary_onset)
cols.append(wearable.diary_offset)
else:
cols.append(signal)
if len(cols) == 0:
raise ValueError("Aborting: Empty list of signals to show.")
if wearable.data.empty:
warnings.warn("Aborting: Dataframe for PID %s is empty." % wearable.get_pid())
return
cols.append(wearable.time_col)
for col in set(other_signals + signal_as_area):
cols.append(col)
if "validation" in text:
            df_plot = wearable.data[cols + ['hyp_invalid']].set_index(wearable.time_col)
else:
df_plot = wearable.data[cols].set_index(wearable.time_col)
if resample_to is not None:
df_plot = df_plot.resample(resample_to).mean()
# Add column for experiment day. It will be resampled using the the mean
cols.append(wearable.experiment_day_col)
changed_experiment_hour = False
if not Viewer.__is_default_zoom(zoom_start, zoom_end) and zoom_start.hour != wearable.hour_start_experiment:
changed_experiment_hour = True
saved_start_hour = wearable.hour_start_experiment
wearable.change_start_hour_for_experiment_day(zoom_start.hour)
if resample_to is not None:
df_plot[wearable.experiment_day_col] = wearable.data[
[wearable.time_col, wearable.experiment_day_col]].set_index(wearable.time_col).resample(resample_to).median()
else:
df_plot[wearable.experiment_day_col] = wearable.data[
[wearable.time_col, wearable.experiment_day_col]].set_index(wearable.time_col)[wearable.experiment_day_col]
if changed_experiment_hour:
wearable.change_start_hour_for_experiment_day(saved_start_hour)
# Daily version
# dfs_per_day = [pd.DataFrame(group[1]) for group in df_plot.groupby(df_plot.index.day)]
# Based on the experiment day gives us the correct chronological order of the days
if select_days is not None:
df_plot = df_plot[df_plot[wearable.experiment_day_col].isin(select_days)]
if df_plot.empty:
raise ValueError("Invalid day selection: no remaining data to show.")
dfs_per_group = [ | pd.DataFrame(group[1]) | pandas.DataFrame |
'''
Module : Stats
Description : Statistical calculations for Hatch
Copyright : (c) <NAME>, 16 Oct 2019-2021
License : MIT
Maintainer : <EMAIL>
Portability : POSIX
'''
import argparse
import logging
import pandas as pd
import numpy as np
from itertools import combinations
import math
import scipy
from hatch.command_base import CommandBase
import hatch.utils as utils
import hatch.constants as const
class IsNorm(CommandBase, name="isnorm"):
description = "Test whether numerical features differ from a normal distribution."
category = "transformation"
def __init__(self):
self.options = None
def parse_args(self, args):
parser = argparse.ArgumentParser(usage=f'{self.name} -h | {self.name} <arguments>', add_help=True)
parser.add_argument(
'-c', '--columns', metavar='FEATURE', nargs="*", type=str, required=False,
            help='Select only these columns')
self.options = parser.parse_args(args)
def run(self, df):
options = self.options
selected_df = df
if options.columns is not None:
utils.validate_columns_error(df, options.columns)
selected_df = df[options.columns]
# select only the numeric columns
selected_df = selected_df.select_dtypes(include=np.number)
selected_columns = selected_df.columns
out_columns = []
out_stats = []
out_p_values = []
# process each column in turn, computing normaltest
# we do each column separately so that we can handle NAs independently in each column
for column in selected_columns:
this_column = df[column]
this_notna = this_column.dropna()
k2, p_value = scipy.stats.normaltest(this_notna)
out_columns.append(column)
out_stats.append(k2)
out_p_values.append(p_value)
result_df = pd.DataFrame({'column': out_columns, 'statistic': out_stats, 'p_value': out_p_values})
return result_df
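# Hedged usage sketch (invented data; assumes the hatch command pipeline
# simply calls parse_args() then run()): IsNorm applies
# scipy.stats.normaltest per numeric column, dropping NAs independently.
#
# >>> cmd = IsNorm()
# >>> cmd.parse_args(["--columns", "height"])
# >>> df = pd.DataFrame({"height": np.random.normal(170, 10, size=200)})
# >>> out = cmd.run(df)  # doctest: +SKIP
# >>> list(out.columns)  # doctest: +SKIP
# ['column', 'statistic', 'p_value']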
class Correlation(CommandBase, name="corr"):
description = "Pairwise correlation between numerical columns."
category = "transformation"
def __init__(self):
self.options = None
def parse_args(self, args):
parser = argparse.ArgumentParser(usage=f'{self.name} -h | {self.name} <arguments>', add_help=True)
parser.add_argument(
'-c', '--columns', metavar='FEATURE', nargs="*", type=str, required=False,
            help='Select only these columns')
parser.add_argument('--method', required=False, default=const.DEFAULT_CORR_METHOD, choices=const.ALLOWED_CORR_METHODS,
help=f'Method for determining correlation. Allowed values: %(choices)s. Default: %(default)s.')
self.options = parser.parse_args(args)
def run(self, df):
options = self.options
if options.columns is not None:
utils.validate_columns_error(df, options.columns)
df = df[options.columns]
corr_df_wide = df.corr(method=options.method).reset_index()
corr_df_long = | pd.melt(corr_df_wide, id_vars='index') | pandas.melt |
"""
/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
@author: <NAME>
"""
import argparse
import datetime
import os
import posixpath
import configparser as CP
from jinja2 import Template
import pandas as pd
import create_input as ca
from azure.storage.blob import BlockBlobService, PageBlobService, AppendBlobService
from azure.storage.file import FileService
import asyncio
class ClipsInAzureStorageAccount(object):
def __init__(self, config, alg):
self._account_name = os.path.basename(config['StorageUrl']).split('.')[0]
if '.file.core.windows.net' in config['StorageUrl']:
self._account_type = 'FileStore'
elif '.blob.core.windows.net' in config['StorageUrl']:
self._account_type = 'BlobStore'
self._account_key = config['StorageAccountKey']
self._container = config['Container']
self._alg = alg
self._clips_path = config['Path'].lstrip('/')
self._clip_names = []
self._modified_clip_names = []
self._SAS_token = ''
@property
def container(self):
return self._container
@property
def alg(self):
return self._alg
@property
def clips_path(self):
return self._clips_path
@property
async def clip_names(self):
if len(self._clip_names) <= 0:
await self.get_clips()
return self._clip_names
@property
def store_service(self):
if self._account_type == 'FileStore':
return FileService(account_name = self._account_name, account_key = self._account_key)
elif self._account_type == 'BlobStore':
return BlockBlobService(account_name=self._account_name, account_key=self._account_key)
@property
def modified_clip_names(self):
self._modified_clip_names = [os.path.basename(clip) for clip in self._clip_names]
return self._modified_clip_names
async def traverse_down_filestore(self, dirname):
files = self.store_service.list_directories_and_files(self.container, os.path.join(self.clips_path, dirname))
await self.retrieve_contents(files, dirname)
async def retrieve_contents(self, list_generator, dirname=''):
for e in list_generator:
if '.wav' in e.name:
if not dirname:
self._clip_names.append(e.name)
else:
self._clip_names.append(posixpath.join(dirname.lstrip('/'), e.name))
else:
await self.traverse_down_filestore(e.name)
async def get_clips(self):
if self._account_type == 'FileStore':
files = self.store_service.list_directories_and_files(self.container, self.clips_path)
if not self._SAS_token:
self._SAS_token = self.store_service.generate_share_shared_access_signature(self.container, permission='read', expiry=datetime.datetime(2019, 10, 30, 12, 30), start=datetime.datetime.now())
await self.retrieve_contents(files)
elif self._account_type == 'BlobStore':
blobs = self.store_service.list_blobs(self.container, self.clips_path)
await self.retrieve_contents(blobs)
def make_clip_url(self, filename):
if self._account_type == 'FileStore':
source_url = self.store_service.make_file_url(self.container, self.clips_path, filename, sas_token=self._SAS_token)
elif self._account_type == 'BlobStore':
source_url = self.store_service.make_blob_url(self.container, filename)
return source_url
class GoldSamplesInStore(ClipsInAzureStorageAccount):
def __init__(self, config, alg):
super().__init__(config, alg)
self._SAS_token = ''
async def get_dataframe(self):
clips = await self.clip_names
df = pd.DataFrame(columns=['gold_clips', 'gold_clips_ans'])
clipsList = []
for clip in clips:
clipUrl = self.make_clip_url(clip)
rating = 5
if 'noisy' in clipUrl.lower():
rating = 1
clipsList.append({'gold_clips':clipUrl, 'gold_clips_ans':rating})
df = df.append(clipsList)
return df
class TrappingSamplesInStore(ClipsInAzureStorageAccount):
async def get_dataframe(self):
clips = await self.clip_names
df = pd.DataFrame(columns=['trapping_clips', 'trapping_ans'])
clipsList = []
for clip in clips:
clipUrl = self.make_clip_url(clip)
rating = 0
if '_bad_' in clip.lower():
rating = 1
elif '_poor_' in clip.lower():
rating = 2
elif '_fair_' in clip.lower():
rating = 3
elif '_good_' in clip.lower():
rating = 4
elif '_excellent_' in clip.lower():
rating = 5
clipsList.append({'trapping_clips':clipUrl, 'trapping_ans':rating})
df = df.append(clipsList)
return df
class PairComparisonSamplesInStore(ClipsInAzureStorageAccount):
async def get_dataframe(self):
clips = await self.clip_names
pair_a_clips = [self.make_clip_url(clip) for clip in clips if '40S_' in clip]
pair_b_clips = [clip.replace('40S_', '50S_') for clip in pair_a_clips]
df = pd.DataFrame({'pair_a':pair_a_clips, 'pair_b':pair_b_clips})
return df
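# Naming convention assumed by get_dataframe() above: pair-A clips contain
# the token "40S_" and each pair-B counterpart shares the same name with
# "50S_" substituted. A hedged sketch of the pairing step (invented URL):
#
# >>> pair_a = ["https://store.example/clips/40S_sample01.wav"]
# >>> [u.replace("40S_", "50S_") for u in pair_a]
# ['https://store.example/clips/50S_sample01.wav']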
def create_analyzer_cfg_acr(cfg, template_path, out_path):
"""
create cfg file to be used by analyzer script (acr method)
:param cfg:
:param template_path:
:param out_path:
:return:
"""
print("Start creating config file for result_parser")
config = {}
config['q_num'] = int(cfg['create_input']['number_of_clips_per_session']) + \
int(cfg['create_input']['number_of_trapping_per_session']) + \
int(cfg['create_input']['number_of_gold_clips_per_session'])
config['max_allowed_hits'] = cfg['acr_html']['allowed_max_hit_in_project']
config['quantity_hits_more_than'] = cfg['acr_html']['quantity_hits_more_than']
config['quantity_bonus'] = cfg['acr_html']['quantity_bonus']
config['quality_top_percentage'] = cfg['acr_html']['quality_top_percentage']
config['quality_bonus'] = cfg['acr_html']['quality_bonus']
with open(template_path, 'r') as file:
content = file.read()
file.seek(0)
t = Template(content)
cfg_file = t.render(cfg=config)
with open(out_path, 'w') as file:
file.write(cfg_file)
file.close()
print(f" [{out_path}] is created")
def create_analyzer_cfg_dcr_ccr(cfg, template_path, out_path):
"""
create cfg file to be used by analyzer script (ccr/dcr method)
:param cfg:
:param template_path:
:param out_path:
:return:
"""
print("Start creating config file for result_parser")
config = {}
config['q_num'] = int(cfg['create_input']['number_of_clips_per_session']) + \
int(cfg['create_input']['number_of_trapping_per_session'])
config['max_allowed_hits'] = cfg['dcr_ccr_html']['allowed_max_hit_in_project']
config['quantity_hits_more_than'] = cfg['dcr_ccr_html']['quantity_hits_more_than']
config['quantity_bonus'] = cfg['dcr_ccr_html']['quantity_bonus']
config['quality_top_percentage'] = cfg['dcr_ccr_html']['quality_top_percentage']
config['quality_bonus'] = cfg['dcr_ccr_html']['quality_bonus']
with open(template_path, 'r') as file:
content = file.read()
file.seek(0)
t = Template(content)
cfg_file = t.render(cfg=config)
with open(out_path, 'w') as file:
file.write(cfg_file)
file.close()
print(f" [{out_path}] is created")
async def create_hit_app_ccr_dcr(cfg, template_path, out_path, training_path, cfg_g):
"""
Create the hit_app (html file) corresponding to this project for ccr and dcr
:param cfg:
:param template_path:
:param out_path:
:return:
"""
print("Start creating custom hit_app (html)")
config = {}
config['cookie_name'] = cfg['cookie_name']
config['qual_cookie_name'] = cfg['qual_cookie_name']
config['allowed_max_hit_in_project'] = cfg['allowed_max_hit_in_project']
config['hit_base_payment'] = cfg['hit_base_payment']
config['quantity_hits_more_than'] = cfg['quantity_hits_more_than']
config['quantity_bonus'] = cfg['quantity_bonus']
config['quality_top_percentage'] = cfg['quality_top_percentage']
config['quality_bonus'] = float(cfg['quality_bonus']) + float(cfg['quantity_bonus'])
config['sum_quantity'] = float(cfg['quantity_bonus']) + float(cfg['hit_base_payment'])
config['sum_quality'] = config['quality_bonus'] + float(cfg['hit_base_payment'])
# rating urls
rating_urls = []
n_clips = int(cfg_g['number_of_clips_per_session'])
n_traps = int(cfg_g['number_of_trapping_per_session'])
for i in range(0, n_clips):
rating_urls.append({"ref": f"${{Q{i}_R}}", "processed": f"${{Q{i}_P}}"})
if n_traps > 1:
print("more than 1 trapping clips question is not supported. Proceed with 1 trap")
rating_urls.append({"ref": "${TP}", "processed": "${TP}"})
if 'number_of_gold_clips_per_session' in cfg_g:
print("Gold clips are not supported for CCR and DCR method. Proceed without them")
config['rating_urls'] = rating_urls
# training urls
df_train = pd.read_csv(training_path)
train_urls = []
train_ref = None
for index, row in df_train.iterrows():
if train_ref is None:
train_ref = row['training_references']
train_urls.append({"ref": f"{row['training_references']}", "processed": f"{row['training_clips']}"})
# add a trapping clips to the training section
train_urls.append({"ref": f"{train_ref}", "processed": f"{train_ref}"})
config['training_urls'] = train_urls
config['training_trap_urls'] = train_ref
with open(template_path, 'r') as file:
content = file.read()
file.seek(0)
t = Template(content)
html = t.render(cfg=config)
with open(out_path, 'w') as file:
file.write(html)
print(f" [{out_path}] is created")
async def create_hit_app_acr(cfg, template_path, out_path, training_path, trap_path, cfg_g, cfg_trapping_store):
"""
Create the ACR.html file corresponding to this project
:param cfg:
:param template_path:
:param out_path:
:return:
"""
print("Start creating custom acr.html")
df_trap = pd.DataFrame()
if trap_path and os.path.exists(trap_path):
df_trap = pd.read_csv(trap_path, nrows=1)
else:
trapclipsstore = TrappingSamplesInStore(cfg_trapping_store, 'TrappingQuestions')
df_trap = await trapclipsstore.get_dataframe()
for index, row in df_trap.iterrows():
trap_url = row['trapping_clips']
trap_ans = row['trapping_ans']
config = {}
config['cookie_name'] = cfg['cookie_name']
config['qual_cookie_name'] = cfg['qual_cookie_name']
config['allowed_max_hit_in_project'] = cfg['allowed_max_hit_in_project']
config['training_trap_urls'] = trap_url
config['training_trap_ans'] = trap_ans
config['hit_base_payment'] = cfg['hit_base_payment']
config['quantity_hits_more_than'] = cfg['quantity_hits_more_than']
config['quantity_bonus'] = cfg['quantity_bonus']
config['quality_top_percentage'] = cfg['quality_top_percentage']
config['quality_bonus'] = float(cfg['quality_bonus']) + float(cfg['quantity_bonus'])
config['sum_quantity'] = float(cfg['quantity_bonus']) + float(cfg['hit_base_payment'])
config['sum_quality'] = config['quality_bonus'] + float(cfg['hit_base_payment'])
df_train = pd.read_csv(training_path)
train = []
for index, row in df_train.iterrows():
train.append(row['training_clips'])
train.append(trap_url)
config['training_urls'] = train
# rating urls
rating_urls = []
n_clips = int(cfg_g['number_of_clips_per_session'])
n_traps = int(cfg_g['number_of_trapping_per_session'])
n_gold_clips = int(cfg_g['number_of_gold_clips_per_session'])
for i in range(0, n_clips ):
rating_urls.append('${Q'+str(i)+'}')
if n_traps > 1:
raise Exception("more than 1 trapping clips question is not supported.")
if n_traps == 1:
rating_urls.append('${TP}')
if n_gold_clips > 1:
raise Exception("more than 1 gold question is not supported.")
if n_gold_clips == 1:
rating_urls.append('${gold_clips}')
config['rating_urls'] = rating_urls
with open(template_path, 'r') as file:
content = file.read()
file.seek(0)
t = Template(content)
html = t.render(cfg=config)
with open(out_path, 'w') as file:
file.write(html)
print(f" [{out_path}] is created")
async def prepare_csv_for_create_input(cfg, test_method, clips, gold, trapping, general):
"""
Merge different input files into one dataframe
:param test_method
:param clips:
:param trainings:
:param gold:
:param trapping:
:param general:
:return:
"""
df_clips = pd.DataFrame()
df_gold = pd.DataFrame()
df_trap = pd.DataFrame()
rating_clips = []
if clips and os.path.exists(clips):
df_clips = pd.read_csv(clips)
else:
rating_clips_stores = cfg.get('RatingClips', 'RatingClipsConfigurations').split(',')
for model in rating_clips_stores:
enhancedClip = ClipsInAzureStorageAccount(cfg[model], model)
eclips = await enhancedClip.clip_names
eclips_urls = [enhancedClip.make_clip_url(clip) for clip in eclips]
print('length of urls for store [{0}] is [{1}]'.format(model, len(await enhancedClip.clip_names)))
rating_clips = rating_clips + eclips_urls
df_clips = pd.DataFrame({'rating_clips':rating_clips})
df_general = pd.read_csv(general)
if test_method == "acr":
if gold and os.path.exists(gold):
df_gold = pd.read_csv(gold)
else:
goldclipsstore = GoldSamplesInStore(cfg['GoldenSample'], 'GoldenSample')
df_gold = await goldclipsstore.get_dataframe()
print('total gold clips from store [{0}]'.format(len(await goldclipsstore.clip_names)))
if trapping and os.path.exists(trapping):
df_trap = pd.read_csv(trapping)
else:
trapclipsstore = TrappingSamplesInStore(cfg['TrappingQuestions'], 'TrappingQuestions')
df_trap = await trapclipsstore.get_dataframe()
print('total trapping clips from store [{0}]'.format(len(await trapclipsstore.clip_names)))
else:
df_gold = None
if not os.path.exists(clips):
testclipsstore = ClipsInAzureStorageAccount(cfg['noisy'], 'noisy')
testclipsurls = [testclipsstore.make_clip_url(clip) for clip in await testclipsstore.clip_names]
print('The total test clips for our study is [{0}]'.format(len(testclipsurls)))
clipdictList = []
for eclip in rating_clips:
for i, c in enumerate(testclipsurls):
if os.path.basename(c) in eclip:
clipdictList.append({'rating_clips':eclip, 'references':testclipsurls[i]})
break
df_clips = pd.DataFrame(clipdictList)
df_trap = df_clips[['references']].copy()
df_trap.rename(columns={'references': 'trapping_clips'}, inplace=True)
    result = pd.concat([df_clips, df_gold, df_trap, df_general], axis=1, sort=False)
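    # The excerpt ends at the merge above; the docstring promises a return
    # value, so a minimal assumed completion is to hand back the merged frame.
    return result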
#!/usr/bin/env python
# coding: utf-8
# > Note: KNN is a memory-based model, which means it memorizes patterns rather than generalizing. It is a simple yet powerful technique that competes with SOTA models like BERT4Rec.
# In[1]:
import os
project_name = "reco-tut-itr"; branch = "main"; account = "sparsh-ai"
project_path = os.path.join('/content', project_name)
if not os.path.exists(project_path):
get_ipython().system(u'cp /content/drive/MyDrive/mykeys.py /content')
import mykeys
get_ipython().system(u'rm /content/mykeys.py')
path = "/content/" + project_name;
get_ipython().system(u'mkdir "{path}"')
get_ipython().magic(u'cd "{path}"')
import sys; sys.path.append(path)
get_ipython().system(u'git config --global user.email "<EMAIL>"')
get_ipython().system(u'git config --global user.name "reco-tut"')
get_ipython().system(u'git init')
get_ipython().system(u'git remote add origin https://"{mykeys.git_token}":[email protected]/"{account}"/"{project_name}".git')
get_ipython().system(u'git pull origin "{branch}"')
get_ipython().system(u'git checkout main')
else:
get_ipython().magic(u'cd "{project_path}"')
# In[2]:
import os
import numpy as np
import pandas as pd
import scipy.sparse
from scipy.spatial.distance import correlation
# In[13]:
df = pd.read_parquet('./data/silver/rating.parquet.gz')
df.info()
# In[16]:
df2 = pd.read_parquet('./data/silver/items.parquet.gz')
df2.info()
# In[17]:
df = pd.merge(df, df2, on='itemId')
df.info()
# In[5]:
rating_matrix = pd.pivot_table(df, values='rating',
index=['userId'], columns=['itemId'])
rating_matrix
# In[6]:
def similarity(user1, user2):
try:
user1=np.array(user1)-np.nanmean(user1)
user2=np.array(user2)-np.nanmean(user2)
commonItemIds=[i for i in range(len(user1)) if user1[i]>0 and user2[i]>0]
if len(commonItemIds)==0:
return 0
else:
user1=np.array([user1[i] for i in commonItemIds])
user2=np.array([user2[i] for i in commonItemIds])
return correlation(user1,user2)
except ZeroDivisionError:
print("You can't divide by zero!")
# In[31]:
def nearestNeighbourRatings(activeUser, K):
try:
similarityMatrix=pd.DataFrame(index=rating_matrix.index,columns=['Similarity'])
for i in rating_matrix.index:
similarityMatrix.loc[i]=similarity(rating_matrix.loc[activeUser],rating_matrix.loc[i])
        similarityMatrix = similarityMatrix.sort_values(['Similarity'], ascending=[False])
nearestNeighbours=similarityMatrix[:K]
neighbourItemRatings=rating_matrix.loc[nearestNeighbours.index]
        predictItemRating = pd.DataFrame(index=rating_matrix.columns, columns=['Rating'])
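        # The excerpt stops at the empty prediction frame above. A minimal
        # sketch of the usual user-based KNN completion follows; the original
        # code is not shown, so treat this as an assumption: predict each
        # item's rating as the active user's mean plus the similarity-weighted,
        # mean-centered ratings of the K nearest neighbours.
        for i in rating_matrix.columns:
            predictedRating = np.nanmean(rating_matrix.loc[activeUser])
            for j in neighbourItemRatings.index:
                if rating_matrix.loc[j, i] > 0:
                    predictedRating += (rating_matrix.loc[j, i] -
                                        np.nanmean(rating_matrix.loc[j])) * \
                                       nearestNeighbours.loc[j, 'Similarity']
            predictItemRating.loc[i, 'Rating'] = predictedRating
        return predictItemRating
    except ZeroDivisionError:
        print("You can't divide by zero!")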
import json
import os
import warnings
import casadi as ca
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pytest
from scipy.signal import chirp
from skmid.integrator import RungeKutta4
from skmid.models import DynamicModel
from skmid.models import generate_model_attributes
@pytest.fixture
def load_non_linear_model_data():
"""Generate input signal"""
CWD = os.getcwd()
DATA_DIR = "data"
SUB_DATA_DIR = "non_linear_model"
U = pd.read_csv(
filepath_or_buffer=os.path.join(CWD, DATA_DIR, SUB_DATA_DIR, "u_data.csv"),
index_col=0,
)
Y = pd.read_csv(
filepath_or_buffer=os.path.join(CWD, DATA_DIR, SUB_DATA_DIR, "y_data.csv"),
index_col=0,
)
# reading the data from the file
with open(
os.path.join(CWD, DATA_DIR, SUB_DATA_DIR, "settings.json"), mode="r"
) as j_object:
settings = json.load(j_object)
return (U, Y, settings)
# test f(x,u) with multiple input, multiple size, list
@pytest.fixture
def generate_input_signal():
"""Generate input signal"""
N = 2000 # Number of samples
fs = 500 # Sampling frequency [hz]
t = np.linspace(0, (N - 1) * (1 / fs), N)
df_input = pd.DataFrame(
data={
"chirp": 2 * chirp(t, f0=1, f1=10, t1=5, method="logarithmic"),
"noise": 2 * np.random.random(N),
},
index=t,
)
return (df_input, fs)
@pytest.fixture
def generate_step_signal():
"""Generate Step input signal"""
N = 1000 # Number of samples
fs = 25 # Sampling frequency [hz]
t = np.linspace(0, (N - 1) * (1 / fs), N)
df_input = pd.DataFrame(index=t).assign(
step=lambda x: np.where(x.index < t[int(N / 4)], 0, 1)
)
return (df_input, fs)
@pytest.fixture
def generate_inpulse_signal():
"""Generate Impulse input signal"""
N = 500 # Number of samples
fs = 50 # Sampling frequency [hz]
t = np.linspace(0, (N - 1) * (1 / fs), N)
df_input = pd.DataFrame(index=t).assign(inpulse=np.zeros(N))
df_input.iloc[0] = 1
return (df_input, fs)
class TestRungeKutta4:
"""Test class for function generate_model_parameters."""
def test_model_with_states(self):
"""Test simulation with model dx=f(x)."""
(x, _, _) = generate_model_attributes(
state_size=1, input_size=0, parameter_size=0
)
# initialize first-order model
tau = 1
sys = DynamicModel(state=x, model_dynamics=[-(1 / tau) * x])
n_steps = 50
x0 = [1]
rk4 = RungeKutta4(model=sys)
_ = rk4.simulate(initial_condition=x0, n_steps=n_steps)
df_X = rk4.state_sim_
df_Y = rk4.output_sim_
# check equality of dataframe
pdt.assert_frame_equal(df_X, df_Y)
# check size of dataframe. Note: +1 is because it includes the initial condition
assert len(df_X) == n_steps + 1
assert len(df_Y) == n_steps + 1
# check frequency is assigned correctly fs=1 default
assert all(np.diff(df_Y.index) == 1)
# check no missing values
assert df_X.notna().all().values
assert df_Y.notna().all().values
# check model consistency: convergence with no bias
assert df_X.iloc[0].values == pytest.approx(x0)
assert df_X.iloc[-1].values == pytest.approx(0)
def test_model_with_states_input(self, generate_step_signal):
"""Test simulation with model dx=f(x,u)."""
(df_input, fs) = generate_step_signal
# initialize first-order model
(x, u, _) = generate_model_attributes(
state_size=1, input_size=1, parameter_size=0
)
tau, kp = 1, 1
sys = DynamicModel(
state=x, input=u, model_dynamics=[-(1 / tau) * x + (kp / tau) * u]
)
rk4 = RungeKutta4(model=sys, fs=fs)
_ = rk4.simulate(initial_condition=[0], input=df_input)
df_X = rk4.state_sim_
df_Y = rk4.output_sim_
# check equality of dataframe
        pdt.assert_frame_equal(df_X, df_Y)
import csv
import itertools
import numpy
import numpy as np
import pandas as pd
import sklearn
from matplotlib import pyplot as plt
from pandas import DataFrame
import tsv
import experiments
import utils
from granularity import *
from sklearn.metrics import f1_score, accuracy_score
input_df = pd.read_csv("data/answer_weather_ordinal.csv", sep=",")
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
        xbox = get_upcast_box(left, NaT, True)
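        # (Excerpt truncated here. In the upstream pandas test, the method
        # goes on to assert that comparisons with NaT are all-False for
        # ==, <, >, <=, >= and all-True for != on the boxed values.)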
"""
A collection of plotting functions to use with pandas, numpy, and pyplot.
Created: 2016-36-28 11:10
"""
import sys
from operator import itemgetter
from itertools import groupby, cycle
import numpy as np
import scipy as sp
import scipy.stats as sts
import pandas as pd
import statsmodels.api as sm
from statsmodels.sandbox.regression.predstd import wls_prediction_std
import matplotlib as mpl
from matplotlib import colors
from matplotlib import gridspec
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
import seaborn as sns
import networkx as nx
from adjustText import adjust_text
import matplotlib_venn
# Get rid of pandas future warnings
import warnings as _warn
_warn.simplefilter(action='ignore', category=FutureWarning)
_warn.simplefilter(action='ignore', category=UserWarning)
_warn.simplefilter(action='ignore', category=RuntimeWarning)
###############################################################################
# Regression #
###############################################################################
class LinearRegression(object):
"""
Use statsmodels, scipy, and numpy to do a linear regression.
Regression includes confidence intervals and prediction intervals.
Attributes
----------
{x,y}
Original x, y data.
X : pandas.core.frame.DataFrame
Original X with a column of ones added as a constant (unless x already
had a constant column).
x_pred : numpy.ndarray
Predicted evently spaced data. From
``numpy.linspace(x.min(), x.max(), len(x))``
x_pred_c : numpy.ndarry
Original x_pred with a column of ones added as a constant
y_pred :numpy.ndarray
Predicted y values from the model
model : statsmodels.regression.linear_model.OLS
fitted : statsmodels.regression.linear_model.RegressionResultsWrapper
Result of model.fit()
summary : statsmodels.iolib.summary.Summary
Summary data for regression
reg_line : numpy.ndarray
Statsmodels fitted values as an ndarray for plotting
    P : float
P value for regression in x
rsquared : float
The r-squared for the regression
conf : numpy.ndarray
Confidence interval array for x values.
ci_{upper,lower} : numpy.ndarray
Upper and lower confidence interval arrays. 95% chance real regression
line in this interval
ci_t : float
T statistic used for the confidence interval. 95% chance real y value
is in this interval
pred_{upper,lower} : numpy.ndarray
Upper and lower prediction
{y_hat,y_err,s_err,sdev} : numpy.ndarray
"""
def __init__(self, x, y):
"""
Do the regression.
Params
------
{x,y} : numpy.ndarray or pandas.core.series.Series
X and Y data
log : bool
Do the regression in log10 space instead
"""
self.x = x
self.y = y
self.n = len(x)
self.X = sm.add_constant(x)
# Do regression
self.model = sm.OLS(self.y, self.X)
self.fitted = self.model.fit()
self.reg_line = self.fitted.fittedvalues
self.reg_summary = self.fitted.summary()
# Get predicted data
self.x_pred = np.linspace(x.min(), x.max(), self.n)
self.x_pred_c = sm.add_constant(self.x_pred)
self.y_pred = self.fitted.predict(self.x_pred_c)
# Calculate confidence intervals
self.y_hat = self.fitted.predict(self.X)
self.y_err = y - self.y_hat
mean_x = self.x.T[1].mean()
dof = self.n - self.fitted.df_model - 1
self.ci_t = sts.t.ppf(1-0.025, df=dof)
self.s_err = np.sum(np.power(self.y_err, 2))
self.conf = (
self.ci_t * np.sqrt(
(
self.s_err/(self.n-2)
)*(
1.0/self.n + (
np.power(
(self.x_pred-mean_x), 2
)/(
(np.sum(np.power(self.x_pred,2))) - self.n*(np.power(mean_x,2))
)
)
)
)
)
self.ci_upper = self.y_pred + abs(self.conf)
self.ci_lower = self.y_pred - abs(self.conf)
# Get prediction intervals
self.sdev, self.pred_lower, self.pred_upper = wls_prediction_std(
self.fitted, exog=self.x_pred_c, alpha=0.05
)
# Assign stats
self.rsquared = self.fitted.rsquared
self.P = self.fitted.pvalues.tolist()[0]
def plot_reg_line(self, ax, alpha=0.7, zorder=12, color=None,
include_label=True, unlog=False):
"""Plot the regression line."""
color = color if color else 'darkorchid'
x_pred = 10**self.x_pred if unlog else self.x_pred
y_pred = 10**self.y_pred if unlog else self.y_pred
label = self.legend_text() if include_label else None
ax.plot(
x_pred, y_pred, '-', color=color, linewidth=2,
label=label, alpha=alpha, zorder=zorder
)
def plot_ci_line(self, ax, alpha=0.3, zorder=10, color=None, unlog=False):
"""Plot the confidence interval lines."""
color = color if color else sns.xkcd_rgb['rust']
x_pred = 10**self.x_pred if unlog else self.x_pred
ci_upper = 10**self.ci_upper if unlog else self.ci_upper
ci_lower = 10**self.ci_lower if unlog else self.ci_lower
ax.fill_between(
x_pred, ci_lower, ci_upper, color=color,
alpha=alpha, zorder=zorder
)
def plot_pred_line(self, ax, alpha=0.1, zorder=5, color=None, unlog=False):
"""Plot the confidence interval lines."""
color = color if color else sns.xkcd_rgb['light green']
x_pred = 10**self.x_pred if unlog else self.x_pred
pred_upper = 10**self.pred_upper if unlog else self.pred_upper
pred_lower = 10**self.pred_lower if unlog else self.pred_lower
ax.fill_between(
x_pred, pred_lower, pred_upper, color=color,
interpolate=True, alpha=alpha, zorder=zorder
)
def print_reg_summary(self):
"""Print the regression summary."""
print(self.fitted.summary())
def legend_text(self):
"""Print summary stats."""
return 'OLS: {:.3} +/- {:.2}\n P: {:.2e}\n $R^2$: {:.2}'.format(
self.fitted.params.tolist()[0], self.fitted.bse.tolist()[0],
self.P, self.rsquared
)
def __str__(self):
"""Return summary stats."""
return 'OLS: {:.3} +/- {:.2}\nP: {:.2e}\nR-squared: {:.2}'.format(
self.fitted.params.tolist()[0], self.fitted.bse.tolist()[0],
self.P, self.rsquared
)
def __repr__(self):
"""Print repr for statsmodels."""
return 'LinearRegression({0}, R2: {1:.2e}, P: {2:.2e})'.format(
repr(self.model), self.rsquared, self.P
)
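# Minimal usage sketch for LinearRegression (hypothetical data, added for
# illustration; not part of the original module):
#
#     x = np.random.uniform(0, 10, 200)
#     y = 2.5 * x + np.random.normal(0, 2.0, 200)
#     reg = LinearRegression(x, y)
#     fig, ax = plt.subplots()
#     ax.scatter(x, y, alpha=0.4)
#     reg.plot_reg_line(ax)
#     reg.plot_ci_line(ax)
#     reg.plot_pred_line(ax)
#     ax.legend(loc='best')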
###############################################################################
# Basic Scatter Plots #
###############################################################################
def distcomp(y, x=None, bins=500, kind='qq', style=None, ylabel=None,
xlabel=None, title=None, fig=None, ax=None, size=10):
"""Compare two vectors of different length by creating equal bins.
If kind is qq, the plot is a simple Quantile-Quantile plot, if it is pp,
then the plot is a cumulative probability plot.
There are three different formats:
simple is a single scatter plot of either the QQ or cumulative dist
joint includes the simple scatter plot, but also plots the contributing
univatiate distributions on the axes
column includes the scatter plot on the left and adds two independent
histograms in two plots in a second column on the right.
We also add the Mann-Whitney U P-value for the *original distributions*,
pre-binning.
Cumulative probability is calculated like this:
Uses the min(x,y) and max(x,y) (+/- 1%) to set the limit of the bins,
and then divides both x and y into an equal number of bins between
those two limits, ensuring that the bin starts and ends are identical
for both distributions. Bins are labelled by the center value of the
bin.
Bins are then converted into frequencies, such that the sum of all
frequencies is 1. These frequencies are then converted to a cumulative
frequency, so that the final bin will always have a value of one.
Parameters
----------
y (actual|y-axis) : Series
Series of actual data, will go on y-axis
x (theoretical|x-axis) : Series or {'normal', 'uniform', 'pvalue'}, optional
Series of theoretical data, will go on x-axis, can also be one of
'normal', 'uniform', or 'pvalue', to use a random distribution of the
same length as the y data.
normal: will use a normal distribution anchored at the mean of y,
with a scope of the standard deviation of y.
uniform: will use a uniform distribution min(y) >= dist <= max(y)
pvalue: will use a uniform distribution between 0 and 1
Defaults to 'pvalue' if kind='qq_log' else 'normal'
bins : int, optional
Number of bins to use for plotting
kind : {'qq', 'qq_log', 'pp', 'cum', 'lin_pp'}, optional
qq: Plot a Q-Q plot
qq_log: Plot a Q-Q plot of -log10(pvalues)
pp|cum: Plot a cumulative probability plot
lin_pp: Plot a probability plot where bins are evenly spaced
style : str, optional
simple: Plot a simple scatter plot
joint: Plot a scatter plot with univariate dists on each axis.
column: Plot a scatter plot with univariate histograms, separately
calculated, on the side.
{y/x}label : str, optional
Optional label for y/x axis. y=Actual, x=Theretical
title : str, optional
Optional title for the whole plot
size : int, optional
A size to use for the figure, square is forced.
Returns
-------
fig, ax
Figure and axes always returned, if joint is True, axes
object will be a seaborn axgrid.
"""
if kind not in ['qq', 'qq_log', 'cum', 'pp', 'lin_pp']:
raise ValueError('kind must be one of qq, qq_log, pp, cum, or lin_pp')
if not style:
        if kind == 'qq' or kind == 'qq_log':
style = 'simple'
else:
style = 'joint'
if x is None:
        if kind == 'qq_log':
x = 'pvalue'
else:
x = 'normal'
if style not in ['simple', 'joint', 'column', 'scatter']:
raise ValueError('style must be one of simple, joint, or colummn')
# Convert to old names
theoretical = x
actual = y
kind = 'cum' if kind == 'pp' else kind
style = 'simple' if style == 'scatter' else style
if isinstance(theoretical, str):
if theoretical == 'normal':
mean = np.mean(actual)
std = np.std(actual)
theoretical = np.random.normal(
loc=mean, scale=std, size=len(actual)
)
elif theoretical == 'uniform' or theoretical == 'random':
theoretical = np.random.uniform(
np.min(actual), np.max(actual), len(actual)
)
elif theoretical == 'pvalue' or theoretical == 'p':
theoretical = np.random.random_sample(len(actual))
else:
raise ValueError('Invalid theoretical')
if kind == 'qq_log':
actual = -np.log10(actual)
theoretical = -np.log10(theoretical)
kind = 'qq'
reg_pp = True # If false, do a pp plot that is evenly spaced in the hist.
    if kind == 'lin_pp':
kind = 'cum'
reg_pp = False
# Choose central plot type
if kind == 'qq':
cum = False
if not title:
title = 'QQ Plot'
# We use percentiles, so get evenly spaced percentiles from 0% to 100%
q = np.linspace(0, 100, bins+1)
xhist = np.percentile(theoretical, q)
yhist = np.percentile(actual, q)
elif kind == 'cum':
cum = True
# Create bins from sorted data
theoretical_sort = sorted(theoretical)
# Bins with approximately equal numbers of points
if reg_pp:
boundaries = uniform_bins(theoretical_sort, bins)
if not title:
title = 'Cumulative Probability Plot'
# Bins with equal ranges
else:
            mx = max(np.max(theoretical), np.max(actual))
            mx += mx*0.01
            mn = min(np.min(theoretical), np.min(actual))
            mn -= mx*0.01
boundaries = np.linspace(mn, mx, bins+1, endpoint=True)
if not title:
title = 'Linear Spaced Cumulative Probability Plot'
labels = [
(boundaries[i]+boundaries[i+1])/2 for i in range(len(boundaries)-1)
]
# Bin two series into equal bins
        xb = pd.cut(theoretical, bins=boundaries, labels=labels)
        yb = pd.cut(actual, bins=boundaries, labels=labels)
# Get value counts for each bin and sort by bin
xhist = xb.value_counts().sort_index(ascending=True)/len(xb)
yhist = yb.value_counts().sort_index(ascending=True)/len(yb)
# Make cumulative
for ser in [xhist, yhist]:
ttl = 0
for idx, val in ser.iteritems():
ttl += val
ser.loc[idx] = ttl
# Set labels
if not xlabel:
if hasattr(x, 'name'):
xlabel = x.name
else:
xlabel = 'Theoretical'
if not ylabel:
if hasattr(y, 'name'):
ylabel = y.name
else:
ylabel = 'Actual'
# Create figure layout
if fig or ax:
        fig, ax = _get_fig_ax(fig, ax)
style = 'simple'
elif style == 'simple':
fig, ax = plt.subplots(figsize=(size,size))
elif style == 'joint':
# Create a jointgrid
sns.set_style('darkgrid')
gs = gridspec.GridSpec(2, 2, width_ratios=[5, 1], height_ratios=[1, 5])
fig = plt.figure(figsize=(size,size))
ax = plt.subplot(gs[1, 0])
axt = plt.subplot(gs[0, 0], sharex=ax, yticks=[])
axr = plt.subplot(gs[1, 1], sharey=ax, xticks=[])
# Plot side plots
axt.hist(xhist, bins=bins, cumulative=cum)
axr.hist(yhist, bins=bins, cumulative=cum, orientation='horizontal')
elif style == 'column':
# Create a two column grid
fig = plt.figure(figsize=(size*2,size))
ax = plt.subplot2grid((2,2), (0,0), rowspan=2)
ax2 = plt.subplot2grid((2,2), (0,1))
ax3 = plt.subplot2grid((2,2), (1,1), sharex=ax2, sharey=ax2)
# Plot extra plots - these ones are traditional histograms
sns.distplot(actual, ax=ax2, bins=bins)
sns.distplot(theoretical, ax=ax3, bins=bins)
ax2.set_title(ylabel)
ax3.set_title(xlabel)
for a in [ax2, ax3]:
a.set_frame_on(True)
a.set_xlabel = ''
a.axes.get_yaxis().set_visible(True)
a.yaxis.tick_right()
a.yaxis.set_label_position('right')
a.yaxis.set_label('count')
# Plot the scatter plot
ax.scatter(xhist, yhist, label='')
ax.set_xlabel(xlabel, fontsize=20)
ax.set_ylabel(ylabel, fontsize=20)
# Make the plot a square
emin = min(np.min(xhist), np.min(yhist))
emax = max(np.max(xhist), np.max(yhist))
t2b = abs(emax-emin)
scale = t2b*0.01
emin -= scale
emax += scale
lim = (emin, emax)
ax.set_xlim(lim)
ax.set_ylim(lim)
# Plot a 1-1 line in the background
ax.plot(lim, lim, '-', color='0.75', alpha=0.9, zorder=0.9)
# Add Mann-Whitney U p-value
mwu = sts.mannwhitneyu(actual, theoretical)
chi = sts.chisquare(yhist, xhist)
handles, _ = ax.get_legend_handles_labels()
prc, prp = sts.pearsonr(xhist, yhist)
if round(prc, 4) > 0.998:
plbl = 'pearsonr = {:.2f}; p = {:.2f}'
else:
plbl = 'pearsonr = {:.2}; p = {:.2e}'
handles.append(
mpatches.Patch(
color='none', label=plbl.format(prc, prp)
)
)
if kind == 'qq' or reg_pp:
if chi.pvalue < 0.001:
cpl = 'chisq = {:.2}; p = {:2e}'
else:
cpl = 'chisq = {:.2}; p = {:.2f}'
handles.append(
mpatches.Patch(
color='none', label=cpl.format(chi.statistic, chi.pvalue)
)
)
if mwu.pvalue < 0.001:
mwl = 'mannwhitneyu = {:.2}; p = {:.2e}'
else:
mwl = 'mannwhitneyu = {:.2}; p = {:.5f}'
handles.append(
mpatches.Patch(
color='none', label=mwl.format(mwu.statistic, mwu.pvalue)
)
)
ax.legend(handles=handles, loc=0)
fig.tight_layout()
if title:
if style == 'simple':
ax.set_title(title, fontsize=14)
else:
fig.suptitle(title, fontsize=16)
if style == 'joint':
fig.subplots_adjust(top=0.95)
elif style != 'simple':
fig.subplots_adjust(top=0.93)
# Last thing is to set the xtick labels for cummulative plots
if kind == 'cum':
labels = [round(i, 2) for i in labels]
fig.canvas.draw()
xlabels = ax.get_xticklabels()
ylabels = ax.get_yticklabels()
for i, tlabels in enumerate([xlabels, ylabels]):
for tlabel in tlabels:
pos = tlabel.get_position()
if round(pos[i], 2) < 0.0 or round(pos[i], 2) > 1.0:
tlabel.set_text('')
continue
if round(pos[i], 2) == 0.00:
txt = labels[0]
elif round(pos[i], 2) == 1.00:
# The last label is the end of the bin, not the start
txt = round(theoretical_sort[-1], 2)
else:
txt = labels[int(round(len(labels)*pos[i]))]
tlabel.set_text(str(txt))
ax.set_xticklabels(xlabels)
ax.set_yticklabels(ylabels)
if style == 'joint':
ax = (ax, axt, axr)
elif style == 'column':
ax = (ax, ax2, ax3)
return fig, ax
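# Minimal usage sketch for distcomp (hypothetical data, added for
# illustration; not part of the original module): compare observed p-values
# against a uniform null.
#
#     pvals = pd.Series(np.random.random_sample(5000), name='observed')
#     fig, ax = distcomp(pvals, x='pvalue', kind='qq_log', style='simple')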
def scatter(x, y, df=None, xlabel=None, ylabel=None, title=None, pval=None,
density=True, log_scale=False, handle_nan=True, regression=True,
fill_reg=True, reg_details=False, labels=None, label_lim=10,
shift_labels=False, highlight=None, highlight_label=None,
legend='best', add_text=None, scale_factor=0.05, size=10,
cmap=None, cmap_midpoint=0.5, lock_axes=True, fig=None, ax=None):
"""Create a simple 1:1 scatter plot plus regression line.
Always adds a 1-1 line in grey and a regression line in green.
Can color the points by density if density is true (otherwise they are
always blue), can also do regular or negative log scaling.
Regression is done using the statsmodels OLS regression with a constant
added to the X values using sm.add_constant() to add a column of ones to
the array of x values. If a constant is already present none is added.
Parameters
----------
x : Series or str if df provided
X values
y : Series or str if df provided
Y values
df : DataFrame, optional
If provided, x and y must be strings that are column names
{x,y}label : str, optional
A label for the axes, defaults column value if df provided
title : str, optional
Name of the plot
pval : float, optional
Draw a line at this point*point count
density : bool or str, optional
Color points by density, 'kde' uses a cool kde method, but is too slow
on large datasets
log_scale : str, optional
Plot in log scale, can also be 'negative' for negative log scale.
handle_nan : bool, optional
When converting data to log, drop nan and inf values
regression : bool, optional
Do a regression
fill_reg : bool, optional
Add confidence lines to regression line
reg_details : bool, optional
Print regression summary
labels : Series, optional
Labels to show, must match indices of plotted data
label_lim : int, optional
Only show top # labels on each side of the line
shift_labels: bool, optional
If True, try to spread labels out. Imperfect.
highlight_label : Series, optional
Boolean series of same len as x/y
legend : str, optional
The location to place the legend
add_text : str, optional
Text to add to the legend
scale_factor : float, optional
A ratio to expand the axes by.
size : int or tuple, optional
Size of figure, defaults to square unless (x, y) length tuple given
cmap : str or cmap, optional
A cmap to use for density, defaults to a custom blue->red, light->dark
cmap
cmap_midpoint : float 0 <= n <= 1, optional
A midpoint for the cmap, 0.5 means no change
emphasizes density
lock_axes : bool, optional
Make X and Y axes the same length
fig/ax : matplotlib objects, optional
Use these instead
Returns
-------
fig : plt.figure
ax : plt.axes
reg : plots.LinearRegression or None
Statsmodel OLS regression results wrapped in a LinearRegression
object. If no regression requested, returns None
"""
f, a = _get_fig_ax(fig, ax, size=size)
# a.grid(False)
if isinstance(df, pd.DataFrame):
assert isinstance(x, str)
assert isinstance(y, str)
if log_scale:
df = df[(df[x] > 0) & (df[y] > 0)]
if not xlabel:
xlabel = x
x = df[x]
if not ylabel:
ylabel = y
y = df[y]
elif df is not None:
raise ValueError('df must be a DataFrame or None')
if not xlabel and hasattr(x, 'name'):
xlabel = x.name
if not ylabel and hasattr(y, 'name'):
ylabel = y.name
if not xlabel:
xlabel = 'X'
if not ylabel:
ylabel = 'Y'
    if hasattr(x, 'astype'):
        x = x.astype(float)
    else:
        x = float(x)
    if hasattr(y, 'astype'):
        y = y.astype(float)
    else:
        y = float(y)
# Get a color iterator
c = iter(sns.color_palette())
# Set up log scaling if necessary
if log_scale:
if log_scale == 'reverse' or log_scale == 'n' or log_scale == 'r':
log_scale = 'negative'
if log_scale == '-':
log_scale = 'negative'
lx = np.log10(x)
ly = np.log10(y)
if handle_nan:
            tdf = pd.DataFrame([x, y, lx, ly])
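            # (The excerpt cuts off here; presumably the frame above is then
            # transposed, +/-inf values replaced with NaN, and incomplete rows
            # dropped before lx/ly are used further on. That continuation is
            # an assumption, not part of the original source.)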
from geopandas import GeoDataFrame
import pandas as pd
import numpy as np
import geopandas as gp
OLR= gp.read_file('Roadways_gridV6.shp')
OLR1=pd.DataFrame(OLR)
def label_race(row):
if row['cat'] == 'trunk' :
tcc=(row.length/263.778046)*0.38
return tcc
elif row['cat'] == 'primary' :
pcc=(row.length/355.954679)*0.28
return pcc
elif row['cat'] == 'secondary' :
pcc=(row.length/368.126161)*0.15
return pcc
elif row['cat'] == 'tertiary' :
pcc=(row.length/2180.144677)*0.13
return pcc
elif row['cat'] == 'residential' :
pcc=(row.length/1334.340583)*0.06
return pcc
else:
pcc=0
return pcc
OLR1['lpc'] = OLR1.apply(label_race, axis=1)
df4 = OLR1.drop_duplicates(subset='gid', keep='last')
gidL=df4.gid.tolist()
OLR2=OLR1.groupby('gid')
aa=[]
for gL in gidL:
dL=OLR2.get_group(gL)
dL1=dL.lpc.sum()
pm25=dL1*918115910.4
pm10=dL1*3842589254.4
an=(gL,pm25,pm10)
aa.append(an)
sdb1 = pd.DataFrame(aa)
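# Column names for the (gid, pm25, pm10) tuples collected above (assumed
# labels, added for readability):
sdb1.columns = ['gid', 'pm25', 'pm10']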
###############################################################################
# Omid55
# Start date: 16 Jan 2020
# Modified date: 14 Apr 2020
# Author: <NAME>
# Email: <EMAIL>
# Module to load group dynamics logs for every team.
###############################################################################
from __future__ import division, print_function, absolute_import, unicode_literals
import glob
import json
import numpy as np
import pandas as pd
from collections import defaultdict
from enum import Enum, unique
from os.path import expanduser
from typing import Dict
from typing import List
from typing import Text
from typing import Tuple
import utils
@unique
class AttachmentType(Enum):
TO_INITIAL = 1
TO_PREVIOUS = 2
class TeamLogsLoader(object):
"""Processes the logs of one team who have finished the dyad group dynamics
Usage:
loader = TeamLogsLoader(
directory='/home/omid/Datasets/Jeopardy')
Properties:
team_id: The id of the existing team in this object.
"""
taskid2taskname = {
52: 'GD_solo_asbestos_initial',
53: 'GD_group_asbestos1',
57: 'GD_influence_asbestos1',
61: 'GD_group_asbestos2',
62: 'GD_solo_asbestos1',
63: 'GD_influence_asbestos2',
67: 'GD_solo_asbestos2',
68: 'GD_group_asbestos3',
69: 'GD_influence_asbestos3',
70: 'GD_solo_asbestos3',
71: 'GD_solo_disaster_initial',
72: 'GD_group_disaster1',
73: 'GD_solo_disaster1',
74: 'GD_influence_disaster1',
75: 'GD_solo_disaster3',
76: 'GD_solo_disaster2',
77: 'GD_influence_disaster3',
78: 'GD_influence_disaster2',
79: 'GD_group_disaster2',
80: 'GD_group_disaster3',
81: 'GD_frustration_asbestos',
82: 'GD_frustration_disaster',
83: 'GD_solo_sports_initial',
84: 'GD_solo_sports1',
85: 'GD_solo_sports2',
86: 'GD_solo_sports3',
87: 'GD_group_sports2',
88: 'GD_group_sports3',
89: 'GD_group_sports1',
90: 'GD_influence_sports3',
91: 'GD_influence_sports2',
92: 'GD_influence_sports1',
93: 'GD_frustration_sports',
94: 'GD_solo_school_initial',
95: 'GD_solo_school1',
96: 'GD_solo_school2',
97: 'GD_solo_school3',
98: 'GD_group_school3',
99: 'GD_group_school2',
100: 'GD_group_school1',
101: 'GD_frustration_school',
102: 'GD_influence_school3',
103: 'GD_influence_school2',
104: 'GD_influence_school1',
105: 'GD_solo_surgery_initial',
106: 'GD_solo_surgery1',
107: 'GD_solo_surgery2',
108: 'GD_solo_surgery3',
109: 'GD_group_surgery1',
110: 'GD_group_surgery2',
111: 'GD_group_surgery3',
112: 'GD_influence_surgery1',
113: 'GD_influence_surgery2',
114: 'GD_influence_surgery3',
115: 'GD_frustration_surgery'}
def __init__(self, directory: Text):
self.messages = None
self.answers = None
self.influences = None
self.frustrations = None
self._load(directory=directory)
def _load(self, directory: Text):
"""Loads all logs for one team in the given directory.
"""
logs_filepath = '{}/EventLog_*.csv'.format(directory)
logs = pd.read_csv(glob.glob(expanduser(logs_filepath))[0])
logs = logs.sort_values(by='Timestamp')
task_orders_filepath = '{}/CompletedTask_*.csv'.format(directory)
task_orders = pd.read_csv(
glob.glob(expanduser(task_orders_filepath))[0])
completed_taskid2taskname = {}
for _, row in task_orders.iterrows():
completed_taskid2taskname[row.Id] = TeamLogsLoader.taskid2taskname[
row.TaskId]
answers_dt = []
messages_dt = []
influences_dt = []
frustrations_dt = []
for _, row in logs.iterrows():
content_file_id = row.EventContent[9:]
question_name = completed_taskid2taskname[row.CompletedTaskId]
sender = row.Sender
timestamp = row.Timestamp
event_type = row.EventType
json_file_path = '{}/EventLog/{}_*.json'.format(
directory, content_file_id)
json_file = glob.glob(expanduser(json_file_path))
if len(json_file) != 1:
print(
'WARNING1: json file for id: {} was not found'
' in the EventLog folder.\n'.format(
content_file_id))
else:
with open(json_file[0], 'r') as f:
content = json.load(f)
if 'type' in content and content['type'] == 'JOINED':
continue
if event_type == 'TASK_ATTRIBUTE':
input_str = ''
if question_name[:7] == 'GD_solo':
if content['attributeName'] == 'surveyAnswer0':
input_str = 'answer'
elif content['attributeName'] == 'surveyAnswer1':
input_str = 'confidence'
else:
print('WARNING2: attribute name was unexpected.'
' It was {}, question was {} '
'and content was {}\n'.format(
content['attributeName'],
question_name,
content))
if question_name.endswith('_initial'):
question_name = question_name.split(
'_initial')[0] + '0'
answers_dt.append(
[sender, question_name, input_str,
content['attributeStringValue'], timestamp])
elif question_name[:12] == 'GD_influence':
if content['attributeName'] == 'surveyAnswer1':
input_str = 'self'
elif content['attributeName'] == 'surveyAnswer2':
input_str = 'other'
else:
print('WARNING3: attribute name was unexpected.'
' It was {}\n'.format(content['attributeName']))
influences_dt.append(
[sender, question_name, input_str,
content['attributeStringValue'], timestamp])
elif question_name[:14] == 'GD_frustration':
frustrations_dt.append(
[sender, question_name,
content['attributeStringValue'], timestamp])
else:
print('WARNING4: There was an unexpected '
'question: {}\n'.format(question_name))
elif event_type == 'COMMUNICATION_MESSAGE':
if len(content['message']) > 0:
messages_dt.append(
[sender, question_name,
content['message'], timestamp])
else:
print('WARNING5: There was an unexpected'
' EventType: {}\n'.format(event_type))
self.answers = pd.DataFrame(answers_dt, columns = [
'sender', 'question', 'input', 'value', 'timestamp'])
self.influences = pd.DataFrame(influences_dt, columns = [
'sender', 'question', 'input', 'value', 'timestamp'])
self.frustrations = pd.DataFrame(frustrations_dt, columns = [
'sender', 'question', 'value', 'timestamp'])
self.messages = pd.DataFrame(messages_dt, columns = [
'sender', 'question', 'text', 'timestamp'])
# Sorts all based on timestamp.
self.answers.sort_values(by='timestamp', inplace=True)
self.influences.sort_values(by='timestamp', inplace=True)
self.frustrations.sort_values(by='timestamp', inplace=True)
self.messages.sort_values(by='timestamp', inplace=True)
self.users = np.unique(
self.influences.sender.tolist() +
self.messages.sender.tolist() +
self.answers.sender.tolist())
if self.users.size > 0:
self.team_id = self.users[0].split('.')[0]
def get_answers_in_simple_format(self) -> pd.DataFrame:
"""Gets all answers in a simple format to read them in the easiest way.
"""
# if len(self.answers) == 0:
# raise ValueError('The object has not been initialized.')
users = self.users
questions = np.unique(self.answers.question)
data = []
for question in questions:
dt = [question[len('GD_solo_'):]]
for user in users:
for input in ['answer', 'confidence']:
this_answer = self.answers[
(self.answers.question == question) & (
self.answers.sender == user) & (
self.answers.input == input)]
val = ''
if len(this_answer.value) > 0:
# Because if there might be multiple log entry for the
# same text box, we take the last one.
val = list(this_answer.value)[-1]
dt.append(val)
data.append(dt)
columns = ['Question']
for user in users:
columns += [user + '\'s answer', user + '\'s confidence']
        return pd.DataFrame(data, columns=columns)
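# Minimal usage sketch (hypothetical directory, added for illustration):
#
#     loader = TeamLogsLoader(directory='~/Datasets/Jeopardy/team42')
#     answers = loader.get_answers_in_simple_format()
#     print(answers.head())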
import gdax
import csv
import datetime as dt
import pandas_datareader.data as web
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.finance import candlestick_ohlc
import matplotlib.dates as mdates
#gets price data
public_client = gdax.PublicClient()
def coin_df_operations(df, coin_name):
#converts unix time to readable time based on the coin dataframe passed to it
    df['regular time'] = pd.to_datetime(df.index, unit='s')
# In[]
import sys, os
sys.path.append('../')
sys.path.append('../src/')
import numpy as np
import pandas as pd
from scipy import sparse
import networkx as nx
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
import time
from sklearn.decomposition import PCA
from sklearn.manifold import MDS
import diffusion_dist as diff
import dataset as dataset
import model as model
import loss as loss
import train
import TI as ti
import benchmark as bmk
from umap import UMAP
import utils as utils
import post_align as palign
from scipy.sparse import load_npz
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
plt.rcParams["font.size"] = 20
results_dir = "results_acc_anchor/"
# In[]
seeds = [0, 1, 2]
latent_dims = [4, 8, 32]
reg_ds = [1, 10]
reg_gs = [0.01, 1, 10]
reg_mmds = [1, 10, 20, 30]
latent_dim = latent_dims[eval(sys.argv[1])]
reg_d = reg_ds[eval(sys.argv[2])]
reg_g = reg_gs[eval(sys.argv[3])]
# harder to merge, need to make mmd loss larger
reg_mmd = reg_mmds[eval(sys.argv[4])]
learning_rate = 3e-4
n_epochs = 500
use_anchor = True
ts = [30, 50, 70]
use_potential = True
norm = "l1"
scores = pd.DataFrame(columns = ["dataset", "kendall-tau", "F1-score"])
# for data_name in ["lin_rand1", "lin_rand2", "lin_rand3",
# "lin_new1", "lin_new2", "lin_new3",
# "lin_new4", "lin_new5", "lin_new6",
# "bifur1", "bifur2", "bifur3",
# "bifur4", "bifur5", "bifur6",
# "bifur7", "bifur8", "bifur9",
# "trifur_rand1", "trifur_rand2", "trifur_rand3",
# "trifur_new1","trifur_new2","trifur_new3",
# "trifur_new4","trifur_new5","trifur_new6"
# ]:
# scores = pd.read_csv(results_dir + "scores.csv", index_col = 0)
for data_name in ["lin1", "lin2", "lin3", "lin4", "lin5", "lin6",
"bifur1", "bifur2", "bifur3","bifur4", "bifur5", "bifur6",
"trifur1", "trifur2", "trifur3","trifur4","trifur5","trifur6"]:
if not os.path.exists(results_dir + data_name + "/"):
print("make directory")
os.makedirs(results_dir + data_name + "/")
for seed in seeds:
print("Random seed: " + str(seed))
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
counts_rna = pd.read_csv("../data/simulated/" + data_name + "/GxC1.txt", sep = "\t", header = None).values.T
counts_atac = pd.read_csv("../data/simulated/" + data_name + "/RxC2.txt", sep = "\t", header = None).values.T
label_rna = pd.read_csv("../data/simulated/" + data_name + "/cell_label1.txt", sep = "\t")["pop"].values.squeeze()
label_atac = pd.read_csv("../data/simulated/" + data_name + "/cell_label2.txt", sep = "\t")["pop"].values.squeeze()
pt_rna = pd.read_csv("../data/simulated/" + data_name + "/pseudotime1.txt", header = None).values.squeeze()
pt_atac = pd.read_csv("../data/simulated/" + data_name + "/pseudotime2.txt", header = None).values.squeeze()
# preprocessing
libsize = 100
counts_rna = counts_rna/np.sum(counts_rna, axis = 1)[:, None] * libsize
counts_rna = np.log1p(counts_rna)
rna_dataset = dataset.dataset(counts = counts_rna, anchor = np.argsort(pt_rna)[:10])
atac_dataset = dataset.dataset(counts = counts_atac, anchor = np.argsort(pt_atac)[:10])
        coarse_reg = torch.FloatTensor(pd.read_csv("../data/simulated/" + data_name + "/region2gene.txt", sep = "\t", header = None).values)  # .values and the closing paren are reconstructed (assumed) from the truncated line
# Scientific Library
import numpy as np
import pandas as pd
from scipy import stats
from scipy.stats import norm as sp_norm
from scipy.stats.distributions import chi2 as sp_chi2
# Standard Library
from dataclasses import asdict, dataclass, field
from importlib.metadata import version
import importlib.resources as importlib_resources
import logging
from pathlib import Path
import platform
import shutil
from typing import List, Optional, Union
# Third Party
from PyPDF2 import PdfFileReader
from click_help_colors import HelpColorsCommand, HelpColorsGroup
import dill
from joblib import Parallel
from psutil import cpu_count
from rich import box
from rich.console import Console, ConsoleOptions, RenderResult
from rich.table import Table
import toml
# First Party
from metadamage.progressbar import console, progress
#%%
logger = logging.getLogger(__name__)
#%%
@dataclass
class Config:
out_dir: Path
#
max_fits: Optional[int]
max_cores: int
# max_position: Optional[int]
#
min_alignments: int
min_k_sum: int
min_N_at_each_pos: int
#
substitution_bases_forward: str
substitution_bases_reverse: str
#
bayesian: bool
forced: bool
version: str
dask_port: int
#
filename: Optional[Path] = None
shortname: Optional[str] = None
N_filenames: Optional[int] = None
N_fits: Optional[int] = None
N_cores: int = field(init=False)
def __post_init__(self):
self._set_N_cores()
def _set_N_cores(self):
available_cores = cpu_count(logical=True)
if self.max_cores > available_cores:
self.N_cores = available_cores - 1
logger.info(
f"'max_cores' is set to a value larger than the maximum available"
f"so clipping to {self.N_cores} (available-1) cores"
)
elif self.max_cores < 0:
self.N_cores = available_cores - abs(self.max_cores)
logger.info(
f"'max-cores' is set to a negative value"
f"so using {self.N_cores} (available-max_cores) cores"
)
else:
self.N_cores = self.max_cores
def add_filenames(self, filenames):
self.N_filenames = len(filenames)
def add_filename(self, filename):
self.filename = filename
self.shortname = extract_name(filename)
@property
def filename_counts(self):
if self.shortname is None:
raise AssertionError(
"Shortname has to be set before filename_counts is defined: "
"cfg.add_filename(filename) "
)
return self.out_dir / "counts" / f"{self.shortname}.parquet"
@property
def filename_fit_results(self):
if self.shortname is None:
raise AssertionError(
"Shortname has to be set before filename_fit_results is defined: "
"cfg.add_filename(filename) "
)
return self.out_dir / "fit_results" / f"{self.shortname}.parquet"
@property
def filename_fit_predictions(self):
if self.shortname is None:
raise AssertionError(
"Shortname has to be set before filename_fit_predictions is defined: "
"cfg.add_filename(filename) "
)
return self.out_dir / "fit_predictions" / f"{self.shortname}.parquet"
def set_number_of_fits(self, df_counts):
self.N_tax_ids = len(pd.unique(df_counts.tax_id))
if self.max_fits is not None and self.max_fits > 0:
self.N_fits = min(self.max_fits, self.N_tax_ids)
# use all TaxIDs available
else:
self.N_fits = self.N_tax_ids
logger.info(f"Setting number_of_fits to {self.N_fits}")
def to_dict(self):
d_out = asdict(self)
for key, val in d_out.items():
if isinstance(val, Path):
d_out[key] = str(val)
return d_out
# def save_dict(self, dict_name):
# d_out = self.to_dict()
# with open(dict_name, "w") as f:
# toml.dump(d_out, f)
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
yield f""
my_table = Table(title="[b]Configuration:[/b]", box=box.MINIMAL_HEAVY_HEAD)
# my_table = Table(title="Configuration:")
my_table.add_column("Attribute", justify="left", style="cyan")
my_table.add_column("Value", justify="center", style="magenta")
if self.N_filenames:
my_table.add_row("Number of files", str(self.N_filenames))
my_table.add_row("Output directory", str(self.out_dir))
my_table.add_row("Maximum number of fits pr. file", str(self.max_fits))
if self.N_fits:
my_table.add_row("Number of fits pr. file", str(self.N_fits))
my_table.add_row("Maximum number of cores to use", str(self.max_cores))
if self.N_cores:
my_table.add_row("Number of cores to use", str(self.N_cores))
my_table.add_row("Minimum number of alignments", str(self.min_alignments))
my_table.add_row("Minimum k sum", str(self.min_k_sum))
my_table.add_row("Minimum N at each position", str(self.min_N_at_each_pos))
my_table.add_row(
"Substitution bases forward", str(self.substitution_bases_forward)
)
my_table.add_row(
"Substitution bases reverse", str(self.substitution_bases_reverse)
)
my_table.add_row("Bayesian", str(self.bayesian))
my_table.add_row("Forced", str(self.forced))
my_table.add_row("Version", self.version)
if self.filename:
my_table.add_row("Filename", str(self.filename))
my_table.add_row("Shortname", str(self.shortname))
yield my_table
#%%
def find_style_file():
with importlib_resources.path("metadamage", "style.mplstyle") as path:
return path
def is_ipython():
try:
return __IPYTHON__
except NameError:
return False
def extract_name(filename, max_length=60):
shortname = Path(filename).stem.split(".")[0]
if len(shortname) > max_length:
shortname = shortname[:max_length] + "..."
logger.info(f"Running new file: {shortname}")
return shortname
def file_is_valid(filename):
if Path(filename).exists() and Path(filename).stat().st_size > 0:
return True
exists = Path(filename).exists()
    valid_size = exists and Path(filename).stat().st_size > 0
logger.error(
f"{filename} is not a valid file. "
f"{exists=} and {valid_size=}. "
f"Skipping for now."
)
return False
def delete_folder(path):
try:
shutil.rmtree(path)
except OSError:
logger.exception(f"Could not delete folder, {path}")
def init_parent_folder(filename):
if isinstance(filename, str):
filename = Path(filename)
filename.parent.mkdir(parents=True, exist_ok=True)
def is_forward(df):
return df["strand"] == "5'"
def get_forward(df):
return df[is_forward(df)]
def get_reverse(df):
return df[~is_forward(df)]
def get_specific_tax_id(df, tax_id):
if tax_id == -1:
tax_id = df.tax_id.iloc[0]
return df.query("tax_id == @tax_id")
def load_dill(filename):
with open(filename, "rb") as file:
return dill.load(file)
def save_dill(filename, x):
init_parent_folder(filename)
with open(filename, "wb") as file:
dill.dump(x, file)
# def save_to_hdf5(filename, key, value):
# with pd.HDFStore(filename, mode="a") as store:
# store.put(key, value, data_columns=True, format="Table")
# def save_metadata_to_hdf5(filename, key, value, metadata):
# with pd.HDFStore(filename, mode="a") as store:
# store.get_storer(key).attrs.metadata = metadata
# def load_from_hdf5(filename, key):
# if isinstance(key, str):
# with pd.HDFStore(filename, mode="r") as store:
# df = store.get(key)
# return df
# elif isinstance(key, (list, tuple)):
# keys = key
# out = []
# with pd.HDFStore(filename, mode="r") as store:
# for key in keys:
# out.append(store.get(key))
# return out
# def load_metadata_from_hdf5(filename, key):
# with pd.HDFStore(filename, mode="r") as store:
# metadata = store.get_storer(key).attrs.metadata
# return metadata
# def get_hdf5_keys(filename, ignore_subgroups=False):
# with pd.HDFStore(filename, mode="r") as store:
# keys = store.keys()
# if ignore_subgroups:
# keys = list(set([key.split("/")[1] for key in keys]))
# return keys
# else:
# raise AssertionError(f"ignore_subgroups=False not implemented yet.")
#%%
def downcast_dataframe(df, categories, fully_automatic=False):
categories = [category for category in categories if category in df.columns]
d_categories = {category: "category" for category in categories}
df2 = df.astype(d_categories)
int_cols = df2.select_dtypes(include=["integer"]).columns
if df2[int_cols].max().max() > np.iinfo("uint32").max:
raise AssertionError("Dataframe contains too large values.")
for col in int_cols:
if fully_automatic:
df2.loc[:, col] = pd.to_numeric(df2[col], downcast="integer")
else:
if col == "position":
df2.loc[:, col] = df2[col].astype("int8")
else:
df2.loc[:, col] = df2[col].astype("uint32")
for col in df2.select_dtypes(include=["float"]).columns:
if fully_automatic:
            df2.loc[:, col] = pd.to_numeric(df2[col], downcast="float")
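        else:
            # Assumed completion: the excerpt cuts off here; mirroring the
            # integer branch above, default floats to a fixed 32-bit width.
            df2.loc[:, col] = df2[col].astype("float32")
    return df2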
# -*- coding: utf-8 -*-
from datetime import datetime
from pandas.compat import range, lrange
import operator
import pytest
from warnings import catch_warnings
import numpy as np
from pandas import Series, Index, isna, notna
from pandas.core.dtypes.common import is_float_dtype
from pandas.core.dtypes.missing import remove_na_arraylike
from pandas.core.panel import Panel
from pandas.core.panel4d import Panel4D
from pandas.tseries.offsets import BDay
from pandas.util.testing import (assert_frame_equal, assert_series_equal,
assert_almost_equal)
import pandas.util.testing as tm
def add_nans(panel4d):
for l, label in enumerate(panel4d.labels):
panel = panel4d[label]
tm.add_nans(panel)
class SafeForLongAndSparse(object):
def test_repr(self):
repr(self.panel4d)
def test_iter(self):
tm.equalContents(list(self.panel4d), self.panel4d.labels)
def test_count(self):
f = lambda s: notna(s).sum()
self._check_stat_op('count', f, obj=self.panel4d, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
pytest.skip("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel4d
# # set some NAs
# obj.loc[5:10] = np.nan
# obj.loc[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na_arraylike(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
with catch_warnings(record=True):
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
expected = obj.apply(wrapper, axis=i)
tm.assert_panel_equal(result, expected)
else:
skipna_wrapper = alternative
wrapper = alternative
with catch_warnings(record=True):
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
expected = obj.apply(skipna_wrapper, axis=i)
tm.assert_panel_equal(result, expected)
pytest.raises(Exception, f, axis=obj.ndim)
class SafeForSparse(object):
def test_get_axis(self):
assert self.panel4d._get_axis(0) is self.panel4d.labels
assert self.panel4d._get_axis(1) is self.panel4d.items
assert self.panel4d._get_axis(2) is self.panel4d.major_axis
assert self.panel4d._get_axis(3) is self.panel4d.minor_axis
def test_set_axis(self):
with catch_warnings(record=True):
new_labels = Index(np.arange(len(self.panel4d.labels)))
# TODO: unused?
# new_items = Index(np.arange(len(self.panel4d.items)))
new_major = Index(np.arange(len(self.panel4d.major_axis)))
new_minor = Index(np.arange(len(self.panel4d.minor_axis)))
# ensure propagate to potentially prior-cached items too
# TODO: unused?
# label = self.panel4d['l1']
self.panel4d.labels = new_labels
if hasattr(self.panel4d, '_item_cache'):
assert 'l1' not in self.panel4d._item_cache
assert self.panel4d.labels is new_labels
self.panel4d.major_axis = new_major
assert self.panel4d[0].major_axis is new_major
assert self.panel4d.major_axis is new_major
self.panel4d.minor_axis = new_minor
assert self.panel4d[0].minor_axis is new_minor
assert self.panel4d.minor_axis is new_minor
def test_get_axis_number(self):
assert self.panel4d._get_axis_number('labels') == 0
assert self.panel4d._get_axis_number('items') == 1
assert self.panel4d._get_axis_number('major') == 2
assert self.panel4d._get_axis_number('minor') == 3
def test_get_axis_name(self):
assert self.panel4d._get_axis_name(0) == 'labels'
assert self.panel4d._get_axis_name(1) == 'items'
assert self.panel4d._get_axis_name(2) == 'major_axis'
assert self.panel4d._get_axis_name(3) == 'minor_axis'
def test_arith(self):
with catch_warnings(record=True):
self._test_op(self.panel4d, operator.add)
self._test_op(self.panel4d, operator.sub)
self._test_op(self.panel4d, operator.mul)
self._test_op(self.panel4d, operator.truediv)
self._test_op(self.panel4d, operator.floordiv)
self._test_op(self.panel4d, operator.pow)
self._test_op(self.panel4d, lambda x, y: y + x)
self._test_op(self.panel4d, lambda x, y: y - x)
self._test_op(self.panel4d, lambda x, y: y * x)
self._test_op(self.panel4d, lambda x, y: y / x)
self._test_op(self.panel4d, lambda x, y: y ** x)
pytest.raises(Exception, self.panel4d.__add__,
self.panel4d['l1'])
@staticmethod
def _test_op(panel4d, op):
result = op(panel4d, 1)
tm.assert_panel_equal(result['l1'], op(panel4d['l1'], 1))
def test_keys(self):
tm.equalContents(list(self.panel4d.keys()), self.panel4d.labels)
def test_iteritems(self):
"""Test panel4d.iteritems()"""
assert (len(list(self.panel4d.iteritems())) ==
len(self.panel4d.labels))
def test_combinePanel4d(self):
with catch_warnings(record=True):
result = self.panel4d.add(self.panel4d)
tm.assert_panel4d_equal(result, self.panel4d * 2)
def test_neg(self):
with catch_warnings(record=True):
tm.assert_panel4d_equal(-self.panel4d, self.panel4d * -1)
def test_select(self):
with catch_warnings(record=True):
p = self.panel4d
# select labels
result = p.select(lambda x: x in ('l1', 'l3'), axis='labels')
expected = p.reindex(labels=['l1', 'l3'])
tm.assert_panel4d_equal(result, expected)
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
tm.assert_panel4d_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15),
axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
tm.assert_panel4d_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=3)
expected = p.reindex(minor=['A', 'D'])
tm.assert_panel4d_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo',), axis='items')
tm.assert_panel4d_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
def test_abs(self):
with catch_warnings(record=True):
result = self.panel4d.abs()
expected = np.abs(self.panel4d)
tm.assert_panel4d_equal(result, expected)
p = self.panel4d['l1']
result = p.abs()
expected = np.abs(p)
tm.assert_panel_equal(result, expected)
df = p['ItemA']
result = df.abs()
expected = np.abs(df)
assert_frame_equal(result, expected)
class CheckIndexing(object):
def test_getitem(self):
pytest.raises(Exception, self.panel4d.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
with catch_warnings(record=True):
expected = self.panel4d['l2']
result = self.panel4d.pop('l2')
tm.assert_panel_equal(expected, result)
assert 'l2' not in self.panel4d.labels
del self.panel4d['l3']
assert 'l3' not in self.panel4d.labels
pytest.raises(Exception, self.panel4d.__delitem__, 'l3')
values = np.empty((4, 4, 4, 4))
values[0] = 0
values[1] = 1
values[2] = 2
values[3] = 3
panel4d = Panel4D(values, lrange(4), lrange(4),
lrange(4), lrange(4))
# did we delete the right row?
panel4dc = panel4d.copy()
del panel4dc[0]
tm.assert_panel_equal(panel4dc[1], panel4d[1])
tm.assert_panel_equal(panel4dc[2], panel4d[2])
tm.assert_panel_equal(panel4dc[3], panel4d[3])
panel4dc = panel4d.copy()
del panel4dc[1]
tm.assert_panel_equal(panel4dc[0], panel4d[0])
tm.assert_panel_equal(panel4dc[2], panel4d[2])
tm.assert_panel_equal(panel4dc[3], panel4d[3])
panel4dc = panel4d.copy()
del panel4dc[2]
tm.assert_panel_equal(panel4dc[1], panel4d[1])
tm.assert_panel_equal(panel4dc[0], panel4d[0])
tm.assert_panel_equal(panel4dc[3], panel4d[3])
panel4dc = panel4d.copy()
del panel4dc[3]
tm.assert_panel_equal(panel4dc[1], panel4d[1])
tm.assert_panel_equal(panel4dc[2], panel4d[2])
            tm.assert_panel_equal(panel4dc[0], panel4d[0])
"""
Goals
------
Program should generate a report (Excel File) that shows
how data quality metrics for each HPO site change over time.
Data quality metrics include:
1. the number of duplicates per table
2. number of 'start dates' that precede 'end dates'
3. number of records that are >30 days after a patient's death date
4. source table success rates
5. concept table success rates
ASSUMPTIONS
-----------
1. The user has all of the files s/he wants to analyze in the current
directory
2. The user will know to change the 'report' variables to match the
file names of the .xlsx files in the current working directory.
3. All sheets are saved as month_date_year.xlsx
- year should be four digits
- this name is used to determine the date
4. The sheet names for all of the generated reports are consistent
5. The 'aggregate_info' statistics generated in some reports are
always labeled as 'aggregate_info.' This ensures these rows can
be excluded when generating initial dataframes. These aggregate
statistics can then be generated more effectively down the line
with an appropriate 'weighting'.
6. Assumes that all of the columns from the 'source' tab of the
analytics reports will always have the same names.
FIXME:
-------------------------------------------------------
1. Numerous naming inconsistencies across the report files prevent
   effective parsing
"""
import datetime
import math
import os
import sys
import pandas as pd
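# Expected inputs, per the assumptions above (hypothetical file names):
#   march_19_2019.xlsx, april_17_2019.xlsx, ...
# each carrying sheets such as 'duplicates', 'source', and 'concept'.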
def get_user_analysis_choice():
"""
Function gets the user input to determine what kind of data
quality metrics s/he wants to investigate.
:return:
analytics_type (str): the data quality metric the user wants to
investigate
percent_bool (bool): determines whether the data will be seen
as 'percentage complete' or individual instances of a
particular error
target_low (bool): determines whether the number displayed should
be considered a desirable or undesirable characteristic
"""
analysis_type_prompt = "\nWhat kind of analysis over time report " \
"would you like to generate for each site?\n\n" \
"A. Duplicates\n" \
"B. Amount of data following death dates\n" \
"C. Amount of data with end dates preceding start dates\n" \
"D. Success Rate for Source Tables\n" \
"E. Success Rate for Concept Tables\n\n" \
"Please specify your choice by typing the corresponding letter."
user_command = input(analysis_type_prompt).lower()
choice_dict = {
'a': 'duplicates',
'b': 'data_after_death',
'c': 'end_before_begin',
'd': 'source',
'e': 'concept'}
while user_command not in choice_dict.keys():
print("\nInvalid choice. Please specify a letter that corresponds "
"to an appropriate analysis report.\n")
user_command = input(analysis_type_prompt).lower()
# NOTE: This dictionary needs to be expanded in the future
percentage_dict = {
'duplicates': False,
'data_after_death': True,
'end_before_begin': True,
'source': True,
'concept': True}
# dictionary indicates if the target is to minimize or maximize number
target_low = {
'duplicates': True,
'data_after_death': True,
'end_before_begin': True,
'source': False, # table success rates
'concept': False}
analytics_type = choice_dict[user_command]
percent_bool = percentage_dict[analytics_type]
target_low = target_low[analytics_type]
return analytics_type, percent_bool, target_low
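# Illustrative return value (hypothetical session): entering 'b' yields
# ('data_after_death', True, True) -- a percentage-style metric where a
# lower number is the desirable outcome.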
def load_files(user_choice, file_names):
"""
Function loads the relevant sheets from all of the
files in the directory (see 'file_names' list from above).
'Relevant sheet' is defined by previous user input.
:parameter
user_choice (string): represents the sheet from the analysis reports
whose metrics will be compared over time
file_names (list): list of the user-specified Excel files that are
in the current directory. Files are analytics reports to be
scanned.
:returns
sheets (list): list of pandas dataframes. each dataframe contains
info about data quality for all of the sites for a date.
"""
num_files_indexed = 0
sheets = []
while num_files_indexed < len(file_names):
try:
file_name = file_names[num_files_indexed]
sheet = pd.read_excel(file_name, sheet_name=user_choice)
if sheet.empty:
print("WARNING: No {} sheet found in dataframe {}".format(
user_choice, file_name))
del file_names[num_files_indexed]
                num_files_indexed -= 1  # re-check the same index after removal
else:
sheets.append(sheet)
num_files_indexed += 1
except FileNotFoundError:
            print("{} not found in the current directory: {}. Please "
                  "ensure that the file names are consistent between "
                  "the Python script and the file names in your current "
                  "directory.".format(file_names[num_files_indexed], os.getcwd()))
sys.exit(0)
return sheets
def get_comprehensive_tables(dataframes, analytics_type):
"""
Function is used to ensure that all of the HPO sites will
have all of the same table types. This is important
if a table type is introduced in future iterations of
the analysis script.
:param
dataframes (lst): list of pandas dataframes that are
representations of the Excel analytics files
analytics_type (str): the data quality metric the user wants to
investigate
:return:
final_tables (lst): list of the tables that should be represented
for each HPO at each date. these are extracted from the
column labels of the Excel analytics files.
"""
# FIXME: this is a hacky way to bypass columns we are less
# interested in. Could be improved. Still want to prevent
# hard-coding in tables that 'should' be there.
# FIXME: We need to think about how to ensure we might only
# be interested in getting tables that have the total row
# count displayed on the source or concept sheets
undocumented_cols = ['Unnamed: 0', 'src_hpo_id', 'HPO',
'total', 'device_exposure']
rate_focused_inputs = ['source', 'concept']
# FIXME: Need consistent way to document the rates between
# different error reports; right now a 'hacky' fix because
# of report naming inconsistencies
for _, sheet in enumerate(dataframes): # for each date
data_info = sheet.iloc[1, :] # just to get the columns
column_names = data_info.keys()
if analytics_type in rate_focused_inputs:
for col_label, _ in data_info.iteritems():
if col_label[-5:] != '_rate' and \
col_label[-7:] != '_rate_y':
undocumented_cols.append(col_label)
final_tables = [x for x in column_names if x not in
undocumented_cols]
# eliminate duplicates
final_tables = list(dict.fromkeys(final_tables))
return final_tables
def get_info(sheet, row_num, percentage, sheet_name,
mandatory_tables):
"""
Function is used to create a dictionary that contains
the number of flawed records for a particular site.
:param
sheet (dataframe): pandas dataframe to traverse. Represents a
sheet with numbers indicating data quality.
row_num (int): row (0-index) with all of the information for
the specified site's data quality
percentage (boolean): used to determine whether or not the
number is a simple record count (e.g. duplicates)
versus the percentage of records (e.g. the success rate
for each of the tables)
sheet_name (str): name for the sheet for use in the error
message
mandatory_tables (lst): contains the tables that should be
            documented for every site and at every date.
:return:
err_dictionary (dictionary): key:value pairs represent the
column name:number that represents the quality of the data
NOTE: This function was modified from the e-mail generator. This
function, however, logs ALL of the information in the returned
error dictionary. This includes 0 values if the data is wholly
complete.
"""
if row_num is not None:
data_info = sheet.iloc[row_num, :] # series, row labels and values
else: # row in future sheets but not current sheet
data_info = sheet.iloc[1, :] # just to get the columns
column_names = data_info.keys()
null_list = [None] * len(column_names)
data_info = pd.Series(null_list, column_names)
err_dictionary = {}
for col_label, number in data_info.iteritems():
if col_label in mandatory_tables:
if number is None or number == 'No Data': # row does not exist
err_dictionary[col_label] = float('NaN')
else:
try:
number = float(number)
except ValueError:
pass
else:
if number < 0: # just in case
raise ValueError("Negative number detected in sheet "
"{} for column {}".format(
sheet_name, col_label))
elif percentage and number > 100:
raise ValueError("Percentage value > 100 detected in "
"sheet {} for column {}".format(
sheet_name, col_label))
                    elif percentage and target_low:  # NOTE: target_low is read from module scope
err_dictionary[col_label] = round(100 - number, 1)
elif percentage and not target_low: # effective
err_dictionary[col_label] = round(number, 1)
elif not percentage and number > -1:
err_dictionary[col_label] = int(number)
else:
pass # do nothing; do not want to document the column
# adding all the tables; maintaining consistency for versatility
for table in mandatory_tables:
if table not in err_dictionary.keys():
err_dictionary[table] = float('NaN')
return err_dictionary
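# Illustrative return value (hypothetical table names and numbers):
#   {'condition_occurrence_rate': 2.5, 'drug_exposure_rate': float('NaN')}
# NaN marks mandatory tables with no usable value in this sheet.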
def find_hpo_row(sheet, hpo, sheet_name, selective_rows,
analytics_type):
"""
Finds the row index of a particular HPO site within
a larger sheet.
:param
sheet (dataframe): dataframe with all of the data quality
metrics for the sites.
hpo (string): represents the HPO site whose row in
the particular sheet needs to be determined
sheet_name (string): name of the file from which the
particular sheet of user_command type was extracted
selective_rows (list): list of rows (potentially HPO sites)
that are in some (but not all) of the sheets used in the
analysis report
analytics_type (str): the data quality metric the user wants to
investigate
:return:
row_num (int): row number where the HPO site of question
lies within the sheet. returns none if the row is not
in the sheet in question but exists in other sheets
"""
hpo_column_name = 'src_hpo_id'
sheet_hpo_col = sheet[hpo_column_name]
row_num = 9999
for idx, site_id in enumerate(sheet_hpo_col):
row_not_in_sheet = ((hpo in selective_rows) and
(hpo not in sheet_hpo_col))
if hpo == site_id:
row_num = idx
elif row_not_in_sheet:
return None
if row_num == 9999: # just in case
raise NameError("{} not found in the {} sheet "
"from {}".format(
hpo, analytics_type, sheet_name))
return row_num
def iterate_sheets(dataframes, hpo_id_list, selective_rows,
percent, analytics_type, file_names):
"""
Function iterates through all of the sheets and ultimately
generates a series of dictionaries that contain all of the
data quality information for all of the
a. dates (for the specified sheet)
b. sites
c. table types
:param
dataframes (list): list of the Pandas dataframes that
contain data quality info for each of the sites
hpo_id_list (list): HPO site IDs to iterate through on
each sheet. organized alphabetically
selective_rows (list): list of rows (potentially HPO
sites) that are in some (but not all) of the sheets
used in the analysis report
percent (boolean): used to determine whether or not the
number is a simple record count (e.g. duplicates)
versus the percentage of records (e.g. the success rate
for each of the tables)
analytics_type (string): the user's choice for the
data metric he/she wants to measure
file_names (list): list of the user-specified Excel files that are
in the current directory. Files are analytics reports to be
scanned.
:return:
dates_and_info: dictionary with three sub-dictionaries.
the key value pairs are as follows (highest-to-lowest
level)
a. dates_and_info
key is a date
value is the following dictionary
b. hpo_errors_for_date
key is an HPO site
value is the following dictionary
c. err_dict_for_hpo
key is the table type
value is a number representing the data quality
for that table type
mandatory_tables (lst): contains the tables that should be
            documented for every site and at every date.
"""
dates_and_info = {} # key:value will be date:dict
mandatory_tables = get_comprehensive_tables(
dataframes, analytics_type)
for number, sheet in enumerate(dataframes): # for each date
num_chars_to_chop = 5 # get rid of .xlsx
sheet_name = file_names[number]
sheet_name = sheet_name[:-num_chars_to_chop]
errors_for_date = {} # key:value will be hpo:dict
for hpo in hpo_id_list: # for each HPO
hpo_row_idx = find_hpo_row(
sheet, hpo, sheet_name,
selective_rows, analytics_type)
if hpo == 'aggregate counts': # will be added later
pass
else:
err_dict_for_hpo = get_info(
sheet, hpo_row_idx, percent,
sheet_name, mandatory_tables)
errors_for_date[hpo] = err_dict_for_hpo
# error information for all of the dates
dates_and_info[sheet_name] = errors_for_date
return dates_and_info, mandatory_tables
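# Shape of dates_and_info (hypothetical date/site/table names):
#   {'march_19_2019': {'hpo_a': {'drug_exposure_rate': 1.2, ...}, ...}, ...}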
def generate_hpo_id_col(dataframes):
"""
Function is used to distinguish between row labels that
are in all of the data analysis outputs versus row labels
that are only in some of the data analysis outputs.
:param
dataframes (list): list of dataframes that were loaded
from the analytics files in the path
:return:
hpo_id_col (list): list of the strings that should go
into an HPO ID column. for use in generating subsequent
dataframes.
selective_rows (list): list of the strings that are in
the HPO ID columns for some but not all of the sheets.
useful down the line when detecting an HPO's row.
"""
hpo_col_name = 'src_hpo_id'
selective_rows, total_hpo_id_columns = [], []
for _, df in enumerate(dataframes):
hpo_id_col_sheet = df[hpo_col_name].values
total_hpo_id_columns.append(hpo_id_col_sheet)
# find the intersection of all the lists
in_all_sheets = set(dataframes[0][hpo_col_name].values)
for df in dataframes[1:]:
hpo_id_col_sheet = set(df[hpo_col_name].values)
in_all_sheets.intersection_update(hpo_id_col_sheet)
# eliminate blank rows
in_all_sheets = list(in_all_sheets)
in_all_sheets = [row for row in in_all_sheets if isinstance(row, str)]
# determining rows in some (but not all) of the dataframes
for _, df in enumerate(dataframes):
hpo_id_col_sheet = df[hpo_col_name].values
selective = set(in_all_sheets) ^ set(hpo_id_col_sheet)
for row in selective:
if (row not in selective_rows) and isinstance(row, str):
selective_rows.append(row)
# standardize; all sheets now have the same rows
hpo_id_col = in_all_sheets + selective_rows
bad_rows = [' Avarage', 'aggregate counts'] # do not include
hpo_id_col = [x for x in hpo_id_col if x not in bad_rows]
hpo_id_col = sorted(hpo_id_col)
return hpo_id_col, selective_rows
def sort_names_and_tables(site_and_date_info, mandatory_tables):
"""
Function is used to sort the information (dates, tables,
and HPO site names) in an intuitive manner to ensure
consistency across sheets and dataframes.
:param
site_and_date_info (dict): dictionary with key:value
of date:additional dictionaries that contain metrics
for each HPO's data quality by type
mandatory_tables (lst): contains the tables that should be
            documented for every site and at every date.
:return:
ordered_dates_str (list): list of all the dates (from
            oldest to most recent) in string form
sorted_names (list): names of all the HPO sites in
alphabetical order (with the addition of 'aggregate
info')
        sorted_tables (lst): names of all the table types
in alphabetical order
"""
ordered_dates_dt = []
# NOTE: requires files to have full month name and 4-digit year
for date_str in site_and_date_info.keys():
date = datetime.datetime.strptime(date_str, '%B_%d_%Y')
ordered_dates_dt.append(date)
ordered_dates_dt = sorted(ordered_dates_dt)
# converting back to standard form to index into file
ordered_dates_str = [x.strftime('%B_%d_%Y').lower() for x
in ordered_dates_dt]
# earlier code ensured all sheets have same rows - can just take first
all_rows = site_and_date_info[ordered_dates_str[0]]
sorted_names = sorted(all_rows.keys())
sorted_names.append('aggregate_info')
mandatory_tables.sort()
return ordered_dates_str, sorted_names, mandatory_tables
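# Example: a file named 'march_19_2019.xlsx' yields the date string
# 'march_19_2019', which strptime parses with '%B_%d_%Y' (month-name
# matching is case-insensitive).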
def add_aggregate_info(site_and_date_info, percentage, sorted_names):
"""
Function is used to add an 'aggregate metric' that
summarizes all of the data quality issues for a
particular site on a particular date.
NOTE: This function DOES NOT take the weighted value
of all of these metrics. This is merely to attach
the aggregate statistic.
NOTE: This is for the DICTIONARY with the date as the
first set of keys.
:param
site_and_date_info (dict): dictionary with key:value
of date:additional dictionaries that contain metrics
for each HPO's data quality by type
percentage (boolean): used to determine whether or not the
number is a simple record count (e.g. duplicates)
versus the percentage of records (e.g. the success rate
for each of the tables)
        sorted_names (lst): list of the names that should have an
            aggregate statistic analyzed (e.g. skipping the misspelled
            ' Avarage' row found in some sheets)
:return:
site_and_date_info (dict): same as input parameter but
now each site and date has an added aggregate
statistic.
"""
for date in site_and_date_info.keys():
date_report = site_and_date_info[date]
date_metric = 0
for site in sorted_names:
table_metrics = date_report[site]
date_metric, num_iterated = 0, 0
for table in table_metrics.keys():
stat = table_metrics[table]
if not math.isnan(stat):
date_metric += stat
num_iterated += 1
if percentage and num_iterated > 0: # not really used
date_metric = date_metric / num_iterated
elif percentage and num_iterated == 0:
date_metric = float('NaN')
date_report['aggregate_info'] = date_metric
return site_and_date_info
def generate_weighted_average_table_sheet(
file_names, date, table, new_col_info):
"""
Function is used to generate a weighted average to indicate the
'completeness' of a particular table type across all sites.
This function is employed when the
a. HPO sites are the ROWS
b. The table types are the SHEETS
This is to make the final value of the column (a weighted average).
:param
file_names (list): list of the user-specified Excel files that are
in the current directory. Files are analytics reports to be
scanned.
date (str): date for the column that is being investigated
table (str): table whose weighted average for a particular date is
being determined
new_col_info (list): shows the proportion of 'poor' records per HPO
site
:return:
        total_quality (float): the row-weighted average of the logged rate
            across all sites (the proportion of poorly defined rows when
            error rates are logged)
Function returns None object when it cannot calculate a weighted
average
"""
hpo_total_rows_by_date, valid_cols_tot = generate_hpo_contribution(
file_names, 'total')
first_underscore = True
underscore_idx = 0
for idx, char in enumerate(table):
if first_underscore and char == '_':
underscore_idx = idx
first_underscore = False
# NOTE: the analysis script needs to output to source with a consistent
# naming convention with respect to the table types
    if not first_underscore:  # an underscore was found; keep the base table name
table = table[0:underscore_idx]
table_tot = table + "_total_row"
if table_tot in valid_cols_tot:
site_totals = hpo_total_rows_by_date[date][table_tot]
total_table_rows_across_all_sites = 0
total_poor_rows_across_all_sites = 0
# can only count actual values
for site_rows, site_err_rate in zip(site_totals, new_col_info):
if not math.isnan(site_rows) and not math.isnan(site_err_rate):
total_table_rows_across_all_sites += site_rows
site_err_rate = site_err_rate / 100 # logged as a percent
site_poor_rows = site_err_rate * site_rows
total_poor_rows_across_all_sites += site_poor_rows
if total_table_rows_across_all_sites > 0:
total_quality = 100 * round(total_poor_rows_across_all_sites /
total_table_rows_across_all_sites, 3)
else: # table only started to appear in later sheets
return float('NaN')
return total_quality
else: # no row count for table; cannot generate weighted average
return None
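# Worked example (hypothetical sites): two sites with 1000 and 4000 total
# rows and logged rates of 10.0 and 2.5 give
#   100 * (0.100 * 1000 + 0.025 * 4000) / 5000 = 4.0
# so the larger site dominates the weighted average.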
def generate_table_dfs(sorted_names, sorted_tables,
ordered_dates_str, site_and_date_info,
percentage, file_names):
"""
Function generates a unique dataframe containing data quality
metrics. Each dataframe should match to the metrics for a
particular table type.
The ROWS of each dataframe should be each HPO site.
NOTE: This function INTENTIONALLY excludes aggregate
information generation. This is so it can be generated
more efficiently down the line.
:param
sorted_names (lst): list of the hpo site names sorted
alphabetically
sorted_tables (lst): list of the different table types
sorted alphabetically
ordered_dates_str (lst): list of the different dates for
the data analysis outputs. goes from oldest to most
recent
site_and_date_info (dict): dictionary with key:value
of date:additional dictionaries that contain metrics
for each HPO's data quality by type
percentage (boolean): used to determine whether or not the
number is a simple record count (e.g. duplicates)
versus the percentage of records (e.g. the success rate
for each of the tables)
file_names (list): list of the user-specified Excel files that are
in the current directory. Files are analytics reports to be
scanned.
:return:
df_dictionary_by_table (dict): dictionary with key:value
of table type: pandas df with the format described above
"""
# create a new pandas df for each of the table types
total_dfs = []
num_tables = len(sorted_tables)
for _ in range(num_tables):
new_df = pd.DataFrame({'hpo_ids': sorted_names})
total_dfs.append(new_df)
# df for each table
for new_sheet_num, table in enumerate(sorted_tables):
df_in_question = total_dfs[new_sheet_num]
for date in ordered_dates_str: # generate the columns
new_col_info = []
hpo_site_info = site_and_date_info[date]
for site in sorted_names: # add the rows for the column
if site != 'aggregate_info':
hpo_table_info = hpo_site_info[site][table]
if not math.isnan(hpo_table_info):
new_col_info.append(hpo_table_info)
else:
new_col_info.append(float('NaN'))
if not percentage:
total = 0
for site_val in new_col_info:
if not math.isnan(site_val):
total += site_val
new_col_info.append(total) # adding aggregate
else:
# FIXME: There is no way to actually create a weighted
# average with some of the tables whose row counts
# do not exist.
weighted_avg = generate_weighted_average_table_sheet(
file_names, date, table, new_col_info)
if weighted_avg is not None: # successful calculation
new_col_info.append(weighted_avg)
else:
new_col_info.append("N/A")
df_in_question[date] = new_col_info
df_dictionary_by_table = dict(zip(sorted_tables, total_dfs))
return df_dictionary_by_table
def load_total_row_sheet(file_names, sheet_name):
"""
Function loads the sheets that contain information regarding the
total number of rows for each site for each table type. This loads
the corresponding sheets for all of the analytics reports in the
current directory.
:param
file_names (list): list of the user-specified Excel files that are
in the current directory. Files are analytics reports to be
scanned.
sheet_name (str): label for the sheet with the information
containing the number of rows
:return:
dataframes (list): list of Pandas dataframes that contain the
information regarding the total number of rows
hpo_id_col (list): list of the strings that should go
into an HPO ID column. for use in generating subsequent
dataframes.
selective_rows (list): list of the strings that are in
the HPO ID columns for some but not all of the sheets
"""
num_files = len(file_names)
dataframes = []
for file_num in range(num_files):
sheet = pd.read_excel(file_names[file_num], sheet_name)
dataframes.append(sheet)
hpo_id_col, selective_rows = generate_hpo_id_col(dataframes)
return dataframes, hpo_id_col, selective_rows
def get_valid_columns(dataframes, contribution_type, row_sheet_name):
"""
Function is used to determine the columns to be investigated in
across all of the site report dataframes. The columns should all
be the same in the 'source' sheet (otherwise an error is thrown).
The columns of interest are those that can help elucidate an
HPO site's relative contribution (total rows, well defined rows,
etc.)
:param
dataframes (lst): list of pandas dataframes loaded from the Excel
files generated from the analysis reports
contribution_type (str): string representing the types of columns to
look at for the dataframe. either can represent the 'total' row
metrics or the 'error' metrics for a particular column.
row_sheet_name (str): sheet name within the analytics files that
show the total number of rows and the number of well defined
rows
:return:
valid_cols (lst): list of the columns that are consistent across all
of the sheets and relevant to the HPO weighting report needed
"""
valid_cols = []
# find the columns you want to investigate
for df in dataframes:
for column in df:
if contribution_type == 'total' and len(column) > 9 and \
column[-9:] == 'total_row':
valid_cols.append(column)
elif contribution_type == 'valid' and len(column) > 16 and \
column[-16:] == 'well_defined_row':
valid_cols.append(column)
valid_cols = list(dict.fromkeys(valid_cols))
valid_cols.sort()
return valid_cols
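# Example of the suffix matching above (hypothetical column names):
#   'drug_exposure_total_row' qualifies when contribution_type == 'total';
#   'drug_exposure_well_defined_row' qualifies when it is 'valid'.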
def generate_hpo_contribution(file_names, contribution_type):
"""
Function is used to determine the 'contribution' for
each of the HPO sites. This is useful in determining
the 'weight' each site should be given when generating
aggregate statistics with respect to data quality.
:param
file_names (list): list of the user-specified Excel files that are
in the current directory. Files are analytics reports to be
scanned.
contribution_type (str): string representing the types of columns to
look at for the dataframe. either can represent the 'total' row
metrics or the 'error' metrics for a particular column.
:return:
hpo_contributions_by_date (dictionary): dictionary with the following
key:value pairings
a. date:following dictionary
b. table type: list with the relative contribution of each HPO
with respect to the number of rows it contributes for the
particular table type. the sequence of the list follows
the alphabetical order of all the HPO sites.
valid_cols (lst): list of the table types to be iterated over
"""
hpo_contributions_by_date = {} # key:value will be date:dict
row_sheet_name = 'concept'
dataframes, hpo_id_col, selective_rows = load_total_row_sheet(
file_names, row_sheet_name)
valid_cols = get_valid_columns(dataframes, contribution_type,
row_sheet_name)
for number, sheet in enumerate(dataframes): # for each date
num_chars_to_chop = 5 # get rid of .xlsx
date = file_names[number][:-num_chars_to_chop]
total_per_sheet = {}
for _, table_type in enumerate(valid_cols): # for each table
rows_for_table = []
for hpo in hpo_id_col: # follows alphabetical order
hpo_row_idx = find_hpo_row(sheet, hpo, date, selective_rows,
row_sheet_name)
if hpo == 'aggregate counts': # will be added later
pass
elif table_type not in sheet:
pass
elif hpo_row_idx is None:
rows_for_table.append(float('NaN'))
else:
rows_for_hpo = sheet[table_type][hpo_row_idx]
try: # later sheets put in the number of rows as a str
rows_for_hpo = float(rows_for_hpo)
except ValueError:
pass
if isinstance(rows_for_hpo, (int, float)) and \
not math.isnan(rows_for_hpo):
rows_for_table.append(rows_for_hpo)
else:
rows_for_table.append(float('NaN'))
# error information for the table type
total_per_sheet[table_type] = rows_for_table
# error information for all of the dates
hpo_contributions_by_date[date] = total_per_sheet
return hpo_contributions_by_date, valid_cols
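# Shape of hpo_contributions_by_date (hypothetical names and values):
#   {'march_19_2019': {'drug_exposure_total_row': [1200.0, float('NaN')]}}
# where each list follows the alphabetical order of the HPO sites.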
# This part of the script deals with making sheets where the
# a. SITES are SHEETS
# b. TABLES are ROWS
def generate_site_dfs(sorted_names, sorted_tables,
ordered_dates_str, site_and_date_info,
percentage, file_names, analytics_type):
"""
Function generates a unique dataframe for each site
containing data quality metrics. The dictionary has
the following structure:
a. HPO name:data quality metrics
data quality metrics are stored in a pandas df
The rows of each dataframe should be each table.
The columns of each dataframe are the dates
:param
sorted_names (lst): list of the hpo site names sorted
alphabetically
sorted_tables (lst): list of the different table types
sorted alphabetically
ordered_dates_str (lst): list of the different dates for
the data analysis outputs. goes from oldest to most
recent.
site_and_date_info (dict): dictionary with key:value
of date:additional dictionaries that contain metrics
for each HPO's data quality by type
percentage (boolean): used to determine whether or not the
number is a 'flawed' record count (e.g. duplicates)
versus the percentage of 'acceptable' records
file_names (list): list of the user-specified Excel files that are
in the current directory. Files are analytics reports to be
scanned.
analytics_type (str): the data quality metric the user wants to
investigate
:return:
df_dictionary_by_site (dict): dictionary with key:value
of table type: pandas df with the format described above
"""
total_dfs = []
sorted_tables.append('total')
# ignoring 'total' and 'aggregate info'
tables_only, hpo_names_only = sorted_tables[:-1], sorted_names[:-1]
# df generation for each HPO and 'total'
for _ in range(len(hpo_names_only)):
new_df = pd.DataFrame({'table_type': sorted_tables})
total_dfs.append(new_df)
for new_sheet_num, site in enumerate(hpo_names_only):
df_in_question = total_dfs[new_sheet_num]
for date in ordered_dates_str:
tot_errs_date = 0
new_col_info = []
site_info = site_and_date_info[date][site]
for table in tables_only: # populating the row
new_col_info.append(site_info[table])
tot_errs_date += site_info[table]
# creating the 'aggregate' statistic
if not percentage:
new_col_info.append(tot_errs_date)
else:
weighted_errs = determine_overall_percent_of_an_hpo(
site_and_date_info, file_names, sorted_names,
sorted_tables, date, site)
new_col_info.append(weighted_errs)
df_in_question[date] = new_col_info
# make the aggregate sheet
aggregate_dfs = generate_aggregate_sheets(
file_names, tables_only, site_and_date_info,
ordered_dates_str, percentage, analytics_type,
sorted_names)
total_dfs.extend(aggregate_dfs)
if len(aggregate_dfs) == 3:
sorted_names.extend(['poorly_defined_rows_total',
'total_rows'])
df_dictionary_by_site = dict(zip(sorted_names, total_dfs))
return df_dictionary_by_site
def generate_nonpercent_aggregate_col(sorted_tables, site_and_date_info,
date):
"""
Function is used to generate a column that shows the SUM
of the data quality metrics for a particular table across all
sites. This column is for one date of analytics.
This function is employed when the TABLES are the ROWS in the
outputted Excel sheet.
    This ONLY adds aggregate info for values that should NOT be
    weighted.
:param
sorted_tables (lst): list of the different table types
sorted alphabetically
site_and_date_info (dict): dictionary with key:value
of date:additional dictionaries that contain metrics
for each HPO's data quality by type
date (string): string for the date used to generate the column.
should be used to index into the larger data dictionary.
:return:
aggregate_metrics_col (list): list of values representing the
total number of aggregate problems for each table. should follow
the order of "sorted tables". should also represent the column
for only one date.
"""
aggregate_metrics_col = []
info_for_date = site_and_date_info[date]
for table in sorted_tables[:-1]:
total, sites_and_dates_iterated = 0, 0
for site in list(info_for_date.keys())[:-1]:
site_table_info = info_for_date[site][table]
if not math.isnan(site_table_info):
total += site_table_info
sites_and_dates_iterated += 1
aggregate_metrics_col.append(total)
total_across_all_tables = sum(aggregate_metrics_col)
aggregate_metrics_col.append(total_across_all_tables)
return aggregate_metrics_col
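# Example (hypothetical counts): per-table sums [3, 0, 7] append a grand
# total of 10, so the column lists each table's aggregate followed by the
# overall total.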
def generate_aggregate_data_completeness_sheet(
file_names, ordered_dates_str):
"""
Function is used to generate two 'aggregate data' sheets that
show the data quality for various tables across the various
dates. The two sheets generated show:
a. the total number of 'poorly defined' rows for each of the
table types
b. the relative proportion by which each table contributes to
the total number of 'poorly defined' rows. for instance, if
half of the poorly defined rows are from the condition_occurrence
table, condition_occurrence will have a value of 0.5
:param
file_names (list): list of the user-specified Excel files that are
in the current directory. Files are analytics reports to be
scanned.
ordered_dates_str (lst): list of the different dates for
the data analysis outputs. goes from oldest to most
recent.
:return:
total_dfs (lst): list with the two pandas dataframes described
above.
"""
# need to generate aggregate df before any dates
hpo_total_rows_by_date, valid_cols_tot = generate_hpo_contribution(
file_names, 'total')
hpo_errors_by_date, valid_cols_val = generate_hpo_contribution(
file_names, 'valid')
valid_cols_tot.append('total')
aggregate_df_proportion = pd.DataFrame({'table_type': valid_cols_tot})
aggregate_df_error = pd.DataFrame({'table_type': valid_cols_tot})
    aggregate_df_total = pd.DataFrame({'table_type': valid_cols_tot})
import argparse
import json
import logging
import sys
import fiona
import geopandas as gpd
import numpy as np
import pandas as pd
import torch
from eolearn.core.utils.fs import get_aws_credentials, join_path
from sentinelhub import SHConfig
from hiector.utils.aws_utils import LocalFile
from hiector.utils.training_data import filter_dataframe, train_test_val_split
stdout_handler = logging.StreamHandler(sys.stdout)
handlers = [stdout_handler]
logging.basicConfig(
level=logging.INFO, format="[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s", handlers=handlers
)
LOGGER = logging.getLogger(__name__)
parser = argparse.ArgumentParser(description="Execute training and evaluation using the SSRDD model.\n")
parser.add_argument("--config", type=str, help="Path to config file with execution parameters", required=True)
args = parser.parse_args()
def prepare_data(config):
data_dir = config["data_dir"]
sh_config = SHConfig()
if config["aws_profile"]:
sh_config = get_aws_credentials(aws_profile=config["aws_profile"], config=sh_config)
input_gpkg_path = join_path(data_dir, config["input_dataframe_filename"])
dfs = []
with LocalFile(input_gpkg_path, mode="r", config=sh_config) as local_file:
for layername in fiona.listlayers(local_file.path):
scale = int(layername.split("_")[1])
# Assumes layers to be named as PATCHLETS_<SCALE>_<CRS_CODE>
if scale in config["scale_sizes"]:
LOGGER.info(f"Reading layer: {layername}")
df = gpd.read_file(local_file.path, layer=layername)
df["CRS"] = str(df.crs)
df["LAYER_NAME"] = layername
df["SCALE"] = scale
# Convert to WGS84, because we want stuff from different CRSes to be stored together
df = df.to_crs("epsg:4326")
dfs.append(df)
LOGGER.info("Concatenating layers together.")
    dataframe = pd.concat(dfs)
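    # All layers were reprojected to EPSG:4326 above, so geometries that
    # originated in different CRSes can safely share one dataframe.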
import numpy as np
from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn import preprocessing, svm
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
import statsmodels.api as sm
import matplotlib.dates as mdates
import warnings
import itertools
import dateutil
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV as gsc
from sklearn.linear_model import Ridge,Lasso
from sklearn.neural_network import MLPRegressor
def main ():
# Using svm
data=pd.read_csv('Original_with_dummies.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
S1,S2=AQI_SVM(data)
S3,S4=AQI_Feature_importance_SVM(data)
S5,S6=AQI_Domain_Knowledge_SVM(data)
S7,S8=AQI_without_Domain_Knowledge_SVM(data)
##Linear Regression
data=pd.read_csv('Original_with_dummies.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
LR1,LR2=AQI(data)
LR3,LR4=AQI_Feature_importance(data)
    LR5,LR6=AQI_Domain_Knowledge(data)
LR7,LR8=AQI_without_Domain_Knowledge(data)
    ## Predicting for next day
data=pd.read_csv('Original_with_dummies.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
normalize(data)
y=pd.read_csv('AQI_prediction_add.csv')
LR_F1,LR_F2=AQI_Future(data,y.AQI_predicted)
LR_F3,LR_F4=AQI_Feature_importance_Future(data,y.AQI_predicted)
LR_F5,LR_F6=AQI_Domain_Knowledge_Future(data,y.AQI_predicted)
LR_F7,LR_F8=AQI_without_Domain_Knowledge_Future(data,y.AQI_predicted)
##Predicting for Autumn Season
data=pd.read_csv('autumn_data.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
data=pd.get_dummies(data, columns=[' _conds'], prefix = [' _conds'])
data=pd.get_dummies(data, columns=[' _wdire'], prefix = [' _wdire'])
data=pd.get_dummies(data, columns=['Type'], prefix = ['Type'])
LR_A1,LR_A2=AQI(data)
LR_A3,LR_A4=AQI_Feature_importance(data)
LR_A5,LR_A6=AQI_Domain_Knowledge(data)
LR_A7,LR_A8=AQI_without_Domain_Knowledge(data)
##Predicting for Summer Season
data=pd.read_csv('summer_data.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
data=pd.get_dummies(data, columns=[' _conds'], prefix = [' _conds'])
data=pd.get_dummies(data, columns=[' _wdire'], prefix = [' _wdire'])
data=pd.get_dummies(data, columns=['Type'], prefix = ['Type'])
LR_S1,LR_S2=AQI(data)
LR_S3,LR_S4=AQI_Feature_importance(data)
LR_S5,LR_S6=AQI_Domain_Knowledge(data)
LR_S7,LR_S8=AQI_without_Domain_Knowledge(data)
##Predicting for Winter Season
data=pd.read_csv('winter_data.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
data=pd.get_dummies(data, columns=[' _conds'], prefix = [' _conds'])
data=pd.get_dummies(data, columns=[' _wdire'], prefix = [' _wdire'])
data=pd.get_dummies(data, columns=['Type'], prefix = ['Type'])
LR_W1,LR_W2=AQI(data)
LR_W3,LR_W4=AQI_Feature_importance(data)
LR_W5,LR_W6=AQI_Domain_Knowledge(data)
LR_W7,LR_W8=AQI_without_Domain_Knowledge(data)
##Using Ridge
data = pd.read_csv('Original_with_dummies.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
h = BestParams(data)
## Using all features
R1,R2=AQI_Ridge(data,h)
R3,R4=AQI_Feature_importance_Ridge(data,h)
R5,R6=AQI_Domain_Knowledge_Ridge(data,h)
R7,R8=AQI_without_Domain_Knowledge_Ridge(data,h)
##Future
data = pd.read_csv('Original_with_dummies.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
h = BestParams(data)
y = pd.read_csv('AQI_prediction_add.csv')
R_F1,R_F2=AQI_Future_Ridge(data, y.AQI_predicted,h)
R_F3,R_F4=AQI_Feature_importance_Future_Ridge(data, y.AQI_predicted,h)
R_F5,R_F6=AQI_Domain_Knowledge_Future_Ridge(data, y.AQI_predicted,h)
R_F7,R_F8=AQI_without_Domain_Knowledge_Future_Ridge(data, y.AQI_predicted,h)
##using Lasso
    data=pd.read_csv('Original_with_dummies.csv')
import os
import pandas as pd
import seaborn as sns
import matplotlib.dates as d
import matplotlib.pyplot as plt
from ..utils import everion_keys
from ..utils.plotter_helper import PlotterHelper
from ..utils.data_aggregator import DataAggregator
from ..patient.patient_data_loader import PatientDataLoader
sns.set()
class Plotter:
loader = PatientDataLoader()
aggregator = DataAggregator()
def plot_patient(self, in_dir, out_dir, in_file_name):
patient_id = in_file_name[:3]
out_dir_subset = os.path.join(out_dir, 'subset')
out_dir_quality = os.path.join(out_dir, 'quality')
out_dir_all = os.path.join(out_dir, 'all')
out_dir_qv = os.path.join(out_dir, 'quality_values')
if not os.path.exists(out_dir):
os.mkdir(out_dir)
if not os.path.exists(out_dir_all):
os.mkdir(out_dir_all)
if not os.path.exists(out_dir_quality):
os.mkdir(out_dir_quality)
if not os.path.exists(out_dir_subset):
os.mkdir(out_dir_subset)
if not os.path.exists(out_dir_qv):
os.mkdir(out_dir_qv)
df = self.loader.load_everion_patient_data(in_dir, in_file_name, ';')
if df.empty:
return
df['barometer_pressure'] = df['barometer_pressure'] / 100
self.generate_subset_plots(df, out_dir_subset, patient_id)
self.generate_all_plots(df, out_dir_all, patient_id)
self.generate_quality_plots(df, out_dir_quality, patient_id)
self.generate_quality_value_plots(df, out_dir_qv, patient_id)
def plot_patient_mixed_vital_raw(self, in_dir, out_dir, in_file_name, keys, start_idx, end_idx):
patient_id = in_file_name[start_idx:end_idx]
if not os.path.exists(out_dir):
os.mkdir(out_dir)
df = self.loader.load_everion_patient_data(in_dir, in_file_name, ';')
if df.empty:
return
df['hour'] = df['timestamp'].dt.hour
df['day'] = df['timestamp'].dt.day
df['month'] = df['timestamp'].dt.month
self.plot_keys(df, patient_id, os.path.join(out_dir, patient_id + '.png'), keys)
def generate_subset_plots(self, df, out_dir_subset, patient_id):
keys = {'heart_rate', 'heart_rate_variability', 'oxygen_saturation', 'core_temperature', 'respiration_rate'}
self.plot_keys(df, patient_id, os.path.join(out_dir_subset, patient_id + '_subset' + '.png'), keys)
def generate_all_plots(self, df, out_dir_all, patient_id):
self.plot_keys(df, patient_id, os.path.join(out_dir_all, patient_id + '_all' + '.png'), everion_keys.ALL_VITAL)
def generate_quality_plots(self, df, out_dir_quality, patient_id):
keys = {'core_temperature_quality', 'respiration_rate_quality', 'heart_rate_variability_quality',
'energy_quality', 'activity_classification_quality', 'oxygen_saturation_quality', 'heart_rate_quality'}
self.plot_keys(df, patient_id, os.path.join(out_dir_quality, patient_id + '_quality' + '.png'), keys)
def generate_quality_value_plots(self, df, out_dir_qv, patient_id):
keys = {'core_temperature', 'core_temperature_quality'}
self.plot_keys(df, patient_id, os.path.join(out_dir_qv, patient_id + '_t_qv' + '.png'), keys)
keys = {'respiration_rate', 'respiration_rate_quality'}
self.plot_keys(df, patient_id, os.path.join(out_dir_qv, patient_id + '_rr_qv' + '.png'), keys)
keys = {'heart_rate_variability', 'heart_rate_variability_quality'}
self.plot_keys(df, patient_id, os.path.join(out_dir_qv, patient_id + '_hrv_qv' + '.png'), keys)
keys = {'heart_rate', 'heart_rate_quality'}
self.plot_keys(df, patient_id, os.path.join(out_dir_qv, patient_id + '_hr_qv' + '.png'), keys)
keys = {'oxygen_saturation', 'oxygen_saturation_quality'}
self.plot_keys(df, patient_id, os.path.join(out_dir_qv, patient_id + '_spo2_qv' + '.png'), keys)
def plot_keys(self, df, patient_id, out_filename, keys):
fig, ax = plt.subplots(figsize=[20, 6])
mdates = d.date2num(df['timestamp'])
for key in keys:
plt.plot_date(mdates, df[key], tz=None, xdate=True, linewidth=0.5,
fmt="-")
formatter = d.DateFormatter('%m/%d/%y %H:%M:%S')
ax.xaxis.set_major_formatter(formatter)
ax.xaxis.set_tick_params(rotation=30, labelsize=10)
plt.legend(bbox_to_anchor=(0.95, 1.15), loc='upper left', labels = keys)
plt.xlabel('Time [sec]')
plt.title('Patient: ' + patient_id)
fig.savefig(out_filename, bbox_inches='tight')
plt.close(fig)
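    # Note: d.date2num converts the timestamps into Matplotlib's float date
    # representation, which the DateFormatter above renders back as
    # '%m/%d/%y %H:%M:%S' tick labels.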
def plot_hourly_lines(self, properties):
for filename in os.listdir(properties.in_dir):
if not (filename.endswith('csv')):
continue
patient_id = filename[properties.start_idx:properties.end_idx]
print("processing file " + filename + " with pid=" + patient_id + " ...")
df = self.loader.load_everion_patient_data(properties.in_dir, filename, ';')
if not df.empty:
df_hm = self.aggregator.aggregate_data_hourly(df, properties)
out_file_path = os.path.join(properties.out_dir, 'Hourly_lines_' + patient_id + '.png')
PlotterHelper.save_custom_plots(out_file_path, df, df_hm, patient_id, self.custom_line_plot,
self.custom_plot_fct_empty, 0, 15, 3)
def custom_line_plot(self, ax, custom_plot_fct, df_hm, font_size, x, x_daily_lines, x_ticks):
color_dict = {'HR': 'limegreen', 'HRV': 'silver', 'RR': 'yellow', 'SpO2': 'deepskyblue', 'Temp': 'white'}
ax0 = df_hm.plot(xticks=x_ticks, figsize=(15,3), color=[color_dict.get(x, '#333333') for x in df_hm.columns])
ax0.set_facecolor('dimgray')
plt.grid(color='silver', linestyle='--', linewidth=0.7)
def custom_plot_fct_empty(self):
return
def plot_hourly_lines_subplots(self, properties):
for filename in os.listdir(properties.in_dir):
if not (filename.endswith('csv')):
continue
patient_id = filename[properties.start_idx:properties.end_idx]
print("processing file " + filename + " with pid=" + patient_id + " ...")
df = self.loader.load_everion_patient_data(properties.in_dir, filename, ';')
if not df.empty:
df_hm = self.aggregator.aggregate_data_hourly(df, properties)
out_file_path = os.path.join(properties.out_dir, 'Hourly_lines2_' + patient_id + '.png')
PlotterHelper.save_custom_plots(out_file_path, df, df_hm, patient_id, PlotterHelper.custom_subplots,
self.custom_plot_fct, 0, 15, 3)
def custom_plot_fct(self, ax, font_size, key, signal, x):
ax.plot(x, signal.transpose())
ax.set_ylabel(key, fontsize=font_size)
ax.set_yticks([])
def plot_signals_and_labels(self, properties):
for filename in os.listdir(properties.in_dir):
if not (filename.endswith('csv')):
continue
patient_id = filename[properties.start_idx:properties.end_idx]
print("processing file " + filename + " with pid=" + patient_id + " ...")
in_file_suffix = '_storage-vital'
filename_l = patient_id + 'L' + in_file_suffix + '.csv'
filename_r = patient_id + 'R' + in_file_suffix + '.csv'
df_l = self.loader.load_everion_patient_data(properties.in_dir, filename_l, ';')
df_r = self.loader.load_everion_patient_data(properties.in_dir, filename_r, ';')
keys = ['HR', 'DeMorton', 'DeMortonLabel']
if not df_l.empty:
df_left = df_l.set_index("timestamp")
df_left = df_left[keys]
df_left = pd.concat([df_left], keys=["left"], axis=1)
df_left = df_left.reorder_levels([1, 0], axis=1)
else:
df_left = df_l
if not df_r.empty:
df_right = df_r.set_index("timestamp")
df_right = df_right[keys]
            df_right = pd.concat([df_right], keys=["right"], axis=1)
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 7 09:40:49 2018
@author: yuwei
"""
import pandas as pd
import numpy as np
import math
import random
import time
import scipy as sp
import xgboost as xgb
def loadData():
    "Load the data"
trainSet = pd.read_table('round1_ijcai_18_train_20180301.txt',sep=' ')
testSet = pd.read_table('round1_ijcai_18_test_a_20180301.txt',sep=' ')
return trainSet,testSet
def splitData(trainSet,testSet):
    "Split the datasets by time"
    # Convert test-set timestamps to standard datetime strings
time_local = testSet.context_timestamp.map(lambda x :time.localtime(x))
time_local = time_local.map(lambda x :time.strftime("%Y-%m-%d %H:%M:%S",x))
testSet['context_timestamp'] = time_local
    # Convert training-set timestamps to standard datetime strings
time_local = trainSet.context_timestamp.map(lambda x :time.localtime(x))
time_local = time_local.map(lambda x :time.strftime("%Y-%m-%d %H:%M:%S",x))
trainSet['context_timestamp'] = time_local
del time_local
    # Process the item_category_list attribute of the training set
trainSet['item_category_list'] = trainSet.item_category_list.map(lambda x :x.split(';'))
trainSet['item_category_list_2'] = trainSet.item_category_list.map(lambda x :x[1])
trainSet['item_category_list_3'] = trainSet.item_category_list.map(lambda x :x[2] if len(x) >2 else -1)
trainSet['item_category_list_2'] = list(map(lambda x,y : x if (y == -1) else y,trainSet['item_category_list_2'],trainSet['item_category_list_3']))
    # Process the item_category_list attribute of the test set
testSet['item_category_list'] = testSet.item_category_list.map(lambda x :x.split(';'))
testSet['item_category_list_2'] = testSet.item_category_list.map(lambda x :x[1])
testSet['item_category_list_3'] = testSet.item_category_list.map(lambda x :x[2] if len(x) >2 else -1)
testSet['item_category_list_2'] = list(map(lambda x,y : x if (y == -1) else y,testSet['item_category_list_2'],testSet['item_category_list_3']))
del trainSet['item_category_list_3'];del testSet['item_category_list_3'];
    # Compute the rank of the item category within predict_category_property
trainSet['predict_category'] = trainSet['predict_category_property'].map(lambda x :[y.split(':')[0] for y in x.split(';')])
trainSet['predict_category_property_rank'] = list(map(lambda x,y:y.index(x) if x in y else -1,trainSet['item_category_list_2'],trainSet['predict_category']))
testSet['predict_category'] = testSet['predict_category_property'].map(lambda x :[y.split(':')[0] for y in x.split(';')])
testSet['predict_category_property_rank'] = list(map(lambda x,y:y.index(x) if x in y else -1,testSet['item_category_list_2'],testSet['predict_category']))
    # Count categories shared between item_category_list and predict_category
trainSet['item_category_count'] = list(map(lambda x,y:len(set(x)&set(y)),trainSet.item_category_list,trainSet.predict_category))
testSet['item_category_count'] = list(map(lambda x,y:len(set(x)&set(y)),testSet.item_category_list,testSet.predict_category))
    # Count of categories not shared (NOTE: this overwrites the shared count above)
trainSet['item_category_count'] = list(map(lambda x,y:len(set(x)) - len(set(x)&set(y)),trainSet.item_category_list,trainSet.predict_category))
testSet['item_category_count'] = list(map(lambda x,y:len(set(x)) - len(set(x)&set(y)),testSet.item_category_list,testSet.predict_category))
    del trainSet['predict_category']; del testSet['predict_category']
    "Split the dataset"
    # Test set: features from Sep 23-24, labels on Sep 25
test = testSet
testFeat = trainSet[trainSet['context_timestamp']>'2018-09-23']
    # Validation set: features from Sep 22-23, labels on Sep 24
validate = trainSet[trainSet['context_timestamp']>'2018-09-24']
validateFeat = trainSet[(trainSet['context_timestamp']>'2018-09-22') & (trainSet['context_timestamp']<'2018-09-24')]
    # Training set: features from Sep 21-22 with labels on Sep 23; Sep 20-21 -> Sep 22; Sep 19-20 -> Sep 21; Sep 18-19 -> Sep 20
    # Label windows
train1 = trainSet[(trainSet['context_timestamp']>'2018-09-23') & (trainSet['context_timestamp']<'2018-09-24')]
train2 = trainSet[(trainSet['context_timestamp']>'2018-09-22') & (trainSet['context_timestamp']<'2018-09-23')]
train3 = trainSet[(trainSet['context_timestamp']>'2018-09-21') & (trainSet['context_timestamp']<'2018-09-22')]
train4 = trainSet[(trainSet['context_timestamp']>'2018-09-20') & (trainSet['context_timestamp']<'2018-09-21')]
    # Feature windows
trainFeat1 = trainSet[(trainSet['context_timestamp']>'2018-09-21') & (trainSet['context_timestamp']<'2018-09-23')]
trainFeat2 = trainSet[(trainSet['context_timestamp']>'2018-09-20') & (trainSet['context_timestamp']<'2018-09-22')]
trainFeat3 = trainSet[(trainSet['context_timestamp']>'2018-09-19') & (trainSet['context_timestamp']<'2018-09-21')]
trainFeat4 = trainSet[(trainSet['context_timestamp']>'2018-09-18') & (trainSet['context_timestamp']<'2018-09-20')]
return test,testFeat,validate,validateFeat,train1,trainFeat1,train2,trainFeat2,train3,trainFeat3,train4,trainFeat4
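# Sliding-window summary (feature days -> label day, all September 2018):
#   train:    21-22 -> 23, 20-21 -> 22, 19-20 -> 21, 18-19 -> 20
#   validate: 22-23 -> 24
#   test:     23-24 -> 25 (labels come from the round1 test file)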
def modelXgb(train,test):
    "XGBoost model"
train_y = train['is_trade'].values
# train_x = train.drop(['item_brand_id','item_city_id','user_id','shop_id','context_id','instance_id', 'item_id','item_category_list','item_property_list', 'context_timestamp',
# 'predict_category_property','is_trade'
# ],axis=1).values
# test_x = test.drop(['item_brand_id','item_city_id','user_id','shop_id','context_id','instance_id', 'item_id','item_category_list','item_property_list', 'context_timestamp',
# 'predict_category_property','is_trade'
# ],axis=1).values
# test_x = test.drop(['item_brand_id','item_city_id','user_id','shop_id','context_id','instance_id', 'item_id','item_category_list','item_property_list', 'context_timestamp',
# 'predict_category_property'
# ],axis=1).values
    # Based on Pearson correlation coefficients, drop attributes with correlation below -0.2
train_x = train.drop(['item_brand_id',
'item_city_id','user_id','shop_id','context_id',
'instance_id', 'item_id','item_category_list',
'item_property_list', 'context_timestamp',
'predict_category_property','is_trade',
'item_price_level','user_rank_down',
'item_category_list_2_not_buy_count',
'item_category_list_2_count',
'user_first'
# 'user_count_label',
# 'item_city_not_buy_count',
# 'item_city_count',
# 'user_shop_rank_down',
# 'item_city_buy_count',
# 'user_item_rank_down',
# 'shop_score_description',
# 'shop_review_positive_rate',
# 'shop_score_delivery',
# 'shop_score_service',
],axis=1).values
# test_x = test.drop(['item_brand_id',
# 'item_city_id','user_id','shop_id','context_id',
# 'instance_id', 'item_id','item_category_list',
# 'item_property_list', 'context_timestamp',
# 'predict_category_property','is_trade',
# 'item_price_level','user_rank_down',
# 'item_category_list_2_not_buy_count',
# 'item_category_list_2_count',
# 'user_first',
# 'user_count_label',
# 'item_city_not_buy_count',
# 'item_city_count',
# 'user_shop_rank_down',
# 'item_city_buy_count',
# 'user_item_rank_down',
# 'shop_score_description',
# 'shop_review_positive_rate',
# 'shop_score_delivery',
# 'shop_score_service'
# ],axis=1).values
test_x = test.drop(['item_brand_id',
'item_city_id','user_id','shop_id','context_id',
'instance_id', 'item_id','item_category_list',
'item_property_list', 'context_timestamp',
'predict_category_property',
'item_price_level','user_rank_down',
'item_category_list_2_not_buy_count',
'item_category_list_2_count',
'user_first',
# 'user_count_label',
# 'item_city_not_buy_count',
# 'item_city_count',
# 'user_shop_rank_down',
# 'item_city_buy_count',
# 'user_item_rank_down',
# 'shop_score_description',
# 'shop_review_positive_rate',
# 'shop_score_delivery',
# 'shop_score_service'
],axis=1).values
dtrain = xgb.DMatrix(train_x, label=train_y)
dtest = xgb.DMatrix(test_x)
    # Model parameters
params = {'booster': 'gbtree',
'objective':'binary:logistic',
'eval_metric':'logloss',
'eta': 0.03,
'max_depth': 5, # 6
'colsample_bytree': 0.8,#0.8
'subsample': 0.8,
'scale_pos_weight': 1,
'min_child_weight': 18 # 2
}
    # Train
watchlist = [(dtrain,'train')]
bst = xgb.train(params, dtrain, num_boost_round=700,evals=watchlist)
    # Predict
predict = bst.predict(dtest)
# test_xy = test[['instance_id','is_trade']]
test_xy = test[['instance_id']]
test_xy['predicted_score'] = predict
return test_xy
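# Hypothetical usage sketch: given merged feature/label frames per window,
#   submission = modelXgb(train_with_feats, test_with_feats)
# 'predicted_score' then holds the predicted trade probability per instance_id.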
def get_item_feat(data,dataFeat):
    "Feature extraction for item"
result = pd.DataFrame(dataFeat['item_id'])
    result = result.drop_duplicates(['item_id'],keep='first')
    "1. Count item occurrences"
dataFeat['item_count'] = dataFeat['item_id']
feat = pd.pivot_table(dataFeat,index=['item_id'],values='item_count',aggfunc='count').reset_index()
del dataFeat['item_count']
    result = pd.merge(result,feat,on=['item_id'],how='left')
    "2. Count historical purchases of the item"
dataFeat['item_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['item_id'],values='item_buy_count',aggfunc='sum').reset_index()
del dataFeat['item_buy_count']
    result = pd.merge(result,feat,on=['item_id'],how='left')
    "3. Item conversion-rate feature"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.item_buy_count,result.item_count))
    result['item_buy_ratio'] = buy_ratio
    "4. Count historical non-purchases of the item"
result['item_not_buy_count'] = result['item_count'] - result['item_buy_count']
return result
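# The same four-step pattern (occurrence count, purchase count, conversion
# ratio, non-purchase count) repeats below for user, context, shop and
# context_timestamp, keyed on the respective id column.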
def get_user_feat(data,dataFeat):
    "Feature extraction for user"
result = pd.DataFrame(dataFeat['user_id'])
    result = result.drop_duplicates(['user_id'],keep='first')
    "1. Count user occurrences"
dataFeat['user_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id'],values='user_count',aggfunc='count').reset_index()
del dataFeat['user_count']
result = pd.merge(result,feat,on=['user_id'],how='left')
"2.统计user历史被购买的次数"
dataFeat['user_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id'],values='user_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_buy_count']
result = pd.merge(result,feat,on=['user_id'],how='left')
"3.统计user转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_buy_count,result.user_count))
result['user_buy_ratio'] = buy_ratio
"4.统计user历史未被够买的次数"
result['user_not_buy_count'] = result['user_count'] - result['user_buy_count']
return result
def get_context_feat(data,dataFeat):
"context的特征提取"
result = pd.DataFrame(dataFeat['context_id'])
result = result.drop_duplicates(['context_id'],keep='first')
"1.统计context出现次数"
dataFeat['context_count'] = dataFeat['context_id']
feat = pd.pivot_table(dataFeat,index=['context_id'],values='context_count',aggfunc='count').reset_index()
del dataFeat['context_count']
result = pd.merge(result,feat,on=['context_id'],how='left')
"2.统计context历史被购买的次数"
dataFeat['context_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['context_id'],values='context_buy_count',aggfunc='sum').reset_index()
del dataFeat['context_buy_count']
result = pd.merge(result,feat,on=['context_id'],how='left')
"3.统计context转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.context_buy_count,result.context_count))
result['context_buy_ratio'] = buy_ratio
"4.统计context历史未被够买的次数"
result['context_not_buy_count'] = result['context_count'] - result['context_buy_count']
return result
def get_shop_feat(data,dataFeat):
"shop的特征提取"
result = pd.DataFrame(dataFeat['shop_id'])
result = result.drop_duplicates(['shop_id'],keep='first')
"1.统计shop出现次数"
dataFeat['shop_count'] = dataFeat['shop_id']
feat = pd.pivot_table(dataFeat,index=['shop_id'],values='shop_count',aggfunc='count').reset_index()
del dataFeat['shop_count']
result = pd.merge(result,feat,on=['shop_id'],how='left')
"2.统计shop历史被购买的次数"
dataFeat['shop_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['shop_id'],values='shop_buy_count',aggfunc='sum').reset_index()
del dataFeat['shop_buy_count']
result = pd.merge(result,feat,on=['shop_id'],how='left')
"3.统计shop转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.shop_buy_count,result.shop_count))
result['shop_buy_ratio'] = buy_ratio
"4.统计shop历史未被够买的次数"
result['shop_not_buy_count'] = result['shop_count'] - result['shop_buy_count']
return result
def get_timestamp_feat(data,dataFeat):
"context_timestamp的特征提取"
result = pd.DataFrame(dataFeat['context_timestamp'])
result = result.drop_duplicates(['context_timestamp'],keep='first')
"1.统计context_timestamp出现次数"
dataFeat['context_timestamp_count'] = dataFeat['context_timestamp']
feat = pd.pivot_table(dataFeat,index=['context_timestamp'],values='context_timestamp_count',aggfunc='count').reset_index()
del dataFeat['context_timestamp_count']
result = pd.merge(result,feat,on=['context_timestamp'],how='left')
"2.统计context_timestamp历史被购买的次数"
dataFeat['context_timestamp_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['context_timestamp'],values='context_timestamp_buy_count',aggfunc='sum').reset_index()
del dataFeat['context_timestamp_buy_count']
result = pd.merge(result,feat,on=['context_timestamp'],how='left')
"3.统计context_timestamp转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.context_timestamp_buy_count,result.context_timestamp_count))
result['context_timestamp_buy_ratio'] = buy_ratio
"4.统计context_timestamp历史未被够买的次数"
result['context_timestamp_not_buy_count'] = result['context_timestamp_count'] - result['context_timestamp_buy_count']
return result
def get_item_brand_feat(data,dataFeat):
"item_brand的特征提取"
result = pd.DataFrame(dataFeat['item_brand_id'])
result = result.drop_duplicates(['item_brand_id'],keep='first')
"1.统计item_brand出现次数"
dataFeat['item_brand_count'] = dataFeat['item_brand_id']
feat = pd.pivot_table(dataFeat,index=['item_brand_id'],values='item_brand_count',aggfunc='count').reset_index()
del dataFeat['item_brand_count']
result = pd.merge(result,feat,on=['item_brand_id'],how='left')
"2.统计item_brand历史被购买的次数"
dataFeat['item_brand_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['item_brand_id'],values='item_brand_buy_count',aggfunc='sum').reset_index()
del dataFeat['item_brand_buy_count']
result = pd.merge(result,feat,on=['item_brand_id'],how='left')
"3.统计item_brand转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.item_brand_buy_count,result.item_brand_count))
result['item_brand_buy_ratio'] = buy_ratio
"4.统计item_brand历史未被够买的次数"
result['item_brand_not_buy_count'] = result['item_brand_count'] - result['item_brand_buy_count']
return result
def get_item_city_feat(data,dataFeat):
"item_city的特征提取"
result = pd.DataFrame(dataFeat['item_city_id'])
result = result.drop_duplicates(['item_city_id'],keep='first')
"1.统计item_city出现次数"
dataFeat['item_city_count'] = dataFeat['item_city_id']
feat = pd.pivot_table(dataFeat,index=['item_city_id'],values='item_city_count',aggfunc='count').reset_index()
del dataFeat['item_city_count']
result = pd.merge(result,feat,on=['item_city_id'],how='left')
"2.统计item_city历史被购买的次数"
dataFeat['item_city_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['item_city_id'],values='item_city_buy_count',aggfunc='sum').reset_index()
del dataFeat['item_city_buy_count']
result = pd.merge(result,feat,on=['item_city_id'],how='left')
"3.统计item_city转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.item_city_buy_count,result.item_city_count))
result['item_city_buy_ratio'] = buy_ratio
"4.统计item_city历史未被够买的次数"
result['item_city_not_buy_count'] = result['item_city_count'] - result['item_city_buy_count']
return result
def get_user_gender_feat(data,dataFeat):
"user_gender的特征提取"
result = pd.DataFrame(dataFeat['user_gender_id'])
result = result.drop_duplicates(['user_gender_id'],keep='first')
"1.统计user_gender出现次数"
dataFeat['user_gender_count'] = dataFeat['user_gender_id']
feat = pd.pivot_table(dataFeat,index=['user_gender_id'],values='user_gender_count',aggfunc='count').reset_index()
del dataFeat['user_gender_count']
result = pd.merge(result,feat,on=['user_gender_id'],how='left')
"2.统计user_gender历史被购买的次数"
dataFeat['user_gender_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_gender_id'],values='user_gender_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_gender_buy_count']
result = pd.merge(result,feat,on=['user_gender_id'],how='left')
"3.统计user_gender转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_gender_buy_count,result.user_gender_count))
result['user_gender_buy_ratio'] = buy_ratio
"4.统计user_gender历史未被够买的次数"
result['user_gender_not_buy_count'] = result['user_gender_count'] - result['user_gender_buy_count']
return result
def get_user_occupation_feat(data,dataFeat):
"user_occupation的特征提取"
result = pd.DataFrame(dataFeat['user_occupation_id'])
result = result.drop_duplicates(['user_occupation_id'],keep='first')
"1.统计user_occupation出现次数"
dataFeat['user_occupation_count'] = dataFeat['user_occupation_id']
feat = pd.pivot_table(dataFeat,index=['user_occupation_id'],values='user_occupation_count',aggfunc='count').reset_index()
del dataFeat['user_occupation_count']
result = pd.merge(result,feat,on=['user_occupation_id'],how='left')
"2.统计user_occupation历史被购买的次数"
dataFeat['user_occupation_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_occupation_id'],values='user_occupation_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_occupation_buy_count']
result = pd.merge(result,feat,on=['user_occupation_id'],how='left')
"3.统计user_occupation转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_occupation_buy_count,result.user_occupation_count))
result['user_occupation_buy_ratio'] = buy_ratio
"4.统计user_occupation历史未被够买的次数"
result['user_occupation_not_buy_count'] = result['user_occupation_count'] - result['user_occupation_buy_count']
return result
def get_context_page_feat(data,dataFeat):
"context_page的特征提取"
result = pd.DataFrame(dataFeat['context_page_id'])
result = result.drop_duplicates(['context_page_id'],keep='first')
"1.统计context_page出现次数"
dataFeat['context_page_count'] = dataFeat['context_page_id']
feat = pd.pivot_table(dataFeat,index=['context_page_id'],values='context_page_count',aggfunc='count').reset_index()
del dataFeat['context_page_count']
result = pd.merge(result,feat,on=['context_page_id'],how='left')
"2.统计context_page历史被购买的次数"
dataFeat['context_page_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['context_page_id'],values='context_page_buy_count',aggfunc='sum').reset_index()
del dataFeat['context_page_buy_count']
result = pd.merge(result,feat,on=['context_page_id'],how='left')
"3.统计context_page转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.context_page_buy_count,result.context_page_count))
result['context_page_buy_ratio'] = buy_ratio
"4.统计context_page历史未被够买的次数"
result['context_page_not_buy_count'] = result['context_page_count'] - result['context_page_buy_count']
return result
def get_shop_review_num_level_feat(data,dataFeat):
"context_page的特征提取"
result = pd.DataFrame(dataFeat['shop_review_num_level'])
result = result.drop_duplicates(['shop_review_num_level'],keep='first')
"1.统计shop_review_num_level出现次数"
dataFeat['shop_review_num_level_count'] = dataFeat['shop_review_num_level']
feat = | pd.pivot_table(dataFeat,index=['shop_review_num_level'],values='shop_review_num_level_count',aggfunc='count') | pandas.pivot_table |
import collections
import dask
from dask import delayed
from dask.diagnostics import ProgressBar
import logging
import multiprocessing
import pandas as pd
import numpy as np
import re
import six
import string
import py_stringsimjoin as ssj
from py_stringsimjoin.filter.overlap_filter import OverlapFilter
from py_stringmatching.tokenizer.qgram_tokenizer import QgramTokenizer
from py_stringmatching.tokenizer.whitespace_tokenizer import WhitespaceTokenizer
from py_stringsimjoin.utils.missing_value_handler import get_pairs_with_missing_value
import py_entitymatching.catalog.catalog_manager as cm
from py_entitymatching.utils.catalog_helper import log_info, get_name_for_key, \
add_key_column
from py_entitymatching.blocker.blocker import Blocker
import py_entitymatching.utils.generic_helper as gh
from py_entitymatching.utils.validation_helper import validate_object_type
from py_entitymatching.dask.utils import validate_chunks, get_num_partitions, \
get_num_cores, wrap
logger = logging.getLogger(__name__)
class DaskOverlapBlocker(Blocker):
def __init__(self):
self.stop_words = ['a', 'an', 'and', 'are', 'as', 'at',
'be', 'by', 'for', 'from',
'has', 'he', 'in', 'is', 'it',
'its', 'on', 'that', 'the', 'to',
'was', 'were', 'will', 'with']
logger.warning(
"WARNING THIS BLOCKER IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN "
"RISK.")
self.regex_punctuation = re.compile('[%s]' % re.escape(string.punctuation))
super(DaskOverlapBlocker, self).__init__()
def block_tables(self, ltable, rtable, l_overlap_attr, r_overlap_attr,
rem_stop_words=False, q_val=None, word_level=True, overlap_size=1,
l_output_attrs=None, r_output_attrs=None,
l_output_prefix='ltable_', r_output_prefix='rtable_',
allow_missing=False, verbose=False, show_progress=True,
n_ltable_chunks=1, n_rtable_chunks=1):
"""
WARNING THIS COMMAND IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN RISK.
Blocks two tables based on the overlap of token sets of attribute
values. Finds tuple pairs from left and right tables such that the overlap
between (a) the set of tokens obtained by tokenizing the value of
attribute l_overlap_attr of a tuple from the left table, and (b) the
set of tokens obtained by tokenizing the value of attribute
r_overlap_attr of a tuple from the right table, is above a certain
threshold.
Args:
ltable (DataFrame): The left input table.
rtable (DataFrame): The right input table.
l_overlap_attr (string): The overlap attribute in left table.
r_overlap_attr (string): The overlap attribute in right table.
rem_stop_words (boolean): A flag to indicate whether stop words
(e.g., a, an, the) should be removed from the token sets of the
overlap attribute values (defaults to False).
q_val (int): The value of q to use if the overlap attributes
values are to be tokenized as qgrams (defaults to None).
word_level (boolean): A flag to indicate whether the overlap
attributes should be tokenized as words (i.e, using whitespace
as delimiter) (defaults to True).
overlap_size (int): The minimum number of tokens that must
overlap (defaults to 1).
l_output_attrs (list): A list of attribute names from the left
table to be included in the output candidate set (defaults
to None).
r_output_attrs (list): A list of attribute names from the right
table to be included in the output candidate set (defaults
to None).
l_output_prefix (string): The prefix to be used for the attribute names
coming from the left table in the output
candidate set (defaults to 'ltable\_').
r_output_prefix (string): The prefix to be used for the attribute names
coming from the right table in the output
candidate set (defaults to 'rtable\_').
allow_missing (boolean): A flag to indicate whether tuple pairs
with missing value in at least one of the
blocking attributes should be included in
the output candidate set (defaults to
False). If this flag is set to True, a
tuple in ltable with missing value in the
blocking attribute will be matched with
every tuple in rtable and vice versa.
verbose (boolean): A flag to indicate whether the debug
information should be logged (defaults to False).
show_progress (boolean): A flag to indicate whether progress should
be displayed to the user (defaults to True).
n_ltable_chunks (int): The number of partitions to split the left table (
defaults to 1). If it is set to -1, then the number of
partitions is set to the number of cores in the
machine.
n_rtable_chunks (int): The number of partitions to split the right table (
defaults to 1). If it is set to -1, then the number of
partitions is set to the number of cores in the
machine.
Returns:
A candidate set of tuple pairs that survived blocking (DataFrame).
Raises:
AssertionError: If `ltable` is not of type pandas
DataFrame.
AssertionError: If `rtable` is not of type pandas
DataFrame.
AssertionError: If `l_overlap_attr` is not of type string.
AssertionError: If `r_overlap_attr` is not of type string.
AssertionError: If `l_output_attrs` is not of type of
list.
AssertionError: If `r_output_attrs` is not of type of
list.
AssertionError: If the values in `l_output_attrs` is not of type
string.
AssertionError: If the values in `r_output_attrs` is not of type
string.
AssertionError: If `l_output_prefix` is not of type
string.
AssertionError: If `r_output_prefix` is not of type
string.
AssertionError: If `q_val` is not of type int.
AssertionError: If `word_level` is not of type boolean.
AssertionError: If `overlap_size` is not of type int.
AssertionError: If `verbose` is not of type
boolean.
AssertionError: If `allow_missing` is not of type boolean.
AssertionError: If `show_progress` is not of type
boolean.
AssertionError: If `n_ltable_chunks` is not of type
int.
AssertionError: If `n_rtable_chunks` is not of type
int.
AssertionError: If `l_overlap_attr` is not in the ltable
columns.
            AssertionError: If `r_overlap_attr` is not in the rtable columns.
AssertionError: If `l_output_attrs` are not in the ltable.
AssertionError: If `r_output_attrs` are not in the rtable.
SyntaxError: If `q_val` is set to a valid value and
`word_level` is set to True.
SyntaxError: If `q_val` is set to None and
`word_level` is set to False.
Examples:
>>> from py_entitymatching.dask.dask_overlap_blocker import DaskOverlapBlocker
>>> A = em.read_csv_metadata('path_to_csv_dir/table_A.csv', key='ID')
>>> B = em.read_csv_metadata('path_to_csv_dir/table_B.csv', key='ID')
>>> ob = DaskOverlapBlocker()
# Use all cores
# # Use word-level tokenizer
>>> C1 = ob.block_tables(A, B, 'address', 'address', l_output_attrs=['name'], r_output_attrs=['name'], word_level=True, overlap_size=1, n_ltable_chunks=-1, n_rtable_chunks=-1)
# # Use q-gram tokenizer
>>> C2 = ob.block_tables(A, B, 'address', 'address', l_output_attrs=['name'], r_output_attrs=['name'], word_level=False, q_val=2, n_ltable_chunks=-1, n_rtable_chunks=-1)
# # Include all possible missing values
>>> C3 = ob.block_tables(A, B, 'address', 'address', l_output_attrs=['name'], r_output_attrs=['name'], allow_missing=True, n_ltable_chunks=-1, n_rtable_chunks=-1)
"""
logger.warning(
"WARNING THIS COMMAND IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN "
"RISK.")
# Input validations
self.validate_types_params_tables(ltable, rtable, l_output_attrs,
r_output_attrs, l_output_prefix,
r_output_prefix, verbose, n_ltable_chunks, n_rtable_chunks)
self.validate_types_other_params(l_overlap_attr, r_overlap_attr,
rem_stop_words, q_val, word_level, overlap_size)
self.validate_allow_missing(allow_missing)
self.validate_show_progress(show_progress)
self.validate_overlap_attrs(ltable, rtable, l_overlap_attr, r_overlap_attr)
self.validate_output_attrs(ltable, rtable, l_output_attrs, r_output_attrs)
self.validate_word_level_qval(word_level, q_val)
log_info(logger, 'Required metadata: ltable key, rtable key', verbose)
l_key, r_key = cm.get_keys_for_ltable_rtable(ltable, rtable, logger, verbose)
# validate metadata
cm._validate_metadata_for_table(ltable, l_key, 'ltable', logger, verbose)
cm._validate_metadata_for_table(rtable, r_key, 'rtable', logger, verbose)
# validate input table chunks
validate_object_type(n_ltable_chunks, int, 'Parameter n_ltable_chunks')
validate_object_type(n_rtable_chunks, int,
'Parameter n_rtable_chunks')
validate_chunks(n_ltable_chunks)
validate_chunks(n_rtable_chunks)
if n_ltable_chunks == -1:
n_ltable_chunks = multiprocessing.cpu_count()
ltable_chunks = np.array_split(ltable, n_ltable_chunks)
# preprocess/tokenize ltable
if word_level == True:
tokenizer = WhitespaceTokenizer(return_set=True)
else:
tokenizer = QgramTokenizer(qval=q_val, return_set=True)
preprocessed_tokenized_ltbl = []
# Construct DAG for preprocessing/tokenizing ltable chunks
start_row_id = 0
for i in range(len(ltable_chunks)):
result = delayed(self.process_tokenize_block_attr)(ltable_chunks[i][
l_overlap_attr],
start_row_id,
rem_stop_words, tokenizer)
preprocessed_tokenized_ltbl.append(result)
start_row_id += len(ltable_chunks[i])
preprocessed_tokenized_ltbl = delayed(wrap)(preprocessed_tokenized_ltbl)
# Execute the DAG
if show_progress:
with ProgressBar():
logger.info('Preprocessing/tokenizing ltable')
preprocessed_tokenized_ltbl_vals = preprocessed_tokenized_ltbl.compute(
scheduler="processes", num_workers=multiprocessing.cpu_count())
else:
preprocessed_tokenized_ltbl_vals = preprocessed_tokenized_ltbl.compute(
scheduler="processes", num_workers=multiprocessing.cpu_count())
ltable_processed_dict = {}
for i in range(len(preprocessed_tokenized_ltbl_vals)):
ltable_processed_dict.update(preprocessed_tokenized_ltbl_vals[i])
# build inverted index
inverted_index = self.build_inverted_index(ltable_processed_dict)
if n_rtable_chunks == -1:
n_rtable_chunks = multiprocessing.cpu_count()
rtable_chunks = np.array_split(rtable, n_rtable_chunks)
# Construct the DAG for probing
probe_result = []
start_row_id = 0
for i in range(len(rtable_chunks)):
result = delayed(self.probe)(rtable_chunks[i][r_overlap_attr],
inverted_index, start_row_id, rem_stop_words,
tokenizer, overlap_size)
probe_result.append(result)
start_row_id += len(rtable_chunks[i])
probe_result = delayed(wrap)(probe_result)
# Execute the DAG for probing
if show_progress:
with ProgressBar():
logger.info('Probing using rtable')
probe_result = probe_result.compute(scheduler="processes",
num_workers=multiprocessing.cpu_count())
else:
probe_result = probe_result.compute(scheduler="processes",
num_workers=multiprocessing.cpu_count())
# construct a minimal dataframe that can be used to add more attributes
flat_list = [item for sublist in probe_result for item in sublist]
tmp = pd.DataFrame(flat_list, columns=['fk_ltable_rid', 'fk_rtable_rid'])
fk_ltable = ltable.iloc[tmp.fk_ltable_rid][l_key].values
fk_rtable = rtable.iloc[tmp.fk_rtable_rid][r_key].values
id_vals = list(range(len(flat_list)))
candset = pd.DataFrame.from_dict(
{'_id': id_vals, l_output_prefix+l_key: fk_ltable, r_output_prefix+r_key: fk_rtable})
# set the properties for the candidate set
cm.set_key(candset, '_id')
cm.set_fk_ltable(candset, 'ltable_'+l_key)
cm.set_fk_rtable(candset, 'rtable_'+r_key)
cm.set_ltable(candset, ltable)
cm.set_rtable(candset, rtable)
ret_candset = gh.add_output_attributes(candset, l_output_attrs=l_output_attrs,
r_output_attrs=r_output_attrs,
l_output_prefix=l_output_prefix,
r_output_prefix=r_output_prefix,
validate=False)
# handle missing values
if allow_missing:
missing_value_pairs = get_pairs_with_missing_value(ltable, rtable, l_key,
r_key, l_overlap_attr,
r_overlap_attr,
l_output_attrs,
r_output_attrs,
l_output_prefix,
r_output_prefix, False, False)
missing_value_pairs.insert(0, '_id', range(len(ret_candset),
len(ret_candset)+len(missing_value_pairs)))
if len(missing_value_pairs) > 0:
ret_candset = pd.concat([ret_candset, missing_value_pairs], ignore_index=True, sort=False)
cm.set_key(ret_candset, '_id')
cm.set_fk_ltable(ret_candset, 'ltable_' + l_key)
cm.set_fk_rtable(ret_candset, 'rtable_' + r_key)
cm.set_ltable(ret_candset, ltable)
cm.set_rtable(ret_candset, rtable)
# Return the final candidate set to user.
return ret_candset
def block_candset(self, candset, l_overlap_attr, r_overlap_attr,
rem_stop_words=False, q_val=None, word_level=True,
overlap_size=1, allow_missing=False,
verbose=False, show_progress=True, n_chunks=-1):
"""
WARNING THIS COMMAND IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN RISK.
Blocks an input candidate set of tuple pairs based on the overlap
of token sets of attribute values. Finds tuple pairs from an input
candidate set of tuple pairs such that
the overlap between (a) the set of tokens obtained by tokenizing the
value of attribute l_overlap_attr of the left tuple in a tuple pair,
and (b) the set of tokens obtained by tokenizing the value of
attribute r_overlap_attr of the right tuple in the tuple pair,
is above a certain threshold.
Args:
candset (DataFrame): The input candidate set of tuple pairs.
l_overlap_attr (string): The overlap attribute in left table.
r_overlap_attr (string): The overlap attribute in right table.
rem_stop_words (boolean): A flag to indicate whether stop words
(e.g., a, an, the) should be removed
from the token sets of the overlap
attribute values (defaults to False).
q_val (int): The value of q to use if the overlap attributes values
are to be tokenized as qgrams (defaults to None).
word_level (boolean): A flag to indicate whether the overlap
attributes should be tokenized as words
(i.e, using whitespace as delimiter)
(defaults to True).
overlap_size (int): The minimum number of tokens that must overlap
(defaults to 1).
allow_missing (boolean): A flag to indicate whether tuple pairs
with missing value in at least one of the
blocking attributes should be included in
the output candidate set (defaults to
False). If this flag is set to True, a
tuple pair with missing value in either
blocking attribute will be retained in the
output candidate set.
verbose (boolean): A flag to indicate whether the debug information
should be logged (defaults to False).
show_progress (boolean): A flag to indicate whether progress should
be displayed to the user (defaults to True).
n_chunks (int): The number of partitions to split the candidate set. If it
is set to -1, the number of partitions will be set to the
number of cores in the machine.
Returns:
A candidate set of tuple pairs that survived blocking (DataFrame).
Raises:
AssertionError: If `candset` is not of type pandas
DataFrame.
AssertionError: If `l_overlap_attr` is not of type string.
AssertionError: If `r_overlap_attr` is not of type string.
AssertionError: If `q_val` is not of type int.
AssertionError: If `word_level` is not of type boolean.
AssertionError: If `overlap_size` is not of type int.
AssertionError: If `verbose` is not of type
boolean.
AssertionError: If `allow_missing` is not of type boolean.
AssertionError: If `show_progress` is not of type
boolean.
AssertionError: If `n_chunks` is not of type
int.
AssertionError: If `l_overlap_attr` is not in the ltable
columns.
            AssertionError: If `r_overlap_attr` is not in the rtable columns.
SyntaxError: If `q_val` is set to a valid value and
`word_level` is set to True.
SyntaxError: If `q_val` is set to None and
`word_level` is set to False.
Examples:
>>> import py_entitymatching as em
>>> from py_entitymatching.dask.dask_overlap_blocker import DaskOverlapBlocker
>>> A = em.read_csv_metadata('path_to_csv_dir/table_A.csv', key='ID')
>>> B = em.read_csv_metadata('path_to_csv_dir/table_B.csv', key='ID')
>>> ob = DaskOverlapBlocker()
>>> C = ob.block_tables(A, B, 'address', 'address', l_output_attrs=['name'], r_output_attrs=['name'])
            >>> D1 = ob.block_candset(C, 'name', 'name')
# Include all possible tuple pairs with missing values
>>> D2 = ob.block_candset(C, 'name', 'name', allow_missing=True)
# Execute blocking using multiple cores
>>> D3 = ob.block_candset(C, 'name', 'name', n_chunks=-1)
# Use q-gram tokenizer
            >>> D4 = ob.block_candset(C, 'name', 'name', word_level=False, q_val=2)
"""
logger.warning(
"WARNING THIS BLOCKER IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN "
"RISK.")
# Validate input parameters
self.validate_types_params_candset(candset, verbose, show_progress, n_chunks)
self.validate_types_other_params(l_overlap_attr, r_overlap_attr,
rem_stop_words, q_val, word_level, overlap_size)
# get and validate metadata
log_info(logger,
'Required metadata: cand.set key, fk ltable, fk rtable, '
'ltable, rtable, ltable key, rtable key', verbose)
# # get metadata
key, fk_ltable, fk_rtable, ltable, rtable, l_key, r_key = cm.get_metadata_for_candset(
candset, logger, verbose)
# # validate metadata
cm._validate_metadata_for_candset(candset, key, fk_ltable, fk_rtable,
ltable, rtable, l_key, r_key,
logger, verbose)
# validate overlap attrs
self.validate_overlap_attrs(ltable, rtable, l_overlap_attr,
r_overlap_attr)
# validate word_level and q_val
self.validate_word_level_qval(word_level, q_val)
# validate number of chunks
validate_object_type(n_chunks, int, 'Parameter n_chunks')
validate_chunks(n_chunks)
# # do projection before merge
l_df = ltable[[l_key, l_overlap_attr]]
r_df = rtable[[r_key, r_overlap_attr]]
# # set index for convenience
l_df = l_df.set_index(l_key, drop=False)
r_df = r_df.set_index(r_key, drop=False)
        # # cast the overlap attribute to string if required.
l_df.is_copy, r_df.is_copy = False, False # to avoid setwithcopy warning
ssj.dataframe_column_to_str(l_df, l_overlap_attr, inplace=True)
ssj.dataframe_column_to_str(r_df, r_overlap_attr, inplace=True)
if word_level == True:
tokenizer = WhitespaceTokenizer(return_set=True)
else:
            tokenizer = QgramTokenizer(qval=q_val, return_set=True)  # pass q_val so the qgram size matches the request
n_chunks = get_num_partitions(n_chunks, len(candset))
c_splits = np.array_split(candset, n_chunks)
valid_splits = []
# Create DAG
for i in range(n_chunks):
result = delayed(self._block_candset_split)(c_splits[i], l_df, r_df, l_key,
r_key, l_overlap_attr,
r_overlap_attr, fk_ltable,
fk_rtable, allow_missing,
rem_stop_words, tokenizer, overlap_size)
valid_splits.append(result)
valid_splits = delayed(wrap)(valid_splits)
# Execute the DAG
if show_progress:
with ProgressBar():
valid_splits = valid_splits.compute(scheduler="processes",
num_workers=get_num_cores())
else:
valid_splits = valid_splits.compute(scheduler="processes",
num_workers=get_num_cores())
valid = sum(valid_splits, [])
# construct output table
if len(candset) > 0:
out_table = candset[valid]
else:
out_table = pd.DataFrame(columns=candset.columns)
# update the catalog
cm.set_candset_properties(out_table, key, fk_ltable, fk_rtable,
ltable, rtable)
# return the output table
return out_table
# ------------ helper functions ------- #
def _block_candset_split(self, c_df, l_df, r_df, l_key, r_key, l_block_attr,
r_block_attr, fk_ltable, fk_rtable, allow_missing,
rem_stop_words, tokenizer, overlap_threshold):
valid = []
col_names = list(c_df.columns)
lkey_idx = col_names.index(fk_ltable)
rkey_idx = col_names.index(fk_rtable)
l_dict = {}
r_dict = {}
for row in c_df.itertuples(index=False):
row_lkey = row[lkey_idx]
if row_lkey not in l_dict:
l_dict[row_lkey] = l_df.loc[row_lkey, l_block_attr]
l_val = l_dict[row_lkey]
row_rkey = row[rkey_idx]
if row_rkey not in r_dict:
r_dict[row_rkey] = r_df.loc[row_rkey, r_block_attr]
r_val = r_dict[row_rkey]
            # Pairs with a missing value survive only when allow_missing is
            # set; every complete pair gets the overlap test. (The original
            # allow_missing branch compared raw values for equality and never
            # checked overlap, which dropped valid overlapping pairs.)
            if pd.isnull(l_val) or pd.isnull(r_val):
                valid.append(allow_missing)
            else:
                l_tokens = self._process_tokenize_block_str(l_val, rem_stop_words,
                                                            tokenizer)
                r_tokens = self._process_tokenize_block_str(r_val, rem_stop_words,
                                                            tokenizer)
                overlap = len(set(l_tokens).intersection(r_tokens))
                # overlap_size is documented as the minimum required overlap,
                # so use >= (matching the probe method)
                valid.append(overlap >= overlap_threshold)
return valid
def process_tokenize_block_attr(self, block_column_values,
start_row_id, should_rem_stop_words, tokenizer):
result_vals = {}
row_id = start_row_id
for s in block_column_values:
if not s or pd.isnull(s):
row_id += 1
continue
val = self._process_tokenize_block_str(s, should_rem_stop_words, tokenizer)
result_vals[row_id] = val
row_id += 1
return result_vals
def build_inverted_index(self, process_tok_vals):
index = collections.defaultdict(set)
for key in process_tok_vals:
for val in process_tok_vals[key]:
index[val].add(key)
return index
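    # Example with hypothetical tokenized rows: given
    #   {0: ['main', 'st'], 1: ['elm', 'st']}
    # build_inverted_index returns
    #   defaultdict(set, {'main': {0}, 'st': {0, 1}, 'elm': {1}})
    # so probing with a right-table token set only touches the left rows that
    # share at least one token.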
def _find_candidates(self, probe_tokens, inverted_index):
overlap_candidates = {}
for token in probe_tokens:
candidates = inverted_index.get(token, [])
for candidate in candidates:
overlap_candidates[candidate] = overlap_candidates.get(candidate, 0) + 1
return overlap_candidates
def probe(self, block_column_values, inverted_index, start_row_id,
should_rem_stop_words, tokenizer, overlap_threshold):
result = []
row_id = start_row_id
for s in block_column_values:
            if not s or pd.isnull(s):  # skip empty or missing values (mirrors process_tokenize_block_attr)
row_id += 1
continue
probe_tokens = self._process_tokenize_block_str(s, should_rem_stop_words,
tokenizer)
candidates = self._find_candidates(probe_tokens, inverted_index)
for cand, overlap in candidates.items():
if overlap >= overlap_threshold:
result.append((cand, row_id))
row_id += 1
return result
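    # probe emits (ltable_row_id, rtable_row_id) pairs. With the illustrative
    # index above and an rtable value 'Main St.' (row id 0), preprocessing
    # yields tokens {'main', 'st'}; candidate 0 accumulates overlap 2, which
    # passes overlap_threshold=1, so the pair (0, 0) survives blocking.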
def _process_tokenize_block_str(self, s, should_rem_stop_words, tokenizer):
if not s or pd.isnull(s):
return s
if isinstance(s, bytes):
s = s.decode('utf-8', 'ignore')
s = s.lower()
s = self.regex_punctuation.sub('', s)
tokens = list(set(s.strip().split()))
if should_rem_stop_words:
tokens = [token for token in tokens if token not in self.stop_words]
s = ' '.join(tokens)
tokenized_str = tokenizer.tokenize(s)
return tokenized_str
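    # e.g. (illustrative) _process_tokenize_block_str('The Main St.', True,
    # WhitespaceTokenizer(return_set=True)) lowercases, strips punctuation,
    # drops the stop word 'the', and returns ['main', 'st'] (token order may
    # vary because the tokens pass through a set).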
def validate_types_params_tables(self, ltable, rtable,
l_output_attrs, r_output_attrs, l_output_prefix,
r_output_prefix, verbose, n_ltable_chunks,
n_rtable_chunks):
validate_object_type(ltable, pd.DataFrame, error_prefix='Input left table')
validate_object_type(rtable, pd.DataFrame, error_prefix='Input right table')
if l_output_attrs:
validate_object_type(l_output_attrs, list, 'Output attributes of left table')
for x in l_output_attrs:
validate_object_type(x, six.string_types, 'An output attribute name of left table')
if r_output_attrs:
validate_object_type(r_output_attrs, list, 'Output attributes of right table')
for x in r_output_attrs:
validate_object_type(x, six.string_types, 'An output attribute name of right table')
validate_object_type(l_output_prefix, six.string_types, 'Output prefix of left table')
validate_object_type(r_output_prefix, six.string_types, 'Output prefix of right table')
validate_object_type(verbose, bool, 'Parameter verbose')
validate_object_type(n_ltable_chunks, int, 'Parameter n_ltable_chunks')
validate_object_type(n_rtable_chunks, int, 'Parameter n_rtable_chunks')
def validate_types_other_params(self, l_overlap_attr, r_overlap_attr,
rem_stop_words, q_val,
word_level, overlap_size):
validate_object_type(l_overlap_attr, six.string_types, error_prefix='Overlap attribute name of left table')
validate_object_type(r_overlap_attr, six.string_types, error_prefix='Overlap attribute name of right table')
validate_object_type(rem_stop_words, bool, error_prefix='Parameter rem_stop_words')
if q_val != None and not isinstance(q_val, int):
logger.error('Parameter q_val is not of type int')
raise AssertionError('Parameter q_val is not of type int')
validate_object_type(word_level, bool, error_prefix='Parameter word_level')
validate_object_type(overlap_size, int, error_prefix='Parameter overlap_size')
# validate the overlap attrs
def validate_overlap_attrs(self, ltable, rtable, l_overlap_attr,
r_overlap_attr):
if not isinstance(l_overlap_attr, list):
l_overlap_attr = [l_overlap_attr]
assert set(l_overlap_attr).issubset(
ltable.columns) is True, 'Left block attribute is not in the left table'
if not isinstance(r_overlap_attr, list):
r_overlap_attr = [r_overlap_attr]
assert set(r_overlap_attr).issubset(
rtable.columns) is True, 'Right block attribute is not in the right table'
# validate word_level and q_val
def validate_word_level_qval(self, word_level, q_val):
if word_level == True and q_val != None:
raise SyntaxError(
'Parameters word_level and q_val cannot be set together; Note that word_level is '
                'set to True by default, so explicitly set word_level=False to use qgrams with the '
'specified q_val')
if word_level == False and q_val == None:
raise SyntaxError(
'Parameters word_level and q_val cannot be unset together; Note that q_val is '
'set to None by default, so if you want to use qgram then '
                'explicitly set word_level=False and specify the q_val')
def block_tuples(self, ltuple, rtuple, l_overlap_attr, r_overlap_attr,
rem_stop_words=False, q_val=None, word_level=True,
overlap_size=1, allow_missing=False):
"""Blocks a tuple pair based on the overlap of token sets of attribute values.
Args:
ltuple (Series): The input left tuple.
rtuple (Series): The input right tuple.
l_overlap_attr (string): The overlap attribute in left tuple.
r_overlap_attr (string): The overlap attribute in right tuple.
rem_stop_words (boolean): A flag to indicate whether stop words
(e.g., a, an, the) should be removed
from the token sets of the overlap
attribute values (defaults to False).
q_val (int): A value of q to use if the overlap attributes values
are to be tokenized as qgrams (defaults to None).
word_level (boolean): A flag to indicate whether the overlap
attributes should be tokenized as words
(i.e, using whitespace as delimiter)
(defaults to True).
overlap_size (int): The minimum number of tokens that must overlap
(defaults to 1).
allow_missing (boolean): A flag to indicate whether a tuple pair
with missing value in at least one of the
blocking attributes should be blocked
(defaults to False). If this flag is set
to True, the pair will be kept if either
ltuple has missing value in l_block_attr
or rtuple has missing value in r_block_attr
or both.
Returns:
A status indicating if the tuple pair is blocked (boolean).
Examples:
>>> import py_entitymatching as em
>>> A = em.read_csv_metadata('path_to_csv_dir/table_A.csv', key='ID')
>>> B = em.read_csv_metadata('path_to_csv_dir/table_B.csv', key='ID')
>>> ob = em.OverlapBlocker()
>>> status = ob.block_tuples(A.loc[0], B.loc[0], 'address', 'address')
"""
# validate data types of input parameters specific to overlap blocker
self.validate_types_other_params(l_overlap_attr, r_overlap_attr,
rem_stop_words, q_val,
word_level, overlap_size)
# validate word_level and q_val
self.validate_word_level_qval(word_level, q_val)
# determine which tokenizer to use
if word_level == True:
# # create a whitespace tokenizer
tokenizer = WhitespaceTokenizer(return_set=True)
else:
# # create a qgram tokenizer
tokenizer = QgramTokenizer(qval=q_val, return_set=True)
# # cleanup the tuples from non-ascii characters, punctuations, and stop words
l_val = self.cleanup_tuple_val(ltuple[l_overlap_attr], rem_stop_words)
r_val = self.cleanup_tuple_val(rtuple[r_overlap_attr], rem_stop_words)
# create a filter for overlap similarity
overlap_filter = OverlapFilter(tokenizer, overlap_size,
allow_missing=allow_missing)
return overlap_filter.filter_pair(l_val, r_val)
def cleanup_tuple_val(self, val, rem_stop_words):
if | pd.isnull(val) | pandas.isnull |
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 1 09:21:40 2018
@author: @gary.allison
This code takes ODNR brine disposal fee files and eventually creates a
file used to show overall injection volumes.
The ODNR data have several limitations that we must find and account for:
- data type consistency,
- lumped API numbers
- typos
- different file formats across the years etc.
"""
import pandas as pd
import numpy as np
import pandas.api.types as ptypes
from validAPI import getAPI10
##### --------------------------------------------------
#### Input file definitions
##### --------------------------------------------------
# set data dirs for input files and for resulting output files
datadir = './sources/'
outdir = './out/'
indir = datadir+'OH_injection/'
pre_proc_out = outdir+'injection_tall_pre.csv'
# input files are in four different formats:
# for the oldest, specify filename, year and quarter: tuple (filename,yr,q)
# all columns are named the same!!
fn_old = [('OH_1ST QUARTER 2011 BRINE DISPOSAL FEES.xls',2011,1),
('OH_2ND QUARTER 2011 BRINE DISPOSAL FEES.xls',2011,2),
('OH_3RD QUARTER 2011 BRINE DISPOSAL FEES-1.xls',2011,3),
('OH_4TH QUARTER 2010 BRINE DISPOSAL FEES.xls',2010,4),
('OH_4TH QUARTER 2011 BRINE DISPOSAL FEES.xls',2011,4),
('OH_Brine Disposal Fee - 3rd Quarter 2010-2.xls',2010,3)]
# the 2012 file is in a funky state - its worksheets have two different formats: a blend of old and main
# so we have to process it separately
fn_2012 = 'OH_BRINE DISPOSAL FEES FOR 2012.xls'
# bulk of the data are here - first four worksheets are quarters.
# Total worksheet (the fifth one) is ignored
# specify the filename and the year: tuple: (filename,year)
fn_2013_17 = [('BRINE DISPOSAL FEES FOR 2013.xlsx',2013),
('BRINE DISPOSAL FEES FOR 2014.xlsx',2014),
('BRINE DISPOSAL FEES FOR 2015.xlsx',2015),
('BRINE DISPOSAL FEES FOR 2016.xlsx',2016),
('BRINE DISPOSAL FEES FOR 2017.xlsx',2017)]
# Finally, the current file is of a different format and must also
# be treated separately. It currently includes all quarters of the
# year (even if they are in the future) and on a single worksheet
fn_2018_plus = [('BRINE DISPOSAL FEES FOR 2018.xlsx',2018),
('BRINE DISPOSAL FEES FOR 2019.xlsx',2019)]
# The text file with the records to collapse into one
aggfn = 'aggregateAPI.txt'
# We define these temporary files to examine output in progress
tempf = outdir+'temp.csv'
tempf1 = outdir+'temp1.csv'
def fetchAggregateList(fn=aggfn):
agglist = []
aggaction = {}
with open(fn) as f:
f.readline() # ignore header
for ln in f.readlines():
lst = ln.split('|')
key = (lst[0],int(lst[1]),int(lst[2]))
agglist.append(key)
            aggaction[key] = lst[3].strip()  # what to do when a match is found; strip the trailing newline so comparisons like action == 'sum' work
return agglist, aggaction
agglist, aggaction = fetchAggregateList()
#print(agglist)
def is_on_AggregateList(API10,yr,q):
if (API10,yr,q) in agglist:
#print(f'Aggregating {API10}')
return True
return False
def getCollapseSet(ser,yr,q):
# return list of APIs from the AggList to colllapse
clst = []
for index,row in ser.iteritems():
if is_on_AggregateList(row,yr,q):
clst.append(row)
cset = set(clst)
return cset
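# Based on the parsing in fetchAggregateList above, aggregateAPI.txt is a
# pipe-delimited file with one header line followed by rows of the form
# API10|year|quarter|action -- for example (values made up for illustration):
#   3400112345|2011|2|sum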
##### --------------------------------------------------
##### Input file readers
##### --------------------------------------------------
#### -------------------------2010 - 2011 ---------------
def read_old(fn,yr,quar,review=False,flag_problems=True):
# read excel file and produce a pandas dataframe
# we keep only 4 columns from the sheet, ignore the header,
# and skip several rows at the top.
# Unlike later files, these have only 1 volume column with a
# label column to specify if it is from in-district or out-of-district
# We must combine the two (in and out) into a single record
d = pd.read_excel(indir+fn,skiprows=5,header=None,usecols=[7,8,10,11],
names=['CompanyName','APIstr','Vol','In_Out'])
# some volumes cells contain the work 'zero',
d.Vol = d.Vol.where(d.Vol.str.lower().str.strip()!='zero',0)
d.Vol = pd.to_numeric(d.Vol)
# make all of in-out into lowercase
d.In_Out = d.In_Out.str.lower()
# some In_Out cells have 'zero' in them: assign them to In
d.In_Out = d.In_Out.where(d.In_Out.str.lower().str.strip()!='zero','in')
assert ptypes.is_numeric_dtype(d.Vol)
assert ptypes.is_string_dtype(d.CompanyName)
assert ptypes.is_string_dtype(d.APIstr)
api10 = []
for index, row in d.iterrows():
        api10.append(getAPI10(row['APIstr'], yr, quar, flag_problems=flag_problems))  # select by column name, not position
if review:
            print(f"{api10[-1]}, {row['APIstr']},{yr},{quar}")
d['API10'] = api10
### ---------- handle multiple entries for a given API ---------
cset = getCollapseSet(d.API10,yr,quar)
#print(cset)
for capi in cset:
tmp = d[d.API10 == capi]
action = aggaction[(capi,yr,quar)]
if action == 'sum':
vol = tmp.groupby(['API10','In_Out'])['Vol'].sum()
        else:
            print(f'UNRECOGNIZED ACTION for {capi}')
            continue  # skip collapsing this API rather than reusing a stale `vol`
# make into df
vol = pd.DataFrame(vol)
# always take last of collapsed - assuming it is most recent
        other = tmp.groupby(['API10','In_Out'])[['APIstr','CompanyName']].last()
mg = pd.merge(vol,other,left_index=True,right_index=True,
validate='1:1')
mg.reset_index(level=[0,1],inplace=True)
tmp = d[d.API10 != capi] # drop the old
d = pd.concat([tmp,mg],sort=True) # add the new
### --------------------------------------------------------------
### --------------- Make a meta df ----------------------------
meta = d.copy().filter(['API10','APIstr','CompanyName'])
    meta = meta.groupby(['API10'],as_index=False)[['APIstr','CompanyName']].first()
### ----------------- snag all in-district records
dIn = d[d.In_Out.str.lower().str[0]=='i'] #'In district'
dIn = dIn.filter(['API10','Vol'])
dIn.columns = ['API10','Vol_InDist']
# =============================================================================
# print(f'{len(dIn)}, {len(dIn.API10.unique())}')
# print(dIn[dIn.API10.duplicated()==True])
# dIn.sort_values(by='API10').to_csv(tempf)
# =============================================================================
assert len(dIn)==len(dIn.API10.unique())
# put together with all
meta = | pd.merge(meta,dIn,how='left',on='API10',validate='1:1') | pandas.merge |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing import sequence
from deprecated import deprecated
import os
import numpy as np
from tqdm import tqdm
from PIL import Image
import pickle
import pandas as pd
import re
import string
from collections import Counter
from lib.utils_xrh import *
class BatchDataGenerator:
"""
    Batch data generator.
    When the dataset is too large for memory, the full data cannot be fed to
    the model at once and is instead supplied in batches.
Author: xrh
Date: 2021-9-25
"""
def __init__(self, dataset_dir='cache_data/train_dataset.json'):
self.dataset_dir = dataset_dir
def read_all(self, n_a, n_vocab, m, batch_size=32, dataset=None):
"""
        Read the whole dataset (json) from disk into memory, then randomly
        sample one batch at a time and feed it to the model for training.
:param n_a:
:param n_vocab:
        :param m: total number of samples in the dataset
:param batch_size:
        :param dataset: the dataset stored as a DataFrame
:return:
"""
        # executed only once
if dataset is None:
dataset = pd.read_json(self.dataset_dir)
image_feature = np.array(dataset['image_feature'].tolist())
caption_encoding = np.array(dataset['caption_encoding'].tolist())
        while True:  # each call to next() runs the statements below
            mask = np.random.choice(m, batch_size)  # randomly sample batch_size indices from range(m); m is the total sample count
batch_image_feature = image_feature[mask]
batch_caption_encoding = caption_encoding[mask]
            m_batch = np.shape(batch_caption_encoding)[0]  # number of samples in this batch
c0 = np.zeros((m_batch, n_a))
            # The language model's input and output are offset by one time step,
            # e.g.
            # output: today / is / a / good day / <end>
            # input:  <start> / today / is / a / good day
caption_out = batch_caption_encoding[:, 1:] # shape(N,39)
caption_in = batch_caption_encoding[:, :-1] # shape(N,39)
outputs = ArrayUtils.one_hot_array(caption_out, n_vocab)
yield ((caption_in, batch_image_feature, c0),
                   outputs)  # must be a tuple, otherwise ValueError: No gradients provided for any variable (Keras 2.4, TensorFlow 2.3.0)
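    # Illustrative training loop (names such as `model` and `vocab_size` are
    # assumptions, not from this file):
    #   gen = BatchDataGenerator().read_all(n_a=512, n_vocab=vocab_size, m=m, batch_size=64)
    #   model.fit(gen, steps_per_epoch=m // 64, epochs=20)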
@deprecated()
def read_by_chunk(self, image_feature_dir,caption_encoding_dir,n_a, n_vocab, m, batch_size=32):
"""
        Read the preprocessed dataset (csv) into memory chunk by chunk.
:param n_a:
:param n_vocab:
        :param m: total number of samples in the dataset
:param batch_size:
:return:
"""
        # executed only once
        image_feature = pd.read_csv(image_feature_dir, header=None, iterator=True)  # the csv is too large to read into memory at once
caption_encoding = pd.read_csv(caption_encoding_dir, header=None, iterator=True)
        steps_per_epoch = m // batch_size  # number of batches to generate per epoch
        # m is the total number of samples
count = 0
        while True:  # each call to next() runs the statements below
            batch_image_feature = image_feature.get_chunk(batch_size).iloc[:, 1:]  # drop the first column (the index column)
batch_caption_encoding = caption_encoding.get_chunk(batch_size).iloc[:, 1:]
batch_image_feature = batch_image_feature.to_numpy()
batch_caption_encoding = batch_caption_encoding.to_numpy()
            N_batch = np.shape(batch_caption_encoding)[0]  # number of samples in this batch
c0 = np.zeros((N_batch, n_a))
            # The language model's input and output are offset by one time step,
            # e.g.
            # output: today / is / a / good day / <end>
            # input:  <start> / today / is / a / good day
caption_out = batch_caption_encoding[:, 1:] # shape(N,39)
caption_in = batch_caption_encoding[:, :-1] # shape(N,39)
outputs = ArrayUtils.one_hot_array(caption_out, n_vocab)
yield ((caption_in, batch_image_feature, c0),
                   outputs)  # must be a tuple, otherwise ValueError: No gradients provided for any variable (Keras 2.4, TensorFlow 2.3.0)
count += 1
            if count > steps_per_epoch:  # every batch has been consumed once; restart the csv iterators
image_feature = | pd.read_csv(image_feature_dir, header=None, iterator=True) | pandas.read_csv |
import logging
import pandas as pd
from nltk import tokenize
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from lib.settings import DATA_DIR, LOG_LEVEL
from lib.characters import findAllMentionedCharacters
comments = | pd.read_csv(DATA_DIR / 'comments.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 6 09:44:04 2021
@author: <NAME>
"""
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from wu_rainfall import WuRainfall
import datetime
VER = "_01b" # Version, any string
f = 'sw_tph_g_data_LVR.csv' # Data file, csv format
df1 = pd.read_csv(f, infer_datetime_format=True)  # read the data file defined above
df1["date_time"] = | pd.to_datetime(df1.Date) | pandas.to_datetime |
import collections
import pandas as pd
import warnings
import PIL
from typing import Union, Optional, List, Dict, Tuple
from ..constants import (
NULL, CATEGORICAL, NUMERICAL, TEXT,
IMAGE_PATH, MULTICLASS, BINARY, REGRESSION,
)
def is_categorical_column(
data: pd.Series,
valid_data: pd.Series,
threshold: int = None,
ratio: Optional[float] = None,
oov_ratio_threshold: Optional[float] = None,
is_label: bool = False,
) -> bool:
"""
    Identify whether a column is a categorical column.
If the number of unique elements in the column is smaller than
min(#Total Sample * ratio, threshold),
it will be treated as a categorical column.
Parameters
----------
data
One column of a multimodal pd.DataFrame for training.
valid_data
One column of a multimodal pd.DataFrame for validation.
threshold
The threshold for detecting categorical column.
ratio
The ratio detecting categorical column.
oov_ratio_threshold
The out-of-vocabulary ratio between training and validation.
This is used to determine if the column is a categorical column.
Usually, a categorical column can tolerate a small OOV ratio.
is_label
Whether the column is a label column.
Returns
-------
Whether the column is a categorical column.
"""
if data.dtype.name == 'category':
return True
else:
if threshold is None:
if is_label:
threshold = 100
oov_ratio_threshold = 0
ratio = 0.1
else:
threshold = 20
oov_ratio_threshold = 0
ratio = 0.1
threshold = min(int(len(data) * ratio), threshold)
data_value_counts = data.value_counts(dropna=False)
key_set = set(data_value_counts.keys())
if len(data_value_counts) < threshold:
valid_value_counts = valid_data.value_counts(dropna=False)
total_valid_num = len(valid_data)
oov_num = 0
for k, v in zip(valid_value_counts.keys(), valid_value_counts.values):
if k not in key_set:
oov_num += v
if is_label and oov_num != 0:
return False
if oov_num / total_valid_num > oov_ratio_threshold:
return False
return True
return False
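# Toy example (illustrative only): 3 unique values in 200 training rows fall
# below min(int(200 * 0.1), 20) = 20 and the validation column adds no
# out-of-vocabulary values, so the column is detected as categorical:
#   train = pd.Series(['a', 'b', 'a', 'c'] * 50)
#   valid = pd.Series(['a', 'b', 'c', 'b'])
#   is_categorical_column(train, valid)  # -> True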
def is_numerical_column(
data: pd.Series,
valid_data: Optional[pd.Series] = None,
) -> bool:
"""
Identify if a column is a numerical column.
Here it uses a very simple rule to verify if this is a numerical column.
Parameters
----------
data
One column of a multimodal pd.DataFrame for training.
valid_data
One column of a multimodal pd.DataFrame for validation.
Returns
-------
Whether the column is a numerical column.
"""
try:
numerical_data = | pd.to_numeric(data) | pandas.to_numeric |
from convokit.model import Corpus, Conversation, User, Utterance
from typing import List, Callable, Union
from convokit import Transformer, CorpusObject
import pandas as pd
class Ranker(Transformer):
def __init__(self, obj_type: str,
score_func: Callable[[CorpusObject], Union[int, float]],
selector: Callable[[CorpusObject], bool] = lambda obj: True,
score_feat_name: str = "score", rank_feat_name: str = None):
"""
:param obj_type: type of Corpus object to rank: 'conversation', 'user', or 'utterance'
:param score_func: function for computing the score of a given object
:param selector: function to select for Corpus objects to transform
:param score_feat_name: metadata feature name to use in annotation for score value, default: "score"
:param rank_feat_name: metadata feature name to use in annotation for the rank value, default: "[score_feat_name]_rank"
"""
self.obj_type = obj_type
self.score_func = score_func
self.score_feat_name = score_feat_name
self.rank_feat_name = score_feat_name + "_rank" if rank_feat_name is None else rank_feat_name
self.selector = selector
def transform(self, corpus: Corpus) -> Corpus:
"""
Annotate corpus objects with scores and rankings
:param corpus: target corpus
:return: annotated corpus
"""
obj_iters = {"conversation": corpus.iter_conversations,
"user": corpus.iter_users,
"utterance": corpus.iter_utterances}
obj_scores = [(obj.id, self.score_func(obj)) for obj in obj_iters[self.obj_type](self.selector)]
df = pd.DataFrame(obj_scores, columns=["id", self.score_feat_name]) \
.set_index('id').sort_values(self.score_feat_name, ascending=False)
df[self.rank_feat_name] = [idx+1 for idx, _ in enumerate(df.index)]
for obj in corpus.iter_objs(obj_type=self.obj_type):
if obj.id in df.index:
obj.add_meta(self.score_feat_name, df.loc[obj.id][self.score_feat_name])
obj.add_meta(self.rank_feat_name, df.loc[obj.id][self.rank_feat_name])
else:
obj.add_meta(self.score_feat_name, None)
obj.add_meta(self.rank_feat_name, None)
return corpus
def transform_objs(self, objs: List[CorpusObject]):
"""
Annotate list of Corpus objects with scores and rankings
:param objs: target list of Corpus objects
        :return: list of annotated Corpus objects
"""
obj_scores = [(obj.id, self.score_func(obj)) for obj in objs]
df = pd.DataFrame(obj_scores, columns=["id", self.score_feat_name]) \
.set_index('id').sort_values(self.score_feat_name, ascending=False)
df[self.rank_feat_name] = [idx+1 for idx, _ in enumerate(df.index)]
for obj in objs:
obj.add_meta(self.score_feat_name, df.loc[obj.id][self.score_feat_name])
obj.add_meta(self.rank_feat_name, df.loc[obj.id][self.rank_feat_name])
return objs
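    # Illustrative use (the score function here is an assumption, not from the
    # source): rank utterances by length and read back the annotations.
    #   ranker = Ranker(obj_type='utterance', score_func=lambda utt: len(utt.text))
    #   corpus = ranker.transform(corpus)
    # Each utterance then carries meta['score'] and meta['score_rank'].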
def summarize(self, corpus: Corpus = None, objs: List[CorpusObject] = None):
"""
Generate a dataframe indexed by object id, containing score + rank, and sorted by rank (in ascending order)
of the objects in an annotated corpus, or a list of corpus objects
:param corpus: annotated target corpus
:param objs: list of annotated corpus objects
:return: a pandas DataFrame
"""
if ((corpus is None) and (objs is None)) or ((corpus is not None) and (objs is not None)):
raise ValueError("summarize() takes in either a Corpus or a list of users / utterances / conversations")
if objs is None:
obj_iters = {"conversation": corpus.iter_conversations,
"user": corpus.iter_users,
"utterance": corpus.iter_utterances}
obj_scores_ranks = [(obj.id, obj.meta[self.score_feat_name], obj.meta[self.rank_feat_name])
for obj in obj_iters[self.obj_type](self.selector)]
else:
obj_scores_ranks = [(obj.id, obj.meta[self.score_feat_name], obj.meta[self.rank_feat_name]) for obj in objs]
df = | pd.DataFrame(obj_scores_ranks, columns=["id", self.score_feat_name, self.rank_feat_name]) | pandas.DataFrame |
#!/usr/bin/env python3
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Ridge, Lasso
import pickle
import os
import yaml
import numpy as np
import scipy.signal as signal
import pandas as pd
from scipy.stats import pearsonr
from datetime import datetime
from urllib import request
from pipeline.helpers import get_vi_f, get_vi_nf
def download_data(fn, data_dir):
url = "https://zenodo.org/record/3897289/files/{}?download=1".format(fn)
fn = os.path.join(data_dir, fn)
if not os.path.isfile(fn):
print("Downloading data from", url)
request.urlretrieve(url, fn)
def preprocess_data(df, augment):
    rm_idx = np.concatenate((np.arange(0, 300, dtype=int), np.arange(len(df) - 300, len(df), dtype=int)))  # np.int is deprecated; use the builtin int
df = df.drop(index=rm_idx)
if augment:
df_vi_f = get_vi_f(df, c_offset=2, verbose=False)
df = pd.concat([df, df_vi_f], axis=1, sort=False)
df_vi_nf = get_vi_nf(df, c_offset=2 + 41, verbose=False)
df = | pd.concat([df, df_vi_nf], axis=1, sort=False) | pandas.concat |
import numpy as np
import pandas as pd
import pytest
from tabmat.categorical_matrix import CategoricalMatrix
@pytest.fixture
def cat_vec():
m = 10
seed = 0
rng = np.random.default_rng(seed)
return rng.choice([0, 1, 2, np.inf, -np.inf], size=m)
@pytest.mark.parametrize("vec_dtype", [np.float64, np.float32, np.int64, np.int32])
@pytest.mark.parametrize("drop_first", [True, False])
def test_recover_orig(cat_vec, vec_dtype, drop_first):
orig_recovered = CategoricalMatrix(cat_vec, drop_first=drop_first).recover_orig()
np.testing.assert_equal(orig_recovered, cat_vec)
@pytest.mark.parametrize("vec_dtype", [np.float64, np.float32, np.int64, np.int32])
@pytest.mark.parametrize("drop_first", [True, False])
def test_csr_matvec_categorical(cat_vec, vec_dtype, drop_first):
mat = | pd.get_dummies(cat_vec, drop_first=drop_first) | pandas.get_dummies |
import matplotlib.pyplot as plt
import seaborn as sns
import pdb
import requests
import re
import threading
import concurrent.futures
import numpy as np
import pandas as pd
from functools import reduce
from collections import Counter
from sklearn.preprocessing import normalize, StandardScaler, Normalizer, RobustScaler, MinMaxScaler, MaxAbsScaler
import networkx as nx
# import signal
import warnings
warnings.filterwarnings("ignore")
from url_utils import *
from wiki_scrapper import WikiScrapper
from WikiMultiQuery import wiki_multi_query
from graph_helpers import create_dispersion_df, dict_values_to_df, sort_dict_values, format_categories, compare_categories, rank_order, similarity_rank
################
# GraphCreator #
################
class GraphCreator:
"""
Retrieves data from the Wikipedia API and constructs a graph network of article relations.
Allows for the fast creation of a graph based recommender system.
Input:
------
entry (required, string)
A string containing the title of a Wikipedia article or a valid Wikipedia URL.
    include_see_also (default: True, bool)
If True, marks any see also links as important and related to the main topic (default).
        If False, does nothing to the see also links. Mark as False when validating recommendations.
max_recursive_requests (default: 50, int)
The maximum number of times an API call will repeat to get all information. This can be an important parameter to set if efficiency is an issue.
Lower values will be more efficient, but may miss important information. Higher values are less efficient, but gather more data.
"""
def __init__(self, entry, include_see_also=True, max_recursive_requests=50):
self.graph = nx.DiGraph()
self.entry = get_title(entry) # from url_utils
self.max_requests = max_recursive_requests
ws = WikiScrapper(f"https://en.wikipedia.org/wiki/{self.entry}")
ws.parse_intro_links()
self.primary_nodes = {title : True for title in ws.get_primary_links(include_see_also=include_see_also)}
# see also articles to be used as targets for evaluation
self.see_also_articles = ws.see_also_link_titles
self.visited = {self.entry}
self.next_links = []
self.categories = {}
self.redirect_targets = []
self.redirect_sources = {}
self.query_articles([self.entry])
# setup timeout function
# def handle_alarm(signum, frame):
# raise RuntimeError
# signal.signal(signal.SIGALRM, handle_alarm)
    #####################################
    # GRAPH SETUP & MAINTENANCE METHODS #
    #####################################
def _add_edges(self, articles):
"""
Given a list of articles, adds nodes and connections (edges) to the network.
It can be called manually, but the expected use is within an internal graph update call.
"""
for article in articles:
self.categories[article['title']] = format_categories([category.split("Category:")[1] for category in article['categories'] if not bool(re.findall(r"(articles)|(uses)|(commons)|(category\:use)", category, re.I))])
self.graph.add_edges_from(
[(article['title'], link) for link in article['links']])
self.graph.add_edges_from(
[(linkhere, article['title']) for linkhere in article['linkshere']])
def update_edge_weights(self):
"""
        Edges are weighted by the number of categories two connected nodes share. This method looks at each node and its neighbors and adjusts in- and outbound edge weights as needed.
"""
for edge in self.graph.out_edges:
weight = compare_categories(edge[0], edge[1], self.categories)
self.graph.add_edge(edge[0], edge[1], weight=weight)
for edge in self.graph.in_edges:
weight = compare_categories(edge[0], edge[1], self.categories)
self.graph.add_edge(edge[0], edge[1], weight=weight)
def get_edge_weights(self):
"""
A getter method to view the edge weights of each node (in and outbound).
"""
edge_weights = []
for edge in self.graph.edges:
edge_weights.append((edge[0], edge[1], self.graph.get_edge_data(edge[0], edge[1])['weight']))
return pd.DataFrame(edge_weights, columns=["source_node", "target_node", "edge_weight"]).sort_values("edge_weight", ascending=False).reset_index().drop("index", axis=1)
##############################
# FEATURE EXTRACTION METHODS #
##############################
def get_shared_categories_with_source(self):
cat_matches = {}
for node in self.graph.nodes:
cat_matches[node] = compare_categories(self.entry, node, self.categories, starting_count=0)
return dict_values_to_df(cat_matches, ['node', 'category_matches_with_source'])
def get_primary_nodes(self):
"""
Marks a node as a primary node if it appears in the article introduction or the See Also section. Primary nodes are considered to be more related to the main topics than others.
"""
        primary_nodes = {}
        for node in self.graph.nodes:
            if node in self.primary_nodes:
                # allows for heavier weight to duplicates in intro and see also
                primary_nodes[node] = primary_nodes.get(node, 0) + 1
            else:
                primary_nodes[node] = 0
return dict_values_to_df(primary_nodes, ["node", "primary_link"])
def get_degrees(self):
"""
Get all edges of a node and its neighbors (both in and outbound).
"""
return dict_values_to_df(dict(self.graph.degree()), ["node", "degree"])
def get_shared_neighbors_with_entry_score(self):
"""
        A score equal to the number of neighbors shared with the entry node,
        divided by the total number of unique neighbors of the two nodes combined.
"""
entry_neighbors = list(set(nx.all_neighbors(self.graph, self.entry)))
shared_neighbors_score = {}
for node in self.graph.nodes:
target_neighbors = list(set(nx.all_neighbors(self.graph, node)))
shared_neighbors = len(entry_neighbors) + len(target_neighbors) - len(set(entry_neighbors + target_neighbors))
# score is neighbors shared over how many possible unique neighbors could have been shared.
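            # e.g. 4 shared neighbors out of 20 unique combined neighbors -> 0.2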
shared_neighbors_score[node] = shared_neighbors / len(set(entry_neighbors + target_neighbors))
return dict_values_to_df(shared_neighbors_score, ["node", "shared_neighbors_with_entry_score"])
def get_edges(self):
"""
        Gets the in and outbound edges of each node separately. Different from `get_degrees`, as it returns two columns with in and outbound edges separated.
"""
edges = []
for node in self.graph.nodes:
node_in_edges = len(self.graph.in_edges(node))
node_out_edges = len(self.graph.out_edges(node))
edges.append({"node": node, "in_edges": node_in_edges, "out_edges": node_out_edges})
return pd.DataFrame(edges)
def get_centrality(self):
"""
Gets the eigenvector centrality of each node.
"""
return dict_values_to_df(nx.eigenvector_centrality(self.graph, weight="weight"), ["node", "centrality"])
    def get_dispersion(self, comparison_node=None, max_nodes=25_000):  # deprecated
        """
        Gets the dispersion of the central node compared to each other node. This is deprecated, and not included in features_df because it can take a long time to calculate.
"""
if not comparison_node:
comparison_node = self.entry
if max_nodes is None or len(self.graph.nodes) <= max_nodes:
return dict_values_to_df(nx.dispersion(self.graph, u=comparison_node), ['node', 'dispersion'])
else:
# if the network is too large, perform calculation on ego graph of entry node
ego = self.create_ego()
return dict_values_to_df(nx.dispersion(ego, u=comparison_node), ['node', 'dispersion'])
def get_pageranks(self):
"""
Calculates and returns the networkx pagerank for each node.
"""
page_ranks = sorted([(key, value) for key, value in nx.algorithms.link_analysis.pagerank(
self.graph, weight='weight').items()], key=lambda x: x[1], reverse=True)
return pd.DataFrame(page_ranks, columns=["node", "page_rank"])
def get_reciprocity(self):
"""
        Gets the reciprocity score for each node. Note: reciprocity in the context of Wikipedia articles can be a misleading metric. The intended use of this method is to be called in the `get_adjusted_reciprocity` method, which accounts for how many connections a node has.
"""
return dict_values_to_df(nx.algorithms.reciprocity(self.graph, self.graph.nodes), ['node', 'reciprocity'])
def get_adjusted_reciprocity(self):
"""
        Gets the adjusted reciprocity score for each node. Adjusted reciprocity accounts for how many edges a node has (vs reciprocity, which just counts how many outbound edges are returned).
"""
r = self.get_reciprocity()
d = self.get_degrees()
r_d = r.merge(d, on="node", how="inner")
r_d['adjusted_reciprocity'] = r_d.reciprocity * r_d.degree
adjusted_reci = r_d.sort_values("adjusted_reciprocity", ascending=False)
adjusted_reci.adjusted_reciprocity = normalize([adjusted_reci.adjusted_reciprocity])[0]
return adjusted_reci.reset_index().drop(["degree", "reciprocity", "index"], axis=1)
def get_shortest_path_from_entry(self):
"""
Calculates the shortest path length from the entry node to every other node. If a path does not exist, return the longest path length from the entry + 1.
"""
paths = []
for node in self.graph.nodes:
try:
path_length = nx.shortest_path_length(self.graph, source=self.entry, target=node)
paths.append({"node": node, "shortest_path_length_from_entry": path_length})
            except (nx.NetworkXNoPath, nx.NodeNotFound):
paths.append({"node": node, "shortest_path_length_from_entry": np.nan})
from_entry = pd.DataFrame(paths).sort_values("shortest_path_length_from_entry", ascending=False)
return from_entry.fillna(np.max(from_entry.shortest_path_length_from_entry) + 1)
def get_shortest_path_to_entry(self):
"""
Calculates the shortest path length from each node to the entry node. If a path does not exist, return the longest path length from the entry + 1.
"""
paths = []
for node in self.graph.nodes:
try:
path_length = nx.shortest_path_length(self.graph, source=node, target=self.entry)
paths.append({"node": node, "shortest_path_length_to_entry": path_length})
            except (nx.NetworkXNoPath, nx.NodeNotFound):
paths.append({"node": node, "shortest_path_length_to_entry": np.nan})
to_entry = pd.DataFrame(paths)
return to_entry.fillna(np.max(to_entry.shortest_path_length_to_entry) + 1)
def get_jaccard_similarity(self):
"""
Calculates the Jaccard similarity score for each node compared to the entry node.
"""
entry_in_edges = set([x[0] for x in self.graph.in_edges(nbunch=self.entry)])
jaccard_scores = {}
for node in self.graph.nodes:
target_in_edges = set([x[0] for x in self.graph.in_edges(nbunch=node)])
in_edge_intersect = len(entry_in_edges.intersection(target_in_edges))
in_edge_union = len(entry_in_edges.union(target_in_edges))
jaccard_scores[node] = in_edge_intersect / in_edge_union
return dict_values_to_df(jaccard_scores, ["node", "jaccard_similarity"])
def get_features_df(self, rank=False):
"""
A wrapper method for several of the other getter methods. It calls each getter method and combines the results into a pandas DataFrame.
Input:
------
rank (default: False, bool)
            When True, ranks each column individually and creates an additional column holding the average ranking for each row. Default is no average ranking. In this context, rank is not necessarily associated with the entry node, but with the network structure itself.
"""
dfs = []
if rank:
dfs.append(rank_order(self.get_degrees(), 'degree', ascending=False))
dfs.append(rank_order(self.get_shared_categories_with_source(), 'category_matches_with_source', ascending=False))
dfs.append(rank_order(self.get_shared_neighbors_with_entry_score(), 'shared_neighbors_with_entry_score', ascending=False))
dfs.append(self.get_edges())
dfs.append(rank_order(self.get_centrality(), 'centrality', ascending=True))
dfs.append(rank_order(self.get_pageranks(), "page_rank", ascending=False))
dfs.append(rank_order(self.get_adjusted_reciprocity(), "adjusted_reciprocity", ascending=False))
dfs.append(rank_order(self.get_shortest_path_from_entry(), "shortest_path_length_from_entry", ascending=True))
dfs.append(rank_order(self.get_shortest_path_to_entry(), "shortest_path_length_to_entry", ascending=True))
dfs.append(rank_order(self.get_jaccard_similarity(), "jaccard_similarity", ascending=False))
            dfs.append(rank_order(self.get_primary_nodes(), "primary_link", ascending=False))
else:
dfs.append(self.get_degrees())
dfs.append(self.get_shared_categories_with_source())
dfs.append(self.get_edges())
dfs.append(self.get_shared_neighbors_with_entry_score())
dfs.append(self.get_centrality())
dfs.append(self.get_pageranks())
dfs.append(self.get_adjusted_reciprocity())
dfs.append(self.get_shortest_path_from_entry())
dfs.append(self.get_shortest_path_to_entry())
dfs.append(self.get_jaccard_similarity())
dfs.append(self.get_primary_nodes())
self.features_df = reduce(lambda left, right: pd.merge(left, right, on="node", how="outer"), dfs)
return self.features_df
def rank_similarity(self):
"""
Calculates a cumulative similarity rank for each node compared to the entry node. Features are placed into bonus and penalty categories to determine how similar and favorable each node is to the entry node.
"""
degree_mean = np.mean(self.features_df.degree.unique())
self.features_df['similarity_rank'] = self.features_df.apply(
similarity_rank,
degree_mean=degree_mean,
axis=1)
def scale_features_df(self, scaler=StandardScaler, copy=True):
"""
A method to scale the values in the features_df using a sklearn scaler.
Input:
------
scaler (default: StandardScaler, sklearn scaler)
An sklearn scaler that will be fit to the numerical data of the features_df
copy (default: True, bool)
Whether or not to make a copy of the features_df. When False, overwrites the existing features_df in the class instance with the scaled version.
"""
# we cannot scale our node column (because it is object type)
        # and we don't want to scale our similarity_rank as it can cause strange reordering artifacts.
nodes = self.features_df.node
sim_rank = self.features_df.similarity_rank
node_and_sim_removed = self.features_df.drop(["node", "similarity_rank"], axis=1)
columns = node_and_sim_removed.columns
scaled_features = scaler().fit_transform(node_and_sim_removed)
        scaled_features = pd.DataFrame(scaled_features, columns=columns)
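        # Hedged completion (source truncated here): reattach the unscaled
        # columns and honor the documented `copy` semantics.
        scaled_features["node"] = nodes.values
        scaled_features["similarity_rank"] = sim_rank.values
        if not copy:
            self.features_df = scaled_features
        return scaled_features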
import numpy as np
import pandas as pd
from collections import OrderedDict
from pandas.api.types import is_numeric_dtype, is_object_dtype, is_categorical_dtype
from typing import List, Optional, Tuple, Callable
def inspect_df(df: pd.DataFrame) -> pd.DataFrame:
""" Show column types and null values in DataFrame df
"""
resdict = OrderedDict()
# Inspect nulls
null_series = df.isnull().sum()
resdict["column"] = null_series.index
resdict["null_fraction"] = np.round(null_series.values / len(df), 3)
resdict["nulls"] = null_series.values
# Inspect types
types = df.dtypes.values
type_names = [t.name for t in types]
resdict["type"] = type_names
# Is numeric?
is_numeric = []
for col in df.columns:
is_numeric.append(is_numeric_dtype(df[col]))
resdict["is_numeric"] = is_numeric
# Dataframe
resdf = pd.DataFrame(resdict)
resdf.sort_values("null_fraction", inplace=True)
resdf.reset_index(inplace=True, drop=True)
return resdf
def summarize_df(df: pd.DataFrame) -> pd.DataFrame:
""" Show stats;
- rows:
- column types
- columns
- number of columns
- number of cols containing NaN's
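        Example (illustrative):
            summarize_df(df)  # one summary row per dtype present in df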
"""
# Original DataFrame
(nrows, _) = df.shape
# Stats of DataFrame
stats = inspect_df(df)
data_types = np.unique(stats["type"].values)
resdict = OrderedDict()
# Column: data types
resdict["type"] = data_types
ncols_type = []
ncols_nan = []
n_nans = []
n_total = []
for dt in data_types:
# Column: number of columns with type
nc = len(stats[stats["type"] == dt])
ncols_type.append(nc)
# Column: number of columns with NaNs
nan_cols = stats[(stats["type"] == dt) & (stats["nulls"] > 0)]
ncols_nan.append(len(nan_cols))
# Column: number of NaNs
n_nans.append(nan_cols["nulls"].sum())
# Column: total number of values
n_total.append(nc * nrows)
# Prepare dict for the df
resdict["ncols"] = ncols_type
resdict["ncols_w_nans"] = ncols_nan
resdict["n_nans"] = n_nans
resdict["n_total"] = n_total
# Proportions of NaNs in each column group.
# Division by zero shouldn't occur
nan_frac = np.array(n_nans) / np.array(n_total)
resdict["nan_frac"] = np.round(nan_frac, 2)
resdf = pd.DataFrame(resdict)
resdf.sort_values("type", inplace=True)
resdf.reset_index(inplace=True, drop=True)
return resdf
def add_datefields(
df: pd.DataFrame,
column: str,
drop_original: bool = False,
inplace: bool = False,
attrs: Optional[List[str]] = None,
) -> pd.DataFrame:
""" Add attributes of the date to dataFrame df
"""
raw_date = df[column]
# Pandas datetime attributes
if attrs is None:
attributes = [
"dayofweek",
"dayofyear",
"is_month_end",
"is_month_start",
"is_quarter_end",
"is_quarter_start",
"quarter",
"week",
]
else:
attributes = attrs
# Return new?
if inplace:
resdf = df
else:
resdf = df.copy(deep=True)
# Could probably be optimized with pd.apply()
for attr in attributes:
new_column = f"{column}_{attr}"
# https://stackoverflow.com/questions/2612610/
new_vals = [getattr(d, attr) for d in raw_date]
resdf[new_column] = new_vals
if drop_original:
resdf.drop(columns=column, inplace=True)
return resdf
def add_nan_columns(
df: pd.DataFrame, inplace: bool = False, column_list: Optional[List[str]] = None
) -> pd.DataFrame:
""" For each column containing NaNs, add a boolean
column specifying if the column is NaN. Can be used
if the data is later imputated.
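        Example: a "price" column with NaNs gains a boolean "price_isnull" column.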
"""
if column_list is not None:
nan_columns = column_list
else:
# Get names of columns containing at least one NaN
temp = df.isnull().sum() != 0
nan_columns = temp.index[temp.values]
# Return new?
if inplace:
resdf = df
else:
resdf = df.copy(deep=True)
for column in nan_columns:
new_column = f"{column}_isnull"
nans = df[column].isnull()
resdf[new_column] = nans
return resdf
def numeric_nans(df: pd.DataFrame) -> pd.DataFrame:
""" Inspect numerical NaN values of a DataFrame df
"""
stats = inspect_df(df)
nan_stats = stats.loc[stats["is_numeric"] & (stats["nulls"] > 0)].copy(deep=True)
len_uniques = []
uniques = []
for row in nan_stats["column"].values:
uniq = np.unique(df[row][df[row].notnull()].values)
len_uniques.append(len(uniq))
uniques.append(uniq)
nan_stats["num_uniques"] = len_uniques
nan_stats["uniques"] = uniques
nan_stats.reset_index(inplace=True, drop=True)
return nan_stats
def categorize_df(
df: pd.DataFrame,
columns: Optional[List[str]] = None,
inplace: bool = False,
drop_original: bool = True,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
""" Categorize values in columns, and replace value with category.
If no columns are given, default to all 'object' columns
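        Example (illustrative):
            df_coded, cat_map = categorize_df(df, columns=["color"])
            # "color" becomes integer codes in "color_cat" (NaN -> 0);
            # cat_map holds the code/category mapping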
"""
if columns is not None:
cat_cols = columns
else:
cat_cols = df.columns[[dt.name == "object" for dt in df.dtypes.values]]
if inplace:
resdf = df
else:
resdf = df.copy(deep=True)
df_codes = []
df_cats = []
n_cats = []
for column in cat_cols:
new_column = f"{column}_cat"
cat_column = df[column].astype("category")
# By default, NaN is -1. We convert to zero by incrementing all.
col_codes = cat_column.cat.codes + 1
resdf[new_column] = col_codes
# DataFrame with the codes
df_codes.append(col_codes)
df_cats.append(cat_column.cat.categories)
n_cats.append(len(np.unique(col_codes)))
cat_dict = OrderedDict()
cat_dict["column"] = cat_cols
# MyPy picks up an error in the next line. Bug is where?
# Additionally, Flake8 will report the MyPy ignore as an error
cat_dict["n_categories"] = n_cats # type: ignore[assignment] # noqa: F821,F821
cat_dict["categories"] = df_cats
cat_dict["codes"] = df_codes
cat_df = pd.DataFrame(cat_dict)
if drop_original:
resdf.drop(columns=cat_cols, inplace=True)
return (resdf, cat_df)
def replace_numeric_nulls(
df: pd.DataFrame,
columns: Optional[List[str]] = None,
function: Callable = np.median,
inplace: bool = False,
) -> pd.DataFrame:
""" Replace nulls in all numerical column with the median (default) or
another callable function that works on NumPy arrays
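        Example (illustrative):
            df_filled = replace_numeric_nulls(df, function=np.mean)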
"""
if columns is None:
columns = [
        columns = [
            colname for colname, column in df.items() if is_numeric_dtype(column)
        ]
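    # Hedged completion (source truncated here), following the inplace pattern
    # used by the other helpers in this module.
    if inplace:
        resdf = df
    else:
        resdf = df.copy(deep=True)
    for column in columns:
        fill_value = function(resdf[column].dropna().values)
        resdf[column] = resdf[column].fillna(fill_value)
    return resdf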
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import re
import xlrd
import pickle
import os
import requests
from bs4 import BeautifulSoup  # needed below; or: import bs4 as bs
# import json
# In[ ]:
# In[2]:
# setting directories for file loads and saves
logs_dir = "./data/logs/"
raw_dir = "./data/raw/"
load_dir = save_dir = "./data/interim/"
final_dir = "./data/processed/"
# In[6]:
xl = pd.read_pickle(load_dir + "school_profiles_2008_2017_df.pickle")
xl.head()
# In[4]:
xl.info()
# # MySchool website, Sherwood State School
# In[5]:
url = "https://www.myschool.edu.au/school/46461/naplan/similar/2017"
# Packages the request, send the request and catch the response: r
r = requests.get(url)
# Extract the response: text
text = r.text
# Print the html
text[:500]
# In[6]:
# using BeautifulSoup, turn html into cleaner text
soup = BeautifulSoup(text, "lxml")
# In[7]:
print(soup.prettify()[:500])
# In[8]:
# Find all 'script' tags (which define scripts): scripts
scripts = soup.find_all('script')
# Print the school data <script> to the shell
#print(scripts[6])
# Save the school data <script> to a text file
# In[9]:
# adding data for POST to retrieve different SchoolYearId, DomainId and ViewModeId
payload = {"SchoolYearId" : "5", "DomainId" : "1", "ViewModeId" : "0"}
# retrieving the above from the same page as r
r510 = requests.post(url, data = payload)
# Extract the response: text
text510 = r510.text
# using BeautifulSoup, turn html into cleaner text
soup510 = BeautifulSoup(text510, "lxml")
# In[10]:
# Find all 'script' tags (which define scripts): scripts
scripts510 = soup510.find_all('script')
# Print the head of scipts to the shell
#print(scripts510)
# In[11]:
type(scripts[6])
# In[23]:
type(scripts[6].contents[0].string)
# In[27]:
# This gives a string which is writable to file
scripts[6].string[:50]
# In[26]:
# Writing to files
f = open("sherwood310.txt", mode = "w", encoding = "utf-8")
f.write(scripts[6].string)
f.close()
f = open("sherwood510.txt", mode = "w", encoding = "utf-8")
f.write(scripts510[6].string)
f.close()
# In[16]:
scripts[6].attrs
# In[29]:
s310 = scripts[6].string
type(s310)
# In[30]:
s310slash = re.sub(r'\\', '', s310)
s310colq = re.sub(r':"', ':', s310slash)
s310qcomma = re.sub(r'",', ',', s310colq)
# In[33]:
s310qcomma[:1000]
# In[35]:
s310up2data = re.sub(r'.*?data":', '', s310qcomma)
s310up2data[:500]
# In[74]:
s310apostro = re.sub(',"plotOptions(.*)', '', s310up2data)
s310datalist = re.sub("u0027", "'", s310apostro)
print(s310up2data[-500:])
s310datalist[-500:]
# In[ ]:
f = open('s310datalist.txt', mode = 'w', encoding = 'utf-8')
f.write(s310datalist)
# In[79]:
grade = 3
domain = 1
view = 0
year = 2017
# In[80]:
schoolIdList = re.findall(r'schoolId":(\d\d\d\d\d)', s310datalist)
meanList = re.findall(r'mean":(\d\d\d)', s310datalist)
lowerList = re.findall(r'lowerMargin":(\d\d\d)', s310datalist)
upperList = re.findall(r'upperMargin":(\d\d\d)', s310datalist)
sherwood_310_df = pd.DataFrame({'schoolId' : schoolIdList, 'grade' : grade,
'year' : year, 'mean' : meanList, 'lower' : lowerList,
'upper' : upperList})
sherwood_310_df.head()
# In[ ]:
sherwood_310_df.to_csv('sherwood_310_df.csv')
# # Generic school, from the all school IDs list
# In[81]:
# adding data for POST to retrieve similar school list (ViewModeId = 1)
payload = {"SchoolYearId" : "5", "DomainId" : "1", "ViewModeId" : "1"}
# retrieving the above from the same page as r
r511 = requests.post(url, data = payload)
# Extract the response: text
text511 = r511.text
# using BeautifulSoup, turn html into cleaner text
soup511 = BeautifulSoup(text511, "lxml")
# In[84]:
f = open('soup511_pretty.html', mode = 'w', encoding = 'utf-8')
f.write(soup511.prettify())
f.close()
# In[103]:
scripts511 = soup511.find_all('script')
s311schoolList = scripts511[6].string
# In[113]:
scripts511[6].string[-500:]
# In[117]:
similar_schools_2017 = re.findall(r'schoolId\\":(\d\d\d\d\d)', scripts511[6].string)
#similar_schools_2017
# ## School profiles 2008 - 2017
# In[50]:
file = load_dir + "school-profile-2008-2017.xlsx"
xl = pd.read_excel(file, sheet_name="School Profile")
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import decimal
import json
import multiprocessing as mp
from collections import OrderedDict
from datetime import date, datetime, time, timedelta
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.util.testing as tm
import pytest
import pyarrow as pa
import pyarrow.types as patypes
from pyarrow.compat import PY2
from .pandas_examples import dataframe_with_arrays, dataframe_with_lists
def _alltypes_example(size=100):
return pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16),
'uint32': np.arange(size, dtype=np.uint32),
'uint64': np.arange(size, dtype=np.uint64),
'int8': np.arange(size, dtype=np.int16),
'int16': np.arange(size, dtype=np.int16),
'int32': np.arange(size, dtype=np.int32),
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
        # TODO(wesm): Pandas only supports ns resolution, Arrow supports s, ms,
        # us, ns
'datetime': np.arange("2016-01-01T00:00:00.001", size,
dtype='datetime64[ms]'),
'str': [str(x) for x in range(size)],
'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
'empty_str': [''] * size
})
def _check_pandas_roundtrip(df, expected=None, use_threads=True,
expected_schema=None,
check_dtype=True, schema=None,
preserve_index=False,
as_batch=False):
klass = pa.RecordBatch if as_batch else pa.Table
table = klass.from_pandas(df, schema=schema,
preserve_index=preserve_index,
nthreads=2 if use_threads else 1)
result = table.to_pandas(use_threads=use_threads)
if expected_schema:
        # all occurrences of _check_pandas_roundtrip pass expected_schema
# without the pandas generated key-value metadata, so we need to
# add it before checking schema equality
expected_schema = expected_schema.add_metadata(table.schema.metadata)
assert table.schema.equals(expected_schema)
if expected is None:
expected = df
tm.assert_frame_equal(result, expected, check_dtype=check_dtype,
check_index_type=('equiv' if preserve_index
else False))
def _check_series_roundtrip(s, type_=None, expected_pa_type=None):
arr = pa.array(s, from_pandas=True, type=type_)
if type_ is not None and expected_pa_type is None:
expected_pa_type = type_
if expected_pa_type is not None:
assert arr.type == expected_pa_type
result = pd.Series(arr.to_pandas(), name=s.name)
if patypes.is_timestamp(arr.type) and arr.type.tz is not None:
result = (result.dt.tz_localize('utc')
.dt.tz_convert(arr.type.tz))
tm.assert_series_equal(s, result)
def _check_array_roundtrip(values, expected=None, mask=None,
type=None):
arr = pa.array(values, from_pandas=True, mask=mask, type=type)
result = arr.to_pandas()
values_nulls = pd.isnull(values)
if mask is None:
assert arr.null_count == values_nulls.sum()
else:
assert arr.null_count == (mask | values_nulls).sum()
if mask is None:
tm.assert_series_equal(pd.Series(result), pd.Series(values),
check_names=False)
else:
expected = pd.Series(np.ma.masked_array(values, mask=mask))
tm.assert_series_equal(pd.Series(result), expected,
check_names=False)
def _check_array_from_pandas_roundtrip(np_array, type=None):
arr = pa.array(np_array, from_pandas=True, type=type)
result = arr.to_pandas()
npt.assert_array_equal(result, np_array)
class TestConvertMetadata(object):
"""
Conversion tests for Pandas metadata & indices.
"""
def test_non_string_columns(self):
df = pd.DataFrame({0: [1, 2, 3]})
table = pa.Table.from_pandas(df)
assert table.column(0).name == '0'
def test_from_pandas_with_columns(self):
df = pd.DataFrame({0: [1, 2, 3], 1: [1, 3, 3], 2: [2, 4, 5]})
table = pa.Table.from_pandas(df, columns=[0, 1])
expected = pa.Table.from_pandas(df[[0, 1]])
assert expected.equals(table)
record_batch_table = pa.RecordBatch.from_pandas(df, columns=[0, 1])
record_batch_expected = pa.RecordBatch.from_pandas(df[[0, 1]])
assert record_batch_expected.equals(record_batch_table)
def test_column_index_names_are_preserved(self):
df = pd.DataFrame({'data': [1, 2, 3]})
df.columns.names = ['a']
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns(self):
columns = pd.MultiIndex.from_arrays([
['one', 'two'], ['X', 'Y']
])
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns_with_dtypes(self):
columns = pd.MultiIndex.from_arrays(
[
['one', 'two'],
pd.DatetimeIndex(['2017-08-01', '2017-08-02']),
],
names=['level_1', 'level_2'],
)
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns_unicode(self):
columns = pd.MultiIndex.from_arrays([[u'あ', u'い'], ['X', 'Y']])
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_integer_index_column(self):
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')])
_check_pandas_roundtrip(df, preserve_index=True)
def test_index_metadata_field_name(self):
# test None case, and strangely named non-index columns
df = pd.DataFrame(
[(1, 'a', 3.1), (2, 'b', 2.2), (3, 'c', 1.3)],
index=pd.MultiIndex.from_arrays(
[['c', 'b', 'a'], [3, 2, 1]],
names=[None, 'foo']
),
columns=['a', None, '__index_level_0__'],
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
col1, col2, col3, idx0, foo = js['columns']
assert col1['name'] == 'a'
assert col1['name'] == col1['field_name']
assert col2['name'] is None
assert col2['field_name'] == 'None'
assert col3['name'] == '__index_level_0__'
assert col3['name'] == col3['field_name']
idx0_name, foo_name = js['index_columns']
assert idx0_name == '__index_level_0__'
assert idx0['field_name'] == idx0_name
assert idx0['name'] is None
assert foo_name == 'foo'
assert foo['field_name'] == foo_name
assert foo['name'] == foo_name
def test_categorical_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.Index(list('def'), dtype='category')
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] is None
assert column_indexes['pandas_type'] == 'categorical'
assert column_indexes['numpy_type'] == 'int8'
md = column_indexes['metadata']
assert md['num_categories'] == 3
assert md['ordered'] is False
def test_string_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.Index(list('def'), name='stringz')
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] == 'stringz'
assert column_indexes['name'] == column_indexes['field_name']
assert column_indexes['pandas_type'] == ('bytes' if PY2 else 'unicode')
assert column_indexes['numpy_type'] == 'object'
md = column_indexes['metadata']
if not PY2:
assert len(md) == 1
assert md['encoding'] == 'UTF-8'
else:
assert md is None or 'encoding' not in md
def test_datetimetz_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.date_range(
start='2017-01-01', periods=3, tz='America/New_York'
)
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] is None
assert column_indexes['pandas_type'] == 'datetimetz'
assert column_indexes['numpy_type'] == 'datetime64[ns]'
md = column_indexes['metadata']
assert md['timezone'] == 'America/New_York'
def test_datetimetz_row_index(self):
df = pd.DataFrame({
'a': pd.date_range(
start='2017-01-01', periods=3, tz='America/New_York'
)
})
df = df.set_index('a')
_check_pandas_roundtrip(df, preserve_index=True)
def test_categorical_row_index(self):
df = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]})
df['a'] = df.a.astype('category')
df = df.set_index('a')
_check_pandas_roundtrip(df, preserve_index=True)
def test_duplicate_column_names_does_not_crash(self):
df = pd.DataFrame([(1, 'a'), (2, 'b')], columns=list('aa'))
with pytest.raises(ValueError):
pa.Table.from_pandas(df)
def test_dictionary_indices_boundscheck(self):
# ARROW-1658. No validation of indices leads to segfaults in pandas
indices = [[0, 1], [0, -1]]
for inds in indices:
arr = pa.DictionaryArray.from_arrays(inds, ['a'], safe=False)
batch = pa.RecordBatch.from_arrays([arr], ['foo'])
table = pa.Table.from_batches([batch, batch, batch])
with pytest.raises(pa.ArrowInvalid):
arr.to_pandas()
with pytest.raises(pa.ArrowInvalid):
table.to_pandas()
def test_unicode_with_unicode_column_and_index(self):
df = pd.DataFrame({u'あ': [u'い']}, index=[u'う'])
_check_pandas_roundtrip(df, preserve_index=True)
def test_mixed_unicode_column_names(self):
df = pd.DataFrame({u'あ': [u'い'], b'a': 1}, index=[u'う'])
# TODO(phillipc): Should this raise?
with pytest.raises(AssertionError):
_check_pandas_roundtrip(df, preserve_index=True)
def test_binary_column_name(self):
column_data = [u'い']
key = u'あ'.encode('utf8')
data = {key: column_data}
df = pd.DataFrame(data)
        # we can't use _check_pandas_roundtrip here because our metadata
# is always decoded as utf8: even if binary goes in, utf8 comes out
t = pa.Table.from_pandas(df, preserve_index=True)
df2 = t.to_pandas()
assert df.values[0] == df2.values[0]
assert df.index.values[0] == df2.index.values[0]
assert df.columns[0] == key
def test_multiindex_duplicate_values(self):
num_rows = 3
numbers = list(range(num_rows))
index = pd.MultiIndex.from_arrays(
[['foo', 'foo', 'bar'], numbers],
names=['foobar', 'some_numbers'],
)
df = pd.DataFrame({'numbers': numbers}, index=index)
table = pa.Table.from_pandas(df)
result_df = table.to_pandas()
tm.assert_frame_equal(result_df, df)
def test_metadata_with_mixed_types(self):
df = pd.DataFrame({'data': [b'some_bytes', u'some_unicode']})
table = pa.Table.from_pandas(df)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'bytes'
assert data_column['numpy_type'] == 'object'
def test_list_metadata(self):
df = pd.DataFrame({'data': [[1], [2, 3, 4], [5] * 7]})
schema = pa.schema([pa.field('data', type=pa.list_(pa.int64()))])
table = pa.Table.from_pandas(df, schema=schema)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'list[int64]'
assert data_column['numpy_type'] == 'object'
def test_decimal_metadata(self):
expected = pd.DataFrame({
'decimals': [
decimal.Decimal('394092382910493.12341234678'),
-decimal.Decimal('314292388910493.12343437128'),
]
})
table = pa.Table.from_pandas(expected)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'decimal'
assert data_column['numpy_type'] == 'object'
assert data_column['metadata'] == {'precision': 26, 'scale': 11}
def test_table_column_subset_metadata(self):
# ARROW-1883
df = pd.DataFrame({
'a': [1, 2, 3],
'b': pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')})
table = pa.Table.from_pandas(df)
table_subset = table.remove_column(1)
result = table_subset.to_pandas()
tm.assert_frame_equal(result, df[['a']])
table_subset2 = table_subset.remove_column(1)
result = table_subset2.to_pandas()
tm.assert_frame_equal(result, df[['a']])
# non-default index
for index in [
pd.Index(['a', 'b', 'c'], name='index'),
pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')]:
df = pd.DataFrame({'a': [1, 2, 3],
'b': [.1, .2, .3]}, index=index)
table = pa.Table.from_pandas(df)
table_subset = table.remove_column(1)
result = table_subset.to_pandas()
tm.assert_frame_equal(result, df[['a']])
table_subset2 = table_subset.remove_column(1)
result = table_subset2.to_pandas()
tm.assert_frame_equal(result, df[['a']].reset_index(drop=True))
def test_empty_list_metadata(self):
# Create table with array of empty lists, forced to have type
# list(string) in pyarrow
c1 = [["test"], ["a", "b"], None]
c2 = [[], [], []]
arrays = OrderedDict([
('c1', pa.array(c1, type=pa.list_(pa.string()))),
('c2', pa.array(c2, type=pa.list_(pa.string()))),
])
rb = pa.RecordBatch.from_arrays(
list(arrays.values()),
list(arrays.keys())
)
tbl = pa.Table.from_batches([rb])
# First roundtrip changes schema, because pandas cannot preserve the
# type of empty lists
df = tbl.to_pandas()
tbl2 = pa.Table.from_pandas(df, preserve_index=True)
md2 = json.loads(tbl2.schema.metadata[b'pandas'].decode('utf8'))
# Second roundtrip
df2 = tbl2.to_pandas()
expected = pd.DataFrame(OrderedDict([('c1', c1), ('c2', c2)]))
tm.assert_frame_equal(df2, expected)
assert md2['columns'] == [
{
'name': 'c1',
'field_name': 'c1',
'metadata': None,
'numpy_type': 'object',
'pandas_type': 'list[unicode]',
},
{
'name': 'c2',
'field_name': 'c2',
'metadata': None,
'numpy_type': 'object',
'pandas_type': 'list[empty]',
},
{
'name': None,
'field_name': '__index_level_0__',
'metadata': None,
'numpy_type': 'int64',
'pandas_type': 'int64',
}
]
class TestConvertPrimitiveTypes(object):
"""
Conversion tests for primitive (e.g. numeric) types.
"""
def test_float_no_nulls(self):
data = {}
fields = []
dtypes = [('f2', pa.float16()),
('f4', pa.float32()),
('f8', pa.float64())]
num_values = 100
for numpy_dtype, arrow_dtype in dtypes:
values = np.random.randn(num_values)
data[numpy_dtype] = values.astype(numpy_dtype)
fields.append(pa.field(numpy_dtype, arrow_dtype))
df = pd.DataFrame(data)
schema = pa.schema(fields)
_check_pandas_roundtrip(df, expected_schema=schema)
def test_float_nulls(self):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
dtypes = [('f2', pa.float16()),
('f4', pa.float32()),
('f8', pa.float64())]
names = ['f2', 'f4', 'f8']
expected_cols = []
arrays = []
fields = []
for name, arrow_dtype in dtypes:
values = np.random.randn(num_values).astype(name)
arr = pa.array(values, from_pandas=True, mask=null_mask)
arrays.append(arr)
fields.append(pa.field(name, arrow_dtype))
values[null_mask] = np.nan
expected_cols.append(values)
ex_frame = pd.DataFrame(dict(zip(names, expected_cols)),
columns=names)
table = pa.Table.from_arrays(arrays, names)
assert table.schema.equals(pa.schema(fields))
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_float_nulls_to_ints(self):
# ARROW-2135
df = pd.DataFrame({"a": [1.0, 2.0, pd.np.NaN]})
schema = pa.schema([pa.field("a", pa.int16(), nullable=True)])
table = pa.Table.from_pandas(df, schema=schema, safe=False)
assert table[0].to_pylist() == [1, 2, None]
tm.assert_frame_equal(df, table.to_pandas())
def test_integer_no_nulls(self):
data = OrderedDict()
fields = []
numpy_dtypes = [
('i1', pa.int8()), ('i2', pa.int16()),
('i4', pa.int32()), ('i8', pa.int64()),
('u1', pa.uint8()), ('u2', pa.uint16()),
('u4', pa.uint32()), ('u8', pa.uint64()),
('longlong', pa.int64()), ('ulonglong', pa.uint64())
]
num_values = 100
for dtype, arrow_dtype in numpy_dtypes:
info = np.iinfo(dtype)
values = np.random.randint(max(info.min, np.iinfo(np.int_).min),
min(info.max, np.iinfo(np.int_).max),
size=num_values)
data[dtype] = values.astype(dtype)
fields.append(pa.field(dtype, arrow_dtype))
df = pd.DataFrame(data)
schema = pa.schema(fields)
_check_pandas_roundtrip(df, expected_schema=schema)
def test_all_integer_types(self):
# Test all Numpy integer aliases
data = OrderedDict()
numpy_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8',
'byte', 'ubyte', 'short', 'ushort', 'intc', 'uintc',
'int_', 'uint', 'longlong', 'ulonglong']
for dtype in numpy_dtypes:
data[dtype] = np.arange(12, dtype=dtype)
df = pd.DataFrame(data)
_check_pandas_roundtrip(df)
# Do the same with pa.array()
# (for some reason, it doesn't use the same code paths at all)
for np_arr in data.values():
arr = pa.array(np_arr)
assert arr.to_pylist() == np_arr.tolist()
def test_integer_with_nulls(self):
# pandas requires upcast to float dtype
int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
expected_cols = []
arrays = []
for name in int_dtypes:
values = np.random.randint(0, 100, size=num_values)
arr = pa.array(values, mask=null_mask)
arrays.append(arr)
expected = values.astype('f8')
expected[null_mask] = np.nan
expected_cols.append(expected)
ex_frame = pd.DataFrame(dict(zip(int_dtypes, expected_cols)),
columns=int_dtypes)
table = pa.Table.from_arrays(arrays, int_dtypes)
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_array_from_pandas_type_cast(self):
arr = np.arange(10, dtype='int64')
target_type = pa.int8()
result = pa.array(arr, type=target_type)
expected = pa.array(arr.astype('int8'))
assert result.equals(expected)
def test_boolean_no_nulls(self):
num_values = 100
np.random.seed(0)
df = pd.DataFrame({'bools': np.random.randn(num_values) > 0})
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_boolean_nulls(self):
# pandas requires upcast to object dtype
num_values = 100
np.random.seed(0)
mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 10, size=num_values) < 5
arr = pa.array(values, mask=mask)
expected = values.astype(object)
expected[mask] = None
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
ex_frame = pd.DataFrame({'bools': expected})
table = pa.Table.from_arrays([arr], ['bools'])
assert table.schema.equals(schema)
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_float_object_nulls(self):
arr = np.array([None, 1.5, np.float64(3.5)] * 5, dtype=object)
df = pd.DataFrame({'floats': arr})
expected = pd.DataFrame({'floats': pd.to_numeric(arr)})
field = pa.field('floats', pa.float64())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected=expected,
expected_schema=schema)
def test_int_object_nulls(self):
arr = np.array([None, 1, np.int64(3)] * 5, dtype=object)
df = pd.DataFrame({'ints': arr})
expected = pd.DataFrame({'ints': pd.to_numeric(arr)})
field = pa.field('ints', pa.int64())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected=expected,
expected_schema=schema)
def test_boolean_object_nulls(self):
arr = np.array([False, None, True] * 100, dtype=object)
df = pd.DataFrame({'bools': arr})
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_all_nulls_cast_numeric(self):
arr = np.array([None], dtype=object)
def _check_type(t):
a2 = pa.array(arr, type=t)
assert a2.type == t
assert a2[0].as_py() is None
_check_type(pa.int32())
_check_type(pa.float64())
def test_half_floats_from_numpy(self):
arr = np.array([1.5, np.nan], dtype=np.float16)
a = pa.array(arr, type=pa.float16())
x, y = a.to_pylist()
assert isinstance(x, np.float16)
assert x == 1.5
assert isinstance(y, np.float16)
assert np.isnan(y)
a = pa.array(arr, type=pa.float16(), from_pandas=True)
x, y = a.to_pylist()
assert isinstance(x, np.float16)
assert x == 1.5
assert y is None
@pytest.mark.parametrize('dtype',
['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'])
def test_array_integer_object_nulls_option(dtype):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 100, size=num_values, dtype=dtype)
array = pa.array(values, mask=null_mask)
if null_mask.any():
expected = values.astype('O')
expected[null_mask] = None
else:
expected = values
result = array.to_pandas(integer_object_nulls=True)
np.testing.assert_equal(result, expected)
@pytest.mark.parametrize('dtype',
['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'])
def test_table_integer_object_nulls_option(dtype):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 100, size=num_values, dtype=dtype)
array = pa.array(values, mask=null_mask)
if null_mask.any():
expected = values.astype('O')
expected[null_mask] = None
else:
expected = values
expected = pd.DataFrame({dtype: expected})
table = pa.Table.from_arrays([array], [dtype])
result = table.to_pandas(integer_object_nulls=True)
tm.assert_frame_equal(result, expected)
class TestConvertDateTimeLikeTypes(object):
"""
Conversion tests for datetime- and timestamp-like types (date64, etc.).
"""
def test_timestamps_notimezone_no_nulls(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
field = pa.field('datetime64', pa.timestamp('ns'))
schema = pa.schema([field])
_check_pandas_roundtrip(
df,
expected_schema=schema,
)
def test_timestamps_notimezone_nulls(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
None,
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
field = pa.field('datetime64', pa.timestamp('ns'))
schema = pa.schema([field])
_check_pandas_roundtrip(
df,
expected_schema=schema,
)
def test_timestamps_with_timezone(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123',
'2006-01-13T12:34:56.432',
'2010-08-13T05:46:57.437'],
dtype='datetime64[ms]')
})
df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern')
.to_frame())
_check_pandas_roundtrip(df)
_check_series_roundtrip(df['datetime64'])
# drop-in a null and ns instead of ms
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
None,
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern')
.to_frame())
_check_pandas_roundtrip(df)
def test_python_datetime(self):
# ARROW-2106
date_array = [datetime.today() + timedelta(days=x) for x in range(10)]
df = pd.DataFrame({
'datetime': pd.Series(date_array, dtype=object)
})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.TimestampArray)
result = table.to_pandas()
expected_df = pd.DataFrame({
'datetime': date_array
})
tm.assert_frame_equal(expected_df, result)
def test_python_datetime_subclass(self):
class MyDatetime(datetime):
# see https://github.com/pandas-dev/pandas/issues/21142
nanosecond = 0.0
date_array = [MyDatetime(2000, 1, 1, 1, 1, 1)]
df = pd.DataFrame({"datetime": pd.Series(date_array, dtype=object)})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.TimestampArray)
result = table.to_pandas()
expected_df = pd.DataFrame({"datetime": date_array})
# https://github.com/pandas-dev/pandas/issues/21142
expected_df["datetime"] = pd.to_datetime(expected_df["datetime"])
tm.assert_frame_equal(expected_df, result)
def test_python_date_subclass(self):
class MyDate(date):
pass
date_array = [MyDate(2000, 1, 1)]
df = pd.DataFrame({"date": pd.Series(date_array, dtype=object)})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.Date32Array)
result = table.to_pandas()
expected_df = pd.DataFrame(
{"date": np.array(["2000-01-01"], dtype="datetime64[ns]")}
)
tm.assert_frame_equal(expected_df, result)
def test_datetime64_to_date32(self):
# ARROW-1718
arr = pa.array([date(2017, 10, 23), None])
c = pa.Column.from_array("d", arr)
s = c.to_pandas()
arr2 = pa.Array.from_pandas(s, type=pa.date32())
assert arr2.equals(arr.cast('date32'))
@pytest.mark.parametrize('mask', [
None,
np.array([True, False, False]),
])
def test_pandas_datetime_to_date64(self, mask):
s = pd.to_datetime([
'2018-05-10T00:00:00',
'2018-05-11T00:00:00',
'2018-05-12T00:00:00',
])
arr = pa.Array.from_pandas(s, type=pa.date64(), mask=mask)
data = np.array([
date(2018, 5, 10),
date(2018, 5, 11),
date(2018, 5, 12)
])
expected = pa.array(data, mask=mask, type=pa.date64())
assert arr.equals(expected)
@pytest.mark.parametrize('mask', [
None,
np.array([True, False, False])
])
def test_pandas_datetime_to_date64_failures(self, mask):
s = pd.to_datetime([
'2018-05-10T10:24:01',
'2018-05-11T10:24:01',
'2018-05-12T10:24:01',
])
expected_msg = 'Timestamp value had non-zero intraday milliseconds'
with pytest.raises(pa.ArrowInvalid, match=expected_msg):
pa.Array.from_pandas(s, type=pa.date64(), mask=mask)
def test_array_date_as_object(self):
data = [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]
expected = np.array(['2000-01-01',
None,
'1970-01-01',
'2040-02-26'], dtype='datetime64')
arr = pa.array(data)
assert arr.equals(pa.array(expected))
result = arr.to_pandas()
assert result.dtype == expected.dtype
npt.assert_array_equal(arr.to_pandas(), expected)
result = arr.to_pandas(date_as_object=True)
expected = expected.astype(object)
assert result.dtype == expected.dtype
npt.assert_array_equal(result, expected)
def test_chunked_array_convert_date_as_object(self):
data = [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]
expected = np.array(['2000-01-01',
None,
'1970-01-01',
'2040-02-26'], dtype='datetime64')
carr = pa.chunked_array([data])
result = carr.to_pandas()
assert result.dtype == expected.dtype
npt.assert_array_equal(carr.to_pandas(), expected)
result = carr.to_pandas(date_as_object=True)
expected = expected.astype(object)
assert result.dtype == expected.dtype
npt.assert_array_equal(result, expected)
def test_column_convert_date_as_object(self):
data = [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]
expected = np.array(['2000-01-01',
None,
'1970-01-01',
'2040-02-26'], dtype='datetime64')
arr = pa.array(data)
column = pa.column('date', arr)
result = column.to_pandas()
npt.assert_array_equal(column.to_pandas(), expected)
result = column.to_pandas(date_as_object=True)
expected = expected.astype(object)
assert result.dtype == expected.dtype
npt.assert_array_equal(result, expected)
def test_table_convert_date_as_object(self):
df = pd.DataFrame({
'date': [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]})
table = pa.Table.from_pandas(df, preserve_index=False)
df_datetime = table.to_pandas()
df_object = table.to_pandas(date_as_object=True)
tm.assert_frame_equal(df.astype('datetime64[ns]'), df_datetime,
check_dtype=True)
tm.assert_frame_equal(df, df_object, check_dtype=True)
def test_date_infer(self):
df = pd.DataFrame({
'date': [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]})
table = pa.Table.from_pandas(df, preserve_index=False)
field = pa.field('date', pa.date32())
# schema's metadata is generated by from_pandas conversion
expected_schema = pa.schema([field], metadata=table.schema.metadata)
assert table.schema.equals(expected_schema)
result = table.to_pandas()
expected = df.copy()
expected['date'] = pd.to_datetime(df['date'])
tm.assert_frame_equal(result, expected)
def test_date_mask(self):
arr = np.array([date(2017, 4, 3), date(2017, 4, 4)],
dtype='datetime64[D]')
mask = [True, False]
result = pa.array(arr, mask=np.array(mask))
expected = np.array([None, date(2017, 4, 4)], dtype='datetime64[D]')
expected = pa.array(expected, from_pandas=True)
assert expected.equals(result)
def test_date_objects_typed(self):
arr = np.array([
date(2017, 4, 3),
None,
date(2017, 4, 4),
date(2017, 4, 5)], dtype=object)
arr_i4 = np.array([17259, -1, 17260, 17261], dtype='int32')
arr_i8 = arr_i4.astype('int64') * 86400000
mask = np.array([False, True, False, False])
t32 = pa.date32()
t64 = pa.date64()
a32 = pa.array(arr, type=t32)
a64 = pa.array(arr, type=t64)
a32_expected = pa.array(arr_i4, mask=mask, type=t32)
a64_expected = pa.array(arr_i8, mask=mask, type=t64)
assert a32.equals(a32_expected)
assert a64.equals(a64_expected)
# Test converting back to pandas
colnames = ['date32', 'date64']
table = pa.Table.from_arrays([a32, a64], colnames)
table_pandas = table.to_pandas()
ex_values = (np.array(['2017-04-03', '2017-04-04', '2017-04-04',
'2017-04-05'],
dtype='datetime64[D]')
.astype('datetime64[ns]'))
ex_values[1] = pd.NaT.value
expected_pandas = pd.DataFrame({'date32': ex_values,
'date64': ex_values},
columns=colnames)
tm.assert_frame_equal(table_pandas, expected_pandas)
def test_dates_from_integers(self):
t1 = pa.date32()
t2 = pa.date64()
arr = np.array([17259, 17260, 17261], dtype='int32')
arr2 = arr.astype('int64') * 86400000
a1 = pa.array(arr, type=t1)
a2 = pa.array(arr2, type=t2)
expected = date(2017, 4, 3)
assert a1[0].as_py() == expected
assert a2[0].as_py() == expected
@pytest.mark.xfail(reason="not supported ATM",
raises=NotImplementedError)
def test_timedelta(self):
        # TODO(jreback): Pandas only supports ns resolution
# Arrow supports ??? for resolution
df = pd.DataFrame({
'timedelta': np.arange(start=0, stop=3 * 86400000,
step=86400000,
dtype='timedelta64[ms]')
})
pa.Table.from_pandas(df)
def test_pytime_from_pandas(self):
pytimes = [time(1, 2, 3, 1356),
time(4, 5, 6, 1356)]
# microseconds
t1 = pa.time64('us')
aobjs = np.array(pytimes + [None], dtype=object)
parr = pa.array(aobjs)
assert parr.type == t1
assert parr[0].as_py() == pytimes[0]
assert parr[1].as_py() == pytimes[1]
assert parr[2] is pa.NA
# DataFrame
df = pd.DataFrame({'times': aobjs})
batch = pa.RecordBatch.from_pandas(df)
assert batch[0].equals(parr)
# Test ndarray of int64 values
arr = np.array([_pytime_to_micros(v) for v in pytimes],
dtype='int64')
a1 = pa.array(arr, type=pa.time64('us'))
assert a1[0].as_py() == pytimes[0]
a2 = pa.array(arr * 1000, type=pa.time64('ns'))
assert a2[0].as_py() == pytimes[0]
a3 = pa.array((arr / 1000).astype('i4'),
type=pa.time32('ms'))
assert a3[0].as_py() == pytimes[0].replace(microsecond=1000)
a4 = pa.array((arr / 1000000).astype('i4'),
type=pa.time32('s'))
assert a4[0].as_py() == pytimes[0].replace(microsecond=0)
def test_arrow_time_to_pandas(self):
pytimes = [time(1, 2, 3, 1356),
time(4, 5, 6, 1356),
time(0, 0, 0)]
expected = np.array(pytimes[:2] + [None])
expected_ms = np.array([x.replace(microsecond=1000)
for x in pytimes[:2]] +
[None])
expected_s = np.array([x.replace(microsecond=0)
for x in pytimes[:2]] +
[None])
arr = np.array([_pytime_to_micros(v) for v in pytimes],
dtype='int64')
null_mask = np.array([False, False, True], dtype=bool)
a1 = pa.array(arr, mask=null_mask, type=pa.time64('us'))
a2 = pa.array(arr * 1000, mask=null_mask,
type=pa.time64('ns'))
a3 = pa.array((arr / 1000).astype('i4'), mask=null_mask,
type=pa.time32('ms'))
a4 = pa.array((arr / 1000000).astype('i4'), mask=null_mask,
type=pa.time32('s'))
names = ['time64[us]', 'time64[ns]', 'time32[ms]', 'time32[s]']
batch = pa.RecordBatch.from_arrays([a1, a2, a3, a4], names)
arr = a1.to_pandas()
assert (arr == expected).all()
arr = a2.to_pandas()
assert (arr == expected).all()
arr = a3.to_pandas()
assert (arr == expected_ms).all()
arr = a4.to_pandas()
assert (arr == expected_s).all()
df = batch.to_pandas()
expected_df = pd.DataFrame({'time64[us]': expected,
'time64[ns]': expected,
'time32[ms]': expected_ms,
'time32[s]': expected_s},
columns=names)
tm.assert_frame_equal(df, expected_df)
def test_numpy_datetime64_columns(self):
datetime64_ns = np.array([
'2007-07-13T01:23:34.123456789',
None,
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
_check_array_from_pandas_roundtrip(datetime64_ns)
datetime64_us = np.array([
'2007-07-13T01:23:34.123456',
None,
'2006-01-13T12:34:56.432539',
'2010-08-13T05:46:57.437699'],
dtype='datetime64[us]')
_check_array_from_pandas_roundtrip(datetime64_us)
datetime64_ms = np.array([
'2007-07-13T01:23:34.123',
None,
'2006-01-13T12:34:56.432',
'2010-08-13T05:46:57.437'],
dtype='datetime64[ms]')
_check_array_from_pandas_roundtrip(datetime64_ms)
datetime64_s = np.array([
'2007-07-13T01:23:34',
None,
'2006-01-13T12:34:56',
'2010-08-13T05:46:57'],
dtype='datetime64[s]')
_check_array_from_pandas_roundtrip(datetime64_s)
@pytest.mark.parametrize('dtype', [pa.date32(), pa.date64()])
def test_numpy_datetime64_day_unit(self, dtype):
datetime64_d = np.array([
'2007-07-13',
None,
'2006-01-15',
'2010-08-19'],
dtype='datetime64[D]')
_check_array_from_pandas_roundtrip(datetime64_d, type=dtype)
def test_array_from_pandas_date_with_mask(self):
m = np.array([True, False, True])
data = pd.Series([
date(1990, 1, 1),
date(1991, 1, 1),
date(1992, 1, 1)
])
result = pa.Array.from_pandas(data, mask=m)
expected = pd.Series([None, date(1991, 1, 1), None])
assert pa.Array.from_pandas(expected).equals(result)
def test_fixed_offset_timezone(self):
df = pd.DataFrame({
'a': [
pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.NaT
]
})
_check_pandas_roundtrip(df)
_check_serialize_components_roundtrip(df)
# ----------------------------------------------------------------------
# Conversion tests for string and binary types.
class TestConvertStringLikeTypes(object):
def test_pandas_unicode(self):
repeats = 1000
values = [u'foo', None, u'bar', u'mañana', np.nan]
df = pd.DataFrame({'strings': values * repeats})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_bytes_to_binary(self):
values = [u'qux', b'foo', None, bytearray(b'barz'), 'qux', np.nan]
df = pd.DataFrame({'strings': values})
table = pa.Table.from_pandas(df)
assert table[0].type == pa.binary()
values2 = [b'qux', b'foo', None, b'barz', b'qux', np.nan]
expected = pd.DataFrame({'strings': values2})
_check_pandas_roundtrip(df, expected)
@pytest.mark.large_memory
def test_bytes_exceed_2gb(self):
v1 = b'x' * 100000000
v2 = b'x' * 147483646
# ARROW-2227, hit exactly 2GB on the nose
df = pd.DataFrame({
'strings': [v1] * 20 + [v2] + ['x'] * 20
})
arr = pa.array(df['strings'])
assert isinstance(arr, pa.ChunkedArray)
assert arr.num_chunks == 2
arr = None
table = pa.Table.from_pandas(df)
assert table[0].data.num_chunks == 2
def test_fixed_size_bytes(self):
values = [b'foo', None, bytearray(b'bar'), None, None, b'hey']
df = pd.DataFrame({'strings': values})
schema = pa.schema([pa.field('strings', pa.binary(3))])
table = pa.Table.from_pandas(df, schema=schema)
assert table.schema[0].type == schema[0].type
assert table.schema[0].name == schema[0].name
result = table.to_pandas()
tm.assert_frame_equal(result, df)
def test_fixed_size_bytes_does_not_accept_varying_lengths(self):
values = [b'foo', None, b'ba', None, None, b'hey']
df = pd.DataFrame({'strings': values})
schema = pa.schema([pa.field('strings', pa.binary(3))])
with pytest.raises(pa.ArrowInvalid):
pa.Table.from_pandas(df, schema=schema)
def test_variable_size_bytes(self):
s = pd.Series([b'123', b'', b'a', None])
_check_series_roundtrip(s, type_=pa.binary())
def test_binary_from_bytearray(self):
s = pd.Series([bytearray(b'123'), bytearray(b''), bytearray(b'a'),
None])
# Explicitly set type
_check_series_roundtrip(s, type_=pa.binary())
# Infer type from bytearrays
_check_series_roundtrip(s, expected_pa_type=pa.binary())
def test_table_empty_str(self):
values = ['', '', '', '', '']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result1 = table.to_pandas(strings_to_categorical=False)
expected1 = pd.DataFrame({'strings': values})
tm.assert_frame_equal(result1, expected1, check_dtype=True)
result2 = table.to_pandas(strings_to_categorical=True)
expected2 = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result2, expected2, check_dtype=True)
def test_selective_categoricals(self):
values = ['', '', '', '', '']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
expected_str = pd.DataFrame({'strings': values})
expected_cat = pd.DataFrame({'strings': pd.Categorical(values)})
result1 = table.to_pandas(categories=['strings'])
tm.assert_frame_equal(result1, expected_cat, check_dtype=True)
result2 = table.to_pandas(categories=[])
tm.assert_frame_equal(result2, expected_str, check_dtype=True)
result3 = table.to_pandas(categories=('strings',))
tm.assert_frame_equal(result3, expected_cat, check_dtype=True)
result4 = table.to_pandas(categories=tuple())
tm.assert_frame_equal(result4, expected_str, check_dtype=True)
def test_table_str_to_categorical_without_na(self):
values = ['a', 'a', 'b', 'b', 'c']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result = table.to_pandas(strings_to_categorical=True)
expected = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result, expected, check_dtype=True)
with pytest.raises(pa.ArrowInvalid):
table.to_pandas(strings_to_categorical=True,
zero_copy_only=True)
def test_table_str_to_categorical_with_na(self):
values = [None, 'a', 'b', np.nan]
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result = table.to_pandas(strings_to_categorical=True)
expected = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result, expected, check_dtype=True)
with pytest.raises(pa.ArrowInvalid):
table.to_pandas(strings_to_categorical=True,
zero_copy_only=True)
# Regression test for ARROW-2101
def test_array_of_bytes_to_strings(self):
converted = pa.array(np.array([b'x'], dtype=object), pa.string())
assert converted.type == pa.string()
# Make sure that if an ndarray of bytes is passed to the array
# constructor and the type is string, it will fail if those bytes
# cannot be converted to utf-8
def test_array_of_bytes_to_strings_bad_data(self):
with pytest.raises(
pa.lib.ArrowInvalid,
match="was not a utf8 string"):
pa.array(np.array([b'\x80\x81'], dtype=object), pa.string())
def test_numpy_string_array_to_fixed_size_binary(self):
arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3')
converted = pa.array(arr, type=pa.binary(3))
expected = pa.array(list(arr), type=pa.binary(3))
assert converted.equals(expected)
mask = np.array([True, False, True])
converted = pa.array(arr, type=pa.binary(3), mask=mask)
expected = pa.array([b'foo', None, b'baz'], type=pa.binary(3))
assert converted.equals(expected)
with pytest.raises(pa.lib.ArrowInvalid,
match=r'Got bytestring of length 3 \(expected 4\)'):
arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3')
pa.array(arr, type=pa.binary(4))
with pytest.raises(
pa.lib.ArrowInvalid,
match=r'Got bytestring of length 12 \(expected 3\)'):
arr = np.array([b'foo', b'bar', b'baz'], dtype='|U3')
pa.array(arr, type=pa.binary(3))
class TestConvertDecimalTypes(object):
"""
Conversion test for decimal types.
"""
decimal32 = [
decimal.Decimal('-1234.123'),
decimal.Decimal('1234.439')
]
decimal64 = [
decimal.Decimal('-129934.123331'),
decimal.Decimal('129534.123731')
]
decimal128 = [
decimal.Decimal('394092382910493.12341234678'),
decimal.Decimal('-314292388910493.12343437128')
]
@pytest.mark.parametrize(('values', 'expected_type'), [
pytest.param(decimal32, pa.decimal128(7, 3), id='decimal32'),
pytest.param(decimal64, pa.decimal128(12, 6), id='decimal64'),
pytest.param(decimal128, pa.decimal128(26, 11), id='decimal128')
])
def test_decimal_from_pandas(self, values, expected_type):
expected = pd.DataFrame({'decimals': values})
table = pa.Table.from_pandas(expected, preserve_index=False)
field = pa.field('decimals', expected_type)
# schema's metadata is generated by from_pandas conversion
expected_schema = pa.schema([field], metadata=table.schema.metadata)
assert table.schema.equals(expected_schema)
@pytest.mark.parametrize('values', [
pytest.param(decimal32, id='decimal32'),
pytest.param(decimal64, id='decimal64'),
pytest.param(decimal128, id='decimal128')
])
def test_decimal_to_pandas(self, values):
expected = pd.DataFrame({'decimals': values})
converted = pa.Table.from_pandas(expected)
df = converted.to_pandas()
tm.assert_frame_equal(df, expected)
def test_decimal_fails_with_truncation(self):
data1 = [decimal.Decimal('1.234')]
type1 = pa.decimal128(10, 2)
with pytest.raises(pa.ArrowInvalid):
pa.array(data1, type=type1)
data2 = [decimal.Decimal('1.2345')]
type2 = pa.decimal128(10, 3)
with pytest.raises(pa.ArrowInvalid):
pa.array(data2, type=type2)
def test_decimal_with_different_precisions(self):
data = [
decimal.Decimal('0.01'),
decimal.Decimal('0.001'),
]
series = pd.Series(data)
array = pa.array(series)
assert array.to_pylist() == data
assert array.type == pa.decimal128(3, 3)
array = pa.array(data, type=pa.decimal128(12, 5))
expected = [decimal.Decimal('0.01000'), decimal.Decimal('0.00100')]
assert array.to_pylist() == expected
def test_decimal_with_None_explicit_type(self):
series = pd.Series([decimal.Decimal('3.14'), None])
_check_series_roundtrip(series, type_=pa.decimal128(12, 5))
# Test that having all None values still produces decimal array
series = pd.Series([None] * 2)
_check_series_roundtrip(series, type_=pa.decimal128(12, 5))
def test_decimal_with_None_infer_type(self):
series = pd.Series([decimal.Decimal('3.14'), None])
_check_series_roundtrip(series, expected_pa_type=pa.decimal128(3, 2))
def test_strided_objects(self, tmpdir):
# see ARROW-3053
data = {
'a': {0: 'a'},
'b': {0: decimal.Decimal('0.0')}
}
# This yields strided objects
df = pd.DataFrame.from_dict(data)
_check_pandas_roundtrip(df)
class TestListTypes(object):
"""
Conversion tests for list<> types.
"""
def test_column_of_arrays(self):
df, schema = dataframe_with_arrays()
_check_pandas_roundtrip(df, schema=schema, expected_schema=schema)
table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)
# schema's metadata is generated by from_pandas conversion
expected_schema = schema.add_metadata(table.schema.metadata)
assert table.schema.equals(expected_schema)
for column in df.columns:
field = schema.field_by_name(column)
_check_array_roundtrip(df[column], type=field.type)
def test_column_of_arrays_to_py(self):
# Test regression in ARROW-1199 not caught in above test
dtype = 'i1'
arr = np.array([
np.arange(10, dtype=dtype),
np.arange(5, dtype=dtype),
None,
np.arange(1, dtype=dtype)
])
type_ = pa.list_(pa.int8())
parr = pa.array(arr, type=type_)
assert parr[0].as_py() == list(range(10))
assert parr[1].as_py() == list(range(5))
assert parr[2].as_py() is None
assert parr[3].as_py() == [0]
def test_column_of_lists(self):
df, schema = dataframe_with_lists()
_check_pandas_roundtrip(df, schema=schema, expected_schema=schema)
table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)
# schema's metadata is generated by from_pandas conversion
expected_schema = schema.add_metadata(table.schema.metadata)
assert table.schema.equals(expected_schema)
for column in df.columns:
field = schema.field_by_name(column)
_check_array_roundtrip(df[column], type=field.type)
def test_column_of_lists_first_empty(self):
# ARROW-2124
num_lists = [[], [2, 3, 4], [3, 6, 7, 8], [], [2]]
series = pd.Series([np.array(s, dtype=float) for s in num_lists])
arr = pa.array(series)
result = pd.Series(arr.to_pandas())
tm.assert_series_equal(result, series)
def test_column_of_lists_chunked(self):
# ARROW-1357
df = pd.DataFrame({
'lists': np.array([
[1, 2],
None,
[2, 3],
[4, 5],
[6, 7],
[8, 9]
], dtype=object)
})
schema = pa.schema([
pa.field('lists', pa.list_(pa.int64()))
])
t1 = pa.Table.from_pandas(df[:2], schema=schema)
t2 = pa.Table.from_pandas(df[2:], schema=schema)
table = pa.concat_tables([t1, t2])
result = table.to_pandas()
tm.assert_frame_equal(result, df)
def test_column_of_lists_chunked2(self):
data1 = [[0, 1], [2, 3], [4, 5], [6, 7], [10, 11],
[12, 13], [14, 15], [16, 17]]
data2 = [[8, 9], [18, 19]]
a1 = pa.array(data1)
a2 = pa.array(data2)
t1 = pa.Table.from_arrays([a1], names=['a'])
t2 = pa.Table.from_arrays([a2], names=['a'])
concatenated = pa.concat_tables([t1, t2])
result = concatenated.to_pandas()
expected = pd.DataFrame({'a': data1 + data2})
tm.assert_frame_equal(result, expected)
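# --- Illustrative sketch (not part of the original test module) ---
# The _check_pandas_roundtrip / _check_series_roundtrip helpers used above are
# defined earlier in the test file; a minimal version of the core idea,
# assuming only public pyarrow/pandas APIs, could look like this:
def _roundtrip_sketch(df, expected=None, schema=None):
    import pyarrow as pa
    import pandas.util.testing as tm
    # pandas -> Arrow -> pandas, then require exact frame equality
    table = pa.Table.from_pandas(df, schema=schema)
    tm.assert_frame_equal(table.to_pandas(),
                          df if expected is None else expected)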
import argparse
import config
import todoist
import mystrings as s
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import os
def initialize_todoist_api():
# Uses an access token obtained from https://developer.todoist.com/appconsole.html
# The token is stored as "access_token" in config.py (not included in the repo)
todoist_api = todoist.TodoistAPI(config.access_token)
todoist_api.sync()
return todoist_api
def get_projects_df(todoist_api):
# Creating DataFrame of projects from 'projects' list of dicts
# .data attribute retrieves a python dictionary rather than todoist.models.Project
projects = [project.data for project in todoist_api.state[s.PROJECTS]]
df = pd.DataFrame(projects)
return df
def get_tasks_df(todoist_api):
tasks = [task.data for task in todoist_api.state[s.ITEMS]]
df = pd.DataFrame(tasks)
return df
def add_parent_projects_to_tasks(df_p, df_t):
# Adds "project parent" column to projects dataframe
map_project = dict(df_p[[s.ID, s.NAME]].values)
df_p[s.PARENT_PROJECT] = df_p.parent_id.map(map_project)
map_project_parent = dict(df_p[[s.ID, s.PARENT_PROJECT]].values)
# Use these mappings to create task columns with tasks' project name and parent project
df_t[s.PROJECT_NAME] = df_t.project_id.map(map_project)
df_t[s.PARENT_PROJECT] = df_t.project_id.map(map_project_parent)
return df_t
def add_parent_tasks_to_tasks(df_t):
# Build id->content and id->priority mappings to add parent-task and parent-priority columns to tasks
map_task = dict(df_t[[s.ID, s.CONTENT]].values)
map_priorities = dict(df_t[[s.ID, s.PRIORITY]].values)
df_t[s.PARENT_TASK] = df_t.parent_id.map(map_task)
df_t[s.PARENT_PRIORITY] = df_t.parent_id.map(map_priorities)
# Fill in values when task is top level
df_t[s.PARENT_PRIORITY] = np.where(pd.isnull(df_t[s.PARENT_TASK]), df_t[s.PRIORITY], df_t[s.PARENT_PRIORITY])
df_t[s.PARENT_TASK] = np.where(pd.isnull(df_t[s.PARENT_TASK]), df_t[s.CONTENT], df_t[s.PARENT_TASK])
df_t[s.PARENT_ID] = np.where(pd.isnull(df_t[s.PARENT_ID]), 0, df_t[s.PARENT_ID])
return df_t
def convert_datetimes_to_tz(df, timezone, datetime_format):
# Convert Date strings (in UTC by default) to datetime and format it
df[s.DATE_ADDED] = pd.to_datetime(
(pd.to_datetime(df[s.DATE_ADDED], utc=True)
.dt.tz_convert(timezone)
.dt.strftime(datetime_format)))
df[s.DATE_COMPLETED] = pd.to_datetime(
(pd.to_datetime(df[s.DATE_COMPLETED], utc=True)
.dt.tz_convert(timezone)
.dt.strftime(datetime_format)))
return df
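# --- Illustrative usage sketch (assumed entry point, not in the original) ---
# Chaining the helpers above: sync the API, build project/task frames, enrich
# tasks with parent info, and localize timestamps. The mystrings constants
# (s.PROJECTS, s.ITEMS, ...) and config.access_token are assumed to exist as
# described above; the timezone and format values are placeholders.
if False:  # flip to True to run against a live Todoist account
    api = initialize_todoist_api()
    df_projects = get_projects_df(api)
    df_tasks = get_tasks_df(api)
    df_tasks = add_parent_projects_to_tasks(df_projects, df_tasks)
    df_tasks = add_parent_tasks_to_tasks(df_tasks)
    df_tasks = convert_datetimes_to_tz(df_tasks, 'Europe/Brussels',
                                       '%Y-%m-%d %H:%M')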
import os
import random
import math
import numpy as np
import pandas as pd
import itertools
from functools import lru_cache
##########################
## Compliance functions ##
##########################
def delayed_ramp_fun(Nc_old, Nc_new, t, tau_days, l, t_start):
"""
t : timestamp
current date
tau_days : int
number of days before measures start having an effect
l : int
number of additional days after the time delay until full compliance is reached
t_start : timestamp
start of the measure
"""
return Nc_old + (Nc_new-Nc_old)/l * (t-t_start-tau_days)/pd.Timedelta('1D')
def ramp_fun(Nc_old, Nc_new, t, t_start, l):
"""
t : timestamp
current date
t_start : timestamp
start of the measure
l : int
number of days until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * (t-t_start)/pd.Timedelta('1D')
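# --- Illustrative example (not part of the original module) ---
# ramp_fun interpolates linearly from Nc_old to Nc_new over l days after
# t_start (delayed_ramp_fun is identical but shifted by tau_days); halfway
# through a 10-day ramp it returns the midpoint:
assert ramp_fun(10.0, 20.0, pd.Timestamp('2020-03-20'),
                pd.Timestamp('2020-03-15'), 10) == 15.0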
###############################
## Mobility update functions ##
###############################
def load_all_mobility_data(agg, dtype='fractional', beyond_borders=False):
"""
Function that fetches all available mobility data and adds it to a DataFrame with dates as indices and numpy matrices as values. Make sure to regularly update the mobility data with the notebook notebooks/preprocessing/Quick-update_mobility-matrices.ipynb to get the data for the most recent days. Also returns the average mobility over all available data, which might NOT always be desirable as a back-up mobility.
Input
-----
agg : str
Denotes the spatial aggregation at hand. Either 'prov', 'arr' or 'mun'
dtype : str
Choose the type of mobility data to return. Either 'fractional' (default), staytime (all available hours for region g spent in h), or visits (all unique visits from region g to h)
beyond_borders : boolean
If true, also include mobility abroad and mobility from foreigners
Returns
-------
all_mobility_data : pd.DataFrame
DataFrame with datetime objects as indices ('DATE') and np.arrays ('place') as value column
average_mobility_data : np.array
average mobility matrix over all available dates
"""
### Validate input ###
if agg not in ['mun', 'arr', 'prov']:
raise ValueError(
"spatial stratification '{0}' is not legitimate. Possible spatial "
"stratifications are 'mun', 'arr', or 'prov'".format(agg)
)
if dtype not in ['fractional', 'staytime', 'visits']:
raise ValueError(
"data type '{0}' is not legitimate. Possible mobility matrix "
"data types are 'fractional', 'staytime', or 'visits'".format(dtype)
)
### Load all available data ###
# Define absolute location of this file
abs_dir = os.path.dirname(__file__)
# Define data location for this particular aggregation level
data_location = f'../../../data/interim/mobility/{agg}/{dtype}'
# Iterate over all available interim mobility data
all_available_dates=[]
all_available_places=[]
directory=os.path.join(abs_dir, f'{data_location}')
for csv in os.listdir(directory):
# take YYYYMMDD information from processed CSVs. NOTE: this supposes a particular data name format!
datum = csv[-12:-4]
# Create list of datetime objects
all_available_dates.append(pd.to_datetime(datum, format="%Y%m%d"))
# Load the CSV as a np.array
if beyond_borders:
place = pd.read_csv(f'{directory}/{csv}', index_col='mllp_postalcode').values
else:
place = pd.read_csv(f'{directory}/{csv}', index_col='mllp_postalcode').drop(index='Foreigner', columns='ABROAD').values
if dtype=='fractional':
# make sure the rows sum up to 1 nicely again after dropping a row and a column
place = place / place.sum(axis=1)
# Create list of places
all_available_places.append(place)
# Create new empty dataframe with available dates. Load mobility later
df = pd.DataFrame({'DATE' : all_available_dates, 'place' : all_available_places}).set_index('DATE')
all_mobility_data = df.copy()
# Take average of all available mobility data
average_mobility_data = df['place'].values.mean()
return all_mobility_data, average_mobility_data
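# --- Illustrative usage sketch (not part of the original module) ---
# Fetch all fractional province-level matrices plus their average, then look
# up the matrix for one date, falling back on the average if that date is
# missing. Requires the interim mobility CSVs to be present on disk.
if False:
    all_mob, avg_mob = load_all_mobility_data('prov', dtype='fractional')
    t = pd.Timestamp('2020-09-01')
    place = all_mob['place'][t] if t in all_mob.index else avg_mob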
class make_mobility_update_function():
"""
Output the time-dependent mobility function with the data loaded in cache
Input
-----
proximus_mobility_data : DataFrame
Pandas DataFrame with dates as indices and matrices as values. Output of mobility.get_proximus_mobility_data.
proximus_mobility_data_avg : np.array
Average mobility matrix over all matrices
"""
def __init__(self, proximus_mobility_data, proximus_mobility_data_avg):
self.proximus_mobility_data = proximus_mobility_data
self.proximus_mobility_data_avg = proximus_mobility_data_avg
@lru_cache()
# Define mobility_update_func
def __call__(self, t, default_mobility=None):
"""
time-dependent function which has a mobility matrix of type dtype for every date.
Note: only works with datetime input (no integer time steps).
Input
-----
t : timestamp
current date as datetime object
default_mobility : np.array or None
If None (default), returns average mobility over all available dates. Else, return user-defined mobility
Returns
-------
place : np.array
square matrix with mobility of type dtype (fractional, staytime or visits), dimension depending on agg
"""
t = pd.Timestamp(t.date())
try: # if there is data available for this date (if the key exists)
place = self.proximus_mobility_data['place'][t]
except:
if default_mobility: # If there is no data available and a user-defined input is given
place = default_mobility
else: # No data and no user input: fall back on average mobility
place = self.proximus_mobility_data_avg
return place
def mobility_wrapper_func(self, t, states, param, default_mobility=None):
t = pd.Timestamp(t.date())
if t <= pd.Timestamp('2020-03-17'):
place = self.__call__(t, default_mobility=default_mobility)
return np.eye(place.shape[0])
else:
return self.__call__(t, default_mobility=default_mobility)
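# --- Illustrative usage sketch (not part of the original module) ---
# Wire the loaded data into the time-dependent function; note that before
# 2020-03-17 mobility_wrapper_func returns an identity matrix (no
# inter-patch mixing).
if False:
    all_mob, avg_mob = load_all_mobility_data('prov')
    mobility_update = make_mobility_update_function(all_mob, avg_mob)
    P = mobility_update.mobility_wrapper_func(pd.Timestamp('2020-03-01'),
                                              states=None, param=None)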
###################
## VOC functions ##
###################
class make_VOC_function():
"""
Class that returns a time-dependent parameter function for COVID-19 SEIRD model parameter alpha (variant fraction).
Current implementation includes the alpha - delta strains.
If the class is initialized without arguments, a logistic model fitted to prevalence data of the alpha-gamma variant is used. The class can also be initialized with the alpha-gamma prevalence data provided by Prof. <NAME>.
A logistic model fitted to prevalence data of the delta variant is always used.
Input
-----
*df_abc: pd.DataFrame (optional)
Alpha, Beta, Gamma prevalence dataset by <NAME>, obtained using:
`from covid19model.data import VOC`
`df_abc = VOC.get_abc_data()`
`VOC_function = make_VOC_function(df_abc)`
Output
------
__class__ : function
Default variant function
"""
def __init__(self, *df_abc):
self.df_abc = df_abc
self.data_given = False
if self.df_abc != ():
self.df_abc = df_abc[0] # First entry in list of optional arguments (dataframe)
self.data_given = True
@lru_cache()
def VOC_abc_data(self,t):
return self.df_abc.iloc[self.df_abc.index.get_loc(t, method='nearest')]['baselinesurv_f_501Y.V1_501Y.V2_501Y.V3']
@lru_cache()
def VOC_abc_logistic(self,t):
# Parameters obtained by fitting logistic model to weekly prevalence data
t_sig = pd.Timestamp('2021-02-14')
k = 0.07
# Function to return the fraction of the alpha-gamma variants
return 1/(1+np.exp(-k*(t-t_sig)/pd.Timedelta(days=1)))
@lru_cache()
def VOC_delta_logistic(self,t):
# Parameters obtained by fitting logistic model to weekly prevalence data
t_sig = pd.Timestamp('2021-06-25')
k = 0.11
# Function to return the fraction of the delta-variant
return 1/(1+np.exp(-k*(t-t_sig)/pd.Timedelta(days=1)))
# Default VOC function includes British and Indian variants
def __call__(self, t, states, param):
# Convert time to timestamp
t = pd.Timestamp(t.date())
# Introduction Indian variant
t1 = pd.Timestamp('2021-05-01')
# Construct alpha
if t <= t1:
if self.data_given:
return np.array([1-self.VOC_abc_data(t), self.VOC_abc_data(t), 0])
else:
return np.array([1-self.VOC_abc_logistic(t), self.VOC_abc_logistic(t), 0])
else:
return np.array([0, 1-self.VOC_delta_logistic(t), self.VOC_delta_logistic(t)])
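# --- Illustrative example (not part of the original module) ---
# With no data given, the default function combines the two logistic fits;
# after 2021-05-01 it returns [0, 1-f_delta, f_delta]. At the sigmoid
# midpoint t_sig the delta fraction is exactly 0.5:
if False:
    voc = make_VOC_function()
    alpha = voc(pd.Timestamp('2021-06-25'), states=None, param=None)
    # alpha == np.array([0, 0.5, 0.5]); the entries always sum to 1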
###########################
## Vaccination functions ##
###########################
from covid19model.data.model_parameters import construct_initN
class make_vaccination_function():
"""
Class that returns a two-fold time-dependent parameter function for the vaccination strategy. First, first-dose data from Sciensano are used; beyond the data window, a hypothetical scheme is used. If spatial data are given, the output consists of vaccination data per NIS code.
Input
-----
df : pd.dataFrame
*either* Sciensano public dataset, obtained using:
`from covid19model.data import sciensano`
`df = sciensano.get_sciensano_COVID19_data(update=False)`
*or* public spatial vaccination data, obtained using:
`from covid19model.data import sciensano`
`df = sciensano.get_public_spatial_vaccination_data(update=False,agg='arr')`
age_classes : pd.IntervalIndex
Desired age groups for the vaccination data. Defaults to the model's ten age bins.
Output
------
__class__ : function
Default vaccination function
"""
def __init__(self, df, age_classes=pd.IntervalIndex.from_tuples([(0,12),(12,18),(18,25),(25,35),(35,45),(45,55),(55,65),(65,75),(75,85),(85,120)], closed='left')):
age_stratification_size = len(age_classes)
# Assign inputs to object
self.df = df
self.age_agg = age_stratification_size
# Check if spatial data is provided
self.spatial = None
if 'NIS' in self.df.index.names:
self.spatial = True
self.space_agg = len(self.df.index.get_level_values('NIS').unique().values)
# infer aggregation (prov, arr or mun)
if self.space_agg == 11:
self.agg = 'prov'
elif self.space_agg == 43:
self.agg = 'arr'
elif self.space_agg == 581:
self.agg = 'mun'
else:
raise Exception(f"Space is {G}-fold stratified. This is not recognized as being stratification at Belgian province, arrondissement, or municipality level.")
# Check if dose data is provided
self.doses = None
if 'dose' in self.df.index.names:
self.doses = True
self.dose_agg = len(self.df.index.get_level_values('dose').unique().values)
# Define start- and enddate
self.df_start = pd.Timestamp(self.df.index.get_level_values('date').min())
self.df_end = pd.Timestamp(self.df.index.get_level_values('date').max())
# Perform age conversion
# Define dataframe with desired format
iterables=[]
for index_name in self.df.index.names:
if index_name != 'age':
iterables += [self.df.index.get_level_values(index_name).unique()]
else:
iterables += [age_classes]
index = pd.MultiIndex.from_product(iterables, names=self.df.index.names)
self.new_df = pd.Series(index=index)
# Four possibilities exist: can this be sped up?
if self.spatial:
if self.doses:
# Shorten?
for date in self.df.index.get_level_values('date').unique():
for NIS in self.df.index.get_level_values('NIS').unique():
for dose in self.df.index.get_level_values('dose').unique():
data = self.df.loc[(date, NIS, slice(None), dose)]
self.new_df.loc[(date, NIS, slice(None), dose)] = self.convert_age_stratified_vaccination_data(data, age_classes, self.agg, NIS).values
else:
for date in self.df.index.get_level_values('date').unique():
for NIS in self.df.index.get_level_values('NIS').unique():
data = self.df.loc[(date,NIS)]
self.new_df.loc[(date, NIS)] = self.convert_age_stratified_vaccination_data(data, age_classes, self.agg, NIS).values
else:
if self.doses:
for date in self.df.index.get_level_values('date').unique():
for dose in self.df.index.get_level_values('dose').unique():
data = self.df.loc[(date, slice(None), dose)]
self.new_df.loc[(date, slice(None), dose)] = self.convert_age_stratified_vaccination_data(data, age_classes).values
else:
for date in self.df.index.get_level_values('date').unique():
data = self.df.loc[(date)]
self.new_df.loc[(date)] = self.convert_age_stratified_vaccination_data(data, age_classes).values
self.df = self.new_df
def convert_age_stratified_vaccination_data(self, data, age_classes, agg=None, NIS=None):
"""
A function to convert the sciensano vaccination data to the desired model age groups
Parameters
----------
data: pd.Series
A series of age-stratified vaccination incidences. Index must be of type pd.Intervalindex.
age_classes : pd.IntervalIndex
Desired age groups of the vaccination dataframe.
agg: str
Spatial aggregation: prov, arr or mun
NIS : str
NIS code of the considered spatial element
Returns
-------
out: pd.Series
Converted data.
"""
# Pre-allocate new series
out = pd.Series(index = age_classes, dtype=float)
# Extract demographics
if agg:
data_n_individuals = construct_initN(data.index.get_level_values('age'), agg).loc[NIS,:].values
demographics = construct_initN(None, agg).loc[NIS,:].values
else:
data_n_individuals = construct_initN(data.index.get_level_values('age'), agg).values
demographics = construct_initN(None, agg).values
# Loop over desired intervals
for idx,interval in enumerate(age_classes):
result = []
for age in range(interval.left, interval.right):
try:
result.append(demographics[age]/data_n_individuals[data.index.get_level_values('age').contains(age)]*data.iloc[np.where(data.index.get_level_values('age').contains(age))[0][0]])
except:
result.append(0)
out.iloc[idx] = sum(result)
return out
@lru_cache()
def get_data(self,t):
if self.spatial:
if self.doses:
try:
# Only includes doses A, B and C (so not boosters!) for now
data = np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
data[:,:,:-1] = np.array(self.df.loc[t,:,:,:].values).reshape( (self.space_agg, self.age_agg, self.dose_agg) )
return data
except:
return np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
else:
try:
return np.array(self.df.loc[t,:,:].values).reshape( (self.space_agg, self.age_agg) )
except:
return np.zeros([self.space_agg, self.age_agg])
else:
if self.doses:
try:
return np.array(self.df.loc[t,:,:].values).reshape( (self.age_agg, self.dose_agg) )
except:
return np.zeros([self.age_agg, self.dose_agg])
else:
try:
return np.array(self.df.loc[t,:].values)
except:
return np.zeros(self.age_agg)
def unidose_2021_vaccination_campaign(self, states, initN, daily_doses, delay_immunity, vacc_order, stop_idx, refusal):
# Compute the number of vaccine eligible individuals
VE = states['S'] + states['R']
# Initialize N_vacc
N_vacc = np.zeros(self.age_agg)
# Start vaccination loop
idx = 0
while daily_doses > 0:
if idx == stop_idx:
daily_doses = 0 #End vaccination campaign at age 20
elif VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
N_vacc[vacc_order[idx]] = daily_doses
daily_doses = 0
else:
N_vacc[vacc_order[idx]] = VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]]
daily_doses = daily_doses - (VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]])
idx = idx + 1
return N_vacc
def booster_campaign(self, states, daily_doses, vacc_order, stop_idx, refusal):
# Compute the number of booster eligible individuals
VE = states['S'][:,2] + states['E'][:,2] + states['I'][:,2] + states['A'][:,2] + states['R'][:,2] \
+ states['S'][:,3] + states['E'][:,3] + states['I'][:,3] + states['A'][:,3] + states['R'][:,3]
# Initialize N_vacc
N_vacc = np.zeros([self.age_agg,self.dose_agg])
# Booster vaccination strategy without refusal
idx = 0
while daily_doses > 0:
if idx == stop_idx:
daily_doses= 0 #End vaccination campaign at age 20
elif VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
N_vacc[vacc_order[idx],3] = daily_doses
daily_doses= 0
else:
N_vacc[vacc_order[idx],3] = VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]]
daily_doses = daily_doses - (VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]])
idx = idx + 1
return N_vacc
# Default vaccination strategy = Sciensano data + hypothetical scheme after end of data collection for unidose model only (for now)
def __call__(self, t, states, param, initN, daily_doses=60000, delay_immunity = 21, vacc_order = [8,7,6,5,4,3,2,1,0], stop_idx=9, refusal = [0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3]):
"""
time-dependent function for the Belgian vaccination strategy
First, all available first-dose data from Sciensano are used. Then, the user can specify a custom vaccination strategy of "daily_doses" first doses per day,
administered in the order specified by the vector "vacc_order" with a refusal propensity of "refusal" in every age group.
This vaccination strategy does not distinguish between vaccination doses: individuals are transferred to the vaccination circuit after a time delay following the first dose.
For use with the model `COVID19_SEIRD` and `COVID19_SEIRD_spatial_vacc` in `~src/models/models.py`
Parameters
----------
t : int
Simulation time
states: dict
Dictionary containing values of model states
param : dict
Model parameter dictionary
initN : list or np.array
Demographics according to the epidemiological model age bins
daily_doses : int
Number of doses administered per day. Default is 60000 doses/day.
delay_immunity : int
Time delay between first dose vaccination and start of immunity. Default is 21 days.
vacc_order : array
Vector containing vaccination prioritization preference. Default is old to young. Must be equal in length to the number of age bins in the model.
stop_idx : float
Index of age group at which the vaccination campaign is halted. An index of 9 corresponds to vaccinating all age groups, an index of 8 corresponds to skipping the last age group in vacc_order.
refusal: array
Vector containing the fraction of individuals refusing a vaccine per age group. Default is 30% in every age group. Must be equal in length to the number of age bins in the model.
Return
------
N_vacc : np.array
Number of individuals to be vaccinated at simulation time "t" per age, or per [patch,age]
"""
# Convert time to suitable format
t = pd.Timestamp(t.date())
# Convert delay to a timedelta
delay = pd.Timedelta(str(int(delay_immunity))+'D')
# Compute vaccinated individuals after spring-summer 2021 vaccination campaign
check_time = pd.Timestamp('2021-10-01')
# Only for the non-spatial multi-vaccination dose model
if not self.spatial:
if self.doses:
if t == check_time:
self.fully_vaccinated_0 = states['S'][:,2] + states['E'][:,2] + states['I'][:,2] + states['A'][:,2] + states['R'][:,2] + \
states['S'][:,3] + states['E'][:,3] + states['I'][:,3] + states['A'][:,3] + states['R'][:,3]
# Use data
if t <= self.df_end + delay:
return self.get_data(t-delay)
# Projection into the future
else:
if self.spatial:
if self.doses:
# No projection implemented
return np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
else:
# No projection implemented
return np.zeros([self.space_agg,self.age_agg])
else:
if self.doses:
return self.booster_campaign(states, daily_doses, vacc_order, stop_idx, refusal)
else:
return self.unidose_2021_vaccination_campaign(states, initN, daily_doses, delay_immunity, vacc_order, stop_idx, refusal)
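# --- Illustrative usage sketch (dataset retrieval as in the docstring) ---
# Within the data window the function simply returns the (delayed) Sciensano
# incidences; initN only matters for the hypothetical campaign afterwards.
if False:
    from covid19model.data import sciensano
    df = sciensano.get_sciensano_COVID19_data(update=False)
    vacc_func = make_vaccination_function(df)
    N_vacc = vacc_func(pd.Timestamp('2021-03-01'), states=None, param=None,
                       initN=None)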
###################################
## Google social policy function ##
###################################
class make_contact_matrix_function():
"""
Class that returns contact matrix based on 4 prevention parameters by default, but has other policies defined as well.
Input
-----
Nc_all : dictionary
contact matrices for home, schools, work, transport, leisure and others
df_google : dataframe
google mobility data
Output
------
__class__ : default function
Default output function, based on contact_matrix_4prev
"""
def __init__(self, df_google, Nc_all):
self.df_google = df_google.astype(float)
self.Nc_all = Nc_all
# Compute start and endtimes of dataframe
self.df_google_start = df_google.index.get_level_values('date')[0]
self.df_google_end = df_google.index.get_level_values('date')[-1]
# Check if provincial data is provided
self.provincial = None
if 'NIS' in self.df_google.index.names:
self.provincial = True
self.space_agg = len(self.df_google.index.get_level_values('NIS').unique().values)
@lru_cache() # once the function is run for a set of parameters, it doesn't need to compile again
def __call__(self, t, prev_home=1, prev_schools=1, prev_work=1, prev_rest = 1,
school=None, work=None, transport=None, leisure=None, others=None, home=None):
"""
t : timestamp
current date
prev_... : float [0,1]
prevention parameter to estimate
school, work, transport, leisure, others : float [0,1]
level of opening of these sectors
if None, it is calculated from google mobility data
only school cannot be None!
"""
if school is None:
raise ValueError(
"Please indicate to which extend schools are open")
places_var = [work, transport, leisure, others]
places_names = ['work', 'transport', 'leisure', 'others']
GCMR_names = ['work', 'transport', 'retail_recreation', 'grocery']
if self.provincial:
if t < pd.Timestamp('2020-03-17'):
return np.ones(self.space_agg)[:,np.newaxis,np.newaxis]*self.Nc_all['total']
elif pd.Timestamp('2020-03-17') <= t <= self.df_google_end:
# Extract row at timestep t
row = -self.df_google.loc[(t, slice(None)),:]/100
else:
# Extract last 14 days and take the mean
row = -self.df_google.loc[(self.df_google_end - pd.Timedelta(days=14)): self.df_google_end, slice(None)].mean(level='NIS')/100
# Sort NIS codes from low to high
row.sort_index(level='NIS', ascending=True,inplace=True)
# Extract values
values_dict={}
for idx,place in enumerate(places_var):
if place is None:
place = 1 - row[GCMR_names[idx]].values
else:
try:
test=len(place)
except:
place = place*np.ones(self.space_agg)
values_dict.update({places_names[idx]: place})
# Schools:
try:
test=len(school)
except:
school = school*np.ones(self.space_agg)
# Construct contact matrix
CM = (prev_home*np.ones(self.space_agg)[:, np.newaxis,np.newaxis]*self.Nc_all['home'] +
(prev_schools*school)[:, np.newaxis,np.newaxis]*self.Nc_all['schools'] +
(prev_work*values_dict['work'])[:,np.newaxis,np.newaxis]*self.Nc_all['work'] +
(prev_rest*values_dict['transport'])[:,np.newaxis,np.newaxis]*self.Nc_all['transport'] +
(prev_rest*values_dict['leisure'])[:,np.newaxis,np.newaxis]*self.Nc_all['leisure'] +
(prev_rest*values_dict['others'])[:,np.newaxis,np.newaxis]*self.Nc_all['others'])
else:
if t < pd.Timestamp('2020-03-17'):
return self.Nc_all['total']
elif pd.Timestamp('2020-03-17') <= t <= self.df_google_end:
# Extract row at timestep t
row = -self.df_google.loc[t]/100
else:
# Extract last 14 days and take the mean
row = -self.df_google[-14:-1].mean()/100
# Extract values
values_dict={}
for idx,place in enumerate(places_var):
if place is None:
place = 1 - row[GCMR_names[idx]]
values_dict.update({places_names[idx]: place})
# Construct contact matrix
CM = (prev_home*self.Nc_all['home'] +
prev_schools*school*self.Nc_all['schools'] +
prev_work*values_dict['work']*self.Nc_all['work'] +
prev_rest*values_dict['transport']*self.Nc_all['transport'] +
prev_rest*values_dict['leisure']*self.Nc_all['leisure'] +
prev_rest*values_dict['others']*self.Nc_all['others'])
return CM
def all_contact(self):
return self.Nc_all['total']
def all_contact_no_schools(self):
return self.Nc_all['total'] - self.Nc_all['schools']
def ramp_fun(self, Nc_old, Nc_new, t, t_start, l):
"""
t : timestamp
current simulation time
t_start : timestamp
start of policy change
l : int
number of days until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * float( (t-t_start)/pd.Timedelta('1D') )
def delayed_ramp_fun(self, Nc_old, Nc_new, t, tau_days, l, t_start):
"""
t : timestamp
current simulation time
t_start : timestamp
start of policy change
tau_days : int
number of days before measures start having an effect
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * float( (t-t_start-tau_days)/pd.Timedelta('1D') )
####################
## National model ##
####################
def policies_all(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home):
'''
Function that returns the time-dependent social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
CM : np.array (9x9)
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-03') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = pd.Timestamp('2020-09-01') # end of summer holidays
t7 = pd.Timestamp('2020-09-21') # Opening universities
# Define key dates of second wave
t8 = pd.Timestamp('2020-10-19') # lockdown (1)
t9 = pd.Timestamp('2020-11-02') # lockdown (2)
t10 = pd.Timestamp('2020-11-16') # schools re-open
t11 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t12 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t13 = pd.Timestamp('2021-02-15') # Spring break starts
t14 = pd.Timestamp('2021-02-21') # Spring break ends
t15 = pd.Timestamp('2021-02-28') # Contact increase in children
t16 = pd.Timestamp('2021-03-26') # Start of Easter holiday
t17 = pd.Timestamp('2021-04-18') # End of Easter holiday
t18 = pd.Timestamp('2021-06-01') # Start of lockdown relaxation
t19 = pd.Timestamp('2021-07-01') # Start of Summer holiday
t20 = pd.Timestamp('2021-09-01') # End of Summer holiday
t21 = pd.Timestamp('2021-09-21') # Opening of universities
t22 = pd.Timestamp('2021-10-01') # Flanders releases all measures
t23 = pd.Timestamp('2021-11-01') # Start of autumn break
t24 = pd.Timestamp('2021-11-07') # End of autumn break
t25 = pd.Timestamp('2021-12-26') # Start of Christmas break
t26 = pd.Timestamp('2022-01-06') # End of Christmas break
t27 = pd.Timestamp('2022-02-28') # Start of Spring Break
t28 = pd.Timestamp('2022-03-06') # End of Spring Break
t29 = pd.Timestamp('2022-04-04') # Start of Easter Break
t30 = pd.Timestamp('2022-04-17') # End of Easter Break
t31 = pd.Timestamp('2022-07-01') # Start of summer holidays
t32 = pd.Timestamp('2022-09-01') # End of summer holidays
t33 = pd.Timestamp('2022-09-21') # Opening of universities
t34 = pd.Timestamp('2022-10-31') # Start of autumn break
t35 = pd.Timestamp('2022-11-06') # End of autumn break
if t <= t1:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t1 < t <= t1 + l1_days:
t = pd.Timestamp(t.date())
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
policy_new = self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
return self.ramp_fun(policy_old, policy_new, t, t1, l1)
elif t1 + l1_days < t <= t2:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
elif t2 < t <= t3:
l = (t3 - t2)/pd.Timedelta(days=1)
r = (t3 - t2)/(t4 - t2)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t2, l)
elif t3 < t <= t4:
l = (t4 - t3)/pd.Timedelta(days=1)
r = (t3 - t2)/(t4 - t2)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t3, l)
elif t4 < t <= t5:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
elif t5 < t <= t6:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
# Second wave
elif t6 < t <= t7:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0.7)
elif t7 < t <= t8:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t8 < t <= t8 + l2_days:
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
return self.ramp_fun(policy_old, policy_new, t, t8, l2)
elif t8 + l2_days < t <= t9:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t9 < t <= t10:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t10 < t <= t11:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t11 < t <= t12:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t12 < t <= t13:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t13 < t <= t14:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t14 < t <= t15:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t15 < t <= t16:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t16 < t <= t17:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t17 < t <= t18:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t18 < t <= t19:
l = (t19 - t18)/pd.Timedelta(days=1)
r = (t19 - t18)/(t20 - t18)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=1)
return self.ramp_fun(policy_old, policy_new, t, t18, l)
elif t19 < t <= t20:
l = (t20 - t19)/pd.Timedelta(days=1)
r = (t19 - t18)/(t20 - t18)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, 0.75*prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t19, l)
elif t20 < t <= t21:
return self.__call__(t, prev_home, prev_schools, prev_work, 0.75*prev_rest_relaxation, school=0.7)
elif t21 < t <= t22:
return self.__call__(t, prev_home, prev_schools, prev_work, 0.70*prev_rest_relaxation, school=1)
elif t22 < t <= t23:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t23 < t <= t24:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
elif t24 < t <= t25:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t25 < t <= t26:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t26 < t <= t27:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t27 < t <= t28:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
leisure=1.1, work=0.9, transport=1, others=1, school=0)
elif t28 < t <= t29:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t29 < t <= t30:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t30 < t <= t31:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t31 < t <= t32:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t32 < t <= t33:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=0.8)
elif t33 < t <= t34:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t34 < t <= t35:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.9, leisure=1.1, transport=1, others=1, school=0)
else:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
def policies_all_WAVE4(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home, date_measures, scenario):
'''
Function that returns the time-dependent social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
Effectivity of contacts at {location}
date_measures : Timestamp
Date on which the fourth-wave measures are announced (note: t25 is currently hard-coded below)
scenario : int
Index selecting the work/schools/leisure openness scenario (scenarios_* lists below)
Returns
-------
CM : np.array
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-03') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = pd.Timestamp('2020-09-01') # end of summer holidays
t7 = pd.Timestamp('2020-09-21') # Opening universities
# Define key dates of second wave
t8 = pd.Timestamp('2020-10-19') # lockdown (1)
t9 = pd.Timestamp('2020-11-02') # lockdown (2)
t10 = pd.Timestamp('2020-11-16') # schools re-open
t11 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t12 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t13 = pd.Timestamp('2021-02-15') # Spring break starts
t14 = pd.Timestamp('2021-02-21') # Spring break ends
t15 = pd.Timestamp('2021-02-28') # Contact increase in children
t16 = pd.Timestamp('2021-03-26') # Start of Easter holiday
t17 = pd.Timestamp('2021-04-18') # End of Easter holiday
t18 = pd.Timestamp('2021-06-01') # Start of lockdown relaxation
t19 = pd.Timestamp('2021-07-01') # Start of Summer holiday
t20 = pd.Timestamp('2021-09-01') # End of Summer holiday
t21 = pd.Timestamp('2021-09-21') # Opening of universities
t22 = pd.Timestamp('2021-10-01') # Flanders releases all measures
t23 = pd.Timestamp('2021-11-01') # Start of autumn break
t24 = pd.Timestamp('2021-11-07') # End of autumn break
# Fourth WAVE
t25 = pd.Timestamp('2021-11-22') # Start of mandatory telework + start easing in leisure restrictions
t26 = pd.Timestamp('2021-12-18') # Start of Christmas break for schools
t27 = pd.Timestamp('2021-12-26') # Start of Christmas break for general population
t28 = pd.Timestamp('2022-01-06') # End of Christmas break
t29 = pd.Timestamp('2022-01-28') # End of measures
t30 = pd.Timestamp('2022-02-28') # Start of Spring Break
t31 = pd.Timestamp('2022-03-06') # End of Spring Break
t32 = pd.Timestamp('2022-04-04') # Start of Easter Break
t33 = pd.Timestamp('2022-04-17') # End of Easter Break
t34 = pd.Timestamp('2022-07-01') # Start of summer holidays
t35 = pd.Timestamp('2022-09-01') # End of summer holidays
t36 = pd.Timestamp('2022-09-21') # Opening of universities
t37 = pd.Timestamp('2022-10-31') # Start of autumn break
t38 = pd.Timestamp('2022-11-06') # End of autumn break
scenarios_work = [1, 0.7, 0.7, 0.7, 0.7]
scenarios_schools = [1, 1, 1, 1, 1]
scenarios_leisure = [1, 1, 0.75, 0.50, 0.25]
if t <= t1:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t1 < t <= t1 + l1_days:
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
policy_new = self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
return self.ramp_fun(policy_old, policy_new, t, t1, l1)
elif t1 + l1_days < t <= t2:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
elif t2 < t <= t3:
l = (t3 - t2)/pd.Timedelta(days=1)
r = (t3 - t2)/(t4 - t2)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t2, l)
elif t3 < t <= t4:
l = (t4 - t3)/pd.Timedelta(days=1)
r = (t3 - t2)/(t4 - t2)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t3, l)
elif t4 < t <= t5:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
elif t5 < t <= t6:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
# Second wave
elif t6 < t <= t7:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0.7)
elif t7 < t <= t8:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t8 < t <= t8 + l2_days:
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
return self.ramp_fun(policy_old, policy_new, t, t8, l2)
elif t8 + l2_days < t <= t9:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t9 < t <= t10:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t10 < t <= t11:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t11 < t <= t12:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t12 < t <= t13:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t13 < t <= t14:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t14 < t <= t15:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t15 < t <= t16:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t16 < t <= t17:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t17 < t <= t18:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t18 < t <= t19:
l = (t19 - t18)/pd.Timedelta(days=1)
r = (t19 - t18)/(t20 - t18)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=1)
return self.ramp_fun(policy_old, policy_new, t, t18, l)
elif t19 < t <= t20:
l = (t20 - t19)/pd.Timedelta(days=1)
r = (t19 - t18)/(t20 - t18)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, 0.75*prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t19, l)
elif t20 < t <= t21:
return self.__call__(t, prev_home, prev_schools, prev_work, 0.75*prev_rest_relaxation, school=0.7)
elif t21 < t <= t22:
return self.__call__(t, prev_home, prev_schools, prev_work, 0.70*prev_rest_relaxation, school=1)
elif t22 < t <= t23:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t23 < t <= t24:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
elif t24 < t <= t25:
# End of autumn break --> Date of measures
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t25 < t <= t25 + pd.Timedelta(5, unit='D'):
# Date of measures --> End easing in leisure restrictions
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, work=scenarios_work[scenario], school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, work=scenarios_work[scenario], leisure=scenarios_leisure[scenario], school=scenarios_schools[scenario])
return self.ramp_fun(policy_old, policy_new, t, t25, 5)
elif t25 + pd.Timedelta(5, unit='D') < t <= t26:
# End easing in leisure restrictions --> Early schools closure before Christmas holiday
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, work=scenarios_work[scenario], leisure=scenarios_leisure[scenario], school=scenarios_schools[scenario])
elif t26 < t <= t27:
# Early schools closure before Christmas holiday --> Christmas holiday
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=scenarios_work[scenario], leisure=scenarios_leisure[scenario], school=0)
elif t27 < t <= t28:
# Christmas holiday
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=scenarios_work[scenario]-0.2, leisure=scenarios_leisure[scenario], transport=scenarios_work[scenario]-0.2, school=0)
elif t28 < t <= t29:
# Christmas holiday --> End of measures
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
leisure=scenarios_leisure[scenario], work=scenarios_work[scenario], school=1)
elif t29 < t <= t30:
# End of Measures --> Spring break
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
leisure=1, work=1, transport=1, others=1, school=1)
elif t30 < t <= t31:
# Spring Break
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1, transport=0.7, others=1, school=0)
elif t31 < t <= t32:
# Spring Break --> Easter
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t32 < t <= t33:
# Easter
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1, transport=1, others=1, school=0)
elif t33 < t <= t34:
# Easter --> Summer
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t34 < t <= t35:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1, transport=1, others=1, school=0)
elif t35 < t <= t36:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=0.7)
elif t36 < t <= t37:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t37 < t <= t38:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1, transport=1, others=1, school=0)
else:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
###################
## Spatial model ##
###################
def policies_all_spatial(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home):
'''
Function that returns the time-dependent social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
CM : np.array
Effective contact matrix per spatial patch (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-07') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = pd.Timestamp('2020-09-01') # end of summer holidays
t7 = pd.Timestamp('2020-09-21') # Opening universities
# Define key dates of second wave
t8 = pd.Timestamp('2020-10-19') # lockdown (1)
t9 = pd.Timestamp('2020-11-02') # lockdown (2)
t10 = pd.Timestamp('2020-11-16') # schools re-open
t11 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t12 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t13 = pd.Timestamp('2021-02-15') # Spring break starts
t14 = pd.Timestamp('2021-02-21') # Spring break ends
t15 = pd.Timestamp('2021-02-28') # Contact increase in children
t16 = pd.Timestamp('2021-03-26') # Start of Easter holiday
t17 = pd.Timestamp('2021-04-18') # End of Easter holiday
t18 = pd.Timestamp('2021-05-07') # Start of relaxations
t19 = pd.Timestamp('2021-07-01') # Start of Summer holiday
t20 = pd.Timestamp('2021-09-01') # End of Summer holiday
t21 = pd.Timestamp('2021-09-21') # Opening of universities
t22 = pd.Timestamp('2021-11-01') # Start of autumn break
t23 = pd.Timestamp('2021-11-07') # End of autumn break
        t24 = pd.Timestamp('2021-12-26') # Start of Christmas break
        t25 = pd.Timestamp('2022-01-06') # End of Christmas break
t26 = pd.Timestamp('2022-02-28') # Start of Spring Break
t27 = pd.Timestamp('2022-03-06') # End of Spring Break
t28 = pd.Timestamp('2022-04-04') # Start of Easter Break
t29 = pd.Timestamp('2022-04-17') # End of Easter Break
t30 = pd.Timestamp('2022-07-01') # Start of summer holidays
t31 = pd.Timestamp('2022-09-01') # End of summer holidays
t32 = pd.Timestamp('2022-09-21') # Opening of universities
t33 = pd.Timestamp('2022-10-31') # Start of autumn break
t34 = pd.Timestamp('2022-11-06') # End of autumn break
spatial_summer_lockdown_2020 = tuple(np.array([prev_rest_lockdown, prev_rest_lockdown, # F
prev_rest_lockdown, # W
prev_rest_lockdown, # Bxl
prev_rest_lockdown, prev_rest_lockdown, # F
prev_rest_relaxation, prev_rest_relaxation, # W
prev_rest_lockdown, # F
0.7*prev_rest_relaxation, 0.7*prev_rest_relaxation])) # W
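        # The 11 entries above are assumed to follow the model's fixed NIS
        # province ordering; the trailing tags mark each province group as
        # Flanders (F), Wallonia (W) or Brussels (Bxl).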
co_F = 0.60
co_W = 0.50
co_Bxl = 0.45
spatial_summer_2021 = tuple(np.array([co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, # W
co_Bxl*prev_rest_relaxation, # Bxl
co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation, # W
co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation])) # W
co_F = 1.00
co_W = 0.50
co_Bxl = 0.45
relaxation_flanders_2021 = tuple(np.array([co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, # W
co_Bxl*prev_rest_relaxation, # Bxl
co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation, # W
co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation])) # W
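        # Timeline dispatch: each branch below returns the effective contact
        # matrix for one policy period. self.ramp_fun is assumed to
        # interpolate linearly between policy_old and policy_new over the
        # compliance window (l days) following the policy-change date.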
        if t <= t1:
            return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
        elif t1 < t <= t1 + l1_days:
            policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
            policy_new = self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
            return self.ramp_fun(policy_old, policy_new, t, t1, l1)
elif t1 + l1_days < t <= t2:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
elif t2 < t <= t3:
l = (t3 - t2)/pd.Timedelta(days=1)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t2, l)
# 2020
elif t3 < t <= t4:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_relaxation, school=0)
elif t4 < t <= t5:
return self.__call__(t, prev_home, prev_schools, prev_work, spatial_summer_lockdown_2020, school=0)
elif t5 < t <= t6:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
# Second wave
elif t6 < t <= t7:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0.8)
elif t7 < t <= t8:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t8 < t <= t8 + l2_days:
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
            policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
return self.ramp_fun(policy_old, policy_new, t, t8, l2)
elif t8 + l2_days < t <= t9:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t9 < t <= t10:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t10 < t <= t11:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t11 < t <= t12:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t12 < t <= t13:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t13 < t <= t14:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t14 < t <= t15:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t15 < t <= t16:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t16 < t <= t17:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t17 < t <= t18:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t18 < t <= t19:
l = (t19 - t18)/pd.Timedelta(days=1)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, spatial_summer_2021, school=0)
return self.ramp_fun(policy_old, policy_new, t, t18, l)
elif t19 < t <= t20:
return self.__call__(t, prev_home, prev_schools, prev_work, spatial_summer_2021, school=0)
elif t20 < t <= t21:
return self.__call__(t, prev_home, prev_schools, prev_work, spatial_summer_2021, school=0.8)
elif t21 < t <= t22:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021, school=1)
elif t22 < t <= t23:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021, school=0)
elif t23 < t <= t24:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021, school=1)
elif t24 < t <= t25:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t25 < t <= t26:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
work=1, leisure=1, transport=1, others=1, school=1)
elif t26 < t <= t27:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
leisure=1.1, work=0.9, transport=1, others=1, school=0)
elif t27 < t <= t28:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
work=1, leisure=1, transport=1, others=1, school=1)
elif t28 < t <= t29:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t29 < t <= t30:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t30 < t <= t31:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t31 < t <= t32:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=0.8)
elif t32 < t <= t33:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t33 < t <= t34:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.9, leisure=1.1, transport=1, others=1, school=0)
else:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
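    # Illustrative wiring (hypothetical; the model construction is not shown
    # in this excerpt): these policy methods are intended to be registered as
    # time-dependent parameter functions for the contact matrix 'Nc', e.g.
    #   time_dependent_parameters={'Nc': policies.policies_all_spatial}
    # so that the solver re-evaluates the social contact matrix at each t.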
def policies_all_spatial_WAVE4(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home, date_measures, scenario):
'''
        Function that returns the time-dependent social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
        prev_{location} : float
            Effectivity of contacts at {location}
        date_measures : Timestamp
            Start date of the WAVE 4 scenario measures (assumed from the parameter name)
        scenario : int or str
            Key selecting the social-contact scenario (cf. the scenarios_work / scenarios_leisure dictionaries)
Returns
-------
CM : np.array
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-07') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = | pd.Timestamp('2020-09-01') | pandas.Timestamp |
import argparse
from data_utils import get_n_examples_per_class
import os
import pandas as pd
from shutil import copyfile
def main():
parser = argparse.ArgumentParser()
parser.add_argument("DATA_DIRECTORY", type=str)
parser.add_argument("--N_train", type=int, nargs='+')
parser.add_argument("--N_valid", type=int, nargs='+')
parser.add_argument("--SEED", type=int, default=42)
args = parser.parse_args()
assert len(args.N_train) == len(args.N_valid)
df_full_train = pd.read_csv(os.path.join(args.DATA_DIRECTORY, "full", "train.tsv"), sep='\t')
df_full_test_name = os.path.join(args.DATA_DIRECTORY, "full", "test.tsv")
df_valid = df_full_train.loc[df_full_train["is_valid"] == True]
df_train = df_full_train.loc[df_full_train["is_valid"] == False]
label_y = df_train.columns[1]
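    # Assumption: column 0 of the TSV holds the text/features and column 1
    # holds the label, hence label_y = df_train.columns[1].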
for (n_train, n_valid) in zip(args.N_train, args.N_valid):
directory_section = os.path.join(args.DATA_DIRECTORY, str(n_train))
if not os.path.exists(directory_section):
os.makedirs(directory_section)
df_section_test_name = os.path.join(directory_section, "test.tsv")
copyfile(df_full_test_name, df_section_test_name)
_, df_reduced_train = get_n_examples_per_class(df_train, n_train, label_y)
_, df_reduced_valid = get_n_examples_per_class(df_valid, n_valid, label_y)
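        # get_n_examples_per_class (from data_utils) is assumed to return a
        # (remainder, sample) pair, where the sample contains n examples per
        # class of label_y drawn from the given frame.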
# FULL VALIDATION SET
df_ = | pd.concat([df_reduced_train, df_valid]) | pandas.concat |
import pytest
import numpy as np
import numpy.testing as npt
import pandas as pd
import statsmodels.api as sm
import statsmodels.formula.api as smf
from scipy.stats import logistic
from scipy.optimize import root
from delicatessen import MEstimator
from delicatessen.utilities import inverse_logit
np.random.seed(236461)
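# Background for the tests below: an M-estimator solves the stacked
# estimating equations sum_i psi(O_i; theta) = 0. Its asymptotic sandwich
# variance is B(theta)^-1 M(theta) [B(theta)^-1]^T, where the 'bread' is
# B = -E[d psi / d theta] and the 'meat' is M = E[psi psi^T]; MEstimator's
# point and variance estimates are checked against closed-form results.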
class TestMEstimation:
def test_error_nan(self):
"""Checks for an error when estimating equations return a NaN at the init values
"""
# Data set
y = np.array([5, 1, 2, 4, 2, 4, 5, 7, 11, 1, 6, 3, 4, np.nan])
def psi(theta):
return y - theta
mestimator = MEstimator(psi, init=[0, ])
with pytest.raises(ValueError, match="at least one np.nan"):
mestimator.estimate()
def test_error_rootfinder1(self):
"""Checks for an error when an invalid root finder is provided
"""
# Data set
y = np.array([5, 1, 2, 4, 2, 4, 5, 7, 11, 1, 6, 3, 4, 6])
def psi(theta):
return y - theta
mestimator = MEstimator(psi, init=[0, ])
with pytest.raises(ValueError, match="The solver 'not-avail'"):
mestimator.estimate(solver='not-avail')
def test_error_rootfinder2(self):
"""Check that user-specified solver has correct arguments
"""
# Data set
y = np.array([5, 1, 2, 4, 2, 4, 5, 7, 11, 1, 6, 3, 4, 6])
def psi(theta):
return y - theta
def custom_solver(stacked_equations):
options = {"maxiter": 1000}
opt = root(stacked_equations, x0=np.asarray([0, ]),
method='lm', tol=1e-9, options=options)
return opt.x
mestimator = MEstimator(psi, init=[0, ])
with pytest.raises(TypeError, match="The user-specified root-finding `solver` must be a function"):
mestimator.estimate(solver=custom_solver)
def test_error_rootfinder3(self):
"""Check that user-specified solver returns something besides None
"""
# Data set
y = np.array([5, 1, 2, 4, 2, 4, 5, 7, 11, 1, 6, 3, 4, 6])
def psi(theta):
return y - theta
def custom_solver(stacked_equations, init):
options = {"maxiter": 1000}
opt = root(stacked_equations, x0=np.asarray(init),
method='lm', tol=1e-9, options=options)
mestimator = MEstimator(psi, init=[0, ])
with pytest.raises(ValueError, match="must return the solution to the"):
mestimator.estimate(solver=custom_solver)
def test_mean_variance_1eq(self):
"""Tests the mean / variance with a single estimating equation.
"""
# Data set
y = np.array([5, 1, 2, 4, 2, 4, 5, 7, 11, 1, 6, 3, 4, 6])
def psi(theta):
return y - theta
mestimator = MEstimator(psi, init=[0, ])
mestimator.estimate()
# Checking mean estimate
npt.assert_allclose(mestimator.theta,
np.mean(y),
atol=1e-6)
# Checking variance estimates
npt.assert_allclose(mestimator.asymptotic_variance,
np.var(y, ddof=0),
atol=1e-6)
def test_mean_variance_1eq_lm_solver(self):
"""Tests the mean / variance with a single estimating equation.
"""
# Data set
y = np.array([5, 1, 2, 4, 2, 4, 5, 7, 11, 1, 6, 3, 4, 6])
def psi(theta):
return y - theta
mestimator = MEstimator(psi, init=[0, ])
mestimator.estimate(solver='lm')
# Checking mean estimate
npt.assert_allclose(mestimator.theta,
np.mean(y),
atol=1e-6)
# Checking variance estimates
npt.assert_allclose(mestimator.asymptotic_variance,
np.var(y, ddof=0),
atol=1e-6)
def test_mean_variance_2eq(self):
"""Tests the mean / variance with two estimating equations.
"""
# Data set
y = np.array([5, 1, 2, 4, 2, 4, 5, 7, 11, 1, 6, 3, 4, 6])
def psi(theta):
return y - theta[0], (y - theta[0]) ** 2 - theta[1]
mestimator = MEstimator(psi, init=[0, 0])
mestimator.estimate()
# Checking mean estimate
npt.assert_allclose(mestimator.theta[0],
np.mean(y),
atol=1e-6)
# Checking variance estimates
npt.assert_allclose(mestimator.theta[1],
mestimator.asymptotic_variance[0][0],
atol=1e-6)
npt.assert_allclose(mestimator.theta[1],
np.var(y, ddof=0),
atol=1e-6)
def test_mean_variance_2eq_lm_solver(self):
"""Tests the mean / variance with two estimating equations.
"""
# Data set
y = np.array([5, 1, 2, 4, 2, 4, 5, 7, 11, 1, 6, 3, 4, 6])
def psi(theta):
return y - theta[0], (y - theta[0]) ** 2 - theta[1]
mestimator = MEstimator(psi, init=[0, 0])
mestimator.estimate(solver='lm')
# Checking mean estimate
npt.assert_allclose(mestimator.theta[0],
np.mean(y),
atol=1e-6)
# Checking variance estimates
npt.assert_allclose(mestimator.theta[1],
mestimator.asymptotic_variance[0][0],
atol=1e-6)
npt.assert_allclose(mestimator.theta[1],
np.var(y, ddof=0),
atol=1e-6)
def test_ratio_estimator(self):
"""Tests the ratio with a single estimating equation.
"""
# Data sets
data = pd.DataFrame()
data['Y'] = np.array([5, 1, 2, 4, 2, 4, 5, 7, 11, 1, 6, 3, 4, 6])
data['X'] = np.array([1, 5, 3, 5, 1, 4, 1, 2, 5, 1, 2, 12, 1, 8])
def psi(theta):
return data['Y'] - data['X']*theta
mestimator = MEstimator(psi, init=[0, ])
mestimator.estimate()
        # Closed-form solutions from SB (presumably Stefanski & Boos 2002,
        # "The Calculus of M-Estimation")
theta = np.mean(data['Y']) / np.mean(data['X'])
var = (1 / np.mean(data['X']) ** 2) * np.mean((data['Y'] - theta * data['X']) ** 2)
# Checking mean estimate
npt.assert_allclose(mestimator.theta,
theta,
atol=1e-6)
# Checking variance estimates
npt.assert_allclose(mestimator.asymptotic_variance,
var,
atol=1e-6)
def test_alt_ratio_estimator(self):
"""Tests the alternative ratio with three estimating equations.
"""
# Data sets
data = pd.DataFrame()
data['Y'] = np.array([5, 1, 2, 4, 2, 4, 5, 7, 11, 1, 6, 3, 4, 6])
data['X'] = np.array([1, 5, 3, 5, 1, 4, 1, 2, 5, 1, 2, 12, 1, 8])
data['C'] = 1
def psi(theta):
return (data['Y'] - theta[0],
data['X'] - theta[1],
data['C'] * theta[0] - theta[1] * theta[2])
mestimator = MEstimator(psi, init=[0, 0, 0])
mestimator.estimate()
# Closed form solutions from SB
theta = np.mean(data['Y']) / np.mean(data['X'])
var = (1 / np.mean(data['X']) ** 2) * np.mean((data['Y'] - theta * data['X']) ** 2)
# Checking mean estimate
npt.assert_allclose(mestimator.theta[-1],
theta,
atol=1e-6)
# Checking variance estimates
npt.assert_allclose(mestimator.asymptotic_variance[-1][-1],
var,
atol=1e-5)
def test_alt_ratio_estimator_lm_solver(self):
"""Tests the alternative ratio with three estimating equations.
"""
# Data sets
data = pd.DataFrame()
data['Y'] = np.array([5, 1, 2, 4, 2, 4, 5, 7, 11, 1, 6, 3, 4, 6])
data['X'] = np.array([1, 5, 3, 5, 1, 4, 1, 2, 5, 1, 2, 12, 1, 8])
data['C'] = 1
def psi(theta):
return (data['Y'] - theta[0],
data['X'] - theta[1],
data['C'] * theta[0] - theta[1] * theta[2])
mestimator = MEstimator(psi, init=[0, 0, 0])
mestimator.estimate(solver='lm')
# Closed form solutions from SB
theta = np.mean(data['Y']) / np.mean(data['X'])
var = (1 / np.mean(data['X']) ** 2) * np.mean((data['Y'] - theta * data['X']) ** 2)
# Checking mean estimate
npt.assert_allclose(mestimator.theta[-1],
theta,
atol=1e-6)
# Checking variance estimates
npt.assert_allclose(mestimator.asymptotic_variance[-1][-1],
var,
atol=1e-5)
def test_ols(self):
"""Tests linear regression by-hand with a single estimating equation.
"""
n = 500
data = | pd.DataFrame() | pandas.DataFrame |
from datetime import datetime
import logging
import reader.cache
import hashlib
import dateutil.parser
from pandas import DataFrame, NaT
from clubhouse import ClubhouseClient
class Clubhouse:
    def __init__(self, clubhouse_config: dict, workflow: dict) -> None:
        super().__init__()
        self.clubhouse_config = clubhouse_config
        self.project_id = clubhouse_config["project_id"]
        self.workflow = workflow
        # The cache is keyed by a stable md5 hash of the API key, project and workflow.
        self.cache = reader.cache.Cache(self.cache_name())

    def cache_name(self):
        api_key = self.clubhouse_config["api_key"]
        workflow = str(self.workflow)
        name_hashed = hashlib.md5(
            (api_key + self.project_id + workflow).encode("utf-8")
        )
        return name_hashed.hexdigest()
def get_clubhouse_instance(self) -> ClubhouseClient:
clubhouse = ClubhouseClient(self.clubhouse_config["api_key"])
return clubhouse
def get_story_data(self, story):
logging.debug("Reading data for story %s", str(story["id"]))
story_data = {
"Key": story["id"],
"Type": story["story_type"],
"Story Points": story["estimate"],
"Creator": NaT,
"Created": dateutil.parser.parse(story["created_at"]).replace(tzinfo=None),
"Started": (
dateutil.parser.parse(story["started_at"]).replace(tzinfo=None)
if story["started"]
else NaT
),
"Done": (
dateutil.parser.parse(story["completed_at"]).replace(tzinfo=None)
if story["completed"]
else NaT
),
}
return story_data
def get_stories(self):
logging.debug("Getting stories")
clubhouse = self.get_clubhouse_instance()
stories = clubhouse.get(f"projects/{self.project_id}/stories")
return stories
def get_data(self) -> DataFrame:
logging.debug("Getting stories from Clubhouse.io")
if self.clubhouse_config["cache"] and self.cache.is_valid():
logging.debug("Getting clubhouse.io data from cache")
df_story_data = self.cache.read()
return df_story_data
stories = self.get_stories()
stories_data = [self.get_story_data(story) for story in stories]
df_stories_data = | DataFrame(stories_data) | pandas.DataFrame |