| prompt | completion | api |
|---|---|---|
| stringlengths 19–1.03M | stringlengths 4–2.12k | stringlengths 8–90 |
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from scipy.stats import pearsonr
pd.options.display.max_colwidth = 540 # show more characters when printing tables
pd.options.display.max_rows = 50
# copy the following in any new notebook: %matplotlib inline
def corr(data, significance=False, decimals=3):
'''Generates a correlation matrix with p values and sample size, just like SPSS.
Args:
data (pandas.DataFrame): Data for calculating correlations.
significance (bool): Determines whether to include asterisks in correlations.
decimals (int): Used to round values.
Returns:
pandas.DataFrame: SPSS-like correlation matrix.
'''
# (adapted from https://stackoverflow.com/questions/25571882/pandas-columns-correlation-with-statistical-significance)
# generate matrices (r, p vals, sample size)
rs = data.corr(method=lambda x, y: pearsonr(x,y)[0]).round(decimals)
pvals = data.corr(method=lambda x, y: pearsonr(x,y)[1]).round(decimals)
ns = data.corr(method=lambda x, y: len(x)).replace(1, np.nan).round(decimals) # sample size = 1 would be misleading
if(significance):
p = pvals.applymap(lambda x: ''.join(['*' for t in [0.001, 0.01, 0.05] if x <= t])) # matrix with asterisks
rs = rs.astype(str) + p
# (adapted from https://stackoverflow.com/questions/58282538/merging-pandas-dataframes-alternating-rows-without-soritng-rows)
# create new index level enumerating the number of columns
s1 = rs.assign(_col = np.arange(len(rs))).set_index('_col', append=True)
s2 = pvals.assign(_col = np.arange(len(pvals))).set_index('_col', append=True)
s3 = ns.assign(_col = np.arange(len(ns))).set_index('_col', append=True)
# merge these matrices
corr_matrix = (pd.concat([s1, s2, s3], keys=('Pearson\'s r','p value', 'Sample size')) # new index with each indicator
.sort_index(kind='merge', level=2) # sort index, so names in previous line are important
.reset_index(level=2, drop=True) # drop _col index
.swaplevel(0,1)) # invert index levels
return corr_matrix
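# Example usage (illustrative sketch; 'df' is a hypothetical DataFrame of numeric columns):
# df = pd.DataFrame(np.random.rand(30, 3), columns=['a', 'b', 'c'])
# corr(df)                      # stacked Pearson's r / p value / sample size per variable
# corr(df, significance=True)   # r values annotated with significance asterisks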
def pie(values, labels=None, title='', slices=None, percent_only=False, explode=True, color='white'):
'''Display a pie plot.
Args:
values (list): list or pandas.Series of unique values. Using value_counts() is highly recommended.
labels (list): if None, indices of values will be used.
title (str): header for the plot.
slices (int): set a number of slices, to avoid clutter.
percent_only (bool): if False it will show count and percent.
explode (bool): slightly separate the slice with the highest value.
color (str): text color.
'''
if isinstance(values, list):
values = pd.Series(values)
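# Example usage (illustrative sketch; 'df' and its 'category' column are hypothetical):
# pie(df['category'].value_counts(), title='Categories', slices=5)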
import unittest
import qteasy as qt
import pandas as pd
from pandas import Timestamp
import numpy as np
import math
from numpy import int64
import itertools
import datetime
from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list
from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day
from qteasy.utilfuncs import next_market_trade_day, unify, mask_to_signal, list_or_slice, labels_to_dict
from qteasy.utilfuncs import weekday_name, prev_market_trade_day, is_number_like, list_truncate, input_to_list
from qteasy.space import Space, Axis, space_around_centre, ResultPool
from qteasy.core import apply_loop
from qteasy.built_in import SelectingFinanceIndicator, TimingDMA, TimingMACD, TimingCDL, TimingTRIX
from qteasy.tsfuncs import income, indicators, name_change, get_bar
from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index
from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite
from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily
from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic, stock_company
from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv
from qteasy.evaluate import eval_info_ratio, eval_max_drawdown, eval_sharp
from qteasy.evaluate import eval_volatility
from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point
from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr
from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc
from qteasy.tafuncs import macdfix, mfi, minus_di, minus_dm, mom, plus_di, plus_dm
from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi
from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange
from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod
from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows
from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside
from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby
from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu
from qteasy.tafuncs import cdlconcealbabyswall, cdlcounterattack, cdldarkcloudcover
from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing
from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite
from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami
from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod
from qteasy.tafuncs import cdlhomingpigeon, cdlidentical3crows, cdlinneck
from qteasy.tafuncs import cdlinvertedhammer, cdlkicking, cdlkickingbylength
from qteasy.tafuncs import cdlladderbottom, cdllongleggeddoji, cdllongline, cdlmarubozu
from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar
from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlrisefall3methods
from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop
from qteasy.tafuncs import cdlstalledpattern, cdlsticksandwich, cdltakuri, cdltasukigap
from qteasy.tafuncs import cdlthrusting, cdltristar, cdlunique3river, cdlupsidegap2crows
from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle
from qteasy.tafuncs import linearreg_intercept, linearreg_slope, stddev, tsf, var, acos
from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh
from qteasy.tafuncs import sqrt, tan, tanh, add, div, max, maxindex, min, minindex, minmax
from qteasy.tafuncs import minmaxindex, mult, sub, sum
from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data
from qteasy.history import stack_dataframes, dataframe_to_hp, HistoryPanel
from qteasy.database import DataSource
from qteasy.strategy import Strategy, SimpleTiming, RollingTiming, SimpleSelecting, FactoralSelecting
from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs
from qteasy.blender import _exp_to_token, blender_parser, signal_blend
class TestCost(unittest.TestCase):
def setUp(self):
self.amounts = np.array([10000., 20000., 10000.])
self.op = np.array([0., 1., -0.33333333])
self.amounts_to_sell = np.array([0., 0., -3333.3333])
self.cash_to_spend = np.array([0., 20000., 0.])
self.prices = np.array([10., 20., 10.])
self.r = qt.Cost(0.0)
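# Fixture overview (descriptive notes):
# - self.amounts:         current holdings of three assets, in shares
# - self.op:              operation signal (hold / buy with all available cash / sell one third)
# - self.amounts_to_sell: sell 3333.3333 shares of the third asset
# - self.cash_to_spend:   spend 20000 in cash on the second asset
# - self.prices:          unit prices used by all fee calculations
# - self.r:               a Cost object whose fee parameters each test case overrides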
def test_rate_creation(self):
"""测试对象生成"""
print('testing rates objects\n')
self.assertIsInstance(self.r, qt.Cost, 'Type should be Rate')
self.assertEqual(self.r.buy_fix, 0)
self.assertEqual(self.r.sell_fix, 0)
def test_rate_operations(self):
"""测试交易费率对象"""
self.assertEqual(self.r['buy_fix'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['sell_fix'], 0.0, 'Item got is wrong')
self.assertEqual(self.r['buy_rate'], 0.003, 'Item got is incorrect')
self.assertEqual(self.r['sell_rate'], 0.001, 'Item got is incorrect')
self.assertEqual(self.r['buy_min'], 5., 'Item got is incorrect')
self.assertEqual(self.r['sell_min'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['slipage'], 0.0, 'Item got is incorrect')
self.assertEqual(np.allclose(self.r.calculate(self.amounts),
[0.003, 0.003, 0.003]),
True,
'fee calculation wrong')
def test_rate_fee(self):
"""测试买卖交易费率"""
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 0.
self.r.sell_min = 0.
self.r.slipage = 0.
print('\nSell result with fixed rate = 0.001 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1.))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 1:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 1))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect')
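# Worked check of the moq = 0 cases above (under the apparent fee model):
# sell:     gross = 3333.3333 * 10 = 33333.333; fee = 33333.333 * 0.001 = 33.333333;
#           cash received = 33333.333 - 33.333333 = 33299.999667
# purchase: shares = 20000 / (20 * (1 + 0.003)) = 997.00897308;
#           fee = 997.00897308 * 20 * 0.003 = 59.8205383848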
def test_min_fee(self):
"""测试最低交易费用"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 300
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33033.333)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33030)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 32700)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
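# Worked check of the moq = 0 cases above (assuming the minimum fee replaces a smaller rate-based fee):
# purchase: shares = (20000 - 300) / 20 = 985, total cash spent = 20000, fee = 300
# sell:     gross = 33333.333, fee = max(0, 300) = 300, cash received = 33033.333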
def test_rate_with_min(self):
"""测试最低交易费用对其他交易费率参数的影响"""
self.r.buy_rate = 0.0153
self.r.sell_rate = 0.01
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 333
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 301.3887520929774, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32999.99967)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.33333)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32996.7)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.3)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32667.0)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.0)
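# Worked check of the moq = 0 cases above (the rate-based fee applies because it exceeds the minimum):
# purchase: shares = 20000 / (20 * 1.0153) = 984.9305624; fee = 984.9305624 * 20 * 0.0153 = 301.389 > 300
# sell:     rate-based fee = 33333.333 * 0.01 = 333.333 > 333; cash received = 33333.333 - 333.333 = 32999.99967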
def test_fixed_fee(self):
"""测试固定交易费用"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 200
self.r.sell_fix = 150
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nselling result of fixed cost with fixed fee = 150 and moq=0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 0))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], 33183.333, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150.0, msg='result incorrect')
print('\nselling result of fixed cost with fixed fee = 150 and moq=100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3300.]), True,
f'result incorrect, {test_fixed_fee_result[0]} does not equal [0, 0, -3300]')
self.assertAlmostEqual(test_fixed_fee_result[1], 32850., msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150., msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 990., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18200.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
def test_slipage(self):
"""测试交易滑点"""
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.slipage = 1E-9
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-9 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-9 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
print('\nselling result with fixed rate = 0.001 and slipage = 1E-9:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True,
f'{test_fixed_fee_result[0]} does not equal [0, 0, -3333.3333]')
self.assertAlmostEqual(test_fixed_fee_result[1], 33298.88855591,
msg=f'{test_fixed_fee_result[1]} does not equal 33298.88855591')
self.assertAlmostEqual(test_fixed_fee_result[2], 34.44444409,
msg=f'{test_fixed_fee_result[2]} does not equal 34.44444409')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 996.98909294, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 60.21814121353513, msg='result incorrect')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18054.36, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 54.36, msg='result incorrect')
class TestSpace(unittest.TestCase):
def test_creation(self):
"""
test if creation of space object is fine
"""
# first group of inputs, output Space with two discr axes over [0, 10]
print('testing space objects\n')
# pars_list = [[(0, 10), (0, 10)],
# [[0, 10], [0, 10]]]
#
# types_list = ['discr',
# ['discr', 'discr']]
#
# input_pars = itertools.product(pars_list, types_list)
# for p in input_pars:
# # print(p)
# s = qt.Space(*p)
# b = s.boes
# t = s.types
# # print(s, t)
# self.assertIsInstance(s, qt.Space)
# self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
# self.assertEqual(t, ['discr', 'discr'], 'types incorrect')
#
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = ['foo, bar',
['foo', 'bar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['enum', 'enum'], 'types incorrect')
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = [['discr', 'foobar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['discr', 'enum'], 'types incorrect')
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types='conti, enum')
self.assertEqual(s.types, ['conti', 'enum'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 2))
self.assertEqual(s.shape, (np.inf, 2))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(1, 2), (2, 3), (3, 4)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['discr', 'discr', 'discr'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (2, 2, 2))
self.assertEqual(s.shape, (2, 2, 2))
self.assertEqual(s.count, 8)
self.assertEqual(s.boes, [(1, 2), (2, 3), (3, 4)])
pars_list = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
pars_list = [((1, 2, 3), (2, 3, 4), (3, 4, 5))]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum'])
self.assertEqual(s.dim, 1)
self.assertEqual(s.size, (3,))
self.assertEqual(s.shape, (3,))
self.assertEqual(s.count, 3)
pars_list = ((1, 2, 3), (2, 3, 4), (3, 4, 5))
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
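# Axis type inference observed above: a 2-tuple of floats -> 'conti' (continuous range, infinite count),
# a 2-tuple of ints -> 'discr' (all integers between the bounds, inclusive),
# a tuple of three or more values (or an explicit list of such tuples) -> 'enum' (the values themselves).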
def test_extract(self):
"""
:return:
"""
pars_list = [(0, 10), (0, 10)]
types_list = ['discr', 'discr']
s = Space(pars=pars_list, par_types=types_list)
extracted_int, count = s.extract(3, 'interval')
extracted_int_list = list(extracted_int)
print('extracted int\n', extracted_int_list)
self.assertEqual(count, 16, 'extraction count wrong!')
self.assertEqual(extracted_int_list, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
extracted_rand, count = s.extract(10, 'rand')
extracted_rand_list = list(extracted_rand)
self.assertEqual(count, 10, 'extraction count wrong!')
print('extracted rand\n', extracted_rand_list)
for point in list(extracted_rand_list):
self.assertEqual(len(point), 2)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
extracted_int2, count = s.extract(3, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list2 = list(extracted_int2)
self.assertEqual(extracted_int_list2, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
print('extracted int list 2\n', extracted_int_list2)
self.assertIsInstance(extracted_int_list2[0][0], float)
self.assertIsInstance(extracted_int_list2[0][1], (int, int64))
extracted_rand2, count = s.extract(10, 'rand')
self.assertEqual(count, 10, 'extraction count wrong!')
extracted_rand_list2 = list(extracted_rand2)
print('extracted rand list 2:\n', extracted_rand_list2)
for point in extracted_rand_list2:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], float)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], (int, int64))
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), ('a', 'b')]
s = Space(pars=pars_list, par_types='enum, enum')
extracted_int3, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list3 = list(extracted_int3)
self.assertEqual(extracted_int_list3, [(0., 'a'), (0., 'b'), (10, 'a'), (10, 'b')],
'space extraction wrong!')
print('extracted int list 3\n', extracted_int_list3)
self.assertIsInstance(extracted_int_list3[0][0], float)
self.assertIsInstance(extracted_int_list3[0][1], str)
extracted_rand3, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list3 = list(extracted_rand3)
print('extracted rand list 3:\n', extracted_rand_list3)
for point in extracted_rand_list3:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (float, int))
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], str)
self.assertIn(point[1], ['a', 'b'])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14))]
s = Space(pars=pars_list, par_types='enum')
extracted_int4, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list4 = list(extracted_int4)
it = zip(extracted_int_list4, [(0, 10), (1, 'c'), (0, 'b'), (1, 14)])
for item, item2 in it:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 4\n', extracted_int_list4)
self.assertIsInstance(extracted_int_list4[0], tuple)
extracted_rand4, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list4 = list(extracted_rand4)
print('extracted rand list 4:\n', extracted_rand_list4)
for point in extracted_rand_list4:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (int, str))
self.assertIn(point[0], [0, 1, 'a'])
self.assertIsInstance(point[1], (int, str))
self.assertIn(point[1], [10, 14, 'b', 'c'])
self.assertIn(point, [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14)), (1, 4)]
s = Space(pars=pars_list, par_types='enum, discr')
extracted_int5, count = s.extract(1, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list5 = list(extracted_int5)
for item, item2 in extracted_int_list5:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 5\n', extracted_int_list5)
self.assertIsInstance(extracted_int_list5[0], tuple)
extracted_rand5, count = s.extract(5, 'rand')
self.assertEqual(count, 5, 'extraction count wrong!')
extracted_rand_list5 = list(extracted_rand5)
print('extracted rand list 5:\n', extracted_rand_list5)
for point in extracted_rand_list5:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], tuple)
print(f'type of point[1] is {type(point[1])}')
self.assertIsInstance(point[1], (int, np.int64))
self.assertIn(point[0], [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
print(f'test incremental extraction')
pars_list = [(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]
s = Space(pars_list)
ext, count = s.extract(64, 'interval')
self.assertEqual(count, 4096)
points = list(ext)
# all points have been extracted; generate subspaces around 10 of them
# check that each subspace is a Space and lies within s; extract a point set with interval 32 and check the count
for point in points[1000:1010]:
subspace = s.from_point(point, 64)
self.assertIsInstance(subspace, Space)
self.assertTrue(subspace in s)
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
ext, count = subspace.extract(32)
points = list(ext)
self.assertGreaterEqual(count, 512)
self.assertLessEqual(count, 4096)
print(f'\n---------------------------------'
f'\nthe space created around point <{point}> is'
f'\n{subspace.boes}'
f'\nand extracted {count} points, the first 5 are:'
f'\n{points[:5]}')
def test_axis_extract(self):
# test axis object with conti type
axis = Axis((0., 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'conti')
self.assertEqual(axis.axis_boe, (0., 5.))
self.assertEqual(axis.count, np.inf)
self.assertEqual(axis.size, 5.0)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [0., 1., 2., 3., 4.]))
self.assertTrue(np.allclose(axis.extract(0.5, 'int'), [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]))
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(0 <= item <= 5) for item in extracted]))
# test axis object with discrete type
axis = Axis((1, 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'discr')
self.assertEqual(axis.axis_boe, (1, 5))
self.assertEqual(axis.count, 5)
self.assertEqual(axis.size, 5)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [1, 2, 3, 4, 5]))
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 2, 3, 4, 5]) for item in extracted]))
# test axis object with enumerate type
axis = Axis((1, 5, 7, 10, 'A', 'F'))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'enum')
self.assertEqual(axis.axis_boe, (1, 5, 7, 10, 'A', 'F'))
self.assertEqual(axis.count, 6)
self.assertEqual(axis.size, 6)
self.assertEqual(axis.extract(1, 'int'), [1, 5, 7, 10, 'A', 'F'])
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 5, 7, 10, 'A', 'F']) for item in extracted]))
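# Axis.extract() behaviour exercised above: in 'int' (interval) mode the first argument acts as a step size,
# so fractional steps are only valid on 'conti' axes and raise ValueError on 'discr'/'enum' axes;
# in 'rand' mode it is the number of random samples drawn from the axis.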
def test_from_point(self):
"""测试从一个点生成一个space"""
# create a space, then pick a point in it and a distance to derive a sub-space
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10., 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
print('create subspace from a point in space')
p = (3, 3)
distance = 2
subspace = s.from_point(p, distance)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'discr'])
self.assertEqual(subspace.dim, 2)
self.assertEqual(subspace.size, (4.0, 5))
self.assertEqual(subspace.shape, (np.inf, 5))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(1, 5), (1, 5)])
print('create subspace from a 6 dimensional discrete space')
s = Space(pars=[(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 65345616)
self.assertEqual(subspace.size, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.shape, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.count, 65345616)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace from a 6 dimensional continuous space')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 48000000)
self.assertEqual(subspace.size, (15.0, 20.0, 20.0, 20.0, 20.0, 20.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace with different distances on each dimension')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = [10, 5, 5, 10, 10, 5]
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 6000000)
self.assertEqual(subspace.size, (15.0, 10.0, 10.0, 20.0, 20.0, 10.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (195, 205), (145, 155), (140, 160), (140, 160), (145, 155)])
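# from_point() behaviour exercised above: each axis of the sub-space spans the centre point +/- distance,
# clipped to the parent space's bounds (e.g. (3, 3) with distance 2 in [(0., 10), (0, 10)] -> [(1, 5), (1, 5)]);
# the distance may also be given per dimension as a list.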
class TestCashPlan(unittest.TestCase):
def setUp(self):
self.cp1 = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1)
self.cp1.info()
self.cp2 = qt.CashPlan(['20100501'], 10000)
self.cp2.info()
self.cp3 = qt.CashPlan(pd.date_range(start='2019-01-01',
freq='Y',
periods=12),
[i * 1000 + 10000 for i in range(12)],
0.035)
self.cp3.info()
def test_creation(self):
self.assertIsInstance(self.cp1, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp2, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp3, qt.CashPlan, 'CashPlan object creation wrong')
# test __repr__()
print(self.cp1)
print(self.cp2)
print(self.cp3)
# test __str__()
self.cp1.info()
self.cp2.info()
self.cp3.info()
# test assertion errors
self.assertRaises(AssertionError, qt.CashPlan, '2016-01-01', [10000, 10000])
self.assertRaises(KeyError, qt.CashPlan, '2020-20-20', 10000)
def test_properties(self):
self.assertEqual(self.cp1.amounts, [20000, 10000], 'property wrong')
self.assertEqual(self.cp1.first_day, Timestamp('2010-01-01'))
self.assertEqual(self.cp1.last_day, Timestamp('2012-01-01'))
self.assertEqual(self.cp1.investment_count, 2)
self.assertEqual(self.cp1.period, 730)
self.assertEqual(self.cp1.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01')])
self.assertEqual(self.cp1.ir, 0.1)
self.assertAlmostEqual(self.cp1.closing_value, 34200)
self.assertAlmostEqual(self.cp2.closing_value, 10000)
self.assertAlmostEqual(self.cp3.closing_value, 220385.3483685)
self.assertIsInstance(self.cp1.plan, pd.DataFrame)
self.assertIsInstance(self.cp2.plan, pd.DataFrame)
self.assertIsInstance(self.cp3.plan, pd.DataFrame)
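# Sanity check of cp1.closing_value = 34200, consistent with annual compounding at ir = 0.1:
# 20000 invested on 2010-01-01 grows over two years to 20000 * 1.1 ** 2 = 24200,
# plus the 10000 invested on 2012-01-01 gives 34200.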
def test_operation(self):
cp_self_add = self.cp1 + self.cp1
cp_add = self.cp1 + self.cp2
cp_add_int = self.cp1 + 10000
cp_mul_int = self.cp1 * 2
cp_mul_float = self.cp2 * 1.5
cp_mul_time = 3 * self.cp2
cp_mul_time2 = 2 * self.cp1
cp_mul_time3 = 2 * self.cp3
cp_mul_float2 = 2. * self.cp3
self.assertIsInstance(cp_self_add, qt.CashPlan)
self.assertEqual(cp_self_add.amounts, [40000, 20000])
self.assertEqual(cp_add.amounts, [20000, 10000, 10000])
self.assertEqual(cp_add_int.amounts, [30000, 20000])
self.assertEqual(cp_mul_int.amounts, [40000, 20000])
self.assertEqual(cp_mul_float.amounts, [15000])
self.assertEqual(cp_mul_float.dates, [Timestamp('2010-05-01')])
self.assertEqual(cp_mul_time.amounts, [10000, 10000, 10000])
self.assertEqual(cp_mul_time.dates, [Timestamp('2010-05-01'),
Timestamp('2011-05-01'),
Timestamp('2012-04-30')])
self.assertEqual(cp_mul_time2.amounts, [20000, 10000, 20000, 10000])
self.assertEqual(cp_mul_time2.dates, [Timestamp('2010-01-01'),
Timestamp('2012-01-01'),
Timestamp('2014-01-01'),
Timestamp('2016-01-01')])
self.assertEqual(cp_mul_time3.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31'),
Timestamp('2031-12-29'),
Timestamp('2032-12-29'),
Timestamp('2033-12-29'),
Timestamp('2034-12-29'),
Timestamp('2035-12-29'),
Timestamp('2036-12-29'),
Timestamp('2037-12-29'),
Timestamp('2038-12-29'),
Timestamp('2039-12-29'),
Timestamp('2040-12-29'),
Timestamp('2041-12-29'),
Timestamp('2042-12-29')])
self.assertEqual(cp_mul_float2.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31')])
self.assertEqual(cp_mul_float2.amounts, [20000.0,
22000.0,
24000.0,
26000.0,
28000.0,
30000.0,
32000.0,
34000.0,
36000.0,
38000.0,
40000.0,
42000.0])
class TestPool(unittest.TestCase):
def setUp(self):
self.p = ResultPool(5)
self.items = ['first', 'second', (1, 2, 3), 'this', 24]
self.perfs = [1, 2, 3, 4, 5]
self.additional_result1 = ('abc', 12)
self.additional_result2 = ([1, 2], -1)
self.additional_result3 = (12, 5)
def test_create(self):
self.assertIsInstance(self.p, ResultPool)
def test_operation(self):
self.p.in_pool(self.additional_result1[0], self.additional_result1[1])
self.p.cut()
self.assertEqual(self.p.item_count, 1)
self.assertEqual(self.p.items, ['abc'])
for item, perf in zip(self.items, self.perfs):
self.p.in_pool(item, perf)
self.assertEqual(self.p.item_count, 6)
self.assertEqual(self.p.items, ['abc', 'first', 'second', (1, 2, 3), 'this', 24])
self.p.cut()
self.assertEqual(self.p.items, ['second', (1, 2, 3), 'this', 24, 'abc'])
self.assertEqual(self.p.perfs, [2, 3, 4, 5, 12])
self.p.in_pool(self.additional_result2[0], self.additional_result2[1])
self.p.in_pool(self.additional_result3[0], self.additional_result3[1])
self.assertEqual(self.p.item_count, 7)
self.p.cut(keep_largest=False)
self.assertEqual(self.p.items, [[1, 2], 'second', (1, 2, 3), 'this', 24])
self.assertEqual(self.p.perfs, [-1, 2, 3, 4, 5])
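# ResultPool behaviour exercised above: the pool has a capacity of 5; in_pool() appends an item with its
# performance value, and cut() trims back to capacity, keeping the largest performers by default
# (or the smallest when keep_largest=False).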
class TestCoreSubFuncs(unittest.TestCase):
"""Test all functions in core.py"""
def setUp(self):
pass
def test_input_to_list(self):
print('Testing input_to_list() function')
input_str = 'first'
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 3), ['first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 4), ['first', 'first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 2, None), ['first', 'first'])
input_list = ['first', 'second']
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 3), ['first', 'second', None])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 4, 'padder'), ['first', 'second', 'padder', 'padder'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 1), ['first', 'second'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, -5), ['first', 'second'])
def test_point_in_space(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
p2 = (-1, 3, 10)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
self.assertFalse(p2 in sp)
print(f'point {p2} is not in space {sp}')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)], 'conti, conti, enum')
p1 = (5.5, 3.2, 8)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
def test_space_in_space(self):
print('test if a space is in another space')
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
sp2 = Space([(0., 10.), (0., 10.), (0., 10.)])
self.assertTrue(sp2 in sp)
self.assertTrue(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is in space {sp2}\n'
f'they are equal to each other\n')
sp2 = Space([(0, 5.), (2, 7.), (3., 9.)])
self.assertTrue(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'{sp2} is a sub space of {sp}\n')
sp2 = Space([(0, 5), (2, 7), (3., 9)])
self.assertFalse(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)])
self.assertFalse(sp in sp2)
self.assertFalse(sp2 in sp)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
def test_space_around_centre(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
ssp = space_around_centre(space=sp, centre=p1, radius=1.2)
print(ssp.boes)
print('\ntest multiple diameters:')
self.assertEqual(ssp.boes, [(4.3, 6.7), (2.0, 4.4), (5.8, 8.2)])
ssp = space_around_centre(space=sp, centre=p1, radius=[1, 2, 1])
print(ssp.boes)
self.assertEqual(ssp.boes, [(4.5, 6.5), (1.2000000000000002, 5.2), (6.0, 8.0)])
print('\ntest points on edge:')
p2 = (5.5, 3.2, 10)
ssp = space_around_centre(space=sp, centre=p1, radius=3.9)
print(ssp.boes)
self.assertEqual(ssp.boes, [(1.6, 9.4), (0.0, 7.1), (3.1, 10.0)])
print('\ntest enum spaces')
sp = Space([(0, 100), range(40, 3, -2)], 'discr, enum')
p1 = [34, 12]
ssp = space_around_centre(space=sp, centre=p1, radius=5, ignore_enums=False)
self.assertEqual(ssp.boes, [(29, 39), (22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(ssp.boes)
print('\ntest enum space and ignore enum axis')
ssp = space_around_centre(space=sp, centre=p1, radius=5)
self.assertEqual(ssp.boes, [(29, 39),
(40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(sp.boes)
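# space_around_centre() behaviour exercised above: each axis of the result spans centre +/- radius,
# clipped to the parent space; radius may be a scalar or a per-axis list; enum axes are kept whole
# by default and are only narrowed to values within radius positions of the centre when ignore_enums=False.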
def test_get_stock_pool(self):
print(f'start test building stock pool function\n')
share_basics = stock_basic(fields='ts_code,symbol,name,area,industry,market,list_date,exchange')
print(f'\nselect all stocks by area')
stock_pool = qt.get_stock_pool(area='上海')
print(f'{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "上海"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].eq('上海').all())
print(f'\nselect all stocks by multiple areas')
stock_pool = qt.get_stock_pool(area='贵州,北京,天津')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are in list of ["贵州", "北京", "天津"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['贵州',
'北京',
'天津']).all())
print(f'\nselect all stocks by area and industry')
stock_pool = qt.get_stock_pool(area='四川', industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "四川", and industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['四川']).all())
print(f'\nselect all stocks by industry')
stock_pool = qt.get_stock_pool(industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stocks industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
print(f'\nselect all stocks by market')
stock_pool = qt.get_stock_pool(market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock market is "主板"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
print(f'\nselect all stocks by market and list date')
stock_pool = qt.get_stock_pool(date='2000-01-01', market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock markets are "主板", and list dates are on or before "2000-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('2000-01-01').all())
print(f'\nselect all stocks by list date')
stock_pool = qt.get_stock_pool(date='1997-01-01')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all list dates are on or before "1997-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1997-01-01').all())
print(f'\nselect all stocks by exchange')
stock_pool = qt.get_stock_pool(exchange='SSE')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['exchange'].eq('SSE').all())
print(f'\nselect all stocks by industry, area and list date')
industry_list = ['银行', '全国地产', '互联网', '环境保护', '区域地产',
'酒店餐饮', '运输设备', '综合类', '建筑工程', '玻璃',
'家用电器', '文教休闲', '其他商业', '元器件', 'IT设备',
'其他建材', '汽车服务', '火力发电', '医药商业', '汽车配件',
'广告包装', '轻工机械', '新型电力', '多元金融', '饲料']
area_list = ['深圳', '北京', '吉林', '江苏', '辽宁', '广东',
'安徽', '四川', '浙江', '湖南', '河北', '新疆',
'山东', '河南', '山西', '江西', '青海', '湖北',
'内蒙', '海南', '重庆', '陕西', '福建', '广西',
'上海']
stock_pool = qt.get_stock_pool(date='19980101',
industry=industry_list,
area=area_list)
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if industry, area and list date all match the given filters\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1998-01-01').all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(industry_list).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(area_list).all())
self.assertRaises(KeyError, qt.get_stock_pool, industry=25)
self.assertRaises(KeyError, qt.get_stock_pool, share_name='000300.SH')
self.assertRaises(KeyError, qt.get_stock_pool, markets='SSE')
class TestEvaluations(unittest.TestCase):
"""Test all evaluation functions in core.py"""
# the manually calculated reference results below are kept in an Excel file
def setUp(self):
"""用np.random生成测试用数据,使用cumsum()模拟股票走势"""
self.test_data1 = pd.DataFrame([5.34892759, 5.65768696, 5.79227076, 5.56266871, 5.88189632,
6.24795001, 5.92755558, 6.38748165, 6.31331899, 5.86001665,
5.61048472, 5.30696736, 5.40406792, 5.03180571, 5.37886353,
5.78608307, 6.26540339, 6.59348026, 6.90943801, 6.70911677,
6.33015954, 6.06697417, 5.9752499, 6.45786408, 6.95273763,
6.7691991, 6.70355481, 6.28048969, 6.61344541, 6.24620003,
6.47409983, 6.4522311, 6.8773094, 6.99727832, 6.59262674,
6.59014938, 6.63758237, 6.38331869, 6.09902105, 6.35390109,
6.51993567, 6.87244592, 6.83963485, 7.08797815, 6.88003144,
6.83657323, 6.97819483, 7.01600276, 7.12554256, 7.58941523,
7.61014457, 7.21224091, 7.48174399, 7.66490854, 7.51371968,
7.11586198, 6.97147399, 6.67453301, 6.2042138, 6.33967015,
6.22187938, 5.98426993, 6.37096079, 6.55897161, 6.26422645,
6.69363762, 7.12668015, 6.83232926, 7.30524081, 7.4262041,
7.54031383, 7.17545919, 7.20659257, 7.44886016, 7.37094393,
6.88011022, 7.08142491, 6.74992833, 6.5967097, 6.21336693,
6.35565105, 6.82347596, 6.44773408, 6.84538053, 6.47966466,
6.09699528, 5.63927014, 6.01081024, 6.20585303, 6.60528206,
7.01594726, 7.03684251, 6.76574977, 7.08740846, 6.65336462,
7.07126686, 6.80058956, 6.79241977, 6.47843472, 6.39245474],
columns=['value'])
self.test_data2 = pd.DataFrame([5.09276527, 4.83828592, 4.6000911, 4.63170487, 4.63566451,
4.50546921, 4.96390044, 4.64557907, 4.25787855, 3.76585551,
3.38826334, 3.76243422, 4.06365426, 3.87084726, 3.91400935,
4.13438822, 4.27064542, 4.56776104, 5.03800296, 5.31070529,
5.39902276, 5.21186286, 5.05683114, 4.68842046, 5.11895168,
5.27151571, 5.72294993, 6.09961056, 6.26569635, 6.48806151,
6.16058885, 6.2582459, 6.38934791, 6.57831057, 6.19508831,
5.70155153, 5.20435735, 5.36538825, 5.40450056, 5.2227697,
5.37828693, 5.53058991, 6.02996797, 5.76802181, 5.66166713,
6.07988994, 5.61794367, 5.63218151, 6.10728013, 6.0324168,
6.27164431, 6.27551239, 6.52329665, 7.00470007, 7.34163113,
7.33699083, 7.67661334, 8.09395749, 7.68086668, 7.58341161,
7.46219819, 7.58671899, 7.19348298, 7.40088323, 7.47562005,
7.93342043, 8.2286081, 8.3521632, 8.43590025, 8.34977395,
8.57563095, 8.81586328, 9.08738649, 9.01542031, 8.8653815,
9.21763111, 9.04233017, 8.59533999, 8.47590075, 8.70857222,
8.78890756, 8.92697606, 9.35743773, 9.68280866, 10.15622021,
10.55908549, 10.6337894, 10.55197128, 10.65435176, 10.54611045,
10.19432562, 10.48320884, 10.36176768, 10.03186854, 10.23656092,
10.0062843, 10.13669686, 10.30758958, 9.87904176, 10.05126375],
columns=['value'])
self.test_data3 = pd.DataFrame([5.02851874, 5.20700348, 5.02410709, 5.49836387, 5.06834371,
5.10956737, 5.15314979, 5.02256472, 5.09746382, 5.23909247,
4.93410336, 4.96316186, 5.40026682, 5.7353255, 5.53438319,
5.79092139, 5.67528173, 5.89840855, 5.75379463, 6.10855386,
5.77322365, 5.84538021, 5.6103973, 5.7518655, 5.49729695,
5.13610628, 5.30524121, 5.68093462, 5.73251319, 6.04420783,
6.26929843, 6.59610234, 6.09872345, 6.25475121, 6.72927396,
6.91395783, 7.00693283, 7.36217783, 7.71516676, 7.67580263,
7.62477511, 7.73600568, 7.53457914, 7.46170277, 7.83658014,
8.11481319, 8.03705544, 7.64948845, 7.52043731, 7.67247943,
7.46511982, 7.43541798, 7.58856517, 7.9392717, 8.25406287,
7.77031632, 8.03223447, 7.86799055, 7.57630999, 7.33230519,
7.22378732, 6.85972264, 7.17548456, 7.5387846, 7.2392632,
6.8455644, 6.59557185, 6.6496796, 6.73685623, 7.18598015,
7.13619128, 6.88060157, 7.1399681, 7.30308077, 6.94942434,
7.0247815, 7.37567798, 7.50080197, 7.59719284, 7.14520561,
7.29913484, 7.79551341, 8.15497781, 8.40456095, 8.86516528,
8.53042688, 8.94268762, 8.52048006, 8.80036284, 8.91602364,
9.19953385, 8.70828953, 8.24613093, 8.18770453, 7.79548389,
7.68627967, 7.23205036, 6.98302636, 7.06515819, 6.95068113],
columns=['value'])
self.test_data4 = pd.DataFrame([4.97926539, 5.44016005, 5.45122915, 5.74485615, 5.45600553,
5.44858945, 5.2435413, 5.47315161, 5.58464303, 5.36179749,
5.38236326, 5.29614981, 5.76523508, 5.75102892, 6.15316618,
6.03852528, 6.01442228, 5.70510182, 5.22748133, 5.46762379,
5.78926267, 5.8221362, 5.61236849, 5.30615725, 5.24200611,
5.41042642, 5.59940342, 5.28306781, 4.99451932, 5.08799266,
5.38865647, 5.58229139, 5.33492845, 5.48206276, 5.09721379,
5.39190493, 5.29965087, 5.0374415, 5.50798022, 5.43107577,
5.22759507, 4.991809, 5.43153084, 5.39966868, 5.59916352,
5.66412137, 6.00611838, 5.63564902, 5.66723484, 5.29863863,
4.91115153, 5.3749929, 5.75082334, 6.08308148, 6.58091182,
6.77848803, 7.19588758, 7.64862286, 7.99818347, 7.91824794,
8.30341071, 8.45984973, 7.98700002, 8.18924931, 8.60755649,
8.66233396, 8.91018407, 9.0782739, 9.33515448, 8.95870245,
8.98426422, 8.50340317, 8.64916085, 8.93592407, 8.63145745,
8.65322862, 8.39543204, 8.37969997, 8.23394504, 8.04062872,
7.91259763, 7.57252171, 7.72670114, 7.74486117, 8.06908188,
7.99166889, 7.92155906, 8.39956136, 8.80181323, 8.47464091,
8.06557064, 7.87145573, 8.0237959, 8.39481998, 8.68525692,
8.81185461, 8.98632237, 9.0989835, 8.89787405, 8.86508591],
columns=['value'])
self.test_data5 = pd.DataFrame([4.50258923, 4.35142568, 4.07459514, 3.87791297, 3.73715985,
3.98455684, 4.07587908, 4.00042472, 4.28276612, 4.01362051,
4.13713565, 4.49312372, 4.48633159, 4.4641207, 4.13444605,
3.79107217, 4.22941629, 4.56548511, 4.92472163, 5.27723158,
5.67409193, 6.00176917, 5.88889928, 5.55256103, 5.39308314,
5.2610492, 5.30738908, 5.22222408, 4.90332238, 4.57499908,
4.96097146, 4.81531011, 4.39115442, 4.63200662, 5.04588813,
4.67866025, 5.01705123, 4.83562258, 4.60381702, 4.66187576,
4.41292828, 4.86604507, 4.42280124, 4.07517294, 4.16317319,
4.10316596, 4.42913598, 4.06609666, 3.96725913, 4.15965746,
4.12379564, 4.04054068, 3.84342851, 3.45902867, 3.17649855,
3.09773586, 3.5502119, 3.66396995, 3.66306483, 3.29131401,
2.79558533, 2.88319542, 3.03671098, 3.44645857, 3.88167161,
3.57961874, 3.60180276, 3.96702102, 4.05429995, 4.40056979,
4.05653231, 3.59600456, 3.60792477, 4.09989922, 3.73503663,
4.01892626, 3.94597242, 3.81466605, 3.71417992, 3.93767156,
4.42806557, 4.06988106, 4.03713636, 4.34408673, 4.79810156,
5.18115011, 4.89798406, 5.3960077, 5.72504875, 5.61894017,
5.1958197, 4.85275896, 5.17550207, 4.71548987, 4.62408567,
4.55488535, 4.36532649, 4.26031979, 4.25225607, 4.58627048],
columns=['value'])
self.test_data6 = pd.DataFrame([5.08639513, 5.05761083, 4.76160923, 4.62166504, 4.62923183,
4.25070173, 4.13447513, 3.90890013, 3.76687608, 3.43342482,
3.67648224, 3.6274775, 3.9385404, 4.39771627, 4.03199346,
3.93265288, 3.50059789, 3.3851961, 3.29743973, 3.2544872,
2.93692949, 2.70893003, 2.55461976, 2.20922332, 2.29054475,
2.2144714, 2.03726827, 2.39007617, 2.29866155, 2.40607111,
2.40440444, 2.79374649, 2.66541922, 2.27018079, 2.08505127,
2.55478864, 2.22415625, 2.58517923, 2.58802256, 2.94870959,
2.69301739, 2.19991535, 2.69473146, 2.64704637, 2.62753542,
2.14240825, 2.38565154, 1.94592117, 2.32243877, 2.69337246,
2.51283854, 2.62484451, 2.15559054, 2.35410875, 2.31219177,
1.96018265, 2.34711266, 2.58083322, 2.40290041, 2.20439791,
2.31472425, 2.16228248, 2.16439749, 2.20080737, 1.73293206,
1.9264407, 2.25089861, 2.69269101, 2.59296687, 2.1420998,
1.67819153, 1.98419023, 2.14479494, 1.89055376, 1.96720648,
1.9916694, 2.37227761, 2.14446036, 2.34573903, 1.86162546,
2.1410721, 2.39204939, 2.52529064, 2.47079939, 2.9299031,
3.09452923, 2.93276708, 3.21731309, 3.06248964, 2.90413406,
2.67844632, 2.45621213, 2.41463398, 2.7373913, 3.14917045,
3.4033949, 3.82283446, 4.02285451, 3.7619638, 4.10346795],
columns=['value'])
self.test_data7 = pd.DataFrame([4.75233583, 4.47668283, 4.55894263, 4.61765848, 4.622892,
4.58941116, 4.32535872, 3.88112797, 3.47237806, 3.50898953,
3.82530406, 3.6718017, 3.78918195, 4.1800752, 4.01818557,
4.40822582, 4.65474654, 4.89287256, 4.40879274, 4.65505126,
4.36876403, 4.58418934, 4.75687172, 4.3689799, 4.16126498,
4.0203982, 3.77148242, 3.38198096, 3.07261764, 2.9014741,
2.5049543, 2.756105, 2.28779058, 2.16986991, 1.8415962,
1.83319008, 2.20898291, 2.00128981, 1.75747025, 1.26676663,
1.40316876, 1.11126484, 1.60376367, 1.22523829, 1.58816681,
1.49705679, 1.80244138, 1.55128293, 1.35339409, 1.50985759,
1.0808451, 1.05892796, 1.43414812, 1.43039101, 1.73631655,
1.43940867, 1.82864425, 1.71088265, 2.12015154, 2.45417128,
2.84777618, 2.7925612, 2.90975121, 3.25920745, 3.13801182,
3.52733677, 3.65468491, 3.69395211, 3.49862035, 3.24786017,
3.64463138, 4.00331929, 3.62509565, 3.78013949, 3.4174012,
3.76312271, 3.62054004, 3.67206716, 3.60596058, 3.38636199,
3.42580676, 3.32921095, 3.02976759, 3.28258676, 3.45760838,
3.24917528, 2.94618304, 2.86980011, 2.63191259, 2.39566759,
2.53159917, 2.96273967, 3.25626185, 2.97425402, 3.16412191,
3.58280763, 3.23257727, 3.62353556, 3.12806399, 2.92532313],
columns=['value'])
# Build a test series of 500 data points, used to test the evaluation process when there are more than 250 data points
self.long_data = pd.DataFrame([9.879, 9.916, 10.109, 10.214, 10.361, 10.768, 10.594, 10.288,
10.082, 9.994, 10.125, 10.126, 10.384, 10.734, 10.4, 10.87,
11.338, 11.061, 11.415, 11.724, 12.077, 12.196, 12.064, 12.423,
12.19, 11.729, 11.677, 11.448, 11.485, 10.989, 11.242, 11.239,
11.113, 11.075, 11.471, 11.745, 11.754, 11.782, 12.079, 11.97,
12.178, 11.95, 12.438, 12.612, 12.804, 12.952, 12.612, 12.867,
12.832, 12.832, 13.015, 13.315, 13.249, 12.904, 12.776, 12.64,
12.543, 12.287, 12.225, 11.844, 11.985, 11.945, 11.542, 11.871,
12.245, 12.228, 12.362, 11.899, 11.962, 12.374, 12.816, 12.649,
12.252, 12.579, 12.3, 11.988, 12.177, 12.312, 12.744, 12.599,
12.524, 12.82, 12.67, 12.876, 12.986, 13.271, 13.606, 13.82,
14.161, 13.833, 13.831, 14.137, 13.705, 13.414, 13.037, 12.759,
12.642, 12.948, 13.297, 13.483, 13.836, 14.179, 13.709, 13.655,
13.198, 13.508, 13.953, 14.387, 14.043, 13.987, 13.561, 13.391,
12.923, 12.555, 12.503, 12.292, 11.877, 12.34, 12.141, 11.687,
11.992, 12.458, 12.131, 11.75, 11.739, 11.263, 11.762, 11.976,
11.578, 11.854, 12.136, 12.422, 12.311, 12.56, 12.879, 12.861,
12.973, 13.235, 13.53, 13.531, 13.137, 13.166, 13.31, 13.103,
13.007, 12.643, 12.69, 12.216, 12.385, 12.046, 12.321, 11.9,
11.772, 11.816, 11.871, 11.59, 11.518, 11.94, 11.803, 11.924,
12.183, 12.136, 12.361, 12.406, 11.932, 11.684, 11.292, 11.388,
11.874, 12.184, 12.002, 12.16, 11.741, 11.26, 11.123, 11.534,
11.777, 11.407, 11.275, 11.679, 11.62, 11.218, 11.235, 11.352,
11.366, 11.061, 10.661, 10.582, 10.899, 11.352, 11.792, 11.475,
11.263, 11.538, 11.183, 10.936, 11.399, 11.171, 11.214, 10.89,
10.728, 11.191, 11.646, 11.62, 11.195, 11.178, 11.18, 10.956,
11.205, 10.87, 11.098, 10.639, 10.487, 10.507, 10.92, 10.558,
10.119, 9.882, 9.573, 9.515, 9.845, 9.852, 9.495, 9.726,
10.116, 10.452, 10.77, 11.225, 10.92, 10.824, 11.096, 11.542,
11.06, 10.568, 10.585, 10.884, 10.401, 10.068, 9.964, 10.285,
10.239, 10.036, 10.417, 10.132, 9.839, 9.556, 9.084, 9.239,
9.304, 9.067, 8.587, 8.471, 8.007, 8.321, 8.55, 9.008,
9.138, 9.088, 9.434, 9.156, 9.65, 9.431, 9.654, 10.079,
10.411, 10.865, 10.51, 10.205, 10.519, 10.367, 10.855, 10.642,
10.298, 10.622, 10.173, 9.792, 9.995, 9.904, 9.771, 9.597,
9.506, 9.212, 9.688, 10.032, 9.723, 9.839, 9.918, 10.332,
10.236, 9.989, 10.192, 10.685, 10.908, 11.275, 11.72, 12.158,
12.045, 12.244, 12.333, 12.246, 12.552, 12.958, 13.11, 13.53,
13.123, 13.138, 13.57, 13.389, 13.511, 13.759, 13.698, 13.744,
13.467, 13.795, 13.665, 13.377, 13.423, 13.772, 13.295, 13.073,
12.718, 12.388, 12.399, 12.185, 11.941, 11.818, 11.465, 11.811,
12.163, 11.86, 11.935, 11.809, 12.145, 12.624, 12.768, 12.321,
12.277, 11.889, 12.11, 12.606, 12.943, 12.945, 13.112, 13.199,
13.664, 14.051, 14.189, 14.339, 14.611, 14.656, 15.112, 15.086,
15.263, 15.021, 15.346, 15.572, 15.607, 15.983, 16.151, 16.215,
16.096, 16.089, 16.32, 16.59, 16.657, 16.752, 16.583, 16.743,
16.373, 16.662, 16.243, 16.163, 16.491, 16.958, 16.977, 17.225,
17.637, 17.344, 17.684, 17.892, 18.036, 18.182, 17.803, 17.588,
17.101, 17.538, 17.124, 16.787, 17.167, 17.138, 16.955, 17.148,
17.135, 17.635, 17.718, 17.675, 17.622, 17.358, 17.754, 17.729,
17.576, 17.772, 18.239, 18.441, 18.729, 18.319, 18.608, 18.493,
18.069, 18.122, 18.314, 18.423, 18.709, 18.548, 18.384, 18.391,
17.988, 17.986, 17.653, 17.249, 17.298, 17.06, 17.36, 17.108,
17.348, 17.596, 17.46, 17.635, 17.275, 17.291, 16.933, 17.337,
17.231, 17.146, 17.148, 16.751, 16.891, 17.038, 16.735, 16.64,
16.231, 15.957, 15.977, 16.077, 16.054, 15.797, 15.67, 15.911,
16.077, 16.17, 15.722, 15.258, 14.877, 15.138, 15., 14.811,
14.698, 14.407, 14.583, 14.704, 15.153, 15.436, 15.634, 15.453,
15.877, 15.696, 15.563, 15.927, 16.255, 16.696, 16.266, 16.698,
16.365, 16.493, 16.973, 16.71, 16.327, 16.605, 16.486, 16.846,
16.935, 17.21, 17.389, 17.546, 17.773, 17.641, 17.485, 17.794,
17.354, 16.904, 16.675, 16.43, 16.898, 16.819, 16.921, 17.201,
17.617, 17.368, 17.864, 17.484],
columns=['value'])
self.long_bench = pd.DataFrame([9.7, 10.179, 10.321, 9.855, 9.936, 10.096, 10.331, 10.662,
10.59, 11.031, 11.154, 10.945, 10.625, 10.233, 10.284, 10.252,
10.221, 10.352, 10.444, 10.773, 10.904, 11.104, 10.797, 10.55,
10.943, 11.352, 11.641, 11.983, 11.696, 12.138, 12.365, 12.379,
11.969, 12.454, 12.947, 13.119, 13.013, 12.763, 12.632, 13.034,
12.681, 12.561, 12.938, 12.867, 13.202, 13.132, 13.539, 13.91,
13.456, 13.692, 13.771, 13.904, 14.069, 13.728, 13.97, 14.228,
13.84, 14.041, 13.963, 13.689, 13.543, 13.858, 14.118, 13.987,
13.611, 14.028, 14.229, 14.41, 14.74, 15.03, 14.915, 15.207,
15.354, 15.665, 15.877, 15.682, 15.625, 15.175, 15.105, 14.893,
14.86, 15.097, 15.178, 15.293, 15.238, 15., 15.283, 14.994,
14.907, 14.664, 14.888, 15.297, 15.313, 15.368, 14.956, 14.802,
14.506, 14.257, 14.619, 15.019, 15.049, 14.625, 14.894, 14.978,
15.434, 15.578, 16.038, 16.107, 16.277, 16.365, 16.204, 16.465,
16.401, 16.895, 17.057, 16.621, 16.225, 16.075, 15.863, 16.292,
16.551, 16.724, 16.817, 16.81, 17.192, 16.86, 16.745, 16.707,
16.552, 16.133, 16.301, 16.08, 15.81, 15.75, 15.909, 16.127,
16.457, 16.204, 16.329, 16.748, 16.624, 17.011, 16.548, 16.831,
16.653, 16.791, 16.57, 16.778, 16.928, 16.932, 17.22, 16.876,
17.301, 17.422, 17.689, 17.316, 17.547, 17.534, 17.409, 17.669,
17.416, 17.859, 17.477, 17.307, 17.245, 17.352, 17.851, 17.412,
17.144, 17.138, 17.085, 16.926, 16.674, 16.854, 17.064, 16.95,
16.609, 16.957, 16.498, 16.552, 16.175, 15.858, 15.697, 15.781,
15.583, 15.36, 15.558, 16.046, 15.968, 15.905, 16.358, 16.783,
17.048, 16.762, 17.224, 17.363, 17.246, 16.79, 16.608, 16.423,
15.991, 15.527, 15.147, 14.759, 14.792, 15.206, 15.148, 15.046,
15.429, 14.999, 15.407, 15.124, 14.72, 14.713, 15.022, 15.092,
14.982, 15.001, 14.734, 14.713, 14.841, 14.562, 15.005, 15.483,
15.472, 15.277, 15.503, 15.116, 15.12, 15.442, 15.476, 15.789,
15.36, 15.764, 16.218, 16.493, 16.642, 17.088, 16.816, 16.645,
16.336, 16.511, 16.2, 15.994, 15.86, 15.929, 16.316, 16.416,
16.746, 17.173, 17.531, 17.627, 17.407, 17.49, 17.768, 17.509,
17.795, 18.147, 18.63, 18.945, 19.021, 19.518, 19.6, 19.744,
19.63, 19.32, 18.933, 19.297, 19.598, 19.446, 19.236, 19.198,
19.144, 19.159, 19.065, 19.032, 18.586, 18.272, 18.119, 18.3,
17.894, 17.744, 17.5, 17.083, 17.092, 16.864, 16.453, 16.31,
16.681, 16.342, 16.447, 16.715, 17.068, 17.067, 16.822, 16.673,
16.675, 16.592, 16.686, 16.397, 15.902, 15.597, 15.357, 15.162,
15.348, 15.603, 15.283, 15.257, 15.082, 14.621, 14.366, 14.039,
13.957, 14.141, 13.854, 14.243, 14.414, 14.033, 13.93, 14.104,
14.461, 14.249, 14.053, 14.165, 14.035, 14.408, 14.501, 14.019,
14.265, 14.67, 14.797, 14.42, 14.681, 15.16, 14.715, 14.292,
14.411, 14.656, 15.094, 15.366, 15.055, 15.198, 14.762, 14.294,
13.854, 13.811, 13.549, 13.927, 13.897, 13.421, 13.037, 13.32,
13.721, 13.511, 13.999, 13.529, 13.418, 13.881, 14.326, 14.362,
13.987, 14.015, 13.599, 13.343, 13.307, 13.689, 13.851, 13.404,
13.577, 13.395, 13.619, 13.195, 12.904, 12.553, 12.294, 12.649,
12.425, 11.967, 12.062, 11.71, 11.645, 12.058, 12.136, 11.749,
11.953, 12.401, 12.044, 11.901, 11.631, 11.396, 11.036, 11.244,
10.864, 11.207, 11.135, 11.39, 11.723, 12.084, 11.8, 11.471,
11.33, 11.504, 11.295, 11.3, 10.901, 10.494, 10.825, 11.054,
10.866, 10.713, 10.875, 10.846, 10.947, 11.422, 11.158, 10.94,
10.521, 10.36, 10.411, 10.792, 10.472, 10.305, 10.525, 10.853,
10.556, 10.72, 10.54, 10.583, 10.299, 10.061, 10.004, 9.903,
9.796, 9.472, 9.246, 9.54, 9.456, 9.177, 9.484, 9.557,
9.493, 9.968, 9.536, 9.39, 8.922, 8.423, 8.518, 8.686,
8.771, 9.098, 9.281, 8.858, 9.027, 8.553, 8.784, 8.996,
9.379, 9.846, 9.855, 9.502, 9.608, 9.761, 9.409, 9.4,
9.332, 9.34, 9.284, 8.844, 8.722, 8.376, 8.775, 8.293,
8.144, 8.63, 8.831, 8.957, 9.18, 9.601, 9.695, 10.018,
9.841, 9.743, 9.292, 8.85, 9.316, 9.288, 9.519, 9.738,
9.289, 9.785, 9.804, 10.06, 10.188, 10.095, 9.739, 9.881,
9.7, 9.991, 10.391, 10.002],
columns=['value'])
def test_performance_stats(self):
"""test the function performance_statistics()
"""
pass
def test_fv(self):
print(f'test with test data and empty DataFrame')
self.assertAlmostEqual(eval_fv(self.test_data1), 6.39245474)
self.assertAlmostEqual(eval_fv(self.test_data2), 10.05126375)
self.assertAlmostEqual(eval_fv(self.test_data3), 6.95068113)
self.assertAlmostEqual(eval_fv(self.test_data4), 8.86508591)
self.assertAlmostEqual(eval_fv(self.test_data5), 4.58627048)
self.assertAlmostEqual(eval_fv(self.test_data6), 4.10346795)
self.assertAlmostEqual(eval_fv(self.test_data7), 2.92532313)
self.assertAlmostEqual(eval_fv(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
def test_max_drawdown(self):
print(f'test with test data and empty DataFrame')
self.assertAlmostEqual(eval_max_drawdown(self.test_data1)[0], 0.264274308)
self.assertEqual(eval_max_drawdown(self.test_data1)[1], 53)
self.assertEqual(eval_max_drawdown(self.test_data1)[2], 86)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data1)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data2)[0], 0.334690849)
self.assertEqual(eval_max_drawdown(self.test_data2)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data2)[2], 10)
self.assertEqual(eval_max_drawdown(self.test_data2)[3], 19)
self.assertAlmostEqual(eval_max_drawdown(self.test_data3)[0], 0.244452899)
self.assertEqual(eval_max_drawdown(self.test_data3)[1], 90)
self.assertEqual(eval_max_drawdown(self.test_data3)[2], 99)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data3)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data4)[0], 0.201849684)
self.assertEqual(eval_max_drawdown(self.test_data4)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4)[2], 50)
self.assertEqual(eval_max_drawdown(self.test_data4)[3], 54)
self.assertAlmostEqual(eval_max_drawdown(self.test_data5)[0], 0.534206456)
self.assertEqual(eval_max_drawdown(self.test_data5)[1], 21)
self.assertEqual(eval_max_drawdown(self.test_data5)[2], 60)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data5)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data6)[0], 0.670062689)
self.assertEqual(eval_max_drawdown(self.test_data6)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data6)[2], 70)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data6)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data7)[0], 0.783577449)
self.assertEqual(eval_max_drawdown(self.test_data7)[1], 17)
self.assertEqual(eval_max_drawdown(self.test_data7)[2], 51)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data7)[3]))
self.assertEqual(eval_max_drawdown(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_max_drawdown, 15)
self.assertRaises(KeyError,
                  eval_max_drawdown,
                  pd.DataFrame([1, 2, 3], columns=['non_value']))
# test the case where net values drop below zero (max drawdown ratio exceeds 100%):
# TODO: investigate how a zero running peak (division by zero) should be handled here
self.assertAlmostEqual(eval_max_drawdown(self.test_data4 - 5)[0], 1.0770474121951792)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[2], 50)
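# A minimal illustrative sketch (not the qteasy implementation) of how a max drawdown
# and its peak/valley positions can be derived from a running peak with pandas;
# the 'value' column name matches the fixtures above.
@staticmethod
def _max_drawdown_sketch(df):
    values = df['value']
    drawdown = 1 - values / values.cummax()          # drawdown ratio relative to running peak
    valley = int(np.argmax(drawdown.values))         # position of the deepest drawdown
    peak = int(np.argmax(values.values[:valley + 1]))  # position of the peak preceding it
    return drawdown.iloc[valley], peak, valley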
def test_info_ratio(self):
reference = self.test_data1
self.assertAlmostEqual(eval_info_ratio(self.test_data2, reference, 'value'), 0.075553316)
self.assertAlmostEqual(eval_info_ratio(self.test_data3, reference, 'value'), 0.018949457)
self.assertAlmostEqual(eval_info_ratio(self.test_data4, reference, 'value'), 0.056328143)
self.assertAlmostEqual(eval_info_ratio(self.test_data5, reference, 'value'), -0.004270068)
self.assertAlmostEqual(eval_info_ratio(self.test_data6, reference, 'value'), 0.009198027)
self.assertAlmostEqual(eval_info_ratio(self.test_data7, reference, 'value'), -0.000890283)
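# A minimal illustrative sketch (assumed convention, not the qteasy code): the information
# ratio as mean active return divided by the tracking error against a reference series.
@staticmethod
def _info_ratio_sketch(df, reference, ref_column='value'):
    active = df['value'].pct_change() - reference[ref_column].pct_change()
    return active.mean() / active.std()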
def test_volatility(self):
self.assertAlmostEqual(eval_volatility(self.test_data1), 0.748646166)
self.assertAlmostEqual(eval_volatility(self.test_data2), 0.75527442)
self.assertAlmostEqual(eval_volatility(self.test_data3), 0.654188853)
self.assertAlmostEqual(eval_volatility(self.test_data4), 0.688375814)
self.assertAlmostEqual(eval_volatility(self.test_data5), 1.089989522)
self.assertAlmostEqual(eval_volatility(self.test_data6), 1.775419308)
self.assertAlmostEqual(eval_volatility(self.test_data7), 1.962758406)
self.assertAlmostEqual(eval_volatility(self.test_data1, logarithm=False), 0.750993311)
self.assertAlmostEqual(eval_volatility(self.test_data2, logarithm=False), 0.75571473)
self.assertAlmostEqual(eval_volatility(self.test_data3, logarithm=False), 0.655331424)
self.assertAlmostEqual(eval_volatility(self.test_data4, logarithm=False), 0.692683021)
self.assertAlmostEqual(eval_volatility(self.test_data5, logarithm=False), 1.09602969)
self.assertAlmostEqual(eval_volatility(self.test_data6, logarithm=False), 1.774789504)
self.assertAlmostEqual(eval_volatility(self.test_data7, logarithm=False), 2.003329156)
self.assertEqual(eval_volatility(pd.DataFrame()), -np.inf)
self.assertRaises(AssertionError, eval_volatility, [1, 2, 3])
# test volatility calculation on the long data series
expected_volatility = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
0.39955371, 0.39974258, 0.40309866, 0.40486593, 0.4055514,
0.40710639, 0.40708157, 0.40609006, 0.4073625, 0.40835305,
0.41155304, 0.41218193, 0.41207489, 0.41300276, 0.41308415,
0.41292392, 0.41207645, 0.41238397, 0.41229291, 0.41164056,
0.41316317, 0.41348842, 0.41462249, 0.41474574, 0.41652625,
0.41649176, 0.41701556, 0.4166593, 0.41684221, 0.41491689,
0.41435209, 0.41549087, 0.41849338, 0.41998049, 0.41959106,
0.41907311, 0.41916103, 0.42120773, 0.42052391, 0.42111225,
0.42124589, 0.42356445, 0.42214672, 0.42324022, 0.42476639,
0.42621689, 0.42549439, 0.42533678, 0.42539414, 0.42545038,
0.42593637, 0.42652095, 0.42665489, 0.42699563, 0.42798159,
0.42784512, 0.42898006, 0.42868781, 0.42874188, 0.42789631,
0.4277768, 0.42776827, 0.42685216, 0.42660989, 0.42563155,
0.42618281, 0.42606281, 0.42505222, 0.42653242, 0.42555378,
0.42500842, 0.42561939, 0.42442059, 0.42395414, 0.42384356,
0.42319135, 0.42397497, 0.42488579, 0.42449729, 0.42508766,
0.42509878, 0.42456616, 0.42535577, 0.42681884, 0.42688552,
0.42779918, 0.42706058, 0.42792887, 0.42762114, 0.42894045,
0.42977398, 0.42919859, 0.42829041, 0.42780946, 0.42825318,
0.42858952, 0.42858315, 0.42805601, 0.42764751, 0.42744107,
0.42775518, 0.42707283, 0.4258592, 0.42615335, 0.42526286,
0.4248906, 0.42368986, 0.4232565, 0.42265079, 0.42263954,
0.42153046, 0.42132051, 0.41995353, 0.41916605, 0.41914271,
0.41876945, 0.41740175, 0.41583884, 0.41614026, 0.41457908,
0.41472411, 0.41310876, 0.41261041, 0.41212369, 0.41211677,
0.4100645, 0.40852504, 0.40860297, 0.40745338, 0.40698661,
0.40644546, 0.40591375, 0.40640744, 0.40620663, 0.40656649,
0.40727154, 0.40797605, 0.40807137, 0.40808913, 0.40809676,
0.40711767, 0.40724628, 0.40713077, 0.40772698, 0.40765157,
0.40658297, 0.4065991, 0.405011, 0.40537645, 0.40432626,
0.40390177, 0.40237701, 0.40291623, 0.40301797, 0.40324145,
0.40312864, 0.40328316, 0.40190955, 0.40246506, 0.40237663,
0.40198407, 0.401969, 0.40185623, 0.40198313, 0.40005643,
0.39940743, 0.39850438, 0.39845398, 0.39695093, 0.39697295,
0.39663201, 0.39675444, 0.39538699, 0.39331959, 0.39326074,
0.39193287, 0.39157266, 0.39021327, 0.39062591, 0.38917591,
0.38976991, 0.38864187, 0.38872158, 0.38868096, 0.38868377,
0.38842057, 0.38654784, 0.38649517, 0.38600464, 0.38408115,
0.38323049, 0.38260215, 0.38207663, 0.38142669, 0.38003262,
0.37969367, 0.37768092, 0.37732108, 0.37741991, 0.37617779,
0.37698504, 0.37606784, 0.37499276, 0.37533731, 0.37350437,
0.37375172, 0.37385382, 0.37384003, 0.37338938, 0.37212288,
0.37273075, 0.370559, 0.37038506, 0.37062153, 0.36964661,
0.36818564, 0.3656634, 0.36539259, 0.36428672, 0.36502487,
0.3647148, 0.36551435, 0.36409919, 0.36348181, 0.36254383,
0.36166601, 0.36142665, 0.35954942, 0.35846915, 0.35886759,
0.35813867, 0.35642888, 0.35375231, 0.35061783, 0.35078463,
0.34995508, 0.34688918, 0.34548257, 0.34633158, 0.34622833,
0.34652111, 0.34622774, 0.34540951, 0.34418809, 0.34276593,
0.34160916, 0.33811193, 0.33822709, 0.3391685, 0.33883381])
test_volatility = eval_volatility(self.long_data)
test_volatility_roll = self.long_data['volatility'].values
self.assertAlmostEqual(test_volatility, np.nanmean(expected_volatility))
self.assertTrue(np.allclose(expected_volatility, test_volatility_roll, equal_nan=True))
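# A minimal illustrative sketch (assumed convention, not the qteasy code) of rolling
# annualized volatility of log returns; the 250 leading NaNs in expected_volatility above
# are consistent with a 250-sample rolling window.
@staticmethod
def _rolling_volatility_sketch(df, window=250, periods_per_year=250):
    log_ret = np.log(df['value'] / df['value'].shift(1))   # daily log returns
    return log_ret.rolling(window).std() * np.sqrt(periods_per_year)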
def test_sharp(self):
self.assertAlmostEqual(eval_sharp(self.test_data1, 5, 0), 0.06135557)
self.assertAlmostEqual(eval_sharp(self.test_data2, 5, 0), 0.167858667)
self.assertAlmostEqual(eval_sharp(self.test_data3, 5, 0), 0.09950547)
self.assertAlmostEqual(eval_sharp(self.test_data4, 5, 0), 0.154928241)
self.assertAlmostEqual(eval_sharp(self.test_data5, 5, 0.002), 0.007868673)
self.assertAlmostEqual(eval_sharp(self.test_data6, 5, 0.002), 0.018306537)
self.assertAlmostEqual(eval_sharp(self.test_data7, 5, 0.002), 0.006259971)
# test sharpe ratio calculation on the long data series
expected_sharp = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.02346815, -0.02618783, -0.03763912, -0.03296276, -0.03085698,
-0.02851101, -0.02375842, -0.02016746, -0.01107885, -0.01426613,
-0.00787204, -0.01135784, -0.01164232, -0.01003481, -0.00022512,
-0.00046792, -0.01209378, -0.01278892, -0.01298135, -0.01938214,
-0.01671044, -0.02120509, -0.0244281, -0.02416067, -0.02763238,
-0.027579, -0.02372774, -0.02215294, -0.02467094, -0.02091266,
-0.02590194, -0.03049876, -0.02077131, -0.01483653, -0.02488144,
-0.02671638, -0.02561547, -0.01957986, -0.02479803, -0.02703162,
-0.02658087, -0.01641755, -0.01946472, -0.01647757, -0.01280889,
-0.00893643, -0.00643275, -0.00698457, -0.00549962, -0.00654677,
-0.00494757, -0.0035633, -0.00109037, 0.00750654, 0.00451208,
0.00625502, 0.01221367, 0.01326454, 0.01535037, 0.02269538,
0.02028715, 0.02127712, 0.02333264, 0.02273159, 0.01670643,
0.01376513, 0.01265342, 0.02211647, 0.01612449, 0.00856706,
-0.00077147, -0.00268848, 0.00210993, -0.00443934, -0.00411912,
-0.0018756, -0.00867461, -0.00581601, -0.00660835, -0.00861137,
-0.00678614, -0.01188408, -0.00589617, -0.00244323, -0.00201891,
-0.01042846, -0.01471016, -0.02167034, -0.02258554, -0.01306809,
-0.00909086, -0.01233746, -0.00595166, -0.00184208, 0.00750497,
0.01481886, 0.01761972, 0.01562886, 0.01446414, 0.01285826,
0.01357719, 0.00967613, 0.01636272, 0.01458437, 0.02280183,
0.02151903, 0.01700276, 0.01597368, 0.02114336, 0.02233297,
0.02585631, 0.02768459, 0.03519235, 0.04204535, 0.04328161,
0.04672855, 0.05046191, 0.04619848, 0.04525853, 0.05381529,
0.04598861, 0.03947394, 0.04665006, 0.05586077, 0.05617728,
0.06495018, 0.06205172, 0.05665466, 0.06500615, 0.0632062,
0.06084328, 0.05851466, 0.05659229, 0.05159347, 0.0432977,
0.0474047, 0.04231723, 0.03613176, 0.03618391, 0.03591012,
0.03885674, 0.0402686, 0.03846423, 0.04534014, 0.04721458,
0.05130912, 0.05026281, 0.05394312, 0.05529349, 0.05949243,
0.05463304, 0.06195165, 0.06767606, 0.06880985, 0.07048996,
0.07078815, 0.07420767, 0.06773439, 0.0658441, 0.06470875,
0.06302349, 0.06456876, 0.06411282, 0.06216669, 0.067094,
0.07055075, 0.07254976, 0.07119253, 0.06173308, 0.05393352,
0.05681246, 0.05250643, 0.06099845, 0.0655544, 0.06977334,
0.06636514, 0.06177949, 0.06869908, 0.06719767, 0.06178738,
0.05915714, 0.06882277, 0.06756821, 0.06507994, 0.06489791,
0.06553941, 0.073123, 0.07576757, 0.06805446, 0.06063571,
0.05033801, 0.05206971, 0.05540306, 0.05249118, 0.05755587,
0.0586174, 0.05051288, 0.0564852, 0.05757284, 0.06358355,
0.06130082, 0.04925482, 0.03834472, 0.04163981, 0.04648316,
0.04457858, 0.04324626, 0.04328791, 0.04156207, 0.04818652,
0.04972634, 0.06024123, 0.06489556, 0.06255485, 0.06069815,
0.06466389, 0.07081163, 0.07895358, 0.0881782, 0.09374151,
0.08336506, 0.08764795, 0.09080174, 0.08808926, 0.08641158,
0.07811943, 0.06885318, 0.06479503, 0.06851185, 0.07382819,
0.07047903, 0.06658251, 0.07638379, 0.08667974, 0.08867918,
0.08245323, 0.08961866, 0.09905298, 0.0961908, 0.08562706,
0.0839014, 0.0849072, 0.08338395, 0.08783487, 0.09463609,
0.10332336, 0.11806497, 0.11220297, 0.11589097, 0.11678405])
test_sharp = eval_sharp(self.long_data, 5, 0.00035)
self.assertAlmostEqual(np.nanmean(expected_sharp), test_sharp)
self.assertTrue(np.allclose(self.long_data['sharp'].values, expected_sharp, equal_nan=True))
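# A minimal illustrative sketch (assumed convention, not the qteasy code) of a Sharpe-style
# ratio: mean return in excess of a per-period riskfree rate, divided by return volatility.
@staticmethod
def _sharpe_sketch(df, riskfree=0.0):
    ret = df['value'].pct_change()
    return (ret.mean() - riskfree) / ret.std()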
def test_beta(self):
reference = self.test_data1
self.assertAlmostEqual(eval_beta(self.test_data2, reference, 'value'), -0.017148939)
self.assertAlmostEqual(eval_beta(self.test_data3, reference, 'value'), -0.042204233)
self.assertAlmostEqual(eval_beta(self.test_data4, reference, 'value'), -0.15652986)
self.assertAlmostEqual(eval_beta(self.test_data5, reference, 'value'), -0.049195532)
self.assertAlmostEqual(eval_beta(self.test_data6, reference, 'value'), -0.026995082)
self.assertAlmostEqual(eval_beta(self.test_data7, reference, 'value'), -0.01147809)
self.assertRaises(TypeError, eval_beta, [1, 2, 3], reference, 'value')
self.assertRaises(TypeError, eval_beta, self.test_data3, [1, 2, 3], 'value')
self.assertRaises(KeyError, eval_beta, self.test_data3, reference, 'not_found_value')
# test beta calculation on the long data series
expected_beta = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.04988841, -0.05127618, -0.04692104, -0.04272652, -0.04080598,
-0.0493347, -0.0460858, -0.0416761, -0.03691527, -0.03724924,
-0.03678865, -0.03987324, -0.03488321, -0.02567672, -0.02690303,
-0.03010128, -0.02437967, -0.02571932, -0.02455681, -0.02839811,
-0.03358653, -0.03396697, -0.03466321, -0.03050966, -0.0247583,
-0.01629325, -0.01880895, -0.01480403, -0.01348783, -0.00544294,
-0.00648176, -0.00467036, -0.01135331, -0.0156841, -0.02340763,
-0.02615705, -0.02730771, -0.02906174, -0.02860664, -0.02412914,
-0.02066416, -0.01744816, -0.02185133, -0.02145285, -0.02681765,
-0.02827694, -0.02394581, -0.02744096, -0.02778825, -0.02703065,
-0.03160023, -0.03615371, -0.03681072, -0.04265126, -0.04344738,
-0.04232421, -0.04705272, -0.04533344, -0.04605934, -0.05272737,
-0.05156463, -0.05134196, -0.04730733, -0.04425352, -0.03869831,
-0.04159571, -0.04223998, -0.04346747, -0.04229844, -0.04740093,
-0.04992507, -0.04621232, -0.04477644, -0.0486915, -0.04598224,
-0.04943463, -0.05006391, -0.05362256, -0.04994067, -0.05464769,
-0.05443275, -0.05513493, -0.05173594, -0.04500994, -0.04662891,
-0.03903505, -0.0419592, -0.04307773, -0.03925718, -0.03711574,
-0.03992631, -0.0433058, -0.04533641, -0.0461183, -0.05600344,
-0.05758377, -0.05959874, -0.05605942, -0.06002859, -0.06253002,
-0.06747014, -0.06427915, -0.05931947, -0.05769974, -0.04791515,
-0.05175088, -0.05748039, -0.05385232, -0.05072975, -0.05052637,
-0.05125567, -0.05005785, -0.05325104, -0.04977727, -0.04947867,
-0.05148544, -0.05739156, -0.05742069, -0.06047279, -0.0558414,
-0.06086126, -0.06265151, -0.06411129, -0.06828052, -0.06781762,
-0.07083409, -0.07211207, -0.06799162, -0.06913295, -0.06775162,
-0.0696265, -0.06678248, -0.06867502, -0.06581961, -0.07055823,
-0.06448184, -0.06097973, -0.05795587, -0.0618383, -0.06130145,
-0.06050652, -0.05936661, -0.05749424, -0.0499, -0.05050495,
-0.04962687, -0.05033439, -0.05070116, -0.05422009, -0.05369759,
-0.05548943, -0.05907353, -0.05933035, -0.05927918, -0.06227663,
-0.06011455, -0.05650432, -0.05828134, -0.05620949, -0.05715323,
-0.05482478, -0.05387113, -0.05095559, -0.05377999, -0.05334267,
-0.05220438, -0.04001521, -0.03892434, -0.03660782, -0.04282708,
-0.04324623, -0.04127048, -0.04227559, -0.04275226, -0.04347049,
-0.04125853, -0.03806295, -0.0330632, -0.03155531, -0.03277152,
-0.03304518, -0.03878731, -0.03830672, -0.03727434, -0.0370571,
-0.04509224, -0.04207632, -0.04116198, -0.04545179, -0.04584584,
-0.05287341, -0.05417433, -0.05175836, -0.05005509, -0.04268674,
-0.03442321, -0.03457309, -0.03613426, -0.03524391, -0.03629479,
-0.04361312, -0.02626705, -0.02406115, -0.03046384, -0.03181044,
-0.03375164, -0.03661673, -0.04520779, -0.04926951, -0.05726738,
-0.0584486, -0.06220608, -0.06800563, -0.06797431, -0.07562211,
-0.07481996, -0.07731229, -0.08413381, -0.09031826, -0.09691925,
-0.11018071, -0.11952675, -0.10826026, -0.11173895, -0.10756359,
-0.10775916, -0.11664559, -0.10505051, -0.10606547, -0.09855355,
-0.10004159, -0.10857084, -0.12209301, -0.11605758, -0.11105113,
-0.1155195, -0.11569505, -0.10513348, -0.09611072, -0.10719791,
-0.10843965, -0.11025856, -0.10247839, -0.10554044, -0.10927647,
-0.10645088, -0.09982498, -0.10542734, -0.09631372, -0.08229695])
test_beta_mean = eval_beta(self.long_data, self.long_bench, 'value')
test_beta_roll = self.long_data['beta'].values
self.assertAlmostEqual(test_beta_mean, np.nanmean(expected_beta))
self.assertTrue(np.allclose(test_beta_roll, expected_beta, equal_nan=True))
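# A minimal illustrative sketch (not the qteasy code) of beta: covariance of the portfolio
# returns with the reference returns, divided by the variance of the reference returns.
@staticmethod
def _beta_sketch(df, reference, ref_column='value'):
    ret = df['value'].pct_change()
    ref_ret = reference[ref_column].pct_change()
    return ret.cov(ref_ret) / ref_ret.var()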
def test_alpha(self):
reference = self.test_data1
self.assertAlmostEqual(eval_alpha(self.test_data2, 5, reference, 'value', 0.5), 11.63072977)
self.assertAlmostEqual(eval_alpha(self.test_data3, 5, reference, 'value', 0.5), 1.886590071)
self.assertAlmostEqual(eval_alpha(self.test_data4, 5, reference, 'value', 0.5), 6.827021872)
self.assertAlmostEqual(eval_alpha(self.test_data5, 5, reference, 'value', 0.92), -1.192265168)
self.assertAlmostEqual(eval_alpha(self.test_data6, 5, reference, 'value', 0.92), -1.437142359)
self.assertAlmostEqual(eval_alpha(self.test_data7, 5, reference, 'value', 0.92), -1.781311545)
# test alpha calculation on the long data series
expected_alpha = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.09418119, -0.11188463, -0.17938358, -0.15588172, -0.1462678,
-0.13089586, -0.10780125, -0.09102891, -0.03987585, -0.06075686,
-0.02459503, -0.04104284, -0.0444565, -0.04074585, 0.02191275,
0.02255955, -0.05583375, -0.05875539, -0.06055551, -0.09648245,
-0.07913737, -0.10627829, -0.12320965, -0.12368335, -0.1506743,
-0.15768033, -0.13638829, -0.13065298, -0.14537834, -0.127428,
-0.15504529, -0.18184636, -0.12652146, -0.09190138, -0.14847221,
-0.15840648, -0.1525789, -0.11859418, -0.14700954, -0.16295761,
-0.16051645, -0.10364859, -0.11961134, -0.10258267, -0.08090148,
-0.05727746, -0.0429945, -0.04672356, -0.03581408, -0.0439215,
-0.03429495, -0.0260362, -0.01075022, 0.04931808, 0.02779388,
0.03984083, 0.08311951, 0.08995566, 0.10522428, 0.16159058,
0.14238174, 0.14759783, 0.16257712, 0.158908, 0.11302115,
0.0909566, 0.08272888, 0.15261884, 0.10546376, 0.04990313,
-0.01284111, -0.02720704, 0.00454725, -0.03965491, -0.03818265,
-0.02186992, -0.06574751, -0.04846454, -0.05204211, -0.06316498,
-0.05095099, -0.08502656, -0.04681162, -0.02362027, -0.02205091,
-0.07706374, -0.10371841, -0.14434688, -0.14797935, -0.09055402,
-0.06739549, -0.08824959, -0.04855888, -0.02291244, 0.04027138,
0.09370505, 0.11472939, 0.10243593, 0.0921445, 0.07662648,
0.07946651, 0.05450718, 0.10497677, 0.09068334, 0.15462924,
0.14231034, 0.10544952, 0.09980256, 0.14035223, 0.14942974,
0.17624102, 0.19035477, 0.2500807, 0.30724652, 0.31768915,
0.35007521, 0.38412975, 0.34356521, 0.33614463, 0.41206165,
0.33999177, 0.28045963, 0.34076789, 0.42220356, 0.42314636,
0.50790423, 0.47713348, 0.42520169, 0.50488411, 0.48705211,
0.46252601, 0.44325578, 0.42640573, 0.37986783, 0.30652822,
0.34503393, 0.2999069, 0.24928617, 0.24730218, 0.24326897,
0.26657905, 0.27861168, 0.26392824, 0.32552649, 0.34177792,
0.37837011, 0.37025267, 0.4030612, 0.41339361, 0.45076809,
0.40383354, 0.47093422, 0.52505036, 0.53614256, 0.5500943,
0.55319293, 0.59021451, 0.52358459, 0.50605947, 0.49359168,
0.47895956, 0.49320243, 0.4908336, 0.47310767, 0.51821564,
0.55105932, 0.57291504, 0.5599809, 0.46868842, 0.39620087,
0.42086934, 0.38317217, 0.45934108, 0.50048866, 0.53941991,
0.50676751, 0.46500915, 0.52993663, 0.51668366, 0.46405428,
0.44100603, 0.52726147, 0.51565458, 0.49186248, 0.49001081,
0.49367648, 0.56422294, 0.58882785, 0.51334664, 0.44386256,
0.35056709, 0.36490029, 0.39205071, 0.3677061, 0.41134736,
0.42315067, 0.35356394, 0.40324562, 0.41340007, 0.46503322,
0.44355762, 0.34854314, 0.26412842, 0.28633753, 0.32335224,
0.30761141, 0.29709569, 0.29570487, 0.28000063, 0.32802547,
0.33967726, 0.42511212, 0.46252357, 0.44244974, 0.42152907,
0.45436727, 0.50482359, 0.57339198, 0.6573356, 0.70912003,
0.60328917, 0.6395092, 0.67015805, 0.64241557, 0.62779142,
0.55028063, 0.46448736, 0.43709245, 0.46777983, 0.51789439,
0.48594916, 0.4456216, 0.52008189, 0.60548684, 0.62792473,
0.56645031, 0.62766439, 0.71829315, 0.69481356, 0.59550329,
0.58133754, 0.59014148, 0.58026655, 0.61719273, 0.67373203,
0.75573056, 0.89501633, 0.8347253, 0.87964685, 0.89015835])
test_alpha_mean = eval_alpha(self.long_data, 100, self.long_bench, 'value')
test_alpha_roll = self.long_data['alpha'].values
self.assertAlmostEqual(test_alpha_mean, np.nanmean(expected_alpha))
self.assertTrue(np.allclose(test_alpha_roll, expected_alpha, equal_nan=True))
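# A minimal illustrative sketch (assumed CAPM-style convention, not the qteasy code) of
# alpha: annualized return in excess of what beta times the annualized reference return
# would predict; the number of years and the riskfree rate are passed in explicitly.
@staticmethod
def _alpha_sketch(df, years, reference, ref_column='value', riskfree=0.0):
    ret = df['value'].pct_change()
    ref_ret = reference[ref_column].pct_change()
    beta = ret.cov(ref_ret) / ref_ret.var()
    annual_ret = (df['value'].iloc[-1] / df['value'].iloc[0]) ** (1 / years) - 1
    ref_annual = (reference[ref_column].iloc[-1] / reference[ref_column].iloc[0]) ** (1 / years) - 1
    return annual_ret - riskfree - beta * (ref_annual - riskfree)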
def test_calmar(self):
"""test evaluate function eval_calmar()"""
pass
def test_benchmark(self):
reference = self.test_data1
tr, yr = eval_benchmark(self.test_data2, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data3, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data4, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data5, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data6, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data7, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
def test_evaluate(self):
pass
class TestLoop(unittest.TestCase):
"""通过一个假设但精心设计的例子来测试loop_step以及loop方法的正确性"""
def setUp(self):
# carefully designed simulated share names, trading dates, and share prices
self.shares = ['share1', 'share2', 'share3', 'share4', 'share5', 'share6', 'share7']
self.dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08',
'2016/09/09', '2016/09/12', '2016/09/13', '2016/09/14', '2016/09/15',
'2016/09/16', '2016/09/19', '2016/09/20', '2016/09/21', '2016/09/22',
'2016/09/23', '2016/09/26', '2016/09/27', '2016/09/28', '2016/09/29',
'2016/09/30', '2016/10/10', '2016/10/11', '2016/10/12', '2016/10/13',
'2016/10/14', '2016/10/17', '2016/10/18', '2016/10/19', '2016/10/20',
'2016/10/21', '2016/10/23', '2016/10/24', '2016/10/25', '2016/10/26',
'2016/10/27', '2016/10/29', '2016/10/30', '2016/10/31', '2016/11/01',
'2016/11/02', '2016/11/05', '2016/11/06', '2016/11/07', '2016/11/08',
'2016/11/09', '2016/11/12', '2016/11/13', '2016/11/14', '2016/11/15',
'2016/11/16', '2016/11/19', '2016/11/20', '2016/11/21', '2016/11/22']
self.dates = [pd.Timestamp(date_text) for date_text in self.dates]
self.prices = np.array([[5.35, 5.09, 5.03, 4.98, 4.50, 5.09, 4.75],
[5.66, 4.84, 5.21, 5.44, 4.35, 5.06, 4.48],
[5.79, 4.60, 5.02, 5.45, 4.07, 4.76, 4.56],
[5.56, 4.63, 5.50, 5.74, 3.88, 4.62, 4.62],
[5.88, 4.64, 5.07, 5.46, 3.74, 4.63, 4.62],
[6.25, 4.51, 5.11, 5.45, 3.98, 4.25, 4.59],
[5.93, 4.96, 5.15, 5.24, 4.08, 4.13, 4.33],
[6.39, 4.65, 5.02, 5.47, 4.00, 3.91, 3.88],
[6.31, 4.26, 5.10, 5.58, 4.28, 3.77, 3.47],
[5.86, 3.77, 5.24, 5.36, 4.01, 3.43, 3.51],
[5.61, 3.39, 4.93, 5.38, 4.14, 3.68, 3.83],
[5.31, 3.76, 4.96, 5.30, 4.49, 3.63, 3.67],
[5.40, 4.06, 5.40, 5.77, 4.49, 3.94, 3.79],
[5.03, 3.87, 5.74, 5.75, 4.46, 4.40, 4.18],
[5.38, 3.91, 5.53, 6.15, 4.13, 4.03, 4.02],
[5.79, 4.13, 5.79, 6.04, 3.79, 3.93, 4.41],
[6.27, 4.27, 5.68, 6.01, 4.23, 3.50, 4.65],
[6.59, 4.57, 5.90, 5.71, 4.57, 3.39, 4.89],
[6.91, 5.04, 5.75, 5.23, 4.92, 3.30, 4.41],
[6.71, 5.31, 6.11, 5.47, 5.28, 3.25, 4.66],
[6.33, 5.40, 5.77, 5.79, 5.67, 2.94, 4.37],
[6.07, 5.21, 5.85, 5.82, 6.00, 2.71, 4.58],
[5.98, 5.06, 5.61, 5.61, 5.89, 2.55, 4.76],
[6.46, 4.69, 5.75, 5.31, 5.55, 2.21, 4.37],
[6.95, 5.12, 5.50, 5.24, 5.39, 2.29, 4.16],
[6.77, 5.27, 5.14, 5.41, 5.26, 2.21, 4.02],
[6.70, 5.72, 5.31, 5.60, 5.31, 2.04, 3.77],
[6.28, 6.10, 5.68, 5.28, 5.22, 2.39, 3.38],
[6.61, 6.27, 5.73, 4.99, 4.90, 2.30, 3.07],
[6.25, 6.49, 6.04, 5.09, 4.57, 2.41, 2.90],
[6.47, 6.16, 6.27, 5.39, 4.96, 2.40, 2.50],
[6.45, 6.26, 6.60, 5.58, 4.82, 2.79, 2.76],
[6.88, 6.39, 6.10, 5.33, 4.39, 2.67, 2.29],
[7.00, 6.58, 6.25, 5.48, 4.63, 2.27, 2.17],
[6.59, 6.20, 6.73, 5.10, 5.05, 2.09, 1.84],
[6.59, 5.70, 6.91, 5.39, 4.68, 2.55, 1.83],
[6.64, 5.20, 7.01, 5.30, 5.02, 2.22, 2.21],
[6.38, 5.37, 7.36, 5.04, 4.84, 2.59, 2.00],
[6.10, 5.40, 7.72, 5.51, 4.60, 2.59, 1.76],
[6.35, 5.22, 7.68, 5.43, 4.66, 2.95, 1.27],
[6.52, 5.38, 7.62, 5.23, 4.41, 2.69, 1.40],
[6.87, 5.53, 7.74, 4.99, 4.87, 2.20, 1.11],
[6.84, 6.03, 7.53, 5.43, 4.42, 2.69, 1.60],
[7.09, 5.77, 7.46, 5.40, 4.08, 2.65, 1.23],
[6.88, 5.66, 7.84, 5.60, 4.16, 2.63, 1.59],
[6.84, 6.08, 8.11, 5.66, 4.10, 2.14, 1.50],
[6.98, 5.62, 8.04, 6.01, 4.43, 2.39, 1.80],
[7.02, 5.63, 7.65, 5.64, 4.07, 1.95, 1.55],
[7.13, 6.11, 7.52, 5.67, 3.97, 2.32, 1.35],
[7.59, 6.03, 7.67, 5.30, 4.16, 2.69, 1.51],
[7.61, 6.27, 7.47, 4.91, 4.12, 2.51, 1.08],
[7.21, 6.28, 7.44, 5.37, 4.04, 2.62, 1.06],
[7.48, 6.52, 7.59, 5.75, 3.84, 2.16, 1.43],
[7.66, 7.00, 7.94, 6.08, 3.46, 2.35, 1.43],
[7.51, 7.34, 8.25, 6.58, 3.18, 2.31, 1.74],
[7.12, 7.34, 7.77, 6.78, 3.10, 1.96, 1.44],
[6.97, 7.68, 8.03, 7.20, 3.55, 2.35, 1.83],
[6.67, 8.09, 7.87, 7.65, 3.66, 2.58, 1.71],
[6.20, 7.68, 7.58, 8.00, 3.66, 2.40, 2.12],
[6.34, 7.58, 7.33, 7.92, 3.29, 2.20, 2.45],
[6.22, 7.46, 7.22, 8.30, 2.80, 2.31, 2.85],
[5.98, 7.59, 6.86, 8.46, 2.88, 2.16, 2.79],
[6.37, 7.19, 7.18, 7.99, 3.04, 2.16, 2.91],
[6.56, 7.40, 7.54, 8.19, 3.45, 2.20, 3.26],
[6.26, 7.48, 7.24, 8.61, 3.88, 1.73, 3.14],
[6.69, 7.93, 6.85, 8.66, 3.58, 1.93, 3.53],
[7.13, 8.23, 6.60, 8.91, 3.60, 2.25, 3.65],
[6.83, 8.35, 6.65, 9.08, 3.97, 2.69, 3.69],
[7.31, 8.44, 6.74, 9.34, 4.05, 2.59, 3.50],
[7.43, 8.35, 7.19, 8.96, 4.40, 2.14, 3.25],
[7.54, 8.58, 7.14, 8.98, 4.06, 1.68, 3.64],
[7.18, 8.82, 6.88, 8.50, 3.60, 1.98, 4.00],
[7.21, 9.09, 7.14, 8.65, 3.61, 2.14, 3.63],
[7.45, 9.02, 7.30, 8.94, 4.10, 1.89, 3.78],
[7.37, 8.87, 6.95, 8.63, 3.74, 1.97, 3.42],
[6.88, 9.22, 7.02, 8.65, 4.02, 1.99, 3.76],
[7.08, 9.04, 7.38, 8.40, 3.95, 2.37, 3.62],
[6.75, 8.60, 7.50, 8.38, 3.81, 2.14, 3.67],
[6.60, 8.48, 7.60, 8.23, 3.71, 2.35, 3.61],
[6.21, 8.71, 7.15, 8.04, 3.94, 1.86, 3.39],
[6.36, 8.79, 7.30, 7.91, 4.43, 2.14, 3.43],
[6.82, 8.93, 7.80, 7.57, 4.07, 2.39, 3.33],
[6.45, 9.36, 8.15, 7.73, 4.04, 2.53, 3.03],
[6.85, 9.68, 8.40, 7.74, 4.34, 2.47, 3.28],
[6.48, 10.16, 8.87, 8.07, 4.80, 2.93, 3.46],
[6.10, 10.56, 8.53, 7.99, 5.18, 3.09, 3.25],
[5.64, 10.63, 8.94, 7.92, 4.90, 2.93, 2.95],
[6.01, 10.55, 8.52, 8.40, 5.40, 3.22, 2.87],
[6.21, 10.65, 8.80, 8.80, 5.73, 3.06, 2.63],
[6.61, 10.55, 8.92, 8.47, 5.62, 2.90, 2.40],
[7.02, 10.19, 9.20, 8.07, 5.20, 2.68, 2.53],
[7.04, 10.48, 8.71, 7.87, 4.85, 2.46, 2.96],
[6.77, 10.36, 8.25, 8.02, 5.18, 2.41, 3.26],
[7.09, 10.03, 8.19, 8.39, 4.72, 2.74, 2.97],
[6.65, 10.24, 7.80, 8.69, 4.62, 3.15, 3.16],
[7.07, 10.01, 7.69, 8.81, 4.55, 3.40, 3.58],
[6.80, 10.14, 7.23, 8.99, 4.37, 3.82, 3.23],
[6.79, 10.31, 6.98, 9.10, 4.26, 4.02, 3.62],
[6.48, 9.88, 7.07, 8.90, 4.25, 3.76, 3.13],
[6.39, 10.05, 6.95, 8.87, 4.59, 4.10, 2.93]])
# carefully designed simulated PT (position target) holding-position signals:
self.pt_signals = np.array([[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.000, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.250, 0.150, 0.000, 0.300, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.370, 0.193, 0.120, 0.072, 0.072, 0.072, 0.096],
[0.000, 0.222, 0.138, 0.222, 0.083, 0.222, 0.111],
[0.000, 0.222, 0.138, 0.222, 0.083, 0.222, 0.111],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.047, 0.380, 0.238, 0.000, 0.142, 0.000, 0.190],
[0.047, 0.380, 0.238, 0.000, 0.142, 0.000, 0.190],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.045, 0.454, 0.227, 0.000, 0.000, 0.000, 0.272],
[0.045, 0.454, 0.227, 0.000, 0.000, 0.000, 0.272],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300]])
# carefully designed simulated PS (proportion) trading signals, closely mirroring the simulated PT signals
self.ps_signals = np.array([[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.100, 0.150],
[0.200, 0.200, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.100, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, -0.750, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.333, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, -0.500, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, -1.000],
[0.000, 0.000, 0.000, 0.000, 0.200, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.500, 0.000, 0.000, 0.150, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.200, 0.000, -1.000, 0.200, 0.000],
[0.500, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.200, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, -0.500, 0.200],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.200, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.150, 0.000, 0.000],
[-1.000, 0.000, 0.000, 0.250, 0.000, 0.250, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.250, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, -1.000, 0.000, -1.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.800, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.100, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, -1.000, 0.000, 0.100],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, -1.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-1.000, 0.000, 0.150, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000]])
# carefully designed simulated VS (volume) share trading signals, similar to the simulated PS signals
self.vs_signals = np.array([[000, 000, 000, 000, 500, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 300, 300],
[400, 400, 000, 000, 000, 000, 000],
[000, 000, 250, 000, 000, 000, 000],
[000, 000, 000, 000, -400, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, -200, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, -300],
[000, 000, 000, 000, 500, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 000, 300, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 400, 000, -300, 600, 000],
[500, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[600, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, -400, 600],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 500, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 300, 000, 000],
[-500, 000, 000, 500, 000, 200, 000],
[000, 000, 000, 000, 000, 000, 000],
[500, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, -700, 000, -600, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-400, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 300, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, -600, 000, 300],
[000, 000, 000, 000, 000, 000, 000],
[000, -300, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 700, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000]])
        # Carefully designed simulated multi-price trading signals: 50 trading days of operations on three stocks
self.multi_shares = ['000010', '000030', '000039']
self.multi_dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08']
self.multi_dates = [pd.Timestamp(date_text) for date_text in self.multi_dates]
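        # the date strings are converted to pandas Timestamps so they can serve directly as the
        # DatetimeIndex of the signal and price DataFrames built below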
        # trading prices used for the operations include the open, high and close prices
self.multi_prices_open = np.array([[10.02, 9.88, 7.26],
[10.00, 9.88, 7.00],
[9.98, 9.89, 6.88],
[9.97, 9.75, 6.91],
[9.99, 9.74, np.nan],
[10.01, 9.80, 6.81],
[10.04, 9.62, 6.63],
[10.06, 9.65, 6.45],
[10.06, 9.58, 6.16],
[10.11, 9.67, 6.24],
[10.11, 9.81, 5.96],
[10.07, 9.80, 5.97],
[10.06, 10.00, 5.96],
[10.09, 9.95, 6.20],
[10.03, 10.10, 6.35],
[10.02, 10.06, 6.11],
[10.06, 10.14, 6.37],
[10.08, 9.90, 5.58],
[9.99, 10.20, 5.65],
[10.00, 10.29, 5.65],
[10.03, 9.86, 5.19],
[10.02, 9.48, 5.42],
[10.06, 10.01, 6.30],
[10.03, 10.24, 6.15],
[9.97, 10.26, 6.05],
[9.94, 10.24, 5.89],
[9.83, 10.12, 5.22],
[9.78, 10.65, 5.20],
[9.77, 10.64, 5.07],
[9.91, 10.56, 6.04],
[9.92, 10.42, 6.12],
[9.97, 10.43, 5.85],
[9.91, 10.29, 5.67],
[9.90, 10.30, 6.02],
[9.88, 10.44, 6.04],
[9.91, 10.60, 7.07],
[9.63, 10.67, 7.64],
[9.64, 10.46, 7.99],
[9.57, 10.39, 7.59],
[9.55, 10.90, 8.73],
[9.58, 11.01, 8.72],
[9.61, 11.01, 8.97],
[9.62, np.nan, 8.58],
[9.55, np.nan, 8.71],
[9.57, 10.82, 8.77],
[9.61, 11.02, 8.40],
[9.63, 10.96, 7.95],
[9.64, 11.55, 7.76],
[9.61, 11.74, 8.25],
[9.56, 11.80, 7.51]])
self.multi_prices_high = np.array([[10.07, 9.91, 7.41],
[10.00, 10.04, 7.31],
[10.00, 9.93, 7.14],
[10.00, 10.04, 7.00],
[10.03, 9.84, np.nan],
[10.03, 9.88, 6.82],
[10.04, 9.99, 6.96],
[10.09, 9.70, 6.85],
[10.10, 9.67, 6.50],
[10.14, 9.71, 6.34],
[10.11, 9.85, 6.04],
[10.10, 9.90, 6.02],
[10.09, 10.00, 6.12],
[10.09, 10.20, 6.38],
[10.10, 10.11, 6.43],
[10.05, 10.18, 6.46],
[10.07, 10.21, 6.43],
[10.09, 10.26, 6.27],
[10.10, 10.38, 5.77],
[10.00, 10.47, 6.01],
[10.04, 10.42, 5.67],
[10.04, 10.07, 5.67],
[10.06, 10.24, 6.35],
[10.09, 10.27, 6.32],
[10.05, 10.38, 6.43],
[9.97, 10.43, 6.36],
[9.96, 10.39, 5.79],
[9.86, 10.65, 5.47],
[9.77, 10.84, 5.65],
[9.92, 10.65, 6.04],
[9.94, 10.73, 6.14],
[9.97, 10.63, 6.23],
[9.97, 10.51, 5.83],
[9.92, 10.35, 6.25],
[9.92, 10.46, 6.27],
[9.92, 10.63, 7.12],
[9.93, 10.74, 7.82],
[9.64, 10.76, 8.14],
[9.58, 10.54, 8.27],
[9.60, 11.02, 8.92],
[9.58, 11.12, 8.76],
[9.62, 11.17, 9.15],
[9.62, np.nan, 8.90],
[9.64, np.nan, 9.01],
[9.59, 10.92, 9.16],
[9.62, 11.15, 9.00],
[9.63, 11.11, 8.27],
[9.70, 11.55, 7.99],
[9.66, 11.95, 8.33],
[9.64, 11.93, 8.25]])
self.multi_prices_close = np.array([[10.04, 9.68, 6.64],
[10.00, 9.87, 7.26],
[10.00, 9.86, 7.03],
[9.99, 9.87, 6.87],
[9.97, 9.79, np.nan],
[9.99, 9.82, 6.64],
[10.03, 9.80, 6.85],
[10.03, 9.66, 6.70],
[10.06, 9.62, 6.39],
[10.06, 9.58, 6.22],
[10.11, 9.69, 5.92],
[10.09, 9.78, 5.91],
[10.07, 9.75, 6.11],
[10.06, 9.96, 5.91],
[10.09, 9.90, 6.23],
[10.03, 10.04, 6.28],
[10.03, 10.06, 6.28],
[10.06, 10.08, 6.27],
[10.08, 10.24, 5.70],
[10.00, 10.24, 5.56],
[9.99, 10.24, 5.67],
[10.03, 9.86, 5.16],
[10.03, 10.13, 5.69],
[10.06, 10.12, 6.32],
[10.03, 10.10, 6.14],
[9.97, 10.25, 6.25],
[9.94, 10.24, 5.79],
[9.83, 10.22, 5.26],
[9.77, 10.75, 5.05],
[9.84, 10.64, 5.45],
[9.91, 10.56, 6.06],
[9.93, 10.60, 6.21],
[9.96, 10.42, 5.69],
[9.91, 10.25, 5.46],
[9.91, 10.24, 6.02],
[9.88, 10.49, 6.69],
[9.91, 10.57, 7.43],
[9.64, 10.63, 7.72],
[9.56, 10.48, 8.16],
[9.57, 10.37, 7.83],
[9.55, 10.96, 8.70],
[9.57, 11.02, 8.71],
[9.61, np.nan, 8.88],
[9.61, np.nan, 8.54],
[9.55, 10.88, 8.87],
[9.57, 10.87, 8.87],
[9.63, 11.01, 8.18],
[9.64, 11.01, 7.80],
[9.65, 11.58, 7.97],
[9.62, 11.80, 8.25]])
        # Three groups of trading signals, applied to the open, high and close prices respectively
        # The key point here is the handling of the stock delivery period: when it is non-zero,
        # delivery is settled in units of trading days
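        # NOTE: each of the three signal DataFrames below is indexed by the same 50 trading dates and
        # has the same three share codes as columns, so multi_signals[0] / [1] / [2] line up one-to-one
        # with multi_prices_open / multi_prices_high / multi_prices_close respectively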
self.multi_signals = []
        # the first group of multi_signals acts on the open price
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.000, 0.000],
[0.000, -0.500, 0.000],
[0.000, -0.500, 0.000],
[0.000, 0.000, 0.000],
[0.150, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.300, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.300],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.350, 0.250],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.100, 0.000, 0.350],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.050, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
        # the second group of signals acts on the high price
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.150, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, -0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.200],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
        # the third group of signals acts on the close price
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-0.500, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, -0.800],
[0.000, 0.000, 0.000],
[0.000, -1.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-0.750, 0.000, 0.000],
[0.000, 0.000, -0.850],
[0.000, 0.000, 0.000],
[0.000, -0.700, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, -1.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-1.000, 0.000, 0.000],
[0.000, -1.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
        # the back-test also needs three groups of prices: open, high and close
self.multi_histories = []
        # the first group of price histories: open prices
self.multi_histories.append(
pd.DataFrame(self.multi_prices_open,
columns=self.multi_shares,
index=self.multi_dates
)
)
        # the second group of price histories: high prices
self.multi_histories.append(
pd.DataFrame(self.multi_prices_high,
columns=self.multi_shares,
index=self.multi_dates
)
)
        # the third group of price histories: close prices
self.multi_histories.append(
pd.DataFrame(self.multi_prices_close,
columns=self.multi_shares,
index=self.multi_dates
)
)
        # set up back-test parameters: staged cash investments and transaction cost models
self.cash = qt.CashPlan(['2016/07/01', '2016/08/12', '2016/09/23'], [10000, 10000, 10000])
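        # 10,000 of cash is injected on each of the three dates above, 30,000 in total over the back-test period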
self.rate = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=0,
sell_min=0,
slipage=0)
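        # rate2 differs from rate only in its fixed minimum charges (buy_min=10, sell_min=5),
        # used to exercise the minimum-transaction-fee behaviour of the cost model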
self.rate2 = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=10,
sell_min=5,
slipage=0)
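        # wrap the single-price PT / PS / VS signal matrices into HistoryPanel objects with a single 'close' htype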
self.pt_signal_hp = dataframe_to_hp(
pd.DataFrame(self.pt_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.ps_signal_hp = dataframe_to_hp(
pd.DataFrame(self.ps_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.vs_signal_hp = dataframe_to_hp(
pd.DataFrame(self.vs_signals, index=self.dates, columns=self.shares),
htypes='close'
)
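        # stack the three per-price signal DataFrames along the htypes axis so the resulting
        # HistoryPanel carries one signal layer per price type ('open', 'high', 'close')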
self.multi_signal_hp = stack_dataframes(
self.multi_signals,
stack_along='htypes',
htypes='open, high, close'
)
self.history_list = dataframe_to_hp(
pd.DataFrame(self.prices, index=self.dates, columns=self.shares),
htypes='close'
)
self.multi_history_list = stack_dataframes(
self.multi_histories,
stack_along='htypes',
htypes='open, high, close'
)
        # Expected back-test results for the simulated PT signals
        # PT signals, sell first then buy, delivery period = 0
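        # Each row of the expected-result matrices below is the portfolio state on one trading day.
        # Judging from the fixtures above, the first seven columns appear to be the holdings of the
        # seven shares, the eighth column the available cash, and the last column the total portfolio
        # value (starting from the initial 10,000 investment).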
self.pt_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 6035.8333, 0.0000, 9761.1111],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9674.8209],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9712.5872],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9910.7240],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9919.3782],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9793.0692],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9513.8217],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9123.5935],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9000.5995],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9053.4865],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9248.7142],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9161.1372],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9197.3369],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9504.6981],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9875.2461],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10241.5400],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10449.2398],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10628.3269],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10500.7893],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 0.0000, 5233.1396, 0.0000, 10449.2776],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10338.2857],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10194.3474],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10471.0008],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10411.2629],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10670.0618],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10652.4799],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10526.1488],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10458.6614],
[101.4983, 417.9188, 821.7315, 288.6672, 0.0000, 2576.1284, 0.0000, 4487.0722, 0.0000, 20609.0270],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21979.4972],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21880.9628],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21630.0454],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 20968.0007],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21729.9339],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21107.6400],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21561.1745],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21553.0916],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22316.9366],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22084.2862],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21777.3543],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22756.8225],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22843.4697],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22762.1766],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22257.0973],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 23136.5259],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 21813.7852],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22395.3204],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 23717.6858],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22715.4263],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 22498.3254],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 23341.1733],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24162.3941],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24847.1508],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 23515.9755],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24555.8997],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24390.6372],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24073.3309],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24394.6500],
[2076.3314, 903.0334, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 3487.5655, 0.0000, 34904.8150],
[0.0000, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 4608.8037, 0.0000, 34198.4475],
[0.0000, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 4608.8037, 0.0000, 33753.0190],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 34953.8178],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 33230.2498],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 35026.7819],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 36976.2649],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 38673.8147],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 38717.3429],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 36659.0854],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 35877.9607],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36874.4840],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37010.2695],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 38062.3510],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36471.1357],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37534.9927],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37520.2569],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36747.7952],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36387.9409],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 35925.9715],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36950.7028],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 37383.2463],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 37761.2724],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 39548.2653],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41435.1291],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41651.6261],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41131.9920],
[644.7274, 1657.3981, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 0.0000, 0.0000, 41286.4702],
[644.7274, 1657.3981, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 0.0000, 0.0000, 40978.7259],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 40334.5453],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 41387.9172],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42492.6707],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42953.7188],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42005.1092],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42017.9106],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 43750.2824],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 41766.8679],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 42959.1150],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 41337.9320],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 40290.3688]])
        # PT signals, buy first then sell, delivery period = 0
self.pt_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 6035.8333, 0.0000, 9761.1111],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9674.8209],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9712.5872],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9910.7240],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9919.3782],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9793.0692],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9513.8217],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9123.5935],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9000.5995],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9053.4865],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9248.7142],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9161.1372],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9197.3369],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9504.6981],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9875.2461],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10241.5400],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10449.2398],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10628.3269],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10500.7893],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 0.0000, 5233.1396, 0.0000, 10449.2776],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10338.2857],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10194.3474],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10471.0008],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10411.2629],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10670.0618],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10652.4799],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10526.1488],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10458.6614],
[101.4983, 417.9188, 821.7315, 288.6672, 0.0000, 2576.1284, 0.0000, 4487.0722, 0.0000, 20609.0270],
[797.1684, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 2703.5808, 0.0000, 21979.4972],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21700.7241],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21446.6630],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 20795.3593],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21557.2924],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 20933.6887],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21392.5581],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21390.2918],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22147.7562],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21910.9053],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21594.2980],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22575.4380],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22655.8312],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22578.4365],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22073.2661],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22955.2367],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 21628.1647],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22203.4237],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 23516.2598],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 22505.8428],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 22199.1042],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23027.9302],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23848.5806],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24540.8871],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23205.6838],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24267.6685],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24115.3796],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23814.3667],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24133.6611],
[2061.6837, 896.6628, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 3285.8830, 0.0000, 34658.5742],
[0.0000, 896.6628, 507.6643, 466.6033, 0.0000, 1523.7106, 1467.7407, 12328.8684, 0.0000, 33950.7917],
[0.0000, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 4380.3797, 0.0000, 33711.4045],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 34922.0959],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 33237.1081],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 35031.8071],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 36976.3376],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 38658.5245],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 38712.2854],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 36655.3125],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 35904.3692],
[644.1423, 902.2617, 514.8253, 0.0000, 15.5990, 0.0000, 1467.7407, 14821.9004, 0.0000, 36873.9080],
[644.1423, 902.2617, 514.8253, 0.0000, 1220.8683, 0.0000, 1467.7407, 10470.8781, 0.0000, 36727.7895],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37719.9840],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36138.1277],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37204.0760],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37173.1201],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36398.2298],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36034.2178],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 35583.6399],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36599.2645],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 37013.3408],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 37367.7449],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 39143.8273],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 41007.3074],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 41225.4657],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 40685.9525],
[644.1423, 1646.4805, 1033.4242, 0.0000, 0.0000, 0.0000, 1467.7407, 6592.6891, 0.0000, 40851.5435],
[644.1423, 1646.4805, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 0.0000, 0.0000, 41082.1210],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 40385.0135],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 41455.1513],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42670.6769],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 43213.7233],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42205.2480],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42273.9386],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 44100.0777],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42059.7208],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 43344.9653],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 41621.0324],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 40528.0648]])
        # PT signals, sell first then buy, delivery period = 2 days (stock) / 0 days (cash),
        # so that cash from prior sells is immediately available for further buys
self.pt_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 321.089, 6035.833, 0.000, 9761.111],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9674.821],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9712.587],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9910.724],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9919.378],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9793.069],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9513.822],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9123.593],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9000.600],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9053.487],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9248.714],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9161.137],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9197.337],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9504.698],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9875.246],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10241.540],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10449.240],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10628.327],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10500.789],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 0.000, 5233.140, 0.000, 10449.278],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10338.286],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10194.347],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10471.001],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10411.263],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10670.062],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10652.480],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10526.149],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10458.661],
[101.498, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 4487.072, 0.000, 20609.027],
[797.168, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 0.000, 0.000, 21979.497],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21584.441],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21309.576],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 20664.323],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21445.597],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 20806.458],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21288.441],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21294.365],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 22058.784],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21805.540],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21456.333],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22459.720],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22611.602],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22470.912],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21932.634],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22425.864],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21460.103],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22376.968],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23604.295],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 22704.826],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 22286.293],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23204.755],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24089.017],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24768.185],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23265.196],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24350.540],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24112.706],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23709.076],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24093.545],
[2060.275, 896.050, 504.579, 288.667, 0.000, 763.410, 1577.904, 2835.944, 0.000, 34634.888],
[578.327, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 732.036, 0.000, 33912.261],
[0.000, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 4415.981, 0.000, 33711.951],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 34951.433],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 33224.596],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 35065.209],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 37018.699],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 38706.035],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 38724.569],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 36647.268],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 35928.930],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36967.229],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37056.598],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 38129.862],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36489.333],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37599.602],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37566.823],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36799.280],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36431.196],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 35940.942],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36973.050],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 37393.292],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 37711.276],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 39515.991],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41404.440],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41573.523],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41011.613],
[644.683, 1606.361, 1074.629, 0.000, 0.000, 0.000, 3896.406, 0.000, 0.000, 41160.181],
[644.683, 1606.361, 1074.629, 0.000, 0.000, 0.000, 3896.406, 0.000, 0.000, 40815.512],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 40145.531],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41217.281],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 42379.061],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 42879.589],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41891.452],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41929.003],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 43718.052],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41685.916],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 42930.410],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 41242.589],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 40168.084]])
        # PT signals, buy first then sell, delivery period = 2 days (stock) / 1 day (cash)
self.pt_res_bs21 = np.array([
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 321.089, 6035.833, 0.000, 9761.111],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9674.821],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9712.587],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9910.724],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9919.378],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9793.069],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9513.822],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9123.593],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9000.600],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9053.487],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9248.714],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9161.137],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9197.337],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9504.698],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9875.246],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10241.540],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10449.240],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10628.327],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10500.789],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 0.000, 5233.140, 0.000, 10449.278],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10338.286],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10194.347],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10471.001],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10411.263],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10670.062],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10652.480],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10526.149],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10458.661],
[101.498, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 4487.072, 0.000, 20609.027],
[797.168, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 0.000, 0.000, 21979.497],
[797.168, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 2475.037, 0.000, 21584.441],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21266.406],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 20623.683],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21404.957],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 20765.509],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21248.748],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21256.041],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 22018.958],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21764.725],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21413.241],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22417.021],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22567.685],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22427.699],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21889.359],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22381.938],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21416.358],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22332.786],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 0.000, 2386.698, 0.000, 23557.595],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 23336.992],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 22907.742],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24059.201],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24941.902],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25817.514],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24127.939],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25459.688],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25147.370],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25005.842],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 1086.639, 2752.004, 0.000, 25598.700],
[2138.154, 929.921, 503.586, 288.667, 0.000, 761.900, 1086.639, 4818.835, 0.000, 35944.098],
[661.356, 929.921, 503.586, 553.843, 0.000, 1954.237, 1086.639, 8831.252, 0.000, 35237.243],
[0.000, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 9460.955, 0.000, 35154.442],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 36166.632],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 34293.883],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 35976.901],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 37848.552],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 39512.574],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 39538.024],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 37652.984],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 36687.909],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37749.277],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37865.518],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38481.190],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37425.087],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38051.341],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38065.478],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37429.495],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37154.479],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 36692.717],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 37327.055],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 37937.630],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 38298.645],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 39689.369],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 40992.397],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 41092.265],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 40733.622],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 3726.579, 0.000, 0.000, 40708.515],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 3726.579, 0.000, 0.000, 40485.321],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 39768.059],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 40519.595],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 41590.937],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 42354.983],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 41175.149],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 41037.902],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 42706.213],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 40539.205],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 41608.692],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 39992.148],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 39134.828]])
        # Expected back-test results for the simulated PS signals
        # PS signals, sell first then buy, delivery period = 0
self.ps_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 5059.7222, 0.0000, 9761.1111],
[346.9824, 416.6787, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 1201.2775, 0.0000, 9646.1118],
[346.9824, 416.6787, 191.0372, 0.0000, 555.5556, 205.0654, 321.0892, 232.7189, 0.0000, 9685.5858],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9813.2184],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9803.1288],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9608.0198],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9311.5727],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8883.6246],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8751.3900],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8794.1811],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9136.5704],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9209.3588],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9093.8294],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9387.5537],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9585.9589],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 9928.7771],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10060.3806],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10281.0021],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10095.5613],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 0.0000, 4506.3926, 0.0000, 10029.9571],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9875.6133],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9614.9463],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9824.1722],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9732.5743],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9968.3391],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 10056.1579],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9921.4925],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9894.1621],
[115.7186, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 6179.7742, 0.0000, 20067.9370],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21133.5080],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20988.8485],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20596.7429],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 19910.7730],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20776.7070],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20051.7969],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20725.3884],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20828.8795],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21647.1811],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21310.1687],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20852.0993],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21912.3952],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21937.8282],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21962.4576],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21389.4018],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22027.4535],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 20939.9992],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21250.0636],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22282.7812],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21407.0658],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21160.2373],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21826.7682],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22744.9403],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23466.1185],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22017.8821],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23191.4662],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23099.0822],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22684.7671],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22842.1346],
[1073.8232, 416.6787, 735.6442, 269.8496, 1785.2055, 938.6967, 1339.2073, 5001.4246, 0.0000,
33323.8359],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 32820.2901],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 32891.2308],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 34776.5296],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 33909.0325],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 34560.1906],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 36080.4552],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 38618.4454],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 38497.9230],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 37110.0991],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 35455.2467],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35646.1860],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35472.3020],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36636.4694],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35191.7035],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36344.2242],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36221.6005],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35943.5708],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35708.2608],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35589.0286],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36661.0285],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 36310.5909],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 36466.7637],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 37784.4918],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 39587.6766],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 40064.0191],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 39521.6439],
[0.0000, 823.2923, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 17142.1018, 0.0000, 39932.2761],
[0.0000, 823.2923, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 17142.1018, 0.0000, 39565.2475],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 38943.1632],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39504.1184],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40317.8004],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40798.5768],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39962.5711],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40194.4793],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 41260.4003],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39966.3024],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 40847.3160],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 39654.5445],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 38914.8151]])
# PS signals, buy first then sell, with a delivery period of 0
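# Assumed column layout for these expected-result matrices (inferred from the data
# itself, not stated in the source): holdings of the seven test shares, then cash,
# then a column that stays at zero throughout, then total portfolio value.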
self.ps_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 5059.7222, 0.0000, 9761.1111],
[346.9824, 416.6787, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 1201.2775, 0.0000, 9646.1118],
[346.9824, 416.6787, 191.0372, 0.0000, 555.5556, 205.0654, 321.0892, 232.7189, 0.0000, 9685.5858],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9813.2184],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9803.1288],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9608.0198],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9311.5727],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8883.6246],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8751.3900],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8794.1811],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9136.5704],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9209.3588],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9093.8294],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9387.5537],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9585.9589],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 9928.7771],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10060.3806],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10281.0021],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10095.5613],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 0.0000, 4506.3926, 0.0000, 10029.9571],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9875.6133],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9614.9463],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9824.1722],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9732.5743],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9968.3391],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 10056.1579],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9921.4925],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9894.1621],
[115.7186, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 6179.7742, 0.0000, 20067.9370],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21133.5080],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20988.8485],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20596.7429],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 19910.7730],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20776.7070],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20051.7969],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20725.3884],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20828.8795],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21647.1811],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21310.1687],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20852.0993],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21912.3952],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21937.8282],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21962.4576],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21389.4018],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21625.6913],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 20873.0389],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21450.9447],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 22269.3892],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21969.5329],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21752.6924],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22000.6088],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23072.5655],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23487.5201],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22441.0460],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23201.2700],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23400.9485],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22306.2008],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21989.5913],
[1073.8232, 737.0632, 735.6442, 269.8496, 1708.7766, 938.6967, 0.0000, 5215.4255, 0.0000, 31897.1636],
[0.0000, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 6421.4626, 0.0000, 31509.5059],
[0.0000, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 6421.4626, 0.0000, 31451.7888],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32773.4592],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32287.0318],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32698.1938],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 34031.5183],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 35537.8336],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 36212.6487],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 36007.5294],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 34691.3797],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 33904.8810],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34341.6098],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 35479.9505],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34418.4455],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34726.7182],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34935.0407],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34136.7505],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 33804.1575],
[195.7763, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 14025.8697, 0.0000, 33653.8970],
[195.7763, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 14025.8697, 0.0000, 34689.8757],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 34635.7841],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 35253.2755],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 36388.1051],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 37987.4204],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 38762.2103],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 38574.0544],
[195.7763, 1124.9219, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 15879.4935, 0.0000, 39101.9156],
[195.7763, 1124.9219, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 15879.4935, 0.0000, 39132.5587],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 38873.2941],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39336.6594],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39565.9568],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39583.4317],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39206.8350],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39092.6551],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39666.1834],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 38798.0749],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 39143.5561],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 38617.8779],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 38156.1701]])
# PS signals, sell first then buy, with a delivery period of 2 days (stocks) and 1 day (cash)
self.ps_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 205.065, 321.089, 5059.722, 0.000, 9761.111],
[346.982, 416.679, 0.000, 0.000, 555.556, 205.065, 321.089, 1201.278, 0.000, 9646.112],
[346.982, 416.679, 191.037, 0.000, 555.556, 205.065, 321.089, 232.719, 0.000, 9685.586],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9813.218],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9803.129],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9608.020],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9311.573],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8883.625],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8751.390],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8794.181],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9136.570],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9209.359],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9093.829],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9387.554],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9585.959],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 9928.777],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10060.381],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10281.002],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10095.561],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 0.000, 4506.393, 0.000, 10029.957],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9875.613],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9614.946],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9824.172],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9732.574],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9968.339],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 10056.158],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9921.492],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9894.162],
[115.719, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 6179.774, 0.000, 20067.937],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21133.508],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20988.848],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20596.743],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 19910.773],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20776.707],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20051.797],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20725.388],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20828.880],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21647.181],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21310.169],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20852.099],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21912.395],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21937.828],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21962.458],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21389.402],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22027.453],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 20939.999],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21250.064],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22282.781],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21407.066],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21160.237],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21826.768],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22744.940],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23466.118],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22017.882],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23191.466],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23099.082],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22684.767],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22842.135],
[1073.823, 416.679, 735.644, 269.850, 1785.205, 938.697, 1339.207, 5001.425, 0.000, 33323.836],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 32820.290],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 32891.231],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 34776.530],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 33909.032],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 34560.191],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 36080.455],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 38618.445],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 38497.923],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 37110.099],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 35455.247],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35646.186],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35472.302],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36636.469],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35191.704],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36344.224],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36221.601],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35943.571],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35708.261],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35589.029],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36661.029],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 36310.591],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 36466.764],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 37784.492],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 39587.677],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 40064.019],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 39521.644],
[0.000, 823.292, 735.644, 0.000, 0.000, 0.000, 2730.576, 17142.102, 0.000, 39932.276],
[0.000, 823.292, 735.644, 0.000, 0.000, 0.000, 2730.576, 17142.102, 0.000, 39565.248],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 38943.163],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39504.118],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40317.800],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40798.577],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39962.571],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40194.479],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 41260.400],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39966.302],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 40847.316],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 39654.544],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 38914.815]])
# PS signals, buy first then sell, with a delivery period of 2 days (stocks) and 1 day (cash)
self.ps_res_bs21 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 208.333, 326.206, 5020.833, 0.000, 9761.111],
[351.119, 421.646, 0.000, 0.000, 555.556, 208.333, 326.206, 1116.389, 0.000, 9645.961],
[351.119, 421.646, 190.256, 0.000, 555.556, 208.333, 326.206, 151.793, 0.000, 9686.841],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9813.932],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9803.000],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9605.334],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9304.001],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8870.741],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8738.282],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8780.664],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9126.199],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9199.746],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9083.518],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9380.932],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9581.266],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 9927.154],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10059.283],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10281.669],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10093.263],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 0.000, 4453.525, 0.000, 10026.289],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9870.523],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9606.437],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9818.691],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9726.556],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9964.547],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 10053.449],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9917.440],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9889.495],
[117.098, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 6189.948, 0.000, 20064.523],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21124.484],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20827.077],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20396.124],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 19856.445],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20714.156],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 19971.485],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20733.948],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20938.903],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21660.772],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21265.298],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20684.378],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21754.770],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21775.215],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21801.488],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21235.427],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21466.714],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 20717.431],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21294.450],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 22100.247],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21802.552],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21593.608],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21840.028],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22907.725],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23325.945],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22291.942],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23053.050],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23260.084],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22176.244],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21859.297],
[1055.763, 740.051, 729.561, 272.237, 1706.748, 932.896, 0.000, 5221.105, 0.000, 31769.617],
[0.000, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 6313.462, 0.000, 31389.961],
[0.000, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 6313.462, 0.000, 31327.498],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32647.140],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32170.095],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32577.742],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 33905.444],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 35414.492],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 36082.120],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 35872.293],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 34558.132],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 33778.138],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34213.578],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 35345.791],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34288.014],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34604.406],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34806.850],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34012.232],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 33681.345],
[192.484, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 13958.345, 0.000, 33540.463],
[192.484, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 13958.345, 0.000, 34574.280],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 34516.781],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 35134.412],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 36266.530],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 37864.376],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 38642.633],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 38454.227],
[192.484, 1127.221, 729.561, 0.000, 0.000, 0.000, 1339.869, 15871.934, 0.000, 38982.227],
[192.484, 1127.221, 729.561, 0.000, 0.000, 0.000, 1339.869, 15871.934, 0.000, 39016.154],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38759.803],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39217.182],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39439.690],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39454.081],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39083.341],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38968.694],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39532.030],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38675.507],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 39013.741],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 38497.668],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 38042.410]])
# Simulated backtest results for VS signals
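# Note (assumption): VS-type signals trade in absolute share volumes, which is why the
# holdings in the matrices below move in round lots, unlike the fractional holdings
# produced by the proportion-based PS signals above.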
# VS signals, sell first then buy, with a delivery period of 0
self.vs_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750.0000, 0.0000, 9925.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 4954.0000, 0.0000, 9785.0000],
[400.0000, 400.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 878.0000, 0.0000, 9666.0000],
[400.0000, 400.0000, 173.1755, 0.0000, 500.0000, 300.0000, 300.0000, 0.0000, 0.0000, 9731.0000],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9830.9270],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9785.8540],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9614.3412],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9303.1953],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8834.4398],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8712.7554],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8717.9507],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9079.1479],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9166.0276],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9023.6607],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9291.6864],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9411.6371],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9706.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9822.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9986.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9805.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 0.0000, 4993.7357, 0.0000, 9704.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9567.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9209.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9407.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9329.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9545.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9652.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9414.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9367.7357],
[0.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 9319.7357, 0.0000, 19556.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20094.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19849.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19802.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19487.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19749.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19392.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19671.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19756.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20111.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19867.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19775.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20314.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20310.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20253.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20044.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20495.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 19798.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20103.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20864.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20425.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20137.8405],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20711.3567],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21470.3891],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21902.9538],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20962.9538],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21833.5184],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21941.8169],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21278.5184],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21224.4700],
[1100.0000, 710.4842, 400.0000, 300.0000, 600.0000, 500.0000, 600.0000, 9160.0000, 0.0000, 31225.2119],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488.0000, 0.0000, 30894.5748],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488.0000, 0.0000, 30764.3811],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 31815.5828],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 31615.4215],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 32486.1394],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 33591.2847],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34056.5428],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34756.4863],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34445.5428],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34433.9541],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
33870.4703],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
34014.3010],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
34680.5671],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
33890.9945],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
34004.6640],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
34127.7768],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
33421.1638],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
33120.9057],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830.0000, 0.0000, 32613.3171],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830.0000, 0.0000, 33168.1558],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
33504.6236],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
33652.1318],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
34680.4867],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
35557.5191],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
35669.7128],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
35211.4466],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530.0000, 0.0000, 35550.6079],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530.0000, 0.0000, 35711.6563],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35682.6079],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35880.8336],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36249.8740],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36071.6159],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35846.1562],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35773.3578],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36274.9465],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35739.3094],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 36135.0917],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 35286.5835],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 35081.3658]])
# VS signals, buy first then sell, with a delivery period of 0
self.vs_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750, 0.0000, 10000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750, 0.0000, 9925],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 4954, 0.0000, 9785],
[400.0000, 400.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 878, 0.0000, 9666],
[400.0000, 400.0000, 173.1755, 0.0000, 500.0000, 300.0000, 300.0000, 0, 0.0000, 9731],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9830.927022],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9785.854043],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9614.341223],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9303.195266],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8834.439842],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8712.755424],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8717.95069],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9079.147929],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9166.027613],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9023.66075],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9291.686391],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9411.637081],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9706.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9822.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9986.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9805.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 0.0000, 4993.7357, 0.0000, 9704.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9567.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9209.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9407.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9329.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9545.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9652.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9414.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9367.7357],
[0.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 9319.7357, 0.0000, 19556.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20094.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19849.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19802.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19487.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19749.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19392.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19671.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19756.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20111.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19867.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19775.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20314.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20310.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20253.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20044.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20495.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 19798.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20103.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20864.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20425.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20137.84054],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20711.35674],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21470.38914],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21902.95375],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20962.95375],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21833.51837],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21941.81688],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21278.51837],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21224.46995],
[1100.0000, 710.4842, 400.0000, 300.0000, 600.0000, 500.0000, 600.0000, 9160, 0.0000, 31225.21185],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488, 0.0000, 30894.57479],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488, 0.0000, 30764.38113],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 31815.5828],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 31615.42154],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 32486.13941],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 33591.28466],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34056.54276],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34756.48633],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34445.54276],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34433.95412],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33870.47032],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34014.30104],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34680.56715],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33890.99452],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34004.66398],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34127.77683],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33421.1638],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33120.9057],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830, 0.0000, 32613.31706],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830, 0.0000, 33168.15579],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 33504.62357],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 33652.13176],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 34680.4867],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35557.51909],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35669.71276],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35211.44665],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530, 0.0000, 35550.60792],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530, 0.0000, 35711.65633],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35682.60792],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35880.83362],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36249.87403],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36071.61593],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35846.15615],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35773.35783],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36274.94647],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35739.30941],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 36135.09172],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 35286.58353],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 35081.36584]])
# VS signals, sell first then buy, with a delivery period of 2 days (stocks) and 1 day (cash)
self.vs_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 9925.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 300.000, 300.000, 4954.000, 0.000, 9785.000],
[400.000, 400.000, 0.000, 0.000, 500.000, 300.000, 300.000, 878.000, 0.000, 9666.000],
[400.000, 400.000, 173.176, 0.000, 500.000, 300.000, 300.000, 0.000, 0.000, 9731.000],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9830.927],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9785.854],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9614.341],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9303.195],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8834.440],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8712.755],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8717.951],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9079.148],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9166.028],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9023.661],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9291.686],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9411.637],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9706.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9822.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9986.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9805.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 0.000, 4993.736, 0.000, 9704.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9567.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9209.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9407.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9329.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9545.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9652.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9414.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9367.736],
[0.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 9319.736, 0.000, 19556.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20094.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19849.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19802.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19487.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19749.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19392.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19671.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19756.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20111.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19867.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19775.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20314.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20310.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20253.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20044.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20495.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 19798.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20103.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20864.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20425.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20137.841],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20711.357],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21470.389],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21902.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20962.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21833.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21941.817],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21278.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21224.470],
[1100.000, 710.484, 400.000, 300.000, 600.000, 500.000, 600.000, 9160.000, 0.000, 31225.212],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30894.575],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30764.381],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31815.583],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31615.422],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 32486.139],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 33591.285],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34056.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34756.486],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34445.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34433.954],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33870.470],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34014.301],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34680.567],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33890.995],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34004.664],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34127.777],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33421.164],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33120.906],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 32613.317],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 33168.156],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33504.624],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33652.132],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 34680.487],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35557.519],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35669.713],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35211.447],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35550.608],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35711.656],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35682.608],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35880.834],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36249.874],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36071.616],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35846.156],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35773.358],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36274.946],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35739.309],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 36135.092],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35286.584],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35081.366]])
        # VS signals, buy first then sell, delivery period: 2 days (stock), 1 day (cash)
self.vs_res_bs21 = np.array(
[[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 9925.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 300.000, 300.000, 4954.000, 0.000, 9785.000],
[400.000, 400.000, 0.000, 0.000, 500.000, 300.000, 300.000, 878.000, 0.000, 9666.000],
[400.000, 400.000, 173.176, 0.000, 500.000, 300.000, 300.000, 0.000, 0.000, 9731.000],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9830.927],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9785.854],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9614.341],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9303.195],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8834.440],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8712.755],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8717.951],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9079.148],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9166.028],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9023.661],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9291.686],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9411.637],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9706.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9822.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9986.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9805.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 0.000, 4993.736, 0.000, 9704.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9567.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9209.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9407.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9329.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9545.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9652.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9414.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9367.736],
[0.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 9319.736, 0.000, 19556.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20094.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19849.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19802.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19487.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19749.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19392.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19671.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19756.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20111.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19867.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19775.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20314.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20310.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20253.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20044.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20495.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 19798.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20103.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20864.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20425.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20137.841],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20711.357],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21470.389],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21902.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20962.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21833.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21941.817],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21278.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21224.470],
[1100.000, 710.484, 400.000, 300.000, 600.000, 500.000, 600.000, 9160.000, 0.000, 31225.212],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30894.575],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30764.381],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31815.583],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31615.422],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 32486.139],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 33591.285],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34056.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34756.486],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34445.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34433.954],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33870.470],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34014.301],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34680.567],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33890.995],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34004.664],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34127.777],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33421.164],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33120.906],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 32613.317],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 33168.156],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33504.624],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33652.132],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 34680.487],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35557.519],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35669.713],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35211.447],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35550.608],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35711.656],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35682.608],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35880.834],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36249.874],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36071.616],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35846.156],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35773.358],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36274.946],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35739.309],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 36135.092],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35286.584],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35081.366]])
        # Multi-signal processing results, sell first then buy (cash from sells is used for purchases),
        # delivery period: 2 days (stock), 0 days (cash)
self.multi_res = np.array(
[[0.0000, 357.2545, 0.0000, 6506.9627, 0.0000, 9965.1867],
[0.0000, 357.2545, 0.0000, 6506.9627, 0.0000, 10033.0650],
[0.0000, 178.6273, 0.0000, 8273.5864, 0.0000, 10034.8513],
[0.0000, 178.6273, 0.0000, 8273.5864, 0.0000, 10036.6376],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10019.3404],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10027.7062],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10030.1477],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10005.1399],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10002.5054],
[150.3516, 489.4532, 0.0000, 3765.8877, 0.0000, 9967.3860],
[75.1758, 391.5625, 0.0000, 5490.1377, 0.0000, 10044.4059],
[75.1758, 391.5625, 0.0000, 5490.1377, 0.0000, 10078.1430],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10138.2709],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10050.4768],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10300.0711],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10392.6970],
[75.1758, 391.5625, 169.2705, 4644.3773, 0.0000, 10400.5282],
[75.1758, 391.5625, 169.2705, 4644.3773, 0.0000, 10408.9220],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10376.5914],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10346.8794],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10364.7474],
[75.1758, 381.1856, 645.5014, 2459.1665, 0.0000, 10302.4570],
[18.7939, 381.1856, 645.5014, 3024.6764, 0.0000, 10747.4929],
[18.7939, 381.1856, 96.8252, 6492.3097, 0.0000, 11150.9107],
[18.7939, 381.1856, 96.8252, 6492.3097, 0.0000, 11125.2946],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11191.9956],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11145.7486],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11090.0768],
[132.5972, 114.3557, 864.3802, 4223.9548, 0.0000, 11113.8733],
[132.5972, 114.3557, 864.3802, 4223.9548, 0.0000, 11456.3281],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21983.7333],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 22120.6165],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21654.5327],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21429.6550],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21912.5643],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 22516.3100],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23169.0777],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23390.8080],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23743.3742],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 23210.7311],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 24290.4375],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 24335.3279],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 18317.3553],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 18023.4660],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24390.0527],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24389.6421],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24483.5953],
[0.0000, 559.9112, 0.0000, 18321.5674, 0.0000, 24486.1895],
[0.0000, 0.0000, 0.0000, 24805.3389, 0.0000, 24805.3389],
[0.0000, 0.0000, 0.0000, 24805.3389, 0.0000, 24805.3389]])
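        # Note on the expected-result matrices above: each row is an end-of-day account state;
        # the leading columns are the per-stock holdings, followed by the cash balance, and the
        # last column is the total portfolio value (inferred from how the tests below index
        # these arrays, e.g. row[0:7] for holdings and row[7] for cash in the 10-column cases).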
def test_loop_step_pt_sb00(self):
""" test loop step PT-signal, sell first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.pt_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[2][7],
own_amounts=self.pt_res_sb00[2][0:7],
available_cash=self.pt_res_sb00[2][7],
available_amounts=self.pt_res_sb00[2][0:7],
op=self.pt_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[2][7] + c_g + c_s
amounts = self.pt_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[30][7],
own_amounts=self.pt_res_sb00[30][0:7],
available_cash=self.pt_res_sb00[30][7],
available_amounts=self.pt_res_sb00[30][0:7],
op=self.pt_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[30][7] + c_g + c_s
amounts = self.pt_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[59][7] + 10000,
own_amounts=self.pt_res_sb00[59][0:7],
available_cash=self.pt_res_sb00[59][7] + 10000,
available_amounts=self.pt_res_sb00[59][0:7],
op=self.pt_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.pt_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[95][7],
own_amounts=self.pt_res_sb00[95][0:7],
available_cash=self.pt_res_sb00[95][7],
available_amounts=self.pt_res_sb00[95][0:7],
op=self.pt_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
        cash = self.pt_res_sb00[95][7] + c_g + c_s
        amounts = self.pt_res_sb00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[97][0:7]))
def test_loop_step_pt_bs00(self):
""" test loop step PT-signal, buy first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.pt_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[2][7],
own_amounts=self.pt_res_bs00[2][0:7],
available_cash=self.pt_res_bs00[2][7],
available_amounts=self.pt_res_bs00[2][0:7],
op=self.pt_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[2][7] + c_g + c_s
amounts = self.pt_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[30][7],
own_amounts=self.pt_res_bs00[30][0:7],
available_cash=self.pt_res_bs00[30][7],
available_amounts=self.pt_res_bs00[30][0:7],
op=self.pt_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[30][7] + c_g + c_s
amounts = self.pt_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[59][7] + 10000,
own_amounts=self.pt_res_bs00[59][0:7],
available_cash=self.pt_res_bs00[59][7] + 10000,
available_amounts=self.pt_res_bs00[59][0:7],
op=self.pt_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.pt_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[95][7],
own_amounts=self.pt_res_bs00[95][0:7],
available_cash=self.pt_res_bs00[95][7],
available_amounts=self.pt_res_bs00[95][0:7],
op=self.pt_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
        cash = self.pt_res_bs00[95][7] + c_g + c_s
        amounts = self.pt_res_bs00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[97][0:7]))
def test_loop_step_ps_sb00(self):
""" test loop step PS-signal, sell first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.ps_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[2][7],
own_amounts=self.ps_res_sb00[2][0:7],
available_cash=self.ps_res_sb00[2][7],
available_amounts=self.ps_res_sb00[2][0:7],
op=self.ps_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[2][7] + c_g + c_s
amounts = self.ps_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[30][7],
own_amounts=self.ps_res_sb00[30][0:7],
available_cash=self.ps_res_sb00[30][7],
available_amounts=self.ps_res_sb00[30][0:7],
op=self.ps_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[30][7] + c_g + c_s
amounts = self.ps_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[59][7] + 10000,
own_amounts=self.ps_res_sb00[59][0:7],
available_cash=self.ps_res_sb00[59][7] + 10000,
available_amounts=self.ps_res_sb00[59][0:7],
op=self.ps_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.ps_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[95][7],
own_amounts=self.ps_res_sb00[95][0:7],
available_cash=self.ps_res_sb00[95][7],
available_amounts=self.ps_res_sb00[95][0:7],
op=self.ps_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
        cash = self.ps_res_sb00[95][7] + c_g + c_s
        amounts = self.ps_res_sb00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[97][0:7]))
def test_loop_step_ps_bs00(self):
""" test loop step PS-signal, buy first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.ps_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[2][7],
                                                     own_amounts=self.ps_res_bs00[2][0:7],
available_cash=self.ps_res_bs00[2][7],
available_amounts=self.ps_res_bs00[2][0:7],
op=self.ps_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[2][7] + c_g + c_s
amounts = self.ps_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[30][7],
                                                     own_amounts=self.ps_res_bs00[30][0:7],
available_cash=self.ps_res_bs00[30][7],
available_amounts=self.ps_res_bs00[30][0:7],
op=self.ps_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[30][7] + c_g + c_s
amounts = self.ps_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[59][7] + 10000,
own_amounts=self.ps_res_bs00[59][0:7],
available_cash=self.ps_res_bs00[59][7] + 10000,
available_amounts=self.ps_res_bs00[59][0:7],
op=self.ps_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.ps_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[95][7],
own_amounts=self.ps_res_bs00[95][0:7],
available_cash=self.ps_res_bs00[95][7],
available_amounts=self.ps_res_bs00[95][0:7],
op=self.ps_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
        cash = self.ps_res_bs00[95][7] + c_g + c_s
        amounts = self.ps_res_bs00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[97][0:7]))
def test_loop_step_vs_sb00(self):
"""test loop step of Volume Signal type of signals"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.vs_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7750)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 500., 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[2][7],
own_amounts=self.vs_res_sb00[2][0:7],
available_cash=self.vs_res_sb00[2][7],
available_amounts=self.vs_res_sb00[2][0:7],
op=self.vs_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[2][7] + c_g + c_s
amounts = self.vs_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[30][7],
own_amounts=self.vs_res_sb00[30][0:7],
available_cash=self.vs_res_sb00[30][7],
available_amounts=self.vs_res_sb00[30][0:7],
op=self.vs_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[30][7] + c_g + c_s
amounts = self.vs_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[59][7] + 10000,
own_amounts=self.vs_res_sb00[59][0:7],
available_cash=self.vs_res_sb00[59][7] + 10000,
available_amounts=self.vs_res_sb00[59][0:7],
op=self.vs_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.vs_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[95][7],
own_amounts=self.vs_res_sb00[95][0:7],
available_cash=self.vs_res_sb00[95][7],
available_amounts=self.vs_res_sb00[95][0:7],
op=self.vs_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
        cash = self.vs_res_sb00[95][7] + c_g + c_s
        amounts = self.vs_res_sb00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[97][0:7]))
def test_loop_step_vs_bs00(self):
"""test loop step of Volume Signal type of signals"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.vs_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7750)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 500., 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[2][7],
own_amounts=self.vs_res_bs00[2][0:7],
available_cash=self.vs_res_bs00[2][7],
available_amounts=self.vs_res_bs00[2][0:7],
op=self.vs_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[2][7] + c_g + c_s
amounts = self.vs_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[30][7],
own_amounts=self.vs_res_bs00[30][0:7],
available_cash=self.vs_res_bs00[30][7],
available_amounts=self.vs_res_bs00[30][0:7],
op=self.vs_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[30][7] + c_g + c_s
amounts = self.vs_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[59][7] + 10000,
own_amounts=self.vs_res_bs00[59][0:7],
available_cash=self.vs_res_bs00[59][7] + 10000,
available_amounts=self.vs_res_bs00[59][0:7],
op=self.vs_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.vs_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[95][7],
own_amounts=self.vs_res_bs00[95][0:7],
available_cash=self.vs_res_bs00[95][7],
available_amounts=self.vs_res_bs00[95][0:7],
op=self.vs_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
        cash = self.vs_res_bs00[95][7] + c_g + c_s
        amounts = self.vs_res_bs00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[97][0:7]))
def test_loop_pt(self):
""" Test looping of PT proportion target signals, with
stock delivery delay = 0 days
cash delivery delay = 0 day
        buy-sell sequence = buy first
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 0 days \n'
'cash delivery delay = 0 day \n'
              'buy-sell sequence = buy first')
res = apply_loop(op_type=0,
op_list=self.pt_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
# print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res, self.pt_res_bs00, 2))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=0,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
# print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_pt_with_delay(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 1 day
use_sell_cash = False
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize_cash = False (buy and sell at the same time)')
res = apply_loop(
op_type=0,
op_list=self.pt_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
cash_delivery_period=1,
stock_delivery_period=2,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.pt_res_bs21[i]))
print()
self.assertTrue(np.allclose(res, self.pt_res_bs21, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_pt_with_delay_use_cash(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 0 day
use sell cash = True (sell stock first to use cash when possible
(not possible when cash delivery period != 0))
"""
        print('Test looping of PT proportion target signals, with:\n'
              'stock delivery delay = 2 days \n'
              'cash delivery delay = 0 day \n'
              'maximize cash usage = True (cash from same-day sells can fund purchases)')
res = apply_loop(
op_type=0,
op_list=self.pt_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
inflation_rate=0,
max_cash_usage=True,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.pt_res_sb20[i]))
print()
self.assertTrue(np.allclose(res, self.pt_res_sb20, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=1,
stock_delivery_period=2,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_ps(self):
""" Test looping of PS Proportion Signal type of signals
"""
res = apply_loop(op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res, self.ps_res_bs00, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_ps_with_delay(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 1 day
use_sell_cash = False
"""
        print('Test looping of PS proportion signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize_cash = False (buy and sell at the same time)')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
cash_delivery_period=1,
stock_delivery_period=2,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.ps_res_bs21[i]))
print()
self.assertTrue(np.allclose(res, self.ps_res_bs21, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_ps_with_delay_use_cash(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 0 day
use sell cash = True (sell stock first to use cash when possible
(not possible when cash delivery period != 0))
"""
        print('Test looping of PS proportion signals, with:\n'
              'stock delivery delay = 2 days \n'
              'cash delivery delay = 0 day \n'
              'maximize cash usage = True (cash from same-day sells can fund purchases)')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
inflation_rate=0,
max_cash_usage=True,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.ps_res_sb20[i]))
print()
self.assertTrue(np.allclose(res, self.ps_res_sb20, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=1,
stock_delivery_period=2,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_vs(self):
""" Test looping of VS Volume Signal type of signals
"""
res = apply_loop(op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res, self.vs_res_bs00, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_vs_with_delay(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 1 day
use_sell_cash = False
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize_cash = False (buy and sell at the same time)')
res = apply_loop(
op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
cash_delivery_period=1,
stock_delivery_period=2,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.vs_res_bs21[i]))
print()
self.assertTrue(np.allclose(res, self.vs_res_bs21, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_vs_with_delay_use_cash(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 0 day
use sell cash = True (sell stock first to use cash when possible
(not possible when cash delivery period != 0))
"""
        print('Test looping of VS volume signals, with:\n'
              'stock delivery delay = 2 days \n'
              'cash delivery delay = 0 days \n'
              'maximize cash usage = True (sell first so that the cash can be used for buying)')
res = apply_loop(
op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
inflation_rate=0,
max_cash_usage=True,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.vs_res_sb20[i]))
print()
self.assertTrue(np.allclose(res, self.vs_res_sb20, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=1,
stock_delivery_period=2,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_multiple_signal(self):
""" Test looping of PS Proportion Signal type of signals
"""
res = apply_loop(op_type=1,
op_list=self.multi_signal_hp,
history_list=self.multi_history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
max_cash_usage=True,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.multi_res[i]))
print()
self.assertTrue(np.allclose(res, self.multi_res, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=1,
op_list=self.multi_signal_hp,
history_list=self.multi_history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=0,
stock_delivery_period=2,
max_cash_usage=False,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
class TestStrategy(unittest.TestCase):
""" test all properties and methods of strategy base class"""
def setUp(self) -> None:
pass
class TestLSStrategy(RollingTiming):
"""用于test测试的简单多空蒙板生成策略。基于RollingTiming滚动择时方法生成
该策略有两个参数,N与Price
N用于计算OHLC价格平均值的N日简单移动平均,判断,当移动平均值大于等于Price时,状态为看多,否则为看空
"""
def __init__(self):
super().__init__(stg_name='test_LS',
stg_text='test long/short strategy',
par_count=2,
par_types='discr, conti',
par_bounds_or_enums=([1, 5], [2, 10]),
data_types='close, open, high, low',
data_freq='d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
n, price = params
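        # data_types is 'close, open, high, low', so after transposing, the rows of h are the four
        # price series; average them into an OHLC mean and smooth it with an n-day simple moving
        # average, then return 1 (long) when the latest MA value reaches price, otherwise 0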
h = hist_data.T
avg = (h[0] + h[1] + h[2] + h[3]) / 4
ma = sma(avg, n)
if ma[-1] < price:
return 0
else:
return 1
class TestSelStrategy(SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='high, low, close',
data_freq='d',
sample_freq='10d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
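        # avg: mean price of each share over the look-back window, across all price types
        # dif: day-over-day change of the close price (htype index 2, data_types 'high, low, close');
        # the last non-nan change of each share is kept and divided by avg, and the two shares with
        # the largest ratios each receive a 0.5 weight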
avg = np.nanmean(hist_data, axis=(1, 2))
dif = (hist_data[:, :, 2] - np.roll(hist_data[:, :, 2], 1, 1))
dif_no_nan = np.array([arr[~np.isnan(arr)][-1] for arr in dif])
difper = dif_no_nan / avg
large2 = difper.argsort()[1:]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSelStrategyDiffTime(SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
# TODO: This strategy is not working, find out why and improve
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='close, low, open',
data_freq='d',
sample_freq='w',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
avg = hist_data.mean(axis=1).squeeze()
difper = (hist_data[:, :, 0] - np.roll(hist_data[:, :, 0], 1))[:, -1] / avg
large2 = difper.argsort()[0:2]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSigStrategy(SimpleTiming):
"""用于Test测试的简单信号生成策略,基于SimpleTiming策略生成
策略有三个参数,第一个参数为ratio,另外两个参数为price1以及price2
ratio是k线形状比例的阈值,定义为abs((C-O)/(H-L))。当这个比值小于ratio阈值时,判断该K线为十字交叉(其实还有丁字等多种情形,但这里做了
简化处理。
信号生成的规则如下:
1,当某个K线出现十字交叉,且昨收与今收之差大于price1时,买入信号
2,当某个K线出现十字交叉,且昨收与今收之差小于price2时,卖出信号
"""
def __init__(self):
super().__init__(stg_name='test_SIG',
stg_text='test signal creation strategy',
par_count=3,
par_types='conti, conti, conti',
par_bounds_or_enums=([2, 10], [0, 3], [0, 3]),
data_types='close, open, high, low',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
r, price1, price2 = params
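        # with data_types 'close, open, high, low', the rows of h are close, open, high and low;
        # ratio measures the candle body against its high-low range, abs((close-open)/(low-high)),
        # and diff is the change of today's close versus the previous close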
h = hist_data.T
ratio = np.abs((h[0] - h[1]) / (h[3] - h[2]))
diff = h[0] - np.roll(h[0], 1)
sig = np.where((ratio < r) & (diff > price1),
1,
np.where((ratio < r) & (diff < price2), -1, 0))
return sig
class MyStg(qt.RollingTiming):
"""自定义双均线择时策略策略"""
def __init__(self):
"""这个均线择时策略只有三个参数:
- SMA 慢速均线,所选择的股票
- FMA 快速均线
- M 边界值
策略的其他说明
"""
"""
必须初始化的关键策略参数清单:
"""
super().__init__(
pars=(20, 100, 0.01),
par_count=3,
par_types=['discr', 'discr', 'conti'],
par_bounds_or_enums=[(10, 250), (10, 250), (0.0, 0.5)],
stg_name='CUSTOM ROLLING TIMING STRATEGY',
stg_text='Customized Rolling Timing Strategy for Testing',
data_types='close',
window_length=100,
)
print(f'=====================\n====================\n'
f'custom strategy initialized, \npars: {self.pars}\npar_count:{self.par_count}\npar_types:'
f'{self.par_types}\n'
f'{self.info()}')
    # The concrete implementation of the strategy is written in its _realize() function
    # This function always takes two arguments: hist_price, the historical data of the portfolio,
    # and params, the concrete strategy parameters
def _realize(self, hist_price, params):
"""策略的具体实现代码:
s:短均线计算日期;l:长均线计算日期;m:均线边界宽度;hesitate:均线跨越类型"""
f, s, m = params
        # temporary workaround: slice the incoming data inside the strategy implementation; later the
        # slicing should happen outside, so that the incoming data already matches the data_types parameter
h = hist_price.T
        # calculate the current values of the fast and slow moving averages
s_ma = qt.sma(h[0], s)[-1]
f_ma = qt.sma(h[0], f)[-1]
        # calculate the stop boundaries around the slow MA; while the fast MA stays inside them,
        # close the position and emit no buy/sell signal
s_ma_u = s_ma * (1 + m)
s_ma_l = s_ma * (1 - m)
        # produce long/short/empty flags at different levels according to the hesitation mode
        if f_ma > s_ma_u:  # fast MA above the slow-MA stop range: hold a long position
return 1
        elif s_ma_l < f_ma < s_ma_u:  # fast MA inside the stop boundaries: close the position
return 0
        else:  # f_ma < s_ma_l, fast MA below the slow-MA stop range: hold a short position
return -1
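# A minimal usage sketch for the custom strategy above (illustrative comments only, not executed by
# the test suite; the parameter values are arbitrary assumptions within the declared bounds):
#     op = qt.Operator(strategies=[MyStg()])
#     op.set_parameter(0, pars=(23, 166, 0.02))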
class TestOperator(unittest.TestCase):
"""全面测试Operator对象的所有功能。包括:
1, Strategy 参数的设置
2, 历史数据的获取与分配提取
3, 策略优化参数的批量设置和优化空间的获取
4, 策略输出值的正确性验证
5, 策略结果的混合结果确认
"""
def setUp(self):
"""prepare data for Operator test"""
print('start testing HistoryPanel object\n')
# build up test data: a 4-type, 3-share, 50-day matrix of prices that contains nan values in some days
# for some share_pool
# for share1:
data_rows = 50
share1_close = [10.04, 10, 10, 9.99, 9.97, 9.99, 10.03, 10.03, 10.06, 10.06, 10.11,
10.09, 10.07, 10.06, 10.09, 10.03, 10.03, 10.06, 10.08, 10, 9.99,
10.03, 10.03, 10.06, 10.03, 9.97, 9.94, 9.83, 9.77, 9.84, 9.91, 9.93,
9.96, 9.91, 9.91, 9.88, 9.91, 9.64, 9.56, 9.57, 9.55, 9.57, 9.61, 9.61,
9.55, 9.57, 9.63, 9.64, 9.65, 9.62]
share1_open = [10.02, 10, 9.98, 9.97, 9.99, 10.01, 10.04, 10.06, 10.06, 10.11,
10.11, 10.07, 10.06, 10.09, 10.03, 10.02, 10.06, 10.08, 9.99, 10,
10.03, 10.02, 10.06, 10.03, 9.97, 9.94, 9.83, 9.78, 9.77, 9.91, 9.92,
9.97, 9.91, 9.9, 9.88, 9.91, 9.63, 9.64, 9.57, 9.55, 9.58, 9.61, 9.62,
9.55, 9.57, 9.61, 9.63, 9.64, 9.61, 9.56]
share1_high = [10.07, 10, 10, 10, 10.03, 10.03, 10.04, 10.09, 10.1, 10.14, 10.11, 10.1,
10.09, 10.09, 10.1, 10.05, 10.07, 10.09, 10.1, 10, 10.04, 10.04, 10.06,
10.09, 10.05, 9.97, 9.96, 9.86, 9.77, 9.92, 9.94, 9.97, 9.97, 9.92, 9.92,
9.92, 9.93, 9.64, 9.58, 9.6, 9.58, 9.62, 9.62, 9.64, 9.59, 9.62, 9.63,
9.7, 9.66, 9.64]
share1_low = [9.99, 10, 9.97, 9.97, 9.97, 9.98, 9.99, 10.03, 10.03, 10.04, 10.11, 10.07,
10.05, 10.03, 10.03, 10.01, 9.99, 10.03, 9.95, 10, 9.95, 10, 10.01, 9.99,
9.96, 9.89, 9.83, 9.77, 9.77, 9.8, 9.9, 9.91, 9.89, 9.89, 9.87, 9.85, 9.6,
9.64, 9.53, 9.55, 9.54, 9.55, 9.58, 9.54, 9.53, 9.53, 9.63, 9.64, 9.59, 9.56]
# for share2:
share2_close = [9.68, 9.87, 9.86, 9.87, 9.79, 9.82, 9.8, 9.66, 9.62, 9.58, 9.69, 9.78, 9.75,
9.96, 9.9, 10.04, 10.06, 10.08, 10.24, 10.24, 10.24, 9.86, 10.13, 10.12,
10.1, 10.25, 10.24, 10.22, 10.75, 10.64, 10.56, 10.6, 10.42, 10.25, 10.24,
10.49, 10.57, 10.63, 10.48, 10.37, 10.96, 11.02, np.nan, np.nan, 10.88, 10.87, 11.01,
11.01, 11.58, 11.8]
share2_open = [9.88, 9.88, 9.89, 9.75, 9.74, 9.8, 9.62, 9.65, 9.58, 9.67, 9.81, 9.8, 10,
9.95, 10.1, 10.06, 10.14, 9.9, 10.2, 10.29, 9.86, 9.48, 10.01, 10.24, 10.26,
10.24, 10.12, 10.65, 10.64, 10.56, 10.42, 10.43, 10.29, 10.3, 10.44, 10.6,
10.67, 10.46, 10.39, 10.9, 11.01, 11.01, np.nan, np.nan, 10.82, 11.02, 10.96,
11.55, 11.74, 11.8]
share2_high = [9.91, 10.04, 9.93, 10.04, 9.84, 9.88, 9.99, 9.7, 9.67, 9.71, 9.85, 9.9, 10,
10.2, 10.11, 10.18, 10.21, 10.26, 10.38, 10.47, 10.42, 10.07, 10.24, 10.27,
10.38, 10.43, 10.39, 10.65, 10.84, 10.65, 10.73, 10.63, 10.51, 10.35, 10.46,
10.63, 10.74, 10.76, 10.54, 11.02, 11.12, 11.17, np.nan, np.nan, 10.92, 11.15,
11.11, 11.55, 11.95, 11.93]
share2_low = [9.63, 9.84, 9.81, 9.74, 9.67, 9.72, 9.57, 9.54, 9.51, 9.47, 9.68, 9.63, 9.75,
9.65, 9.9, 9.93, 10.03, 9.8, 10.14, 10.09, 9.78, 9.21, 9.11, 9.68, 10.05,
10.12, 9.89, 9.89, 10.59, 10.43, 10.34, 10.32, 10.21, 10.2, 10.18, 10.36,
10.51, 10.41, 10.32, 10.37, 10.87, 10.95, np.nan, np.nan, 10.65, 10.71, 10.75,
10.91, 11.31, 11.58]
# for share3:
share3_close = [6.64, 7.26, 7.03, 6.87, np.nan, 6.64, 6.85, 6.7, 6.39, 6.22, 5.92, 5.91, 6.11,
5.91, 6.23, 6.28, 6.28, 6.27, np.nan, 5.56, 5.67, 5.16, 5.69, 6.32, 6.14, 6.25,
5.79, 5.26, 5.05, 5.45, 6.06, 6.21, 5.69, 5.46, 6.02, 6.69, 7.43, 7.72, 8.16,
7.83, 8.7, 8.71, 8.88, 8.54, 8.87, 8.87, 8.18, 7.8, 7.97, 8.25]
share3_open = [7.26, 7, 6.88, 6.91, np.nan, 6.81, 6.63, 6.45, 6.16, 6.24, 5.96, 5.97, 5.96,
6.2, 6.35, 6.11, 6.37, 5.58, np.nan, 5.65, 5.19, 5.42, 6.3, 6.15, 6.05, 5.89,
5.22, 5.2, 5.07, 6.04, 6.12, 5.85, 5.67, 6.02, 6.04, 7.07, 7.64, 7.99, 7.59,
8.73, 8.72, 8.97, 8.58, 8.71, 8.77, 8.4, 7.95, 7.76, 8.25, 7.51]
share3_high = [7.41, 7.31, 7.14, 7, np.nan, 6.82, 6.96, 6.85, 6.5, 6.34, 6.04, 6.02, 6.12, 6.38,
6.43, 6.46, 6.43, 6.27, np.nan, 6.01, 5.67, 5.67, 6.35, 6.32, 6.43, 6.36, 5.79,
5.47, 5.65, 6.04, 6.14, 6.23, 5.83, 6.25, 6.27, 7.12, 7.82, 8.14, 8.27, 8.92,
8.76, 9.15, 8.9, 9.01, 9.16, 9, 8.27, 7.99, 8.33, 8.25]
share3_low = [6.53, 6.87, 6.83, 6.7, np.nan, 6.63, 6.57, 6.41, 6.15, 6.07, 5.89, 5.82, 5.73, 5.81,
6.1, 6.06, 6.16, 5.57, np.nan, 5.51, 5.19, 5.12, 5.69, 6.01, 5.97, 5.86, 5.18, 5.19,
4.96, 5.45, 5.84, 5.85, 5.28, 5.42, 6.02, 6.69, 7.28, 7.64, 7.25, 7.83, 8.41, 8.66,
8.53, 8.54, 8.73, 8.27, 7.95, 7.67, 7.8, 7.51]
# for sel_finance test
shares_eps = np.array([[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, 0.2, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[0.1, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.3, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.15, np.nan, np.nan],
[np.nan, 0.1, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.2, np.nan, np.nan],
[np.nan, 0.5, np.nan],
[0.4, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[0.9, np.nan, np.nan],
[np.nan, np.nan, 0.1]])
self.date_indices = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14', '2016-07-15', '2016-07-18',
'2016-07-19', '2016-07-20', '2016-07-21', '2016-07-22',
'2016-07-25', '2016-07-26', '2016-07-27', '2016-07-28',
'2016-07-29', '2016-08-01', '2016-08-02', '2016-08-03',
'2016-08-04', '2016-08-05', '2016-08-08', '2016-08-09',
'2016-08-10', '2016-08-11', '2016-08-12', '2016-08-15',
'2016-08-16', '2016-08-17', '2016-08-18', '2016-08-19',
'2016-08-22', '2016-08-23', '2016-08-24', '2016-08-25',
'2016-08-26', '2016-08-29', '2016-08-30', '2016-08-31',
'2016-09-01', '2016-09-02', '2016-09-05', '2016-09-06',
'2016-09-07', '2016-09-08']
self.shares = ['000010', '000030', '000039']
self.types = ['close', 'open', 'high', 'low']
self.sel_finance_tyeps = ['eps']
self.test_data_3D = np.zeros((3, data_rows, 4))
self.test_data_2D = np.zeros((data_rows, 3))
self.test_data_2D2 = np.zeros((data_rows, 4))
self.test_data_sel_finance = np.empty((3, data_rows, 1))
# Build up 3D data
self.test_data_3D[0, :, 0] = share1_close
self.test_data_3D[0, :, 1] = share1_open
self.test_data_3D[0, :, 2] = share1_high
self.test_data_3D[0, :, 3] = share1_low
self.test_data_3D[1, :, 0] = share2_close
self.test_data_3D[1, :, 1] = share2_open
self.test_data_3D[1, :, 2] = share2_high
self.test_data_3D[1, :, 3] = share2_low
self.test_data_3D[2, :, 0] = share3_close
self.test_data_3D[2, :, 1] = share3_open
self.test_data_3D[2, :, 2] = share3_high
self.test_data_3D[2, :, 3] = share3_low
self.test_data_sel_finance[:, :, 0] = shares_eps.T
self.hp1 = qt.HistoryPanel(values=self.test_data_3D,
levels=self.shares,
columns=self.types,
rows=self.date_indices)
print(f'in test Operator, history panel is created for timing test')
self.hp1.info()
self.hp2 = qt.HistoryPanel(values=self.test_data_sel_finance,
levels=self.shares,
columns=self.sel_finance_tyeps,
rows=self.date_indices)
print(f'in test_Operator, history panel is created for selection finance test:')
self.hp2.info()
self.op = qt.Operator(strategies='dma', signal_type='PS')
self.op2 = qt.Operator(strategies='dma, macd, trix')
def test_init(self):
""" test initialization of Operator class"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.signal_type, 'pt')
self.assertIsInstance(op.strategies, list)
self.assertEqual(len(op.strategies), 0)
op = qt.Operator('dma')
self.assertIsInstance(op, qt.Operator)
self.assertIsInstance(op.strategies, list)
self.assertIsInstance(op.strategies[0], TimingDMA)
op = qt.Operator('dma, macd')
self.assertIsInstance(op, qt.Operator)
op = qt.Operator(['dma', 'macd'])
self.assertIsInstance(op, qt.Operator)
def test_repr(self):
""" test basic representation of Opeartor class"""
op = qt.Operator()
self.assertEqual(op.__repr__(), 'Operator()')
op = qt.Operator('macd, dma, trix, random, avg_low')
self.assertEqual(op.__repr__(), 'Operator(macd, dma, trix, random, avg_low)')
self.assertEqual(op['dma'].__repr__(), 'Q-TIMING(DMA)')
self.assertEqual(op['macd'].__repr__(), 'R-TIMING(MACD)')
self.assertEqual(op['trix'].__repr__(), 'R-TIMING(TRIX)')
self.assertEqual(op['random'].__repr__(), 'SELECT(RANDOM)')
self.assertEqual(op['avg_low'].__repr__(), 'FACTOR(AVG LOW)')
def test_info(self):
"""Test information output of Operator"""
print(f'test printing information of operator object')
self.op.info()
def test_get_strategy_by_id(self):
""" test get_strategy_by_id()"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
self.assertEqual(op.strategy_ids, ['macd', 'dma', 'trix'])
self.assertIs(op.get_strategy_by_id('macd'), op.strategies[0])
self.assertIs(op.get_strategy_by_id(1), op.strategies[1])
self.assertIs(op.get_strategy_by_id('trix'), op.strategies[2])
def test_get_items(self):
""" test method __getitem__(), it should be the same as geting strategies by id"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
self.assertEqual(op.strategy_ids, ['macd', 'dma', 'trix'])
self.assertIs(op['macd'], op.strategies[0])
self.assertIs(op['trix'], op.strategies[2])
self.assertIs(op[1], op.strategies[1])
self.assertIs(op[3], op.strategies[2])
def test_get_strategies_by_price_type(self):
""" test get_strategies_by_price_type"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategies_by_price_type('close')
stg_open = op.get_strategies_by_price_type('open')
stg_high = op.get_strategies_by_price_type('high')
self.assertIsInstance(stg_close, list)
self.assertIsInstance(stg_open, list)
self.assertIsInstance(stg_high, list)
self.assertEqual(stg_close, [op.strategies[1]])
self.assertEqual(stg_open, [op.strategies[0], op.strategies[2]])
self.assertEqual(stg_high, [])
stg_wrong = op.get_strategies_by_price_type(123)
self.assertIsInstance(stg_wrong, list)
self.assertEqual(stg_wrong, [])
def test_get_strategy_count_by_price_type(self):
""" test get_strategy_count_by_price_type"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategy_count_by_price_type('close')
stg_open = op.get_strategy_count_by_price_type('open')
stg_high = op.get_strategy_count_by_price_type('high')
self.assertIsInstance(stg_close, int)
self.assertIsInstance(stg_open, int)
self.assertIsInstance(stg_high, int)
self.assertEqual(stg_close, 1)
self.assertEqual(stg_open, 2)
self.assertEqual(stg_high, 0)
stg_wrong = op.get_strategy_count_by_price_type(123)
self.assertIsInstance(stg_wrong, int)
self.assertEqual(stg_wrong, 0)
def test_get_strategy_names_by_price_type(self):
""" test get_strategy_names_by_price_type"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategy_names_by_price_type('close')
stg_open = op.get_strategy_names_by_price_type('open')
stg_high = op.get_strategy_names_by_price_type('high')
self.assertIsInstance(stg_close, list)
self.assertIsInstance(stg_open, list)
self.assertIsInstance(stg_high, list)
self.assertEqual(stg_close, ['DMA'])
self.assertEqual(stg_open, ['MACD', 'TRIX'])
self.assertEqual(stg_high, [])
stg_wrong = op.get_strategy_names_by_price_type(123)
self.assertIsInstance(stg_wrong, list)
self.assertEqual(stg_wrong, [])
def test_get_strategy_id_by_price_type(self):
""" test get_strategy_IDs_by_price_type"""
print('-----Test get strategy IDs by price type------\n')
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategy_id_by_price_type('close')
stg_open = op.get_strategy_id_by_price_type('open')
stg_high = op.get_strategy_id_by_price_type('high')
self.assertIsInstance(stg_close, list)
self.assertIsInstance(stg_open, list)
self.assertIsInstance(stg_high, list)
self.assertEqual(stg_close, ['dma'])
self.assertEqual(stg_open, ['macd', 'trix'])
self.assertEqual(stg_high, [])
op.add_strategies('dma, macd')
op.set_parameter('dma_1', price_type='open')
op.set_parameter('macd', price_type='open')
op.set_parameter('macd_1', price_type='high')
op.set_parameter('trix', price_type='close')
print(f'Operator strategy id:\n'
f'{op.strategies} on memory pos:\n'
f'{[id(stg) for stg in op.strategies]}')
stg_close = op.get_strategy_id_by_price_type('close')
stg_open = op.get_strategy_id_by_price_type('open')
stg_high = op.get_strategy_id_by_price_type('high')
stg_all = op.get_strategy_id_by_price_type()
print(f'All IDs of strategies:\n'
f'{stg_all}\n'
f'All price types of strategies:\n'
f'{[stg.price_type for stg in op.strategies]}')
self.assertEqual(stg_close, ['dma', 'trix'])
self.assertEqual(stg_open, ['macd', 'dma_1'])
self.assertEqual(stg_high, ['macd_1'])
stg_wrong = op.get_strategy_id_by_price_type(123)
self.assertIsInstance(stg_wrong, list)
self.assertEqual(stg_wrong, [])
def test_property_strategies(self):
""" test property strategies"""
print(f'created a new simple Operator with only one strategy: DMA')
op = qt.Operator('dma')
strategies = op.strategies
self.assertIsInstance(strategies, list)
op.info()
print(f'created the second simple Operator with three strategies')
self.assertIsInstance(strategies[0], TimingDMA)
op = qt.Operator('dma, macd, cdl')
strategies = op.strategies
op.info()
self.assertIsInstance(strategies, list)
self.assertIsInstance(strategies[0], TimingDMA)
self.assertIsInstance(strategies[1], TimingMACD)
self.assertIsInstance(strategies[2], TimingCDL)
def test_property_strategy_count(self):
""" test Property strategy_count, and the method get_strategy_count_by_price_type()"""
self.assertEqual(self.op.strategy_count, 1)
self.assertEqual(self.op2.strategy_count, 3)
self.assertEqual(self.op.get_strategy_count_by_price_type(), 1)
self.assertEqual(self.op2.get_strategy_count_by_price_type(), 3)
self.assertEqual(self.op.get_strategy_count_by_price_type('close'), 1)
self.assertEqual(self.op.get_strategy_count_by_price_type('high'), 0)
self.assertEqual(self.op2.get_strategy_count_by_price_type('close'), 3)
self.assertEqual(self.op2.get_strategy_count_by_price_type('open'), 0)
def test_property_strategy_names(self):
""" test property strategy_ids"""
op = qt.Operator('dma')
self.assertIsInstance(op.strategy_ids, list)
names = op.strategy_ids[0]
print(f'names are {names}')
self.assertEqual(names, 'dma')
op = qt.Operator('dma, macd, trix, cdl')
self.assertIsInstance(op.strategy_ids, list)
self.assertEqual(op.strategy_ids[0], 'dma')
self.assertEqual(op.strategy_ids[1], 'macd')
self.assertEqual(op.strategy_ids[2], 'trix')
self.assertEqual(op.strategy_ids[3], 'cdl')
op = qt.Operator('dma, macd, trix, dma, dma')
self.assertIsInstance(op.strategy_ids, list)
self.assertEqual(op.strategy_ids[0], 'dma')
self.assertEqual(op.strategy_ids[1], 'macd')
self.assertEqual(op.strategy_ids[2], 'trix')
self.assertEqual(op.strategy_ids[3], 'dma_1')
self.assertEqual(op.strategy_ids[4], 'dma_2')
def test_property_strategy_blenders(self):
""" test property strategy blenders including property setter,
and test the method get_blender()"""
print(f'------- Test property strategy blenders ---------')
op = qt.Operator()
self.assertIsInstance(op.strategy_blenders, dict)
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'pt')
# test adding blender to empty operator
op.strategy_blenders = '1 + 2'
op.signal_type = 'proportion signal'
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'ps')
op.add_strategy('dma')
op.strategy_blenders = '1+2'
self.assertEqual(op.strategy_blenders, {'close': ['+', '2', '1']})
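        # as asserted above, blender strings are parsed into operator-first token lists stored per
        # backtest price type: '1+2' becomes {'close': ['+', '2', '1']}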
op.clear_strategies()
self.assertEqual(op.strategy_blenders, {})
op.add_strategies('dma, trix, macd, dma')
op.set_parameter('dma', price_type='open')
op.set_parameter('trix', price_type='high')
op.set_blender('open', '1+2')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '2', '1'])
self.assertEqual(blender_close, None)
self.assertEqual(blender_high, None)
op.set_blender('open', '1+2+3')
op.set_blender('abc', '1+2+3')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
blender_abc = op.get_blender('abc')
self.assertEqual(op.strategy_blenders, {'open': ['+', '3', '+', '2', '1']})
self.assertEqual(blender_open, ['+', '3', '+', '2', '1'])
self.assertEqual(blender_close, None)
self.assertEqual(blender_high, None)
self.assertEqual(blender_abc, None)
op.set_blender('open', 123)
blender_open = op.get_blender('open')
self.assertEqual(blender_open, [])
op.set_blender(None, '1+1')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(op.bt_price_types, ['close', 'high', 'open'])
self.assertEqual(op.get_blender(), {'close': ['+', '1', '1'],
'open': ['+', '1', '1'],
'high': ['+', '1', '1']})
self.assertEqual(blender_open, ['+', '1', '1'])
self.assertEqual(blender_close, ['+', '1', '1'])
self.assertEqual(blender_high, ['+', '1', '1'])
op.set_blender(None, ['1+1', '3+4'])
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '3'])
self.assertEqual(blender_close, ['+', '1', '1'])
self.assertEqual(blender_high, ['+', '4', '3'])
self.assertEqual(op.view_blender('open'), '3+4')
self.assertEqual(op.view_blender('close'), '1+1')
self.assertEqual(op.view_blender('high'), '3+4')
op.strategy_blenders = (['1+2', '2*3', '1+4'])
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '1'])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
self.assertEqual(op.view_blender('open'), '1+4')
self.assertEqual(op.view_blender('close'), '1+2')
self.assertEqual(op.view_blender('high'), '2*3')
# test error inputs:
# wrong type of price_type
self.assertRaises(TypeError, op.set_blender, 1, '1+3')
# price_type not found, no change is made
op.set_blender('volume', '1+3')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '1'])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
# price_type not valid, no change is made
op.set_blender('closee', '1+2')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '1'])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
# wrong type of blender, set to empty list
op.set_blender('open', 55)
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, [])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
# wrong type of blender, set to empty list
op.set_blender('close', ['1+2'])
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, [])
self.assertEqual(blender_close, [])
self.assertEqual(blender_high, ['*', '3', '2'])
# can't parse blender, set to empty list
op.set_blender('high', 'a+bc')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, [])
self.assertEqual(blender_close, [])
self.assertEqual(blender_high, [])
    def test_property_signal_type(self):
""" test property signal_type"""
op = qt.Operator()
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.signal_type, 'pt')
op = qt.Operator(signal_type='ps')
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.signal_type, 'ps')
op = qt.Operator(signal_type='PS')
self.assertEqual(op.signal_type, 'ps')
op = qt.Operator(signal_type='proportion signal')
self.assertEqual(op.signal_type, 'ps')
print(f'"pt" will be the default type if wrong value is given')
op = qt.Operator(signal_type='wrong value')
self.assertEqual(op.signal_type, 'pt')
print(f'test signal_type.setter')
op.signal_type = 'ps'
self.assertEqual(op.signal_type, 'ps')
print(f'test error raising')
self.assertRaises(TypeError, setattr, op, 'signal_type', 123)
self.assertRaises(ValueError, setattr, op, 'signal_type', 'wrong value')
def test_property_op_data_types(self):
""" test property op_data_types"""
op = qt.Operator()
self.assertIsInstance(op.op_data_types, list)
self.assertEqual(op.op_data_types, [])
op = qt.Operator('macd, dma, trix')
dt = op.op_data_types
self.assertEqual(dt[0], 'close')
op = qt.Operator('macd, cdl')
dt = op.op_data_types
self.assertEqual(dt[0], 'close')
self.assertEqual(dt[1], 'high')
self.assertEqual(dt[2], 'low')
self.assertEqual(dt[3], 'open')
self.assertEqual(dt, ['close', 'high', 'low', 'open'])
op.add_strategy('dma')
dt = op.op_data_types
self.assertEqual(dt[0], 'close')
self.assertEqual(dt[1], 'high')
self.assertEqual(dt[2], 'low')
self.assertEqual(dt[3], 'open')
self.assertEqual(dt, ['close', 'high', 'low', 'open'])
def test_property_op_data_type_count(self):
""" test property op_data_type_count"""
op = qt.Operator()
self.assertIsInstance(op.op_data_type_count, int)
self.assertEqual(op.op_data_type_count, 0)
op = qt.Operator('macd, dma, trix')
dtn = op.op_data_type_count
self.assertEqual(dtn, 1)
op = qt.Operator('macd, cdl')
dtn = op.op_data_type_count
self.assertEqual(dtn, 4)
op.add_strategy('dma')
dtn = op.op_data_type_count
self.assertEqual(dtn, 4)
def test_property_op_data_freq(self):
""" test property op_data_freq"""
op = qt.Operator()
self.assertIsInstance(op.op_data_freq, str)
self.assertEqual(len(op.op_data_freq), 0)
self.assertEqual(op.op_data_freq, '')
op = qt.Operator('macd, dma, trix')
dtf = op.op_data_freq
self.assertIsInstance(dtf, str)
self.assertEqual(dtf[0], 'd')
op.set_parameter('macd', data_freq='m')
dtf = op.op_data_freq
self.assertIsInstance(dtf, list)
self.assertEqual(len(dtf), 2)
self.assertEqual(dtf[0], 'd')
self.assertEqual(dtf[1], 'm')
def test_property_bt_price_types(self):
""" test property bt_price_types"""
        print('------test property bt_price_types-------')
op = qt.Operator()
self.assertIsInstance(op.bt_price_types, list)
self.assertEqual(len(op.bt_price_types), 0)
self.assertEqual(op.bt_price_types, [])
op = qt.Operator('macd, dma, trix')
btp = op.bt_price_types
self.assertIsInstance(btp, list)
self.assertEqual(btp[0], 'close')
op.set_parameter('macd', price_type='open')
btp = op.bt_price_types
btpc = op.bt_price_type_count
print(f'price_types are \n{btp}')
self.assertIsInstance(btp, list)
self.assertEqual(len(btp), 2)
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'open')
self.assertEqual(btpc, 2)
op.add_strategies(['dma', 'macd'])
op.set_parameter('dma_1', price_type='high')
btp = op.bt_price_types
btpc = op.bt_price_type_count
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'high')
self.assertEqual(btp[2], 'open')
self.assertEqual(btpc, 3)
op.remove_strategy('dma_1')
btp = op.bt_price_types
btpc = op.bt_price_type_count
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'open')
self.assertEqual(btpc, 2)
op.remove_strategy('macd_1')
btp = op.bt_price_types
btpc = op.bt_price_type_count
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'open')
self.assertEqual(btpc, 2)
def test_property_op_data_type_list(self):
""" test property op_data_type_list"""
op = qt.Operator()
self.assertIsInstance(op.op_data_type_list, list)
self.assertEqual(len(op.op_data_type_list), 0)
self.assertEqual(op.op_data_type_list, [])
op = qt.Operator('macd, dma, trix, cdl')
ohd = op.op_data_type_list
print(f'ohd is {ohd}')
self.assertIsInstance(ohd, list)
self.assertEqual(ohd[0], ['close'])
op.set_parameter('macd', data_types='open, close')
ohd = op.op_data_type_list
print(f'ohd is {ohd}')
self.assertIsInstance(ohd, list)
self.assertEqual(len(ohd), 4)
self.assertEqual(ohd[0], ['open', 'close'])
self.assertEqual(ohd[1], ['close'])
self.assertEqual(ohd[2], ['close'])
self.assertEqual(ohd[3], ['open', 'high', 'low', 'close'])
def test_property_op_history_data(self):
""" Test this important function to get operation history data that shall be used in
signal generation
these data are stored in list of nd-arrays, each ndarray represents the data
that is needed for each and every strategy
"""
print(f'------- Test getting operation history data ---------')
op = qt.Operator()
self.assertIsInstance(op.strategy_blenders, dict)
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.op_history_data, {})
self.assertEqual(op.signal_type, 'pt')
def test_property_opt_space_par(self):
""" test property opt_space_par"""
print(f'-----test property opt_space_par--------:\n')
op = qt.Operator()
self.assertIsInstance(op.opt_space_par, tuple)
self.assertIsInstance(op.opt_space_par[0], list)
self.assertIsInstance(op.opt_space_par[1], list)
self.assertEqual(len(op.opt_space_par), 2)
self.assertEqual(op.opt_space_par, ([], []))
op = qt.Operator('macd, dma, trix, cdl')
osp = op.opt_space_par
print(f'before setting opt_tags opt_space_par is empty:\n'
f'osp is {osp}\n')
self.assertIsInstance(osp, tuple)
self.assertEqual(osp[0], [])
self.assertEqual(osp[1], [])
op.set_parameter('macd', opt_tag=1)
op.set_parameter('dma', opt_tag=1)
osp = op.opt_space_par
print(f'after setting opt_tags opt_space_par is not empty:\n'
f'osp is {osp}\n')
self.assertIsInstance(osp, tuple)
self.assertEqual(len(osp), 2)
self.assertIsInstance(osp[0], list)
self.assertIsInstance(osp[1], list)
self.assertEqual(len(osp[0]), 6)
self.assertEqual(len(osp[1]), 6)
self.assertEqual(osp[0], [(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
self.assertEqual(osp[1], ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
def test_property_opt_types(self):
""" test property opt_tags"""
print(f'-----test property opt_tags--------:\n')
op = qt.Operator()
self.assertIsInstance(op.opt_tags, list)
self.assertEqual(len(op.opt_tags), 0)
self.assertEqual(op.opt_tags, [])
op = qt.Operator('macd, dma, trix, cdl')
otp = op.opt_tags
print(f'before setting opt_tags opt_space_par is empty:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, list)
self.assertEqual(otp, [0, 0, 0, 0])
op.set_parameter('macd', opt_tag=1)
op.set_parameter('dma', opt_tag=1)
otp = op.opt_tags
print(f'after setting opt_tags opt_space_par is not empty:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, list)
self.assertEqual(len(otp), 4)
self.assertEqual(otp, [1, 1, 0, 0])
def test_property_max_window_length(self):
""" test property max_window_length"""
print(f'-----test property max window length--------:\n')
op = qt.Operator()
self.assertIsInstance(op.max_window_length, int)
self.assertEqual(op.max_window_length, 0)
op = qt.Operator('macd, dma, trix, cdl')
mwl = op.max_window_length
print(f'before setting window_length the value is 270:\n'
f'mwl is {mwl}\n')
self.assertIsInstance(mwl, int)
self.assertEqual(mwl, 270)
op.set_parameter('macd', window_length=300)
op.set_parameter('dma', window_length=350)
mwl = op.max_window_length
print(f'after setting window_length the value is new set value:\n'
f'mwl is {mwl}\n')
self.assertIsInstance(mwl, int)
self.assertEqual(mwl, 350)
def test_property_bt_price_type_count(self):
""" test property bt_price_type_count"""
print(f'-----test property bt_price_type_count--------:\n')
op = qt.Operator()
self.assertIsInstance(op.bt_price_type_count, int)
self.assertEqual(op.bt_price_type_count, 0)
op = qt.Operator('macd, dma, trix, cdl')
otp = op.bt_price_type_count
print(f'before setting price_type the price count is 1:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, int)
self.assertEqual(otp, 1)
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='open')
otp = op.bt_price_type_count
print(f'after setting price_type the price type count is 2:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, int)
self.assertEqual(otp, 2)
def test_property_set(self):
""" test all property setters:
setting following properties:
- strategy_blenders
- signal_type
other properties can not be set"""
print(f'------- Test setting properties ---------')
op = qt.Operator()
self.assertIsInstance(op.strategy_blenders, dict)
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'pt')
op.strategy_blenders = '1 + 2'
op.signal_type = 'proportion signal'
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'ps')
op = qt.Operator('macd, dma, trix, cdl')
        # TODO: modify set_parameter() so that the usage below becomes valid
# a_to_sell.set_parameter('dma, cdl', price_type='open')
op.set_parameter('dma', price_type='open')
op.set_parameter('cdl', price_type='open')
sb = op.strategy_blenders
st = op.signal_type
self.assertIsInstance(sb, dict)
print(f'before setting: strategy_blenders={sb}')
self.assertEqual(sb, {})
op.strategy_blenders = '1+2 * 3'
sb = op.strategy_blenders
print(f'after setting strategy_blender={sb}')
self.assertEqual(sb, {'close': ['+', '*', '3', '2', '1'],
'open': ['+', '*', '3', '2', '1']})
op.strategy_blenders = ['1+2', '3-4']
sb = op.strategy_blenders
print(f'after setting strategy_blender={sb}')
self.assertEqual(sb, {'close': ['+', '2', '1'],
'open': ['-', '4', '3']})
def test_operator_ready(self):
"""test the method ready of Operator"""
op = qt.Operator()
print(f'operator is ready? "{op.ready}"')
def test_operator_add_strategy(self):
"""test adding strategies to Operator"""
op = qt.Operator('dma, all, urgent')
self.assertIsInstance(op, qt.Operator)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[1], qt.SelectingAll)
self.assertIsInstance(op.strategies[2], qt.RiconUrgent)
self.assertIsInstance(op[0], qt.TimingDMA)
self.assertIsInstance(op[1], qt.SelectingAll)
self.assertIsInstance(op[2], qt.RiconUrgent)
self.assertIsInstance(op['dma'], qt.TimingDMA)
self.assertIsInstance(op['all'], qt.SelectingAll)
self.assertIsInstance(op['urgent'], qt.RiconUrgent)
self.assertEqual(op.strategy_count, 3)
print(f'test adding strategies into existing op')
print('test adding strategy by string')
op.add_strategy('macd')
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[3], qt.TimingMACD)
self.assertEqual(op.strategy_count, 4)
op.add_strategy('random')
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[4], qt.SelectingRandom)
self.assertEqual(op.strategy_count, 5)
test_ls = TestLSStrategy()
op.add_strategy(test_ls)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[5], TestLSStrategy)
self.assertEqual(op.strategy_count, 6)
print(f'Test different instance of objects are added to operator')
op.add_strategy('dma')
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[6], qt.TimingDMA)
self.assertIsNot(op.strategies[0], op.strategies[6])
def test_operator_add_strategies(self):
""" etst adding multiple strategies to Operator"""
op = qt.Operator('dma, all, urgent')
self.assertEqual(op.strategy_count, 3)
print('test adding multiple strategies -- adding strategy by list of strings')
op.add_strategies(['dma', 'macd'])
self.assertEqual(op.strategy_count, 5)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[3], qt.TimingDMA)
self.assertIsInstance(op.strategies[4], qt.TimingMACD)
print('test adding multiple strategies -- adding strategy by comma separated strings')
op.add_strategies('dma, macd')
self.assertEqual(op.strategy_count, 7)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[5], qt.TimingDMA)
self.assertIsInstance(op.strategies[6], qt.TimingMACD)
print('test adding multiple strategies -- adding strategy by list of strategies')
op.add_strategies([qt.TimingDMA(), qt.TimingMACD()])
self.assertEqual(op.strategy_count, 9)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[7], qt.TimingDMA)
self.assertIsInstance(op.strategies[8], qt.TimingMACD)
print('test adding multiple strategies -- adding strategy by list of strategy and str')
op.add_strategies(['DMA', qt.TimingMACD()])
self.assertEqual(op.strategy_count, 11)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[9], qt.TimingDMA)
self.assertIsInstance(op.strategies[10], qt.TimingMACD)
self.assertIsNot(op.strategies[0], op.strategies[9])
self.assertIs(type(op.strategies[0]), type(op.strategies[9]))
print('test adding fault data')
self.assertRaises(AssertionError, op.add_strategies, 123)
self.assertRaises(AssertionError, op.add_strategies, None)
    def test_operator_remove_strategy(self):
""" test method remove strategy"""
op = qt.Operator('dma, all, urgent')
op.add_strategies(['dma', 'macd'])
op.add_strategies(['DMA', TestLSStrategy()])
self.assertEqual(op.strategy_count, 7)
print('test removing strategies from Operator')
op.remove_strategy('dma')
self.assertEqual(op.strategy_count, 6)
self.assertEqual(op.strategy_ids, ['all', 'urgent', 'dma_1', 'macd', 'dma_2', 'custom'])
self.assertEqual(op.strategies[0], op['all'])
self.assertEqual(op.strategies[1], op['urgent'])
self.assertEqual(op.strategies[2], op['dma_1'])
self.assertEqual(op.strategies[3], op['macd'])
self.assertEqual(op.strategies[4], op['dma_2'])
self.assertEqual(op.strategies[5], op['custom'])
op.remove_strategy('dma_1')
self.assertEqual(op.strategy_count, 5)
self.assertEqual(op.strategy_ids, ['all', 'urgent', 'macd', 'dma_2', 'custom'])
self.assertEqual(op.strategies[0], op['all'])
self.assertEqual(op.strategies[1], op['urgent'])
self.assertEqual(op.strategies[2], op['macd'])
self.assertEqual(op.strategies[3], op['dma_2'])
self.assertEqual(op.strategies[4], op['custom'])
    def test_operator_clear_strategies(self):
""" test operator clear strategies"""
op = qt.Operator('dma, all, urgent')
op.add_strategies(['dma', 'macd'])
op.add_strategies(['DMA', TestLSStrategy()])
self.assertEqual(op.strategy_count, 7)
print('test removing strategies from Operator')
op.clear_strategies()
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op.add_strategy('dma', pars=(12, 123, 25))
self.assertEqual(op.strategy_count, 1)
self.assertEqual(op.strategy_ids, ['dma'])
self.assertEqual(type(op.strategies[0]), TimingDMA)
self.assertEqual(op.strategies[0].pars, (12, 123, 25))
op.clear_strategies()
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
def test_operator_prepare_data(self):
"""test processes that related to prepare data"""
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sig = TestSigStrategy()
self.op = qt.Operator(strategies=[test_ls, test_sel, test_sig])
too_early_cash = qt.CashPlan(dates='2016-01-01', amounts=10000)
early_cash = qt.CashPlan(dates='2016-07-01', amounts=10000)
on_spot_cash = qt.CashPlan(dates='2016-07-08', amounts=10000)
no_trade_cash = qt.CashPlan(dates='2016-07-08, 2016-07-30, 2016-08-11, 2016-09-03',
amounts=[10000, 10000, 10000, 10000])
        # calling prepare_data before the parameters of all strategies are set raises an AssertionError
self.assertRaises(AssertionError,
self.op.prepare_data,
hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
late_cash = qt.CashPlan(dates='2016-12-31', amounts=10000)
multi_cash = qt.CashPlan(dates='2016-07-08, 2016-08-08', amounts=[10000, 10000])
self.op.set_parameter(stg_id='custom',
pars={'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.assertEqual(self.op.strategies[0].pars, {'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.op.set_parameter(stg_id='custom_1',
pars=())
self.assertEqual(self.op.strategies[1].pars, ()),
self.op.set_parameter(stg_id='custom_2',
pars=(0.2, 0.02, -0.02))
self.assertEqual(self.op.strategies[2].pars, (0.2, 0.02, -0.02)),
self.op.prepare_data(hist_data=self.hp1,
cash_plan=on_spot_cash)
self.assertIsInstance(self.op._op_history_data, dict)
self.assertEqual(len(self.op._op_history_data), 3)
# test if automatic strategy blenders are set
self.assertEqual(self.op.strategy_blenders,
{'close': ['+', '2', '+', '1', '0']})
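        # the automatically created blender simply adds up the outputs of all strategies that share
        # the same price type, i.e. '0+1+2' for the three strategies used here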
tim_hist_data = self.op._op_history_data['custom']
sel_hist_data = self.op._op_history_data['custom_1']
ric_hist_data = self.op._op_history_data['custom_2']
print(f'in test_prepare_data in TestOperator:')
print('selecting history data:\n', sel_hist_data)
print('originally passed data in correct sequence:\n', self.test_data_3D[:, 3:, [2, 3, 0]])
print('difference is \n', sel_hist_data - self.test_data_3D[:, :, [2, 3, 0]])
self.assertTrue(np.allclose(sel_hist_data, self.test_data_3D[:, :, [2, 3, 0]], equal_nan=True))
self.assertTrue(np.allclose(tim_hist_data, self.test_data_3D, equal_nan=True))
self.assertTrue(np.allclose(ric_hist_data, self.test_data_3D[:, 3:, :], equal_nan=True))
# raises Value Error if empty history panel is given
empty_hp = qt.HistoryPanel()
correct_hp = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 4)),
columns=self.types,
levels=self.shares,
rows=self.date_indices)
too_many_shares = qt.HistoryPanel(values=np.random.randint(10, size=(5, 50, 4)))
too_many_types = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 5)))
# raises Error when history panel is empty
self.assertRaises(ValueError,
self.op.prepare_data,
empty_hp,
on_spot_cash)
# raises Error when first investment date is too early
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
early_cash)
# raises Error when last investment date is too late
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
late_cash)
# raises Error when some of the investment dates are on no-trade-days
self.assertRaises(ValueError,
self.op.prepare_data,
correct_hp,
no_trade_cash)
# raises Error when number of shares in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_shares,
on_spot_cash)
# raises Error when too early cash investment date
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
too_early_cash)
# raises Error when number of d_types in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_types,
on_spot_cash)
# test the effect of data type sequence in strategy definition
def test_operator_generate(self):
""" Test signal generation process of operator objects
:return:
"""
        # create trading strategies from the custom strategy classes defined in this test module
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sel2 = TestSelStrategyDiffTime()
test_sig = TestSigStrategy()
print('--Test PT type signal generation--')
        # test PT type signal generation:
        # create an Operator object whose signal type is PT (proportion target signal)
        # the Operator contains two strategies, an LS-Strategy and a Sel-Strategy, representing
        # timing and stock selection respectively
        # each strategy generates its own PT signal, and the two are blended into one output signal
self.op = qt.Operator(strategies=[test_ls, test_sel])
self.op.set_parameter(stg_id='custom',
pars={'000010': (5, 10.),
'000030': (5, 10.),
'000039': (5, 6.)})
self.op.set_parameter(stg_id=1,
pars=())
# self.a_to_sell.set_blender(blender='0+1+2')
self.op.prepare_data(hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
print('--test operator information in normal mode--')
self.op.info()
self.assertEqual(self.op.strategy_blenders,
{'close': ['+', '1', '0']})
self.op.set_blender(None, '0*1')
self.assertEqual(self.op.strategy_blenders,
{'close': ['*', '1', '0']})
print('--test operation signal created in Proportional Target (PT) Mode--')
op_list = self.op.create_signal(hist_data=self.hp1)
self.assertTrue(isinstance(op_list, HistoryPanel))
backtest_price_types = op_list.htypes
self.assertEqual(backtest_price_types[0], 'close')
self.assertEqual(op_list.shape, (3, 45, 1))
reduced_op_list = op_list.values.squeeze().T
print(f'op_list created, it is a 3 share/45 days/1 htype array, to make comparison happen, \n'
f'it will be squeezed to a 2-d array to compare on share-wise:\n'
f'{reduced_op_list}')
target_op_values = np.array([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0]])
self.assertTrue(np.allclose(target_op_values, reduced_op_list, equal_nan=True))
print('--Test two separate signal generation for different price types--')
        # test generation of two sets of PT type signals:
        # add two more strategies of the same types but with different parameters to the Operator,
        # with backtest price type "open"
        # the Operator should generate two sets of trading signals, one for the "close" price type
        # and one for the "open" price type
        # brand-new strategy objects must be created here, otherwise the op's strategies list would
        # hold duplicate references to the same objects and cause errors
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
self.op.add_strategies([test_ls, test_sel])
self.op.set_parameter(stg_id='custom_2',
price_type='open')
self.op.set_parameter(stg_id='custom_3',
price_type='open')
self.assertEqual(self.op['custom'].price_type, 'close')
self.assertEqual(self.op['custom_2'].price_type, 'open')
self.op.set_parameter(stg_id='custom_2',
pars={'000010': (5, 10.),
'000030': (5, 10.),
'000039': (5, 6.)})
self.op.set_parameter(stg_id='custom_3',
pars=())
self.op.set_blender(blender='0 or 1', price_type='open')
self.op.prepare_data(hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
print('--test how operator information is printed out--')
self.op.info()
self.assertEqual(self.op.strategy_blenders,
{'close': ['*', '1', '0'],
'open': ['or', '1', '0']})
        print('--test operation signal created in Proportional Target (PT) Mode--')
op_list = self.op.create_signal(hist_data=self.hp1)
self.assertTrue(isinstance(op_list, HistoryPanel))
signal_close = op_list['close'].squeeze().T
signal_open = op_list['open'].squeeze().T
self.assertEqual(signal_close.shape, (45, 3))
self.assertEqual(signal_open.shape, (45, 3))
target_op_close = np.array([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0]])
target_op_open = np.array([[0.5, 0.5, 1.0],
[0.5, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 0.0],
[1.0, 1.0, 0.0],
[1.0, 1.0, 0.0],
[1.0, 0.5, 0.0],
[1.0, 0.5, 0.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.5, 1.0, 0.0],
[0.5, 1.0, 0.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0]])
signal_pairs = [[list(sig1), list(sig2), sig1 == sig2]
for sig1, sig2
in zip(list(target_op_close), list(signal_close))]
print(f'signals side by side:\n'
f'{signal_pairs}')
self.assertTrue(np.allclose(target_op_close, signal_close, equal_nan=True))
signal_pairs = [[list(sig1), list(sig2), sig1 == sig2]
for sig1, sig2
in zip(list(target_op_open), list(signal_open))]
print(f'signals side by side:\n'
f'{signal_pairs}')
self.assertTrue(np.allclose(target_op_open, signal_open, equal_nan=True))
print('--Test two separate signal generation for different price types--')
        # more test cases to be added
def test_stg_parameter_setting(self):
""" test setting parameters of strategies
test the method set_parameters
:return:
"""
op = qt.Operator(strategies='dma, all, urgent')
print(op.strategies, '\n', [qt.TimingDMA, qt.SelectingAll, qt.RiconUrgent])
print(f'info of Timing strategy in new op: \n{op.strategies[0].info()}')
# TODO: allow set_parameters to a list of strategies or str-listed strategies
# TODO: allow set_parameters to all strategies of specific bt price type
print(f'Set up strategy parameters by strategy id')
op.set_parameter('dma',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
op.set_parameter('all',
window_length=20)
op.set_parameter('all', price_type='high')
print(f'Can also set up strategy parameters by strategy index')
op.set_parameter(2, price_type='open')
op.set_parameter(2,
opt_tag=1,
pars=(9, -0.09),
window_length=10)
self.assertEqual(op.strategies[0].pars, (5, 10, 5))
self.assertEqual(op.strategies[0].par_boes, ((5, 10), (5, 15), (10, 15)))
self.assertEqual(op.strategies[2].pars, (9, -0.09))
self.assertEqual(op.op_data_freq, 'd')
self.assertEqual(op.op_data_types, ['close', 'high', 'open'])
self.assertEqual(op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (1, 40), (-0.5, 0.5)],
['discr', 'discr', 'discr', 'discr', 'conti']))
self.assertEqual(op.max_window_length, 20)
print(f'KeyError will be raised if wrong strategy id is given')
self.assertRaises(KeyError, op.set_parameter, stg_id='t-1', pars=(1, 2))
self.assertRaises(KeyError, op.set_parameter, stg_id='wrong_input', pars=(1, 2))
print(f'ValueError will be raised if the given parameters can not be set')
self.assertRaises(ValueError, op.set_parameter, stg_id=0, pars=('wrong input', 'wrong input'))
# test blenders of different price types
# test setting blenders to different price types
# TODO: to allow operands like "and", "or", "not", "xor"
# a_to_sell.set_blender('close', '0 and 1 or 2')
# self.assertEqual(a_to_sell.get_blender('close'), 'str-1.2')
self.assertEqual(op.bt_price_types, ['close', 'high', 'open'])
op.set_blender('open', '0 & 1 | 2')
self.assertEqual(op.get_blender('open'), ['|', '2', '&', '1', '0'])
op.set_blender('high', '(0|1) & 2')
self.assertEqual(op.get_blender('high'), ['&', '2', '|', '1', '0'])
op.set_blender('close', '0 & 1 | 2')
self.assertEqual(op.get_blender(), {'close': ['|', '2', '&', '1', '0'],
'high': ['&', '2', '|', '1', '0'],
'open': ['|', '2', '&', '1', '0']})
self.assertEqual(op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (1, 40), (-0.5, 0.5)],
['discr', 'discr', 'discr', 'discr', 'conti']))
self.assertEqual(op.opt_tags, [1, 0, 1])
def test_signal_blend(self):
self.assertEqual(blender_parser('0 & 1'), ['&', '1', '0'])
self.assertEqual(blender_parser('0 or 1'), ['or', '1', '0'])
self.assertEqual(blender_parser('0 & 1 | 2'), ['|', '2', '&', '1', '0'])
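# Note: blender_parser appears to return the blender expression as a reversed RPN token list
# (the last element is evaluated first), where numeric tokens such as '0' and '1' stand for the
# signal of the strategy with that index; signal_blend then substitutes the supplied signal values
# for those tokens and evaluates the expression, as the truth tables below illustrate.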
blender = blender_parser('0 & 1 | 2')
self.assertEqual(signal_blend([1, 1, 1], blender), 1)
self.assertEqual(signal_blend([1, 0, 1], blender), 1)
self.assertEqual(signal_blend([1, 1, 0], blender), 1)
self.assertEqual(signal_blend([0, 1, 1], blender), 1)
self.assertEqual(signal_blend([0, 0, 1], blender), 1)
self.assertEqual(signal_blend([1, 0, 0], blender), 0)
self.assertEqual(signal_blend([0, 1, 0], blender), 0)
self.assertEqual(signal_blend([0, 0, 0], blender), 0)
# parse: '0 & ( 1 | 2 )'
self.assertEqual(blender_parser('0 & ( 1 | 2 )'), ['&', '|', '2', '1', '0'])
blender = blender_parser('0 & ( 1 | 2 )')
self.assertEqual(signal_blend([1, 1, 1], blender), 1)
self.assertEqual(signal_blend([1, 0, 1], blender), 1)
self.assertEqual(signal_blend([1, 1, 0], blender), 1)
self.assertEqual(signal_blend([0, 1, 1], blender), 0)
self.assertEqual(signal_blend([0, 0, 1], blender), 0)
self.assertEqual(signal_blend([1, 0, 0], blender), 0)
self.assertEqual(signal_blend([0, 1, 0], blender), 0)
self.assertEqual(signal_blend([0, 0, 0], blender), 0)
# parse: '(1-2)/3 + 0'
self.assertEqual(blender_parser('(1-2)/3 + 0'), ['+', '0', '/', '3', '-', '2', '1'])
blender = blender_parser('(1-2)/3 + 0')
self.assertEqual(signal_blend([5, 9, 1, 4], blender), 7)
# pars: '(0*1/2*(3+4))+5*(6+7)-8'
self.assertEqual(blender_parser('(0*1/2*(3+4))+5*(6+7)-8'), ['-', '8', '+', '*', '+', '7', '6', '5', '*',
'+', '4', '3', '/', '2', '*', '1', '0'])
blender = blender_parser('(0*1/2*(3+4))+5*(6+7)-8')
self.assertEqual(signal_blend([1, 1, 1, 1, 1, 1, 1, 1, 1], blender), 3)
self.assertEqual(signal_blend([2, 1, 4, 3, 5, 5, 2, 2, 10], blender), 14)
# parse: '0/max(2,1,3 + 5)+4'
self.assertEqual(blender_parser('0/max(2,1,3 + 5)+4'), ['+', '4', '/', 'max(3)', '+', '5', '3', '1', '2', '0'])
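# Note: n-ary functions seem to be encoded together with their argument count, e.g. 'max(3)' for a
# max over three operands, so the evaluator knows how many values to pop from the stack; with
# signals [8.0, 4, 3, 5.0, 0.125, 5] this gives 8.0 / max(3, 4, 5.0 + 5) + 0.125 = 0.8 + 0.125 = 0.925.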
blender = blender_parser('0/max(2,1,3 + 5)+4')
self.assertEqual(signal_blend([8.0, 4, 3, 5.0, 0.125, 5], blender), 0.925)
self.assertEqual(signal_blend([2, 1, 4, 3, 5, 5, 2, 2, 10], blender), 5.25)
print('speed test')
import time
st = time.time()
blender = blender_parser('0+max(1,2,(3+4)*5, max(6, (7+8)*9), 10-11) * (12+13)')
res = []
for i in range(10000):
res = signal_blend([1, 1, 2, 3, 4, 5, 3, 4, 5, 6, 7, 8, 2, 3], blender)
et = time.time()
print(f'total time for RPN processing: {et - st}, got result: {res}')
blender = blender_parser("0 + 1 * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 7)
blender = blender_parser("(0 + 1) * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 9)
blender = blender_parser("(0+1) * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 9)
blender = blender_parser("(0 + 1) * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 9)
# TODO: expressions like -(1+2) can not be handled yet
# self.a_to_sell.set_blender('selecting', "-(0 + 1) * 2")
# self.assertEqual(self.a_to_sell.signal_blend([1, 2, 3]), -9)
blender = blender_parser("(0-1)/2 + 3")
print(f'RPN of notation: "(0-1)/2 + 3" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([1, 2, 3, 0.0], blender), -0.33333333)
blender = blender_parser("0 + 1 / 2")
print(f'RPN of notation: "0 + 1 / 2" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([1, math.pi, 4], blender), 1.78539816)
blender = blender_parser("(0 + 1) / 2")
print(f'RPN of notation: "(0 + 1) / 2" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(signal_blend([1, 2, 3], blender), 1)
blender = blender_parser("(0 + 1 * 2) / 3")
print(f'RPN of notation: "(0 + 1 * 2) / 3" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([3, math.e, 10, 10], blender), 3.0182818284590454)
blender = blender_parser("0 / 1 * 2")
print(f'RPN of notation: "0 / 1 * 2" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(signal_blend([1, 3, 6], blender), 2)
blender = blender_parser("(0 - 1 + 2) * 4")
print(f'RPN of notation: "(0 - 1 + 2) * 4" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([1, 1, -1, np.nan, math.pi], blender), -3.141592653589793)
blender = blender_parser("0 * 1")
print(f'RPN of notation: "0 * 1" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([math.pi, math.e], blender), 8.539734222673566)
blender = blender_parser('abs(3-sqrt(2) / cos(1))')
print(f'RPN of notation: "abs(3-sqrt(2) / cos(1))" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['abs(1)', '-', '/', 'cos(1)', '1', 'sqrt(1)', '2', '3'])
blender = blender_parser('0/max(2,1,3 + 5)+4')
print(f'RPN of notation: "0/max(2,1,3 + 5)+4" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['+', '4', '/', 'max(3)', '+', '5', '3', '1', '2', '0'])
blender = blender_parser('1 + sum(1,2,3+3, sum(1, 2) + 3) *5')
print(f'RPN of notation: "1 + sum(1,2,3+3, sum(1, 2) + 3) *5" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['+', '*', '5', 'sum(4)', '+', '3', 'sum(2)', '2', '1',
'+', '3', '3', '2', '1', '1'])
blender = blender_parser('1+sum(1,2,(3+5)*4, sum(3, (4+5)*6), 7-8) * (2+3)')
print(f'RPN of notation: "1+sum(1,2,(3+5)*4, sum(3, (4+5)*6), 7-8) * (2+3)" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['+', '*', '+', '3', '2', 'sum(5)', '-', '8', '7',
'sum(2)', '*', '6', '+', '5', '4', '3', '*', '4',
'+', '5', '3', '2', '1', '1'])
# TODO: ndarray type of signals to be tested:
def test_set_opt_par(self):
""" test setting opt pars in batch"""
print(f'--------- Testing setting Opt Pars: set_opt_par -------')
op = qt.Operator('dma, random, crossline')
op.set_parameter('dma',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
self.assertEqual(op.strategies[0].pars, (5, 10, 5))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (35, 120, 10, 'buy'))
self.assertEqual(op.opt_tags, [1, 0, 0])
op.set_opt_par((5, 12, 9))
self.assertEqual(op.strategies[0].pars, (5, 12, 9))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (35, 120, 10, 'buy'))
op.set_parameter('crossline',
pars=(5, 10, 5, 'sell'),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15), ('buy', 'sell', 'none')),
window_length=10,
data_types=['close', 'open', 'high'])
self.assertEqual(op.opt_tags, [1, 0, 1])
op.set_opt_par((5, 12, 9, 8, 26, 9, 'buy'))
self.assertEqual(op.strategies[0].pars, (5, 12, 9))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (8, 26, 9, 'buy'))
op.set_opt_par((9, 200, 155, 8, 26, 9, 'buy', 5, 12, 9))
self.assertEqual(op.strategies[0].pars, (9, 200, 155))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (8, 26, 9, 'buy'))
# test set_opt_par when opt_tag is set to 2 (enumerated type of parameters)
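# Note: with opt_tag == 2 the strategy's whole parameter tuple appears to be treated as one
# enumerated item in the optimization space, so set_opt_par consumes a single element for this
# strategy -- the complete tuple (8, 26, 9, 'buy') below -- instead of one element per parameter.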
op.set_parameter('crossline',
pars=(5, 10, 5, 'sell'),
opt_tag=2,
par_boes=((5, 10), (5, 15), (10, 15), ('buy', 'sell', 'none')),
window_length=10,
data_types=['close', 'open', 'high'])
self.assertEqual(op.opt_tags, [1, 0, 2])
self.assertEqual(op.strategies[0].pars, (9, 200, 155))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (5, 10, 5, 'sell'))
op.set_opt_par((5, 12, 9, (8, 26, 9, 'buy')))
self.assertEqual(op.strategies[0].pars, (5, 12, 9))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (8, 26, 9, 'buy'))
# Test Errors
# Not enough values for parameter
op.set_parameter('crossline', opt_tag=1)
self.assertRaises(ValueError, op.set_opt_par, (5, 12, 9, 8))
# wrong type of input
self.assertRaises(AssertionError, op.set_opt_par, [5, 12, 9, 7, 15, 12, 'sell'])
def test_stg_attribute_get_and_set(self):
self.stg = qt.TimingCrossline()
self.stg_type = 'R-TIMING'
self.stg_name = "CROSSLINE"
self.stg_text = 'Moving average crossline strategy, determine long/short position according to the cross ' \
'point' \
' of long and short term moving average prices '
self.pars = (35, 120, 10, 'buy')
self.par_boes = [(10, 250), (10, 250), (1, 100), ('buy', 'sell', 'none')]
self.par_count = 4
self.par_types = ['discr', 'discr', 'conti', 'enum']
self.opt_tag = 0
self.data_types = ['close']
self.data_freq = 'd'
self.sample_freq = 'd'
self.window_length = 270
self.assertEqual(self.stg.stg_type, self.stg_type)
self.assertEqual(self.stg.stg_name, self.stg_name)
self.assertEqual(self.stg.stg_text, self.stg_text)
self.assertEqual(self.stg.pars, self.pars)
self.assertEqual(self.stg.par_types, self.par_types)
self.assertEqual(self.stg.par_boes, self.par_boes)
self.assertEqual(self.stg.par_count, self.par_count)
self.assertEqual(self.stg.opt_tag, self.opt_tag)
self.assertEqual(self.stg.data_freq, self.data_freq)
self.assertEqual(self.stg.sample_freq, self.sample_freq)
self.assertEqual(self.stg.data_types, self.data_types)
self.assertEqual(self.stg.window_length, self.window_length)
self.stg.stg_name = 'NEW NAME'
self.stg.stg_text = 'NEW TEXT'
self.assertEqual(self.stg.stg_name, 'NEW NAME')
self.assertEqual(self.stg.stg_text, 'NEW TEXT')
self.stg.pars = (1, 2, 3, 4)
self.assertEqual(self.stg.pars, (1, 2, 3, 4))
self.stg.par_count = 3
self.assertEqual(self.stg.par_count, 3)
self.stg.par_boes = [(1, 10), (1, 10), (1, 10), (1, 10)]
self.assertEqual(self.stg.par_boes, [(1, 10), (1, 10), (1, 10), (1, 10)])
self.stg.par_types = ['conti', 'conti', 'discr', 'enum']
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'enum'])
self.stg.par_types = 'conti, conti, discr, conti'
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'conti'])
self.stg.data_types = 'close, open'
self.assertEqual(self.stg.data_types, ['close', 'open'])
self.stg.data_types = ['close', 'high', 'low']
self.assertEqual(self.stg.data_types, ['close', 'high', 'low'])
self.stg.data_freq = 'w'
self.assertEqual(self.stg.data_freq, 'w')
self.stg.window_length = 300
self.assertEqual(self.stg.window_length, 300)
def test_rolling_timing(self):
stg = TestLSStrategy()
stg_pars = {'000100': (5, 10),
'000200': (5, 10),
'000300': (5, 6)}
stg.set_pars(stg_pars)
history_data = self.hp1.values
output = stg.generate(hist_data=history_data)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
lsmask = np.array([[0., 0., 1.],
[0., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 0.],
[1., 1., 0.],
[1., 1., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.]])
# TODO: Issue to be solved: the np.nan values are converted to 0 in the lsmask, which may have unexpected consequences
# TODO: the handling of nan values needs to be sorted out
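# A possible direction (sketch only, names illustrative): keep np.nan in the mask wherever the
# rolling window has insufficient data, e.g. lsmask = np.where(np.isnan(window_close), np.nan, lsmask),
# so that downstream code can distinguish "no position" (0) from "no signal available" (nan).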
self.assertEqual(output.shape, lsmask.shape)
self.assertTrue(np.allclose(output, lsmask, equal_nan=True))
def test_sel_timing(self):
stg = TestSelStrategy()
stg_pars = ()
stg.set_pars(stg_pars)
history_data = self.hp1['high, low, close', :, :]
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
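# Note: _seg_periods appears to split the hdates into sampling segments according to the strategy's
# sample_freq; seg_pos holds the start index of every segment plus the final end position,
# seg_length the number of days in each segment, and seg_count the number of segments, which is
# why len(seg_pos) == seg_count + 1 here.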
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
def test_simple_timing(self):
stg = TestSigStrategy()
stg_pars = (0.2, 0.02, -0.02)
stg.set_pars(stg_pars)
history_data = self.hp1['close, open, high, low', :, 3:50]
output = stg.generate(hist_data=history_data, shares=self.shares, dates=self.date_indices)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
sigmatrix = np.array([[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, -1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0]])
side_by_side_array = np.array([[i, out_line, sig_line]
for
i, out_line, sig_line
in zip(range(len(output)), output, sigmatrix)])
print(f'output and signal matrix lined up side by side is \n'
f'{side_by_side_array}')
self.assertEqual(sigmatrix.shape, output.shape)
self.assertTrue(np.allclose(output, sigmatrix))
def test_sel_finance(self):
"""Test selecting_finance strategy, test all built-in strategy parameters"""
stg = SelectingFinanceIndicator()
stg_pars = (False, 'even', 'greater', 0, 0, 0.67)
stg.set_pars(stg_pars)
stg.window_length = 5
stg.data_freq = 'd'
stg.sample_freq = '10d'
stg.sort_ascending = False
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg._poq = 0.67
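# Note: the elements of stg_pars correspond to (sort_ascending, weighting, condition, lbound,
# ubound, _poq), matching the attributes set explicitly here and in the later parameter variations;
# _poq presumably controls the proportion of candidate shares to pick, so
# (False, 'even', 'greater', 0, 0, 0.67) selects the top shares by descending indicator value
# above 0 and weights the picks evenly.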
history_data = self.hp2.values
print(f'Start to test financial selection parameter {stg_pars}')
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get minimum factor
stg_pars = (True, 'even', 'less', 1, 1, 0.67)
stg.sort_ascending = True
stg.condition = 'less'
stg.lbound = 1
stg.ubound = 1
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor in linear weight
stg_pars = (False, 'linear', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'linear'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
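# Note: with 'linear' weighting the two selected shares no longer receive equal 0.5 weights; the
# expected mask below assigns roughly 1/3 and 2/3 (0.33333 / 0.66667), i.e. weights that appear to
# grow linearly with each share's rank on the selection indicator.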
selmask = np.array([[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor in proportion weight
stg_pars = (False, 'proportion', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'proportion'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
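# Note: with 'proportion' weighting the expected weights below (e.g. 0.08333 / 0.91667 or 0.5 / 0.5)
# are no longer purely rank-based; they appear to be distributed in proportion to the shares'
# indicator values, which is why they change from one sampling segment to the next.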
selmask = np.array([[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
# test single factor, get max factor in even weight, with threshold 0.2
stg_pars = (False, 'even', 'greater', 0.2, 0.2, 0.67)
stg.sort_ascending = False
stg.weighting = 'even'
stg.condition = 'greater'
stg.lbound = 0.2
stg.ubound = 0.2
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
def test_tokenizer(self):
self.assertListEqual(_exp_to_token('1+1'),
['1', '+', '1'])
print(_exp_to_token('1+1'))
self.assertListEqual(_exp_to_token('1 & 1'),
['1', '&', '1'])
print(_exp_to_token('1&1'))
self.assertListEqual(_exp_to_token('1 and 1'),
['1', 'and', '1'])
print(_exp_to_token('1 and 1'))
self.assertListEqual(_exp_to_token('1 or 1'),
['1', 'or', '1'])
print(_exp_to_token('1 or 1'))
self.assertListEqual(_exp_to_token('(1 - 1 + -1) * pi'),
['(', '1', '-', '1', '+', '-1', ')', '*', 'pi'])
print(_exp_to_token('(1 - 1 + -1) * pi'))
self.assertListEqual(_exp_to_token('abs(5-sqrt(2) / cos(pi))'),
['abs(', '5', '-', 'sqrt(', '2', ')', '/', 'cos(', 'pi', ')', ')'])
print(_exp_to_token('abs(5-sqrt(2) / cos(pi))'))
self.assertListEqual(_exp_to_token('sin(pi) + 2.14'),
['sin(', 'pi', ')', '+', '2.14'])
print(_exp_to_token('sin(pi) + 2.14'))
self.assertListEqual(_exp_to_token('(1-2)/3.0 + 0.0000'),
['(', '1', '-', '2', ')', '/', '3.0', '+', '0.0000'])
print(_exp_to_token('(1-2)/3.0 + 0.0000'))
self.assertListEqual(_exp_to_token('-(1. + .2) * max(1, 3, 5)'),
['-', '(', '1.', '+', '.2', ')', '*', 'max(', '1', ',', '3', ',', '5', ')'])
print(_exp_to_token('-(1. + .2) * max(1, 3, 5)'))
self.assertListEqual(_exp_to_token('(x + e * 10) / 10'),
['(', 'x', '+', 'e', '*', '10', ')', '/', '10'])
print(_exp_to_token('(x + e * 10) / 10'))
self.assertListEqual(_exp_to_token('8.2/((-.1+abs3(3,4,5))*0.12)'),
['8.2', '/', '(', '(', '-.1', '+', 'abs3(', '3', ',', '4', ',', '5', ')', ')', '*', '0.12',
')'])
print(_exp_to_token('8.2/((-.1+abs3(3,4,5))*0.12)'))
self.assertListEqual(_exp_to_token('8.2/abs3(3,4,25.34 + 5)*0.12'),
['8.2', '/', 'abs3(', '3', ',', '4', ',', '25.34', '+', '5', ')', '*', '0.12'])
print(_exp_to_token('8.2/abs3(3,4,25.34 + 5)*0.12'))
class TestLog(unittest.TestCase):
def test_init(self):
pass
class TestConfig(unittest.TestCase):
"""测试Config对象以及QT_CONFIG变量的设置和获取值"""
def test_init(self):
pass
def test_invest(self):
pass
def test_pars_string_to_type(self):
_parse_string_kwargs('000300', 'asset_pool', _valid_qt_kwargs())
class TestHistoryPanel(unittest.TestCase):
def setUp(self):
print('start testing HistoryPanel object\n')
self.data = np.random.randint(10, size=(5, 10, 4))
self.index = pd.date_range(start='20200101', freq='d', periods=10)
self.index2 = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14']
self.index3 = '2016-07-01, 2016-07-04, 2016-07-05, 2016-07-06, 2016-07-07, ' \
'2016-07-08, 2016-07-11, 2016-07-12, 2016-07-13, 2016-07-14'
self.shares = '000100,000101,000102,000103,000104'
self.htypes = 'close,open,high,low'
self.data2 = np.random.randint(10, size=(10, 5))
self.data3 = np.random.randint(10, size=(10, 4))
self.data4 = np.random.randint(10, size=(10))
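# Note: a HistoryPanel is a 3D structure whose axes are levels (shares), rows (hdates) and
# columns (htypes); self.data with shape (5, 10, 4) therefore describes 5 shares over 10 dates with
# 4 data types, while data2, data3 and data4 exercise the 2D and 1D construction paths below.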
self.hp = qt.HistoryPanel(values=self.data, levels=self.shares, columns=self.htypes, rows=self.index)
self.hp2 = qt.HistoryPanel(values=self.data2, levels=self.shares, columns='close', rows=self.index)
self.hp3 = qt.HistoryPanel(values=self.data3, levels='000100', columns=self.htypes, rows=self.index2)
self.hp4 = qt.HistoryPanel(values=self.data4, levels='000100', columns='close', rows=self.index3)
self.hp5 = qt.HistoryPanel(values=self.data)
self.hp6 = qt.HistoryPanel(values=self.data, levels=self.shares, rows=self.index3)
def test_properties(self):
""" test all properties of HistoryPanel
"""
self.assertFalse(self.hp.is_empty)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.shape, (5, 10, 4))
self.assertSequenceEqual(self.hp.htypes, ['close', 'open', 'high', 'low'])
self.assertSequenceEqual(self.hp.shares, ['000100', '000101', '000102', '000103', '000104'])
self.assertSequenceEqual(list(self.hp.hdates), list(self.index))
self.assertDictEqual(self.hp.columns, {'close': 0, 'open': 1, 'high': 2, 'low': 3})
self.assertDictEqual(self.hp.levels, {'000100': 0, '000101': 1, '000102': 2, '000103': 3, '000104': 4})
row_dict = {Timestamp('2020-01-01 00:00:00', freq='D'): 0,
Timestamp('2020-01-02 00:00:00', freq='D'): 1,
Timestamp('2020-01-03 00:00:00', freq='D'): 2,
Timestamp('2020-01-04 00:00:00', freq='D'): 3,
Timestamp('2020-01-05 00:00:00', freq='D'): 4,
Timestamp('2020-01-06 00:00:00', freq='D'): 5,
Timestamp('2020-01-07 00:00:00', freq='D'): 6,
Timestamp('2020-01-08 00:00:00', freq='D'): 7,
Timestamp('2020-01-09 00:00:00', freq='D'): 8,
Timestamp('2020-01-10 00:00:00', freq='D'): 9}
self.assertDictEqual(self.hp.rows, row_dict)
def test_len(self):
""" test the function len(HistoryPanel)
:return:
"""
self.assertEqual(len(self.hp), 10)
def test_empty_history_panel(self):
"""测试空HP或者特殊HP如维度标签为纯数字的HP"""
test_hp = qt.HistoryPanel(self.data)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
self.assertEqual(test_hp.level_count, 5)
self.assertEqual(test_hp.row_count, 10)
self.assertEqual(test_hp.column_count, 4)
self.assertEqual(test_hp.shares, list(range(5)))
self.assertEqual(test_hp.hdates, list(pd.date_range(start='20200730', periods=10, freq='d')))
self.assertEqual(test_hp.htypes, list(range(4)))
self.assertTrue(np.allclose(test_hp.values, self.data))
print(f'shares: {test_hp.shares}\nhtypes: {test_hp.htypes}')
print(test_hp)
# HistoryPanel should be empty if no value is given
empty_hp = qt.HistoryPanel()
self.assertTrue(empty_hp.is_empty)
self.assertIsInstance(empty_hp, qt.HistoryPanel)
self.assertEqual(empty_hp.shape[0], 0)
self.assertEqual(empty_hp.shape[1], 0)
self.assertEqual(empty_hp.shape[2], 0)
self.assertEqual(empty_hp.level_count, 0)
self.assertEqual(empty_hp.row_count, 0)
self.assertEqual(empty_hp.column_count, 0)
# HistoryPanel should also be empty if empty value (np.array([])) is given
empty_hp = qt.HistoryPanel(np.empty((5, 0, 4)), levels=self.shares, columns=self.htypes)
self.assertTrue(empty_hp.is_empty)
self.assertIsInstance(empty_hp, qt.HistoryPanel)
self.assertEqual(empty_hp.shape[0], 0)
self.assertEqual(empty_hp.shape[1], 0)
self.assertEqual(empty_hp.shape[2], 0)
self.assertEqual(empty_hp.level_count, 0)
self.assertEqual(empty_hp.row_count, 0)
self.assertEqual(empty_hp.column_count, 0)
def test_create_history_panel(self):
""" test the creation of a HistoryPanel object by passing all data explicitly
"""
self.assertIsInstance(self.hp, qt.HistoryPanel)
self.assertEqual(self.hp.shape[0], 5)
self.assertEqual(self.hp.shape[1], 10)
self.assertEqual(self.hp.shape[2], 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(list(self.hp.levels.keys()), self.shares.split(','))
self.assertEqual(list(self.hp.columns.keys()), self.htypes.split(','))
self.assertEqual(list(self.hp.rows.keys())[0], pd.Timestamp('20200101'))
self.assertIsInstance(self.hp2, qt.HistoryPanel)
self.assertEqual(self.hp2.shape[0], 5)
self.assertEqual(self.hp2.shape[1], 10)
self.assertEqual(self.hp2.shape[2], 1)
self.assertEqual(self.hp2.level_count, 5)
self.assertEqual(self.hp2.row_count, 10)
self.assertEqual(self.hp2.column_count, 1)
self.assertEqual(list(self.hp2.levels.keys()), self.shares.split(','))
self.assertEqual(list(self.hp2.columns.keys()), ['close'])
self.assertEqual(list(self.hp2.rows.keys())[0], pd.Timestamp('20200101'))
self.assertIsInstance(self.hp3, qt.HistoryPanel)
self.assertEqual(self.hp3.shape[0], 1)
self.assertEqual(self.hp3.shape[1], 10)
self.assertEqual(self.hp3.shape[2], 4)
self.assertEqual(self.hp3.level_count, 1)
self.assertEqual(self.hp3.row_count, 10)
self.assertEqual(self.hp3.column_count, 4)
self.assertEqual(list(self.hp3.levels.keys()), ['000100'])
self.assertEqual(list(self.hp3.columns.keys()), self.htypes.split(','))
self.assertEqual(list(self.hp3.rows.keys())[0], pd.Timestamp('2016-07-01'))
self.assertIsInstance(self.hp4, qt.HistoryPanel)
self.assertEqual(self.hp4.shape[0], 1)
self.assertEqual(self.hp4.shape[1], 10)
self.assertEqual(self.hp4.shape[2], 1)
self.assertEqual(self.hp4.level_count, 1)
self.assertEqual(self.hp4.row_count, 10)
self.assertEqual(self.hp4.column_count, 1)
self.assertEqual(list(self.hp4.levels.keys()), ['000100'])
self.assertEqual(list(self.hp4.columns.keys()), ['close'])
self.assertEqual(list(self.hp4.rows.keys())[0], pd.Timestamp('2016-07-01'))
self.hp5.info()
self.assertIsInstance(self.hp5, qt.HistoryPanel)
self.assertTrue(np.allclose(self.hp5.values, self.data))
self.assertEqual(self.hp5.shape[0], 5)
self.assertEqual(self.hp5.shape[1], 10)
self.assertEqual(self.hp5.shape[2], 4)
self.assertEqual(self.hp5.level_count, 5)
self.assertEqual(self.hp5.row_count, 10)
self.assertEqual(self.hp5.column_count, 4)
self.assertEqual(list(self.hp5.levels.keys()), [0, 1, 2, 3, 4])
self.assertEqual(list(self.hp5.columns.keys()), [0, 1, 2, 3])
self.assertEqual(list(self.hp5.rows.keys())[0], pd.Timestamp('2020-07-30'))
self.hp6.info()
self.assertIsInstance(self.hp6, qt.HistoryPanel)
self.assertTrue(np.allclose(self.hp6.values, self.data))
self.assertEqual(self.hp6.shape[0], 5)
self.assertEqual(self.hp6.shape[1], 10)
self.assertEqual(self.hp6.shape[2], 4)
self.assertEqual(self.hp6.level_count, 5)
self.assertEqual(self.hp6.row_count, 10)
self.assertEqual(self.hp6.column_count, 4)
self.assertEqual(list(self.hp6.levels.keys()), ['000100', '000101', '000102', '000103', '000104'])
self.assertEqual(list(self.hp6.columns.keys()), [0, 1, 2, 3])
self.assertEqual(list(self.hp6.rows.keys())[0], pd.Timestamp('2016-07-01'))
print('test creating HistoryPanel with very limited data')
print('test creating HistoryPanel with 2D data')
temp_data = np.random.randint(10, size=(7, 3)).astype('float')
temp_hp = qt.HistoryPanel(temp_data)
# Error testing during HistoryPanel creation
# shape does not match
self.assertRaises(AssertionError,
qt.HistoryPanel,
self.data,
levels=self.shares, columns='close', rows=self.index)
# values is not np.ndarray
self.assertRaises(TypeError,
qt.HistoryPanel,
list(self.data))
# dimension/shape does not match
self.assertRaises(AssertionError,
qt.HistoryPanel,
self.data2,
levels='000100', columns=self.htypes, rows=self.index)
# value dimension over 3
self.assertRaises(AssertionError,
qt.HistoryPanel,
np.random.randint(10, size=(5, 10, 4, 2)))
# label value not valid
self.assertRaises(ValueError,
qt.HistoryPanel,
self.data2,
levels=self.shares, columns='close',
rows='a,b,c,d,e,f,g,h,i,j')
def test_history_panel_slicing(self):
"""测试HistoryPanel的各种切片方法
包括通过标签名称切片,通过数字切片,通过逗号分隔的标签名称切片,通过冒号分隔的标签名称切片等切片方式"""
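# Note: as the assertions below show, the first subscript selects htypes and the second selects
# shares; comma-separated labels ('close,open') pick the listed items, colon-separated labels
# ('close:high') pick the whole range between them, and plain lists or integer slices work as well.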
self.assertTrue(np.allclose(self.hp['close'], self.data[:, :, 0:1]))
self.assertTrue(np.allclose(self.hp['close,open'], self.data[:, :, 0:2]))
self.assertTrue(np.allclose(self.hp[['close', 'open']], self.data[:, :, 0:2]))
self.assertTrue(np.allclose(self.hp['close:high'], self.data[:, :, 0:3]))
self.assertTrue(np.allclose(self.hp['close,high'], self.data[:, :, [0, 2]]))
self.assertTrue(np.allclose(self.hp[:, '000100'], self.data[0:1, :, ]))
self.assertTrue(np.allclose(self.hp[:, '000100,000101'], self.data[0:2, :]))
self.assertTrue(np.allclose(self.hp[:, ['000100', '000101']], self.data[0:2, :]))
self.assertTrue(np.allclose(self.hp[:, '000100:000102'], self.data[0:3, :]))
self.assertTrue(np.allclose(self.hp[:, '000100,000102'], self.data[[0, 2], :]))
self.assertTrue(np.allclose(self.hp['close,open', '000100,000102'], self.data[[0, 2], :, 0:2]))
print('start testing HistoryPanel')
data = np.random.randint(10, size=(10, 5))
# index = pd.date_range(start='20200101', freq='d', periods=10)
shares = '000100,000101,000102,000103,000104'
dtypes = 'close'
df = pd.DataFrame(data)
print('=========================\nTesting HistoryPanel creation from DataFrame')
hp = qt.dataframe_to_hp(df=df, shares=shares, htypes=dtypes)
hp.info()
hp = qt.dataframe_to_hp(df=df, shares='000100', htypes='close, open, high, low, middle', column_type='htypes')
hp.info()
print('=========================\nTesting HistoryPanel creation from initialization')
data = np.random.randint(10, size=(5, 10, 4)).astype('float')
index = pd.date_range(start='20200101', freq='d', periods=10)
dtypes = 'close, open, high,low'
data[0, [5, 6, 9], [0, 1, 3]] = np.nan
data[1:4, [4, 7, 6, 2], [1, 1, 3, 0]] = np.nan
data[4:5, [2, 9, 1, 2], [0, 3, 2, 1]] = np.nan
hp = qt.HistoryPanel(data, levels=shares, columns=dtypes, rows=index)
hp.info()
print('==========================\noutput all historical data of type close\n')
self.assertTrue(np.allclose(hp['close', :, :], data[:, :, 0:1], equal_nan=True))
print(f'==========================\noutput all historical data of types close and open\n')
self.assertTrue(np.allclose(hp[[0, 1], :, :], data[:, :, 0:2], equal_nan=True))
print(f'==========================\noutput historical data of all types for the first share\n')
self.assertTrue(np.allclose(hp[:, [0], :], data[0:1, :, :], equal_nan=True))
print('==========================\noutput all historical data of all shares for htypes 0, 1 and 2\n')
self.assertTrue(np.allclose(hp[[0, 1, 2]], data[:, :, 0:3], equal_nan=True))
print('==========================\noutput all historical data of types close and high\n')
self.assertTrue(np.allclose(hp[['close', 'high']], data[:, :, [0, 2]], equal_nan=True))
print('==========================\noutput all historical data of htypes 0 and 1\n')
self.assertTrue(np.allclose(hp[[0, 1]], data[:, :, 0:2], equal_nan=True))
print('==========================\noutput all historical data of types close and high\n')
self.assertTrue(np.allclose(hp['close,high'], data[:, :, [0, 2]], equal_nan=True))
print('==========================\noutput all historical data of the three types from close to high\n')
self.assertTrue(np.allclose(hp['close:high'], data[:, :, 0:3], equal_nan=True))
print('==========================\noutput all historical data of shares 0, 1 and 3\n')
self.assertTrue(np.allclose(hp[:, [0, 1, 3]], data[[0, 1, 3], :, :], equal_nan=True))
print('==========================\noutput all historical data of shares 000100 and 000102\n')
self.assertTrue(np.allclose(hp[:, ['000100', '000102']], data[[0, 2], :, :], equal_nan=True))
print('==========================\noutput historical data of shares 0, 1 and 2\n', hp[:, 0: 3])
self.assertTrue(np.allclose(hp[:, 0: 3], data[0:3, :, :], equal_nan=True))
print('==========================\noutput all historical data of shares 000100 and 000102\n')
self.assertTrue(np.allclose(hp[:, '000100, 000102'], data[[0, 2], :, :], equal_nan=True))
print('==========================\noutput historical data of days 0-7 for all shares\n')
self.assertTrue(np.allclose(hp[:, :, 0:8], data[:, 0:8, :], equal_nan=True))
print('==========================\noutput historical data of days 0-7 for share 000100\n')
self.assertTrue(np.allclose(hp[:, '000100', 0:8], data[0, 0:8, :], equal_nan=True))
print('==========================\nstart testing multi-axis slicing of HistoryPanel object')
print('==========================\noutput close and open historical data of shares 000100 and 000102\n',
hp['close,open', ['000100', '000102']])
print('==========================\noutput close and open historical data of shares 000100 and 000102\n',
hp['close,open', '000100, 000102'])
print(f'historyPanel: hp:\n{hp}')
print(f'data is:\n{data}')
hp.htypes = 'open,high,low,close'
hp.info()
hp.shares = ['000300', '600227', '600222', '000123', '000129']
hp.info()
def test_segment(self):
"""测试历史数据片段的获取"""
test_hp = qt.HistoryPanel(self.data,
levels=self.shares,
columns=self.htypes,
rows=self.index2)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
print(f'Test segment with None parameters')
seg1 = test_hp.segment()
seg2 = test_hp.segment('20150202')
seg3 = test_hp.segment(end_date='20201010')
self.assertIsInstance(seg1, qt.HistoryPanel)
self.assertIsInstance(seg2, qt.HistoryPanel)
self.assertIsInstance(seg3, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp.values
))
self.assertTrue(np.allclose(
seg2.values, test_hp.values
))
self.assertTrue(np.allclose(
seg3.values, test_hp.values
))
# check that htypes and shares are the same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
self.assertEqual(seg2.htypes, test_hp.htypes)
self.assertEqual(seg2.shares, test_hp.shares)
self.assertEqual(seg3.htypes, test_hp.htypes)
self.assertEqual(seg3.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
self.assertEqual(seg2.hdates, test_hp.hdates)
self.assertEqual(seg3.hdates, test_hp.hdates)
print(f'Test segment with proper dates')
seg1 = test_hp.segment()
seg2 = test_hp.segment('20160704')
seg3 = test_hp.segment(start_date='2016-07-05',
end_date='20160708')
self.assertIsInstance(seg1, qt.HistoryPanel)
self.assertIsInstance(seg2, qt.HistoryPanel)
self.assertIsInstance(seg3, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp[:, :, :]
))
self.assertTrue(np.allclose(
seg2.values, test_hp[:, :, 1:10]
))
self.assertTrue(np.allclose(
seg3.values, test_hp[:, :, 2:6]
))
# check that htypes and shares are the same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
self.assertEqual(seg2.htypes, test_hp.htypes)
self.assertEqual(seg2.shares, test_hp.shares)
self.assertEqual(seg3.htypes, test_hp.htypes)
self.assertEqual(seg3.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
self.assertEqual(seg2.hdates, test_hp.hdates[1:10])
self.assertEqual(seg3.hdates, test_hp.hdates[2:6])
print(f'Test segment with non-existing but in range dates')
seg1 = test_hp.segment()
seg2 = test_hp.segment('20160703')
seg3 = test_hp.segment(start_date='2016-07-03',
end_date='20160710')
self.assertIsInstance(seg1, qt.HistoryPanel)
self.assertIsInstance(seg2, qt.HistoryPanel)
self.assertIsInstance(seg3, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp[:, :, :]
))
self.assertTrue(np.allclose(
seg2.values, test_hp[:, :, 1:10]
))
self.assertTrue(np.allclose(
seg3.values, test_hp[:, :, 1:6]
))
# check that htypes and shares are the same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
self.assertEqual(seg2.htypes, test_hp.htypes)
self.assertEqual(seg2.shares, test_hp.shares)
self.assertEqual(seg3.htypes, test_hp.htypes)
self.assertEqual(seg3.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
self.assertEqual(seg2.hdates, test_hp.hdates[1:10])
self.assertEqual(seg3.hdates, test_hp.hdates[1:6])
print(f'Test segment with out-of-range dates')
seg1 = test_hp.segment(start_date='2016-05-03',
end_date='20160910')
self.assertIsInstance(seg1, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp[:, :, :]
))
# check that htypes and shares are the same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
def test_slice(self):
"""测试历史数据切片的获取"""
test_hp = qt.HistoryPanel(self.data,
levels=self.shares,
columns=self.htypes,
rows=self.index2)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
print(f'Test slice with shares')
share = '000101'
slc = test_hp.slice(shares=share)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, ['000101'])
self.assertEqual(slc.htypes, test_hp.htypes)
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp[:, '000101']))
share = '000101, 000103'
slc = test_hp.slice(shares=share)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, ['000101', '000103'])
self.assertEqual(slc.htypes, test_hp.htypes)
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp[:, '000101, 000103']))
print(f'Test slice with htypes')
htype = 'open'
slc = test_hp.slice(htypes=htype)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, test_hp.shares)
self.assertEqual(slc.htypes, ['open'])
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp['open']))
htype = 'open, close'
slc = test_hp.slice(htypes=htype)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, test_hp.shares)
self.assertEqual(slc.htypes, ['open', 'close'])
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp['open, close']))
# test that slicing of "open, close" does NOT equal to "close, open"
self.assertFalse(np.allclose(slc.values, test_hp['close, open']))
print(f'Test slicing with both htypes and shares')
share = '000103, 000101'
htype = 'high, low, close'
slc = test_hp.slice(shares=share, htypes=htype)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, ['000103', '000101'])
self.assertEqual(slc.htypes, ['high', 'low', 'close'])
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp['high, low, close', '000103, 000101']))
print(f'Test Error cases')
# duplicated input
htype = 'open, close, open'
self.assertRaises(AssertionError, test_hp.slice, htypes=htype)
def test_relabel(self):
new_shares_list = ['000001', '000002', '000003', '000004', '000005']
new_shares_str = '000001, 000002, 000003, 000004, 000005'
new_htypes_list = ['close', 'volume', 'value', 'exchange']
new_htypes_str = 'close, volume, value, exchange'
temp_hp = self.hp.copy()
temp_hp.re_label(shares=new_shares_list)
print(temp_hp.info())
print(temp_hp.htypes)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.htypes, temp_hp.htypes)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.shares, new_shares_list)
temp_hp = self.hp.copy()
temp_hp.re_label(shares=new_shares_str)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.htypes, temp_hp.htypes)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.shares, new_shares_list)
temp_hp = self.hp.copy()
temp_hp.re_label(htypes=new_htypes_list)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.shares, temp_hp.shares)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.htypes, new_htypes_list)
temp_hp = self.hp.copy()
temp_hp.re_label(htypes=new_htypes_str)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.shares, temp_hp.shares)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.htypes, new_htypes_list)
print(f'test errors raising')
temp_hp = self.hp.copy()
self.assertRaises(AssertionError, temp_hp.re_label, htypes=new_shares_str)
self.assertRaises(TypeError, temp_hp.re_label, htypes=123)
self.assertRaises(AssertionError, temp_hp.re_label, htypes='wrong input!')
def test_csv_to_hp(self):
pass
def test_hdf_to_hp(self):
pass
def test_hp_join(self):
# TODO: this test needs to be strengthened: concrete examples are needed to confirm that the result of hp_join is correct,
# TODO: especially whether different shares, htypes and hdates, and the same labels given in
# TODO: different orders, can be combined correctly
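# A possible concrete follow-up (sketch only, exact join semantics still to be confirmed):
#     joined = self.hp.join(self.hp2, same_shares=True)
#     self.assertEqual(joined.shares, self.hp.shares)            # shares unchanged
#     self.assertEqual(len(joined.hdates), len(self.hp.hdates))  # both panels share the same hdates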
print(f'join two simple HistoryPanels with same shares')
temp_hp = self.hp.join(self.hp2, same_shares=True)
self.assertIsInstance(temp_hp, qt.HistoryPanel)
def test_df_to_hp(self):
print(f'test converting DataFrame to HistoryPanel')
data = np.random.randint(10, size=(10, 5))
df1 = pd.DataFrame(data)
df2 = pd.DataFrame(data, columns=str_to_list(self.shares))
df3 = pd.DataFrame(data[:, 0:4])
df4 = pd.DataFrame(data[:, 0:4], columns=str_to_list(self.htypes))
hp = qt.dataframe_to_hp(df1, htypes='close')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, [0, 1, 2, 3, 4])
self.assertEqual(hp.htypes, ['close'])
self.assertEqual(hp.hdates, [pd.Timestamp('1970-01-01 00:00:00'),
pd.Timestamp('1970-01-01 00:00:00.000000001'),
pd.Timestamp('1970-01-01 00:00:00.000000002'),
pd.Timestamp('1970-01-01 00:00:00.000000003'),
pd.Timestamp('1970-01-01 00:00:00.000000004'),
pd.Timestamp('1970-01-01 00:00:00.000000005'),
pd.Timestamp('1970-01-01 00:00:00.000000006'),
pd.Timestamp('1970-01-01 00:00:00.000000007'),
pd.Timestamp('1970-01-01 00:00:00.000000008'),
pd.Timestamp('1970-01-01 00:00:00.000000009')])
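# Note: df1 keeps pandas' default RangeIndex 0..9, and dataframe_to_hp appears to coerce the row
# index to datetimes, so the integers become nanosecond offsets from the epoch
# (1970-01-01 00:00:00.000000000 through .000000009) as asserted above.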
hp = qt.dataframe_to_hp(df2, shares=self.shares, htypes='close')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, str_to_list(self.shares))
self.assertEqual(hp.htypes, ['close'])
hp = qt.dataframe_to_hp(df3, shares='000100', column_type='htypes')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, ['000100'])
self.assertEqual(hp.htypes, [0, 1, 2, 3])
hp = qt.dataframe_to_hp(df4, shares='000100', htypes=self.htypes, column_type='htypes')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, ['000100'])
self.assertEqual(hp.htypes, str_to_list(self.htypes))
hp.info()
self.assertRaises(KeyError, qt.dataframe_to_hp, df1)
def test_to_dataframe(self):
""" 测试HistoryPanel对象的to_dataframe方法
"""
print(f'START TEST == test_to_dataframe')
print(f'test converting test hp to dataframe with share == "000102":')
df_test = self.hp.to_dataframe(share='000102')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.htypes), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp[:, '000102'], values))
print(f'test DataFrame conversion with share == "000100"')
df_test = self.hp.to_dataframe(share='000100')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.htypes), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp[:, '000100'], values))
print(f'test DataFrame conversion error: type incorrect')
self.assertRaises(AssertionError, self.hp.to_dataframe, share=3.0)
print(f'test DataFrame error raising with share not found error')
self.assertRaises(KeyError, self.hp.to_dataframe, share='000300')
print(f'test DataFrame conversion with htype == "close"')
df_test = self.hp.to_dataframe(htype='close')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp['close'].T, values))
print(f'test DataFrame conversion with htype == "high"')
df_test = self.hp.to_dataframe(htype='high')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp['high'].T, values))
print(f'test DataFrame conversion with htype == "high" and dropna')
v = self.hp.values.astype('float')
v[:, 3, :] = np.nan
v[:, 4, :] = np.inf
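# Note: row 3 of every share is set to nan and row 4 to inf, so to_dataframe with dropna=True should
# drop only the nan row (9 rows remain), while adding inf_as_na=True in the next case should drop
# both rows (8 rows remain), as the expected index lists below reflect.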
test_hp = qt.HistoryPanel(v, levels=self.shares, columns=self.htypes, rows=self.index)
df_test = test_hp.to_dataframe(htype='high', dropna=True)
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates[:3]) + list(self.hp.hdates[4:]), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
target_values = test_hp['high'].T
target_values = target_values[np.where(~np.isnan(target_values))].reshape(9, 5)
self.assertTrue(np.allclose(target_values, values))
print(f'test DataFrame conversion with htype == "high", dropna and treat infs as na')
v = self.hp.values.astype('float')
v[:, 3, :] = np.nan
v[:, 4, :] = np.inf
test_hp = qt.HistoryPanel(v, levels=self.shares, columns=self.htypes, rows=self.index)
df_test = test_hp.to_dataframe(htype='high', dropna=True, inf_as_na=True)
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates[:3]) + list(self.hp.hdates[5:]), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
target_values = test_hp['high'].T
target_values = target_values[np.where(~np.isnan(target_values) & ~np.isinf(target_values))].reshape(8, 5)
self.assertTrue(np.allclose(target_values, values))
print(f'test DataFrame conversion error: type incorrect')
self.assertRaises(AssertionError, self.hp.to_dataframe, htype=pd.DataFrame())
print(f'test DataFrame error raising with htype not found error')
self.assertRaises(KeyError, self.hp.to_dataframe, htype='non_type')
print(f'Raises KeyError when both share and htype or neither of them is given')
self.assertRaises(KeyError, self.hp.to_dataframe)
self.assertRaises(KeyError, self.hp.to_dataframe, share='000100', htype='close')
def test_to_df_dict(self):
"""测试HistoryPanel公有方法to_df_dict"""
print('test convert history panel slice by share')
df_dict = self.hp.to_df_dict('share')
self.assertEqual(self.hp.shares, list(df_dict.keys()))
df_dict = self.hp.to_df_dict()
self.assertEqual(self.hp.shares, list(df_dict.keys()))
print('test convert historypanel slice by htype ')
df_dict = self.hp.to_df_dict('htype')
self.assertEqual(self.hp.htypes, list(df_dict.keys()))
print('test raise assertion error')
self.assertRaises(AssertionError, self.hp.to_df_dict, by='random text')
self.assertRaises(AssertionError, self.hp.to_df_dict, by=3)
print('test empty hp')
df_dict = qt.HistoryPanel().to_df_dict('share')
self.assertEqual(df_dict, {})
def test_stack_dataframes(self):
print('test stack dataframes in a list')
df1 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [2, 3, 4, 5], 'c': [3, 4, 5, 6]})
df1.index = ['20200101', '20200102', '20200103', '20200104']
df2 = pd.DataFrame({'b': [4, 3, 2, 1], 'd': [1, 1, 1, 1], 'c': [6, 5, 4, 3]})
df2.index = ['20200101', '20200102', '20200104', '20200105']
df3 = pd.DataFrame({'a': [6, 6, 6, 6], 'd': [4, 4, 4, 4], 'b': [2, 4, 6, 8]})
df3.index = ['20200101', '20200102', '20200103', '20200106']
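# Note: the three frames have different column sets (a,b,c / b,d,c / a,d,b) and different date
# indexes, so stack_dataframes evidently aligns them on the union of all dates (20200101..20200106)
# and the union of all columns, filling positions missing from a frame with np.nan -- exactly what
# the expected values1 and values2 arrays encode.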
values1 = np.array([[[1., 2., 3., np.nan],
[2., 3., 4., np.nan],
[3., 4., 5., np.nan],
[4., 5., 6., np.nan],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan]],
[[np.nan, 4., 6., 1.],
[np.nan, 3., 5., 1.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 2., 4., 1.],
[np.nan, 1., 3., 1.],
[np.nan, np.nan, np.nan, np.nan]],
[[6., 2., np.nan, 4.],
[6., 4., np.nan, 4.],
[6., 6., np.nan, 4.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[6., 8., np.nan, 4.]]])
values2 = np.array([[[1., np.nan, 6.],
[2., np.nan, 6.],
[3., np.nan, 6.],
[4., np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 6.]],
[[2., 4., 2.],
[3., 3., 4.],
[4., np.nan, 6.],
[5., 2., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 8.]],
[[3., 6., np.nan],
[4., 5., np.nan],
[5., np.nan, np.nan],
[6., 4., np.nan],
[np.nan, 3., np.nan],
[np.nan, np.nan, np.nan]],
[[np.nan, 1., 4.],
[np.nan, 1., 4.],
[np.nan, np.nan, 4.],
[np.nan, 1., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 4.]]])
print(df1.rename(index=pd.to_datetime))
print(df2.rename(index=pd.to_datetime))
print(df3.rename(index=pd.to_datetime))
hp1 = stack_dataframes([df1, df2, df3], stack_along='shares',
shares=['000100', '000200', '000300'])
hp2 = stack_dataframes([df1, df2, df3], stack_along='shares',
shares='000100, 000300, 000200')
print('hp1 is:\n', hp1)
print('hp2 is:\n', hp2)
self.assertEqual(hp1.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp1.shares, ['000100', '000200', '000300'])
self.assertTrue(np.allclose(hp1.values, values1, equal_nan=True))
self.assertEqual(hp2.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp2.shares, ['000100', '000300', '000200'])
self.assertTrue(np.allclose(hp2.values, values1, equal_nan=True))
hp3 = stack_dataframes([df1, df2, df3], stack_along='htypes',
htypes=['close', 'high', 'low'])
hp4 = stack_dataframes([df1, df2, df3], stack_along='htypes',
htypes='open, close, high')
print('hp3 is:\n', hp3.values)
print('hp4 is:\n', hp4.values)
self.assertEqual(hp3.htypes, ['close', 'high', 'low'])
self.assertEqual(hp3.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp3.values, values2, equal_nan=True))
self.assertEqual(hp4.htypes, ['open', 'close', 'high'])
self.assertEqual(hp4.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp4.values, values2, equal_nan=True))
print('test stack dataframes in a dict')
df1 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [2, 3, 4, 5], 'c': [3, 4, 5, 6]})
df1.index = ['20200101', '20200102', '20200103', '20200104']
df2 = pd.DataFrame({'b': [4, 3, 2, 1], 'd': [1, 1, 1, 1], 'c': [6, 5, 4, 3]})
df2.index = ['20200101', '20200102', '20200104', '20200105']
df3 = pd.DataFrame({'a': [6, 6, 6, 6], 'd': [4, 4, 4, 4], 'b': [2, 4, 6, 8]})
df3.index = ['20200101', '20200102', '20200103', '20200106']
values1 = np.array([[[1., 2., 3., np.nan],
[2., 3., 4., np.nan],
[3., 4., 5., np.nan],
[4., 5., 6., np.nan],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan]],
[[np.nan, 4., 6., 1.],
[np.nan, 3., 5., 1.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 2., 4., 1.],
[np.nan, 1., 3., 1.],
[np.nan, np.nan, np.nan, np.nan]],
[[6., 2., np.nan, 4.],
[6., 4., np.nan, 4.],
[6., 6., np.nan, 4.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[6., 8., np.nan, 4.]]])
values2 = np.array([[[1., np.nan, 6.],
[2., np.nan, 6.],
[3., np.nan, 6.],
[4., np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 6.]],
[[2., 4., 2.],
[3., 3., 4.],
[4., np.nan, 6.],
[5., 2., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 8.]],
[[3., 6., np.nan],
[4., 5., np.nan],
[5., np.nan, np.nan],
[6., 4., np.nan],
[np.nan, 3., np.nan],
[np.nan, np.nan, np.nan]],
[[np.nan, 1., 4.],
[np.nan, 1., 4.],
[np.nan, np.nan, 4.],
[np.nan, 1., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 4.]]])
print(df1.rename(index=pd.to_datetime))
print(df2.rename(index=pd.to_datetime))
print(df3.rename(index=pd.to_datetime))
hp1 = stack_dataframes(dfs={'000001.SZ': df1, '000002.SZ': df2, '000003.SZ': df3},
stack_along='shares')
hp2 = stack_dataframes(dfs={'000001.SZ': df1, '000002.SZ': df2, '000003.SZ': df3},
stack_along='shares',
shares='000100, 000300, 000200')
print('hp1 is:\n', hp1)
print('hp2 is:\n', hp2)
self.assertEqual(hp1.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp1.shares, ['000001.SZ', '000002.SZ', '000003.SZ'])
self.assertTrue(np.allclose(hp1.values, values1, equal_nan=True))
self.assertEqual(hp2.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp2.shares, ['000100', '000300', '000200'])
self.assertTrue(np.allclose(hp2.values, values1, equal_nan=True))
hp3 = stack_dataframes(dfs={'close': df1, 'high': df2, 'low': df3},
stack_along='htypes')
hp4 = stack_dataframes(dfs={'close': df1, 'low': df2, 'high': df3},
stack_along='htypes',
htypes='open, close, high')
print('hp3 is:\n', hp3.values)
print('hp4 is:\n', hp4.values)
self.assertEqual(hp3.htypes, ['close', 'high', 'low'])
self.assertEqual(hp3.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp3.values, values2, equal_nan=True))
self.assertEqual(hp4.htypes, ['open', 'close', 'high'])
self.assertEqual(hp4.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp4.values, values2, equal_nan=True))
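    # Summary of the API exercised above: stack_dataframes() accepts either a list of DataFrames
    # plus explicit labels or a dict whose keys become the labels. With stack_along='shares' each
    # DataFrame is one share and its columns become htypes; with stack_along='htypes' each
    # DataFrame is one htype and its columns become shares. Rows and columns missing from any
    # individual DataFrame are aligned and filled with NaN, as the target arrays above show.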
def test_to_csv(self):
pass
def test_to_hdf(self):
pass
def test_fill_na(self):
"""测试填充无效值"""
print(self.hp)
new_values = self.hp.values.astype(float)
new_values[[0, 1, 3, 2], [1, 3, 0, 2], [1, 3, 2, 2]] = np.nan
print(new_values)
temp_hp = qt.HistoryPanel(values=new_values, levels=self.hp.levels, rows=self.hp.rows, columns=self.hp.columns)
self.assertTrue(np.allclose(temp_hp.values[[0, 1, 3, 2], [1, 3, 0, 2], [1, 3, 2, 2]], np.nan, equal_nan=True))
temp_hp.fillna(2.3)
filled_values = new_values.copy()
filled_values[[0, 1, 3, 2], [1, 3, 0, 2], [1, 3, 2, 2]] = 2.3
self.assertTrue(np.allclose(temp_hp.values,
filled_values, equal_nan=True))
def test_fill_inf(self):
"""测试填充无限值"""
def test_get_history_panel(self):
# TODO: implement this test case
# test get only one line of data
pass
def test_get_price_type_raw_data(self):
shares = '000039.SZ, 600748.SH, 000040.SZ'
start = '20200101'
end = '20200131'
htypes = 'open, high, low, close'
target_price_000039 = [[9.45, 9.49, 9.12, 9.17],
[9.46, 9.56, 9.4, 9.5],
[9.7, 9.76, 9.5, 9.51],
[9.7, 9.75, 9.7, 9.72],
[9.73, 9.77, 9.7, 9.73],
[9.83, 9.85, 9.71, 9.72],
[9.85, 9.85, 9.75, 9.79],
[9.96, 9.96, 9.83, 9.86],
[9.87, 9.94, 9.77, 9.93],
[9.82, 9.9, 9.76, 9.87],
[9.8, 9.85, 9.77, 9.82],
[9.84, 9.86, 9.71, 9.72],
[9.83, 9.93, 9.81, 9.86],
[9.7, 9.87, 9.7, 9.82],
[9.83, 9.86, 9.69, 9.79],
[9.8, 9.94, 9.8, 9.86]]
target_price_600748 = [[5.68, 5.68, 5.32, 5.37],
[5.62, 5.68, 5.46, 5.65],
[5.72, 5.72, 5.61, 5.62],
[5.76, 5.77, 5.6, 5.73],
[5.78, 5.84, 5.73, 5.75],
[5.89, 5.91, 5.76, 5.77],
[6.03, 6.04, 5.87, 5.89],
[5.94, 6.07, 5.94, 6.02],
[5.96, 5.98, 5.88, 5.97],
[6.04, 6.06, 5.95, 5.96],
[5.98, 6.04, 5.96, 6.03],
[6.1, 6.11, 5.89, 5.94],
[6.02, 6.12, 6., 6.1],
[5.96, 6.05, 5.88, 6.01],
[6.03, 6.03, 5.95, 5.99],
[6.02, 6.12, 5.99, 5.99]]
target_price_000040 = [[3.63, 3.83, 3.63, 3.65],
[3.99, 4.07, 3.97, 4.03],
[4.1, 4.11, 3.93, 3.95],
[4.12, 4.13, 4.06, 4.11],
[4.13, 4.19, 4.07, 4.13],
[4.27, 4.28, 4.11, 4.12],
[4.37, 4.38, 4.25, 4.29],
[4.34, 4.5, 4.32, 4.41],
[4.28, 4.35, 4.2, 4.34],
[4.41, 4.43, 4.29, 4.31],
[4.42, 4.45, 4.36, 4.41],
[4.51, 4.56, 4.33, 4.35],
[4.35, 4.55, 4.31, 4.55],
[4.3, 4.41, 4.22, 4.36],
[4.27, 4.44, 4.23, 4.34],
[4.23, 4.27, 4.18, 4.25]]
print(f'test get price type raw data with single thread')
df_list = get_price_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, freq='d')
self.assertIsInstance(df_list, dict)
self.assertEqual(len(df_list), 3)
self.assertTrue(np.allclose(df_list['000039.SZ'].values, np.array(target_price_000039)))
self.assertTrue(np.allclose(df_list['600748.SH'].values, np.array(target_price_600748)))
self.assertTrue(np.allclose(df_list['000040.SZ'].values, np.array(target_price_000040)))
        print(f'in get price type raw data, got DataFrames: \n"000039.SZ":\n'
f'{df_list["000039.SZ"]}\n"600748.SH":\n'
f'{df_list["600748.SH"]}\n"000040.SZ":\n{df_list["000040.SZ"]}')
        print(f'test get price type raw data with multi threads')
df_list = get_price_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, freq='d', parallel=10)
self.assertIsInstance(df_list, dict)
self.assertEqual(len(df_list), 3)
self.assertTrue(np.allclose(df_list['000039.SZ'].values, np.array(target_price_000039)))
self.assertTrue(np.allclose(df_list['600748.SH'].values, np.array(target_price_600748)))
self.assertTrue(np.allclose(df_list['000040.SZ'].values, np.array(target_price_000040)))
        print(f'in get price type raw data, got DataFrames: \n"000039.SZ":\n'
f'{df_list["000039.SZ"]}\n"600748.SH":\n'
f'{df_list["600748.SH"]}\n"000040.SZ":\n{df_list["000040.SZ"]}')
def test_get_financial_report_type_raw_data(self):
shares = '000039.SZ, 600748.SH, 000040.SZ'
start = '20160101'
end = '20201231'
htypes = 'eps,basic_eps,diluted_eps,total_revenue,revenue,total_share,' \
'cap_rese,undistr_porfit,surplus_rese,net_profit'
target_eps_000039 = [[1.41],
[0.1398],
[-0.0841],
[-0.1929],
[0.37],
[0.1357],
[0.1618],
[0.1191],
[1.11],
[0.759],
[0.3061],
[0.1409],
[0.81],
[0.4187],
[0.2554],
[0.1624],
[0.14],
[-0.0898],
[-0.1444],
[0.1291]]
target_eps_600748 = [[0.41],
[0.22],
[0.22],
[0.09],
[0.42],
[0.23],
[0.22],
[0.09],
[0.36],
[0.16],
[0.15],
[0.07],
[0.47],
[0.19],
[0.12],
[0.07],
[0.32],
[0.22],
[0.14],
[0.07]]
target_eps_000040 = [[-0.6866],
[-0.134],
[-0.189],
[-0.036],
[-0.6435],
[0.05],
[0.062],
[0.0125],
[0.8282],
[1.05],
[0.985],
[0.811],
[0.41],
[0.242],
[0.113],
[0.027],
[0.19],
[0.17],
[0.17],
[0.064]]
target_basic_eps_000039 = [[1.3980000e-01, 1.3980000e-01, 6.3591954e+10, 6.3591954e+10],
[-8.4100000e-02, -8.4100000e-02, 3.9431807e+10, 3.9431807e+10],
[-1.9290000e-01, -1.9290000e-01, 1.5852177e+10, 1.5852177e+10],
[3.7000000e-01, 3.7000000e-01, 8.5815341e+10, 8.5815341e+10],
[1.3570000e-01, 1.3430000e-01, 6.1660271e+10, 6.1660271e+10],
[1.6180000e-01, 1.6040000e-01, 4.2717729e+10, 4.2717729e+10],
[1.1910000e-01, 1.1900000e-01, 1.9099547e+10, 1.9099547e+10],
[1.1100000e+00, 1.1000000e+00, 9.3497622e+10, 9.3497622e+10],
[7.5900000e-01, 7.5610000e-01, 6.6906147e+10, 6.6906147e+10],
[3.0610000e-01, 3.0380000e-01, 4.3560398e+10, 4.3560398e+10],
[1.4090000e-01, 1.4050000e-01, 1.9253639e+10, 1.9253639e+10],
[8.1000000e-01, 8.1000000e-01, 7.6299930e+10, 7.6299930e+10],
[4.1870000e-01, 4.1710000e-01, 5.3962706e+10, 5.3962706e+10],
[2.5540000e-01, 2.5440000e-01, 3.3387152e+10, 3.3387152e+10],
[1.6240000e-01, 1.6200000e-01, 1.4675987e+10, 1.4675987e+10],
[1.4000000e-01, 1.4000000e-01, 5.1111652e+10, 5.1111652e+10],
[-8.9800000e-02, -8.9800000e-02, 3.4982614e+10, 3.4982614e+10],
[-1.4440000e-01, -1.4440000e-01, 2.3542843e+10, 2.3542843e+10],
[1.2910000e-01, 1.2860000e-01, 1.0412416e+10, 1.0412416e+10],
[7.2000000e-01, 7.1000000e-01, 5.8685804e+10, 5.8685804e+10]]
target_basic_eps_600748 = [[2.20000000e-01, 2.20000000e-01, 5.29423397e+09, 5.29423397e+09],
[2.20000000e-01, 2.20000000e-01, 4.49275653e+09, 4.49275653e+09],
[9.00000000e-02, 9.00000000e-02, 1.59067065e+09, 1.59067065e+09],
[4.20000000e-01, 4.20000000e-01, 8.86555586e+09, 8.86555586e+09],
[2.30000000e-01, 2.30000000e-01, 5.44850143e+09, 5.44850143e+09],
[2.20000000e-01, 2.20000000e-01, 4.34978927e+09, 4.34978927e+09],
[9.00000000e-02, 9.00000000e-02, 1.73793793e+09, 1.73793793e+09],
[3.60000000e-01, 3.60000000e-01, 8.66375241e+09, 8.66375241e+09],
[1.60000000e-01, 1.60000000e-01, 4.72875116e+09, 4.72875116e+09],
[1.50000000e-01, 1.50000000e-01, 3.76879016e+09, 3.76879016e+09],
[7.00000000e-02, 7.00000000e-02, 1.31785454e+09, 1.31785454e+09],
[4.70000000e-01, 4.70000000e-01, 7.23391685e+09, 7.23391685e+09],
[1.90000000e-01, 1.90000000e-01, 3.76072215e+09, 3.76072215e+09],
[1.20000000e-01, 1.20000000e-01, 2.35845364e+09, 2.35845364e+09],
[7.00000000e-02, 7.00000000e-02, 1.03831865e+09, 1.03831865e+09],
[3.20000000e-01, 3.20000000e-01, 6.48880919e+09, 6.48880919e+09],
[2.20000000e-01, 2.20000000e-01, 3.72209142e+09, 3.72209142e+09],
[1.40000000e-01, 1.40000000e-01, 2.22563924e+09, 2.22563924e+09],
[7.00000000e-02, 7.00000000e-02, 8.96647052e+08, 8.96647052e+08],
[4.80000000e-01, 4.80000000e-01, 6.61917508e+09, 6.61917508e+09]]
target_basic_eps_000040 = [[-1.34000000e-01, -1.34000000e-01, 2.50438755e+09, 2.50438755e+09],
[-1.89000000e-01, -1.89000000e-01, 1.32692347e+09, 1.32692347e+09],
[-3.60000000e-02, -3.60000000e-02, 5.59073338e+08, 5.59073338e+08],
[-6.43700000e-01, -6.43700000e-01, 6.80576162e+09, 6.80576162e+09],
[5.00000000e-02, 5.00000000e-02, 6.38891620e+09, 6.38891620e+09],
[6.20000000e-02, 6.20000000e-02, 5.23267082e+09, 5.23267082e+09],
[1.25000000e-02, 1.25000000e-02, 2.22420874e+09, 2.22420874e+09],
[8.30000000e-01, 8.30000000e-01, 8.67628947e+09, 8.67628947e+09],
[1.05000000e+00, 1.05000000e+00, 5.29431716e+09, 5.29431716e+09],
[9.85000000e-01, 9.85000000e-01, 3.56822382e+09, 3.56822382e+09],
[8.11000000e-01, 8.11000000e-01, 1.06613439e+09, 1.06613439e+09],
[4.10000000e-01, 4.10000000e-01, 8.13102532e+09, 8.13102532e+09],
[2.42000000e-01, 2.42000000e-01, 5.17971521e+09, 5.17971521e+09],
[1.13000000e-01, 1.13000000e-01, 3.21704120e+09, 3.21704120e+09],
[2.70000000e-02, 2.70000000e-02, 8.41966738e+08, 8.24272235e+08],
[1.90000000e-01, 1.90000000e-01, 3.77350171e+09, 3.77350171e+09],
[1.70000000e-01, 1.70000000e-01, 2.38643892e+09, 2.38643892e+09],
[1.70000000e-01, 1.70000000e-01, 1.29127117e+09, 1.29127117e+09],
[6.40000000e-02, 6.40000000e-02, 6.03256858e+08, 6.03256858e+08],
[1.30000000e-01, 1.30000000e-01, 1.66572918e+09, 1.66572918e+09]]
target_total_share_000039 = [[3.5950140e+09, 4.8005360e+09, 2.1573660e+10, 3.5823430e+09],
[3.5860750e+09, 4.8402300e+09, 2.0750827e+10, 3.5823430e+09],
[3.5860750e+09, 4.9053550e+09, 2.0791307e+10, 3.5823430e+09],
[3.5845040e+09, 4.8813110e+09, 2.1482857e+10, 3.5823430e+09],
[3.5831490e+09, 4.9764250e+09, 2.0926816e+10, 3.2825850e+09],
[3.5825310e+09, 4.8501270e+09, 2.1020418e+10, 3.2825850e+09],
[2.9851110e+09, 5.4241420e+09, 2.2438350e+10, 3.2825850e+09],
[2.9849890e+09, 4.1284000e+09, 2.2082769e+10, 3.2825850e+09],
[2.9849610e+09, 4.0838010e+09, 2.1045994e+10, 3.2815350e+09],
[2.9849560e+09, 4.2491510e+09, 1.9694345e+10, 3.2815350e+09],
[2.9846970e+09, 4.2351600e+09, 2.0016361e+10, 3.2815350e+09],
[2.9828890e+09, 4.2096630e+09, 1.9734494e+10, 3.2815350e+09],
[2.9813960e+09, 3.4564240e+09, 1.8562738e+10, 3.2793790e+09],
[2.9803530e+09, 3.0759650e+09, 1.8076208e+10, 3.2793790e+09],
[2.9792680e+09, 3.1376690e+09, 1.7994776e+10, 3.2793790e+09],
[2.9785770e+09, 3.1265850e+09, 1.7495053e+10, 3.2793790e+09],
[2.9783640e+09, 3.1343850e+09, 1.6740840e+10, 3.2035780e+09],
[2.9783590e+09, 3.1273880e+09, 1.6578389e+10, 3.2035780e+09],
[2.9782780e+09, 3.1169280e+09, 1.8047639e+10, 3.2035780e+09],
[2.9778200e+09, 3.1818630e+09, 1.7663145e+10, 3.2035780e+09]]
target_total_share_600748 = [[1.84456289e+09, 2.60058426e+09, 5.72443733e+09, 4.58026529e+08],
[1.84456289e+09, 2.60058426e+09, 5.72096899e+09, 4.58026529e+08],
[1.84456289e+09, 2.60058426e+09, 5.65738237e+09, 4.58026529e+08],
[1.84456289e+09, 2.60058426e+09, 5.50257806e+09, 4.58026529e+08],
[1.84456289e+09, 2.59868164e+09, 5.16741523e+09, 4.44998882e+08],
[1.84456289e+09, 2.59684471e+09, 5.14677280e+09, 4.44998882e+08],
[1.84456289e+09, 2.59684471e+09, 4.94955591e+09, 4.44998882e+08],
[1.84456289e+09, 2.59684471e+09, 4.79001451e+09, 4.44998882e+08],
[1.84456289e+09, 3.11401684e+09, 4.46326988e+09, 4.01064256e+08],
[1.84456289e+09, 3.11596723e+09, 4.45419136e+09, 4.01064256e+08],
[1.84456289e+09, 3.11596723e+09, 4.39652948e+09, 4.01064256e+08],
[1.84456289e+09, 3.18007783e+09, 4.26608403e+09, 4.01064256e+08],
[1.84456289e+09, 3.10935622e+09, 3.78417688e+09, 3.65651701e+08],
[1.84456289e+09, 3.10935622e+09, 3.65806574e+09, 3.65651701e+08],
[1.84456289e+09, 3.10935622e+09, 3.62063090e+09, 3.65651701e+08],
[1.84456289e+09, 3.10935622e+09, 3.50063915e+09, 3.65651701e+08],
[1.41889453e+09, 3.55940850e+09, 3.22272993e+09, 3.62124939e+08],
[1.41889453e+09, 3.56129650e+09, 3.11477476e+09, 3.62124939e+08],
[1.41889453e+09, 3.59632888e+09, 3.06836903e+09, 3.62124939e+08],
[1.08337087e+09, 3.37400726e+07, 3.00918704e+09, 3.62124939e+08]]
target_total_share_000040 = [[1.48687387e+09, 1.06757900e+10, 8.31900755e+08, 2.16091994e+08],
[1.48687387e+09, 1.06757900e+10, 7.50177302e+08, 2.16091994e+08],
[1.48687387e+09, 1.06757899e+10, 9.90255974e+08, 2.16123282e+08],
[1.48687387e+09, 1.06757899e+10, 1.03109866e+09, 2.16091994e+08],
[1.48687387e+09, 1.06757910e+10, 2.07704745e+09, 2.16123282e+08],
[1.48687387e+09, 1.06757910e+10, 2.09608665e+09, 2.16123282e+08],
[1.48687387e+09, 1.06803833e+10, 2.13354083e+09, 2.16123282e+08],
[1.48687387e+09, 1.06804090e+10, 2.11489364e+09, 2.16123282e+08],
[1.33717327e+09, 8.87361727e+09, 2.42939924e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 2.34220254e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 2.16390368e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 1.07961915e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 8.58866066e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 6.87024393e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 5.71554565e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 5.54241222e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361726e+09, 5.10059576e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361726e+09, 4.59351639e+08, 1.88489589e+08],
[4.69593364e+08, 2.78355875e+08, 4.13430814e+08, 1.88489589e+08],
[4.69593364e+08, 2.74235459e+08, 3.83557678e+08, 1.88489589e+08]]
target_net_profit_000039 = [[np.nan],
[2.422180e+08],
[np.nan],
[2.510113e+09],
[np.nan],
[1.102220e+09],
[np.nan],
[4.068455e+09],
[np.nan],
[1.315957e+09],
[np.nan],
[3.158415e+09],
[np.nan],
[1.066509e+09],
[np.nan],
[7.349830e+08],
[np.nan],
[-5.411600e+08],
[np.nan],
[2.271961e+09]]
target_net_profit_600748 = [[np.nan],
[4.54341757e+08],
[np.nan],
[9.14476670e+08],
[np.nan],
[5.25360283e+08],
[np.nan],
[9.24502415e+08],
[np.nan],
[4.66560302e+08],
[np.nan],
[9.15265285e+08],
[np.nan],
[2.14639674e+08],
[np.nan],
[7.45093049e+08],
[np.nan],
[2.10967312e+08],
[np.nan],
[6.04572711e+08]]
target_net_profit_000040 = [[np.nan],
[-2.82458846e+08],
[np.nan],
[-9.57130872e+08],
[np.nan],
[9.22114527e+07],
[np.nan],
[1.12643819e+09],
[np.nan],
[1.31715269e+09],
[np.nan],
[5.39940093e+08],
[np.nan],
[1.51440838e+08],
[np.nan],
[1.75339071e+08],
[np.nan],
[8.04740415e+07],
[np.nan],
[6.20445815e+07]]
print('test get financial data, in multi thread mode')
df_list = get_financial_report_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, parallel=4)
self.assertIsInstance(df_list, tuple)
self.assertEqual(len(df_list), 4)
self.assertEqual(len(df_list[0]), 3)
self.assertEqual(len(df_list[1]), 3)
self.assertEqual(len(df_list[2]), 3)
self.assertEqual(len(df_list[3]), 3)
        # Check that all returned items are of the correct type
self.assertTrue(all(isinstance(item, pd.DataFrame) for subdict in df_list for item in subdict.values()))
        # Check whether any empty data was returned
print(all(item.empty for subdict in df_list for item in subdict.values()))
        # Check that each data group is correct and consistently ordered; skip the check if the data is empty
if df_list[0]['000039.SZ'].empty:
print(f'income data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000039.SZ'].values, target_basic_eps_000039))
if df_list[0]['600748.SH'].empty:
print(f'income data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['600748.SH'].values, target_basic_eps_600748))
if df_list[0]['000040.SZ'].empty:
print(f'income data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000040.SZ'].values, target_basic_eps_000040))
if df_list[1]['000039.SZ'].empty:
print(f'indicator data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000039.SZ'].values, target_eps_000039))
if df_list[1]['600748.SH'].empty:
print(f'indicator data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['600748.SH'].values, target_eps_600748))
if df_list[1]['000040.SZ'].empty:
print(f'indicator data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000040.SZ'].values, target_eps_000040))
if df_list[2]['000039.SZ'].empty:
print(f'balance data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000039.SZ'].values, target_total_share_000039))
if df_list[2]['600748.SH'].empty:
print(f'balance data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['600748.SH'].values, target_total_share_600748))
if df_list[2]['000040.SZ'].empty:
print(f'balance data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000040.SZ'].values, target_total_share_000040))
if df_list[3]['000039.SZ'].empty:
print(f'cash flow data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000039.SZ'].values, target_net_profit_000039, equal_nan=True))
if df_list[3]['600748.SH'].empty:
print(f'cash flow data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['600748.SH'].values, target_net_profit_600748, equal_nan=True))
if df_list[3]['000040.SZ'].empty:
print(f'cash flow data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000040.SZ'].values, target_net_profit_000040, equal_nan=True))
print('test get financial data, in single thread mode')
df_list = get_financial_report_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, parallel=0)
self.assertIsInstance(df_list, tuple)
self.assertEqual(len(df_list), 4)
self.assertEqual(len(df_list[0]), 3)
self.assertEqual(len(df_list[1]), 3)
self.assertEqual(len(df_list[2]), 3)
self.assertEqual(len(df_list[3]), 3)
        # Check that all returned items are of the correct type
self.assertTrue(all(isinstance(item, pd.DataFrame) for subdict in df_list for item in subdict.values()))
        # Check for empty data; empty frames may be returned due to network issues
self.assertFalse(all(item.empty for subdict in df_list for item in subdict.values()))
        # Check that each data group is correct and consistently ordered; skip the check if the data is empty
if df_list[0]['000039.SZ'].empty:
print(f'income data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000039.SZ'].values, target_basic_eps_000039))
if df_list[0]['600748.SH'].empty:
print(f'income data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['600748.SH'].values, target_basic_eps_600748))
if df_list[0]['000040.SZ'].empty:
print(f'income data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000040.SZ'].values, target_basic_eps_000040))
if df_list[1]['000039.SZ'].empty:
print(f'indicator data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000039.SZ'].values, target_eps_000039))
if df_list[1]['600748.SH'].empty:
print(f'indicator data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['600748.SH'].values, target_eps_600748))
if df_list[1]['000040.SZ'].empty:
print(f'indicator data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000040.SZ'].values, target_eps_000040))
if df_list[2]['000039.SZ'].empty:
print(f'balance data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000039.SZ'].values, target_total_share_000039))
if df_list[2]['600748.SH'].empty:
print(f'balance data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['600748.SH'].values, target_total_share_600748))
if df_list[2]['000040.SZ'].empty:
print(f'balance data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000040.SZ'].values, target_total_share_000040))
if df_list[3]['000039.SZ'].empty:
print(f'cash flow data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000039.SZ'].values, target_net_profit_000039, equal_nan=True))
if df_list[3]['600748.SH'].empty:
print(f'cash flow data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['600748.SH'].values, target_net_profit_600748, equal_nan=True))
if df_list[3]['000040.SZ'].empty:
print(f'cash flow data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000040.SZ'].values, target_net_profit_000040, equal_nan=True))
def test_get_composite_type_raw_data(self):
pass
class TestUtilityFuncs(unittest.TestCase):
def setUp(self):
pass
def test_time_string_format(self):
print('Testing qt.time_string_format() function:')
t = 3.14
self.assertEqual(time_str_format(t), '3s 140.0ms')
self.assertEqual(time_str_format(t, estimation=True), '3s ')
self.assertEqual(time_str_format(t, short_form=True), '3"140')
self.assertEqual(time_str_format(t, estimation=True, short_form=True), '3"')
t = 300.14
self.assertEqual(time_str_format(t), '5min 140.0ms')
self.assertEqual(time_str_format(t, estimation=True), '5min ')
self.assertEqual(time_str_format(t, short_form=True), "5'140")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "5'")
t = 7435.0014
self.assertEqual(time_str_format(t), '2hrs 3min 55s 1.4ms')
self.assertEqual(time_str_format(t, estimation=True), '2hrs ')
self.assertEqual(time_str_format(t, short_form=True), "2H3'55\"001")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "2H")
t = 88425.0509
self.assertEqual(time_str_format(t), '1days 33min 45s 50.9ms')
self.assertEqual(time_str_format(t, estimation=True), '1days ')
self.assertEqual(time_str_format(t, short_form=True), "1D33'45\"051")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "1D")
def test_str_to_list(self):
self.assertEqual(str_to_list('a,b,c,d,e'), ['a', 'b', 'c', 'd', 'e'])
self.assertEqual(str_to_list('a, b, c '), ['a', 'b', 'c'])
self.assertEqual(str_to_list('a, b: c', sep_char=':'), ['a,b', 'c'])
self.assertEqual(str_to_list('abc'), ['abc'])
self.assertEqual(str_to_list(''), [])
self.assertRaises(AssertionError, str_to_list, 123)
def test_list_or_slice(self):
str_dict = {'close': 0, 'open': 1, 'high': 2, 'low': 3}
self.assertEqual(list_or_slice(slice(1, 2, 1), str_dict), slice(1, 2, 1))
self.assertEqual(list_or_slice('open', str_dict), [1])
self.assertEqual(list(list_or_slice('close, high, low', str_dict)), [0, 2, 3])
self.assertEqual(list(list_or_slice('close:high', str_dict)), [0, 1, 2])
self.assertEqual(list(list_or_slice(['open'], str_dict)), [1])
self.assertEqual(list(list_or_slice(['open', 'high'], str_dict)), [1, 2])
self.assertEqual(list(list_or_slice(0, str_dict)), [0])
self.assertEqual(list(list_or_slice([0, 2], str_dict)), [0, 2])
self.assertEqual(list(list_or_slice([True, False, True, False], str_dict)), [0, 2])
def test_labels_to_dict(self):
target_list = [0, 1, 10, 100]
target_dict = {'close': 0, 'open': 1, 'high': 2, 'low': 3}
target_dict2 = {'close': 0, 'open': 2, 'high': 1, 'low': 3}
self.assertEqual(labels_to_dict('close, open, high, low', target_list), target_dict)
self.assertEqual(labels_to_dict(['close', 'open', 'high', 'low'], target_list), target_dict)
self.assertEqual(labels_to_dict('close, high, open, low', target_list), target_dict2)
self.assertEqual(labels_to_dict(['close', 'high', 'open', 'low'], target_list), target_dict2)
def test_input_to_list(self):
""" test util function input_to_list()"""
self.assertEqual(input_to_list(5, 3), [5, 5, 5])
self.assertEqual(input_to_list(5, 3, 0), [5, 5, 5])
self.assertEqual(input_to_list([5], 3, 0), [5, 0, 0])
self.assertEqual(input_to_list([5, 4], 3, 0), [5, 4, 0])
def test_regulate_date_format(self):
self.assertEqual(regulate_date_format('2019/11/06'), '20191106')
self.assertEqual(regulate_date_format('2019-11-06'), '20191106')
self.assertEqual(regulate_date_format('20191106'), '20191106')
self.assertEqual(regulate_date_format('191106'), '20061119')
self.assertEqual(regulate_date_format('830522'), '19830522')
self.assertEqual(regulate_date_format(datetime.datetime(2010, 3, 15)), '20100315')
self.assertEqual(regulate_date_format(pd.Timestamp('2010.03.15')), '20100315')
self.assertRaises(ValueError, regulate_date_format, 'abc')
self.assertRaises(ValueError, regulate_date_format, '2019/13/43')
def test_list_to_str_format(self):
self.assertEqual(list_to_str_format(['close', 'open', 'high', 'low']),
'close,open,high,low')
self.assertEqual(list_to_str_format(['letters', ' ', '123 4', 123, ' kk l']),
'letters,,1234,kkl')
self.assertEqual(list_to_str_format('a string input'),
'a,string,input')
self.assertEqual(list_to_str_format('already,a,good,string'),
'already,a,good,string')
self.assertRaises(AssertionError, list_to_str_format, 123)
def test_is_trade_day(self):
"""test if the funcion maybe_trade_day() and is_market_trade_day() works properly
"""
date_trade = '20210401'
date_holiday = '20210102'
date_weekend = '20210424'
date_seems_trade_day = '20210217'
date_too_early = '19890601'
date_too_late = '20230105'
date_christmas = '20201225'
self.assertTrue(maybe_trade_day(date_trade))
self.assertFalse(maybe_trade_day(date_holiday))
self.assertFalse(maybe_trade_day(date_weekend))
self.assertTrue(maybe_trade_day(date_seems_trade_day))
self.assertTrue(maybe_trade_day(date_too_early))
self.assertTrue(maybe_trade_day(date_too_late))
self.assertTrue(maybe_trade_day(date_christmas))
self.assertTrue(is_market_trade_day(date_trade))
self.assertFalse(is_market_trade_day(date_holiday))
self.assertFalse(is_market_trade_day(date_weekend))
self.assertFalse(is_market_trade_day(date_seems_trade_day))
self.assertFalse(is_market_trade_day(date_too_early))
self.assertFalse(is_market_trade_day(date_too_late))
self.assertTrue(is_market_trade_day(date_christmas))
self.assertFalse(is_market_trade_day(date_christmas, exchange='XHKG'))
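        # The checks above illustrate the difference between the two helpers: maybe_trade_day() is
        # a cheap heuristic and may return True for dates that are not actual trading days (dates
        # outside the calendar range, mid-week holidays), while is_market_trade_day() consults the
        # exchange trade calendar and accepts an exchange argument (e.g. 'XHKG' above).
        # The same behaviour is expected for datetime-like inputs, checked below.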
date_trade = pd.to_datetime('20210401')
date_holiday = pd.to_datetime('20210102')
date_weekend = pd.to_datetime('20210424')
self.assertTrue(maybe_trade_day(date_trade))
self.assertFalse(maybe_trade_day(date_holiday))
self.assertFalse(maybe_trade_day(date_weekend))
def test_weekday_name(self):
""" test util func weekday_name()"""
self.assertEqual(weekday_name(0), 'Monday')
self.assertEqual(weekday_name(1), 'Tuesday')
self.assertEqual(weekday_name(2), 'Wednesday')
self.assertEqual(weekday_name(3), 'Thursday')
self.assertEqual(weekday_name(4), 'Friday')
self.assertEqual(weekday_name(5), 'Saturday')
self.assertEqual(weekday_name(6), 'Sunday')
def test_list_truncate(self):
""" test util func list_truncate()"""
l = [1,2,3,4,5]
ls = list_truncate(l, 2)
self.assertEqual(ls[0], [1, 2])
self.assertEqual(ls[1], [3, 4])
self.assertEqual(ls[2], [5])
self.assertRaises(AssertionError, list_truncate, l, 0)
self.assertRaises(AssertionError, list_truncate, 12, 0)
self.assertRaises(AssertionError, list_truncate, 0, l)
def test_maybe_trade_day(self):
""" test util function maybe_trade_day()"""
self.assertTrue(maybe_trade_day('20220104'))
self.assertTrue(maybe_trade_day('2021-12-31'))
        self.assertTrue(maybe_trade_day(pd.to_datetime('2020/03/06')))
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
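    # The helper above splits the CSV into contiguous row ranges and reads them concurrently in a
    # ThreadPool. The sketch below (not a collected test; file name and sizes are hypothetical)
    # shows the intended round-trip usage against the single-threaded original.
    def _example_multithread_read(self, num_rows=10000, num_tasks=4):
        df = self.construct_dataframe(num_rows)
        with tm.ensure_clean('__example_threadpool_reader__.csv') as path:
            df.to_csv(path)
            reread = self.generate_multithread_dataframe(path, num_rows, num_tasks)
            tm.assert_frame_equal(df, reread)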
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
        tm.assert_frame_equal(df, expected)
import os
import glob
import json
import logging
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from core.utils import Directories
from core.viz import plot_class_dist
class DataHandling(object):
def __init__(self):
pass
def drop_unique_cols(self, train, test, add_cols: list):
'''
Drops unique columns as a part of data-preprocessing; as they won't make any difference in model predictions.
Unique columns are automatically decided based on the logic below.
:param train: train set
:param test: test set
:param add_cols: any additional columns which need to be dropped. Needs to passed a an entry in list object
:return:
'''
df = pd.concat([train, test])
unique_cols = [col for col in df.columns if len(df[col].unique()) == 1]
df = df.drop(unique_cols, axis=1)
logging.info('**Dropped {} columns with unique values: {}'.format(len(unique_cols), ' '.join(unique_cols)))
print('**Dropped {} columns with unique values: {}'.format(len(unique_cols), ' '.join(unique_cols)))
df = df.drop(add_cols, axis=1)
logging.info('**Dropped {} additional columns: {}'.format(len(add_cols), ', '.join(add_cols)))
print('**Dropped {} additional columns: {}'.format(len(add_cols), ', '.join(add_cols)))
train, test = df.iloc[:len(train)], df.iloc[len(train):]
return train, test
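    # Usage sketch for drop_unique_cols() (hypothetical frames, illustrative only):
    #     train = pd.DataFrame({'proto': ['tcp', 'tcp'], 'bytes': [10, 20]})
    #     test = pd.DataFrame({'proto': ['tcp', 'tcp'], 'bytes': [30, 40]})
    #     train, test = DataHandling().drop_unique_cols(train, test, add_cols=[])
    #     # 'proto' is dropped because it holds a single unique value across the combined frames.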
def rename_class_labels(self, df, dataset: str):
'''
Preprocessing of class labels; some labels are binned under a common cluster of labels.
        For CICDDoS, the train & test sets had different naming conventions for the labels.
:param df: whole dataset combined: pandas DataFrame()
:param dataset: name of the dataset
:return: clean dataframe
'''
if dataset == 'CICDDoS':
print('Class labels renamed for CICDDoS')
df.Label = df.Label.replace('DrDoS_DNS', 'DNS')
df.Label = df.Label.replace('DrDoS_LDAP', 'LDAP')
df.Label = df.Label.replace('DrDoS_MSSQL', 'MSSQL')
df.Label = df.Label.replace('DrDoS_NTP', 'NTP')
df.Label = df.Label.replace('DrDoS_NetBIOS', 'NetBIOS')
df.Label = df.Label.replace('DrDoS_SNMP', 'SNMP')
df.Label = df.Label.replace('DrDoS_SSDP', 'SSDP')
df.Label = df.Label.replace('DrDoS_UDP', 'UDP')
df.Label = df.Label.replace('UDP-lag', 'UDPLag')
elif dataset == 'CICIDS':
df.Label = df.Label.replace('DoS Hulk', 'DoS')
df.Label = df.Label.replace('DoS GoldenEye', 'DoS')
df.Label = df.Label.replace('DoS slowloris', 'DoS')
df.Label = df.Label.replace('DoS Slowhttptest', 'DoS')
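            # Note: the '�' in the three 'Web Attack' labels below is deliberate; it appears to be
            # how the non-UTF-8 dash in the raw CICIDS2017 CSVs is decoded, so "fixing" the
            # character here would make the replace() calls miss those rows.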
df.Label = df.Label.replace('Web Attack � Brute Force', 'Web Attack')
df.Label = df.Label.replace('Web Attack � XSS', 'Web Attack')
df.Label = df.Label.replace('Web Attack � Sql Injection', 'Web Attack')
df.Label = df.Label.replace('Heartbleed', 'Others')
df.Label = df.Label.replace('Infiltration', 'Others')
df.Label = df.Label.replace('FTP-Patator', 'Patator')
df.Label = df.Label.replace('SSH-Patator', 'Patator')
return df
class FeatureEngineering(Directories):
def __init__(self, config):
super().__init__(config)
self.dhObj = DataHandling()
def _sample_cicddos(self, path: str, tr_samples: int, ts_samples: int):
'''
In CICDDoS undersampling was required as count of benign samples are limited to ~110K.
Equal number of samples for all other labels are picked from both sets.
:param path: directory of the raw files.
:param tr_samples: count of samples from training set.
:param ts_samples: count of samples from test set.
:return: None; saves the samples as individual <label_name>.csv files.
'''
logging.info('+++++ Sampling CICDDoS2019 dataset +++++')
print('+++++ Sampling CICDDoS2019 dataset +++++')
def helper_function(data, label: str, n_samples: int):
# Clean data for CICDDoS2019 by removing negative entries.
data.replace([np.inf, -np.inf], np.nan, inplace=True)
data.dropna(inplace=True)
data = data[data[' Fwd Header Length'] >= 0]
data = data[data[' Bwd Header Length'] >= 0]
data = data[data[' min_seg_size_forward'] >= 0]
if len(data) < n_samples: # consider all examples of this label
return data
else:
if label == 'Benign': # consider all benign samples as they are less
return data
else:
data = data.sample(n=n_samples).reset_index(drop=True)
return data
samples_path = os.path.join(path, 'samples')
if not os.path.exists(samples_path): # Create sample directory the first time
os.makedirs(samples_path)
os.makedirs(os.path.join(samples_path, 'train'))
os.makedirs(os.path.join(samples_path, 'test'))
logging.info('***** CICDDoS samples directory created for the first time *****')
# Consider class labels present in both datasets
        class_labels = ['Benign', 'LDAP', 'MSSQL', 'NetBIOS', 'Syn', 'UDPLag', 'UDP']
for label in class_labels:
print('Sampling on label: {}...'.format(label))
filename = str(label) + '.csv'
train = pd.read_csv(os.path.join(path, 'training', filename))
test = pd.read_csv(os.path.join(path, 'testing', filename))
train = helper_function(train, label, tr_samples) # data cleaning & sampling
test = helper_function(test, label, ts_samples)
train.to_csv(os.path.join(samples_path, 'train', filename), index=False)
test.to_csv(os.path.join(samples_path, 'test', filename), index=False)
logging.info(f'***** {label} sampled in both directories\tTotal length: {len(train)} | {len(test)} *****')
print(f'***** {label} sampled in both directories\tTotal length: {len(train)} | {len(test)} *****')
def _nsl_kdd(self, path, add_cols):
cols = pd.read_csv(os.path.join(path, 'Field Names.csv'), header=None)
cols = cols.append([['label', 'Symbolic'], ['difficulty', 'Symbolic']])
cols.columns = ['Name', 'Type']
train = pd.read_csv(os.path.join(path, 'KDDTrain+.csv'), header=None)
train.columns = cols['Name'].T
test = pd.read_csv(os.path.join(path, 'KDDTest+.csv'), header=None)
test.columns = cols['Name'].T
with open(os.path.join(path, 'label_map.json'), 'r') as file:
label_map = json.load(file)
train['Label'] = train.label.map(label_map)
test['Label'] = test.label.map(label_map)
train, test = self.dhObj.drop_unique_cols(train, test, add_cols)
return train, test
def _cicddos(self, path: str, sample_data_flag: bool, add_cols: list):
'''
Reads CICDDoS dataset and performs basic preprocessing steps.
:param path: raw dataset directory path
:param sample_data_flag: if True samples from the raw dataset in defined ratios
:param add_cols: additional columns to be dropped [dataset specific]
:return: train and test sets.
'''
def helper_function(pth):
df = pd.DataFrame() # holds records from all individual files as [train, test]
for fPath in glob.glob(pth):
subset = pd.read_csv(fPath)
df = df.append(subset)
column_names = [c.replace(' ', '') for c in df.columns]
df.columns = column_names
df = self.dhObj.rename_class_labels(df, 'CICDDoS')
return df
if sample_data_flag:
self._sample_cicddos(path, tr_samples=60000, ts_samples=60000)
tr_path = os.path.join(path, 'samples', 'train', '*csv')
ts_path = os.path.join(path, 'samples', 'test', '*csv')
train = helper_function(tr_path)
test = helper_function(ts_path)
train, test = self.dhObj.drop_unique_cols(train, test, add_cols)
return train, test
def _cicids(self, path: str, add_cols: list):
'''
Reads CICIDS dataset and performs basic preprocessing steps.
:param path: raw dataset directory path
:param add_cols: additional columns to be dropped [dataset specific]
:return: train and test sets.
'''
def helper_function(path):
df = pd.DataFrame()
for root, dirs, files in os.walk(path):
for file in files:
if file.endswith(".csv"):
print(f'Reading {file}...')
subset = pd.read_csv(os.path.join(root, file))
df = pd.concat([df, subset], ignore_index=True)
column_names = [c.replace(' ', '') for c in df.columns]
df.columns = column_names
df = self.dhObj.rename_class_labels(df, 'CICIDS')
return df
df = helper_function(path)
Y = df.pop('Label').to_frame()
X = df
x_train, x_test, y_train, y_test = train_test_split(X, Y, stratify=Y, test_size=0.2)
        train = pd.concat([x_train, y_train], axis=1)
        test = pd.concat([x_test, y_test], axis=1)  # assumed to mirror _cicddos, which returns both splits
        return train, test
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal
import pytest
from ber_public.deap import dim
from ber_public.deap import fab
from ber_public.deap import vent
def test_calculate_fabric_heat_loss():
"""Output is equivalent to DEAP 4.2.0 example A"""
floor_area = pd.Series([63])
roof_area = pd.Series([63])
wall_area = pd.Series([85.7])
window_area = pd.Series([29.6])
door_area = pd.Series([1.85])
floor_uvalue = pd.Series([0.14])
roof_uvalue = pd.Series([0.11])
wall_uvalue = pd.Series([0.13])
window_uvalue = pd.Series([0.87])
door_uvalue = pd.Series([1.5])
thermal_bridging_factor = pd.Series([0.05])
expected_output = pd.Series([68], dtype="int64")
output = fab.calculate_fabric_heat_loss(
roof_area=roof_area,
roof_uvalue=roof_uvalue,
wall_area=wall_area,
wall_uvalue=wall_uvalue,
floor_area=floor_area,
floor_uvalue=floor_uvalue,
window_area=window_area,
window_uvalue=window_uvalue,
door_area=door_area,
door_uvalue=door_uvalue,
thermal_bridging_factor=thermal_bridging_factor,
)
rounded_output = output.round().astype("int64")
assert_series_equal(rounded_output, expected_output)
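# Worked check of the expected value above, assuming the usual DEAP fabric heat-loss form
# heat_loss = sum(U_i * A_i) + thermal_bridging_factor * sum(A_i):
#   sum(U_i * A_i) = 63*0.14 + 63*0.11 + 85.7*0.13 + 29.6*0.87 + 1.85*1.5 ~ 55.42 W/K
#   bridging       = 0.05 * (63 + 63 + 85.7 + 29.6 + 1.85)                ~ 12.16 W/K
#   total          ~ 67.58 W/K, which rounds to the expected 68.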
def test_calculate_heat_loss_parameter():
"""Output is equivalent to DEAP 4.2.0 example A"""
    fabric_heat_loss = pd.Series([0.5])
import numpy as np
import pandas as pd
import re
import glob
from flask import Flask, request, render_template, url_for
from flask_cors import CORS
from werkzeug.utils import secure_filename
import os
import logging
logging.basicConfig(level=logging.INFO)
import tensorflow as tf
import silence_tensorflow.auto # pylint: disable=unused-import
#physical_devices = tf.config.experimental.list_physical_devices('GPU')
#tf.config.experimental.set_memory_growth(physical_devices[0], True)
from tensorflow.keras.applications import VGG19
from tensorflow.keras.layers import Input
from tensorflow.keras.preprocessing import sequence
from models.arch import build_model
from models.layers import ContextVector, PhraseLevelFeatures, AttentionMaps
from utils.load_pickles import tok, labelencoder
from utils.helper_functions import image_feature_extractor, process_sentence, predict_answers
max_answers = 1000
max_seq_len = 22
vocab_size = len(tok.word_index) + 1
dim_d = 512
dim_k = 256
l_rate = 1e-4
d_rate = 0.5
reg_value = 0.01
MODEL_PATH = 'pickles/complete_model.h5'
IMAGE_PATH = 'static'
custom_objects = {
'PhraseLevelFeatures': PhraseLevelFeatures,
'AttentionMaps': AttentionMaps,
'ContextVector': ContextVector
}
# load the model
model = tf.keras.models.load_model(MODEL_PATH, custom_objects=custom_objects)
vgg_model = VGG19(weights="imagenet", include_top=False)
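# custom_objects maps the saved layer names back to their Python classes so Keras can rebuild the
# serialized graph containing the custom attention layers; VGG19 with include_top=False is assumed
# to serve as the convolutional feature extractor whose feature maps feed those layers.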
# Create Flask application
app = Flask(__name__, static_url_path='/static')
CORS(app)
@app.route('/')
def home():
return render_template('index.html')
@app.route('/', methods=['POST'])
def predict():
try:
# delete images uploaded in previous session
files = glob.glob(IMAGE_PATH+'/*')
for f in files:
os.remove(f)
#0 --- Get the image file and question
f = request.files['image_file']
fname = secure_filename(f.filename)
f.save(IMAGE_PATH +'/'+ fname)
question = request.form["question"]
#1 --- Extract image features
img_feat = image_feature_extractor(IMAGE_PATH +'/'+ fname, vgg_model)
#2 --- Clean the question
        questions_processed = pd.Series(question)
"""Problems module for mathematical optimization and simulation problem type definitions."""
import itertools
from multimethod import multimethod
import numpy as np
import pandas as pd
import tqdm
import typing
import mesmo.config
import mesmo.data_interface
import mesmo.der_models
import mesmo.electric_grid_models
import mesmo.solutions
import mesmo.thermal_grid_models
import mesmo.utils
logger = mesmo.config.get_logger(__name__)
class Results(
mesmo.electric_grid_models.ElectricGridOperationResults,
mesmo.thermal_grid_models.ThermalGridOperationResults,
mesmo.der_models.DERModelSetOperationResults,
mesmo.electric_grid_models.ElectricGridDLMPResults,
mesmo.thermal_grid_models.ThermalGridDLMPResults,
):
"""Results object, which serves as data object to hold structured results variables from solved problems."""
price_data: mesmo.data_interface.PriceData
class ResultsDict(typing.Dict[str, Results]):
"""Results dictionary, which serves as collection object for labelled results objects."""
class ProblemBase(mesmo.utils.ObjectBase):
"""Problem base object, which serves as abstract base class for problem objects."""
def solve(self):
raise NotImplementedError
def get_results(self) -> Results:
raise NotImplementedError
class ProblemDict(typing.Dict[str, ProblemBase]):
"""Problem dictionary, which serves as collection object for labelled problem objects."""
def solve(self):
"""Solve all problems within this `ProblemDict`."""
# Loop over problems with tqdm to show progress bar.
mesmo.utils.log_time("solve problems", logger_object=logger)
for problem in tqdm.tqdm(
self.values(),
total=len(self),
disable=(mesmo.config.config["logs"]["level"] != "debug"), # Progress bar only shown in debug mode.
):
# Solve individual problem.
problem.solve()
mesmo.utils.log_time("solve problems", logger_object=logger)
def get_results(self) -> ResultsDict:
"""Get results for all problems within this `ProblemDict`."""
# Instantiate results dict.
results_dict = ResultsDict({label: Results() for label in self.keys()})
# Loop over problems with tqdm to show progress bar.
mesmo.utils.log_time("get results", logger_object=logger)
for label, problem in tqdm.tqdm(
self.items(),
total=len(self),
disable=(mesmo.config.config["logs"]["level"] != "debug"), # Progress bar only shown in debug mode.
):
# Get individual results.
results_dict[label] = problem.get_results()
mesmo.utils.log_time("get results", logger_object=logger)
return results_dict
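# Usage sketch for ProblemDict (labels and scenario name are hypothetical, illustrative only):
#     problems = ProblemDict({
#         "nominal": NominalOperationProblem("example_scenario"),
#         "optimal": OptimalOperationProblem("example_scenario"),
#     })
#     problems.solve()
#     results_dict = problems.get_results()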
class NominalOperationProblem(ProblemBase):
"""Nominal operation problem object, consisting of the corresponding electric / thermal grid models,
reference power flow solutions and DER model set for the given scenario.
- The nominal operation problem (alias: simulation problem, power flow problem)
represents the simulation problem of the DERs and grids considering the nominal operation schedule for all DERs.
- The problem formulation is able to consider combined as well as individual operation of
thermal and electric grids.
"""
scenario_name: str
timesteps: pd.Index
price_data: mesmo.data_interface.PriceData
electric_grid_model: mesmo.electric_grid_models.ElectricGridModel = None
thermal_grid_model: mesmo.thermal_grid_models.ThermalGridModel = None
der_model_set: mesmo.der_models.DERModelSet
results: Results
@multimethod
def __init__(
self,
scenario_name: str,
electric_grid_model: mesmo.electric_grid_models.ElectricGridModel = None,
thermal_grid_model: mesmo.thermal_grid_models.ThermalGridModel = None,
der_model_set: mesmo.der_models.DERModelSet = None,
):
# Obtain data.
scenario_data = mesmo.data_interface.ScenarioData(scenario_name)
self.price_data = mesmo.data_interface.PriceData(scenario_name)
# Store timesteps.
self.timesteps = scenario_data.timesteps
# Obtain electric grid model, power flow solution and linear model, if defined.
if pd.notnull(scenario_data.scenario.at["electric_grid_name"]):
if electric_grid_model is not None:
self.electric_grid_model = electric_grid_model
else:
mesmo.utils.log_time("electric grid model instantiation")
self.electric_grid_model = mesmo.electric_grid_models.ElectricGridModel(scenario_name)
mesmo.utils.log_time("electric grid model instantiation")
# Obtain thermal grid model, power flow solution and linear model, if defined.
if pd.notnull(scenario_data.scenario.at["thermal_grid_name"]):
if thermal_grid_model is not None:
self.thermal_grid_model = thermal_grid_model
else:
mesmo.utils.log_time("thermal grid model instantiation")
self.thermal_grid_model = mesmo.thermal_grid_models.ThermalGridModel(scenario_name)
mesmo.utils.log_time("thermal grid model instantiation")
# Obtain DER model set.
if der_model_set is not None:
self.der_model_set = der_model_set
else:
mesmo.utils.log_time("DER model instantiation")
self.der_model_set = mesmo.der_models.DERModelSet(scenario_name)
mesmo.utils.log_time("DER model instantiation")
def solve(self):
# Instantiate results variables.
if self.electric_grid_model is not None:
der_power_vector = pd.DataFrame(columns=self.electric_grid_model.ders, index=self.timesteps, dtype=complex)
node_voltage_vector = pd.DataFrame(
columns=self.electric_grid_model.nodes, index=self.timesteps, dtype=complex
)
branch_power_vector_1 = pd.DataFrame(
columns=self.electric_grid_model.branches, index=self.timesteps, dtype=complex
)
branch_power_vector_2 = pd.DataFrame(
columns=self.electric_grid_model.branches, index=self.timesteps, dtype=complex
)
loss = pd.DataFrame(columns=["total"], index=self.timesteps, dtype=complex)
if self.thermal_grid_model is not None:
der_thermal_power_vector = pd.DataFrame(
columns=self.thermal_grid_model.ders, index=self.timesteps, dtype=float
)
node_head_vector = pd.DataFrame(columns=self.thermal_grid_model.nodes, index=self.timesteps, dtype=float)
branch_flow_vector = pd.DataFrame(
columns=self.thermal_grid_model.branches, index=self.timesteps, dtype=float
)
pump_power = pd.DataFrame(columns=["total"], index=self.timesteps, dtype=float)
# Obtain nominal DER power vector.
if self.electric_grid_model is not None:
for der in self.electric_grid_model.ders:
# TODO: Use ders instead of der_names for der_models index.
der_name = der[1]
der_power_vector.loc[:, der] = self.der_model_set.der_models[
der_name
].active_power_nominal_timeseries + (
1.0j * self.der_model_set.der_models[der_name].reactive_power_nominal_timeseries
)
if self.thermal_grid_model is not None:
            for der in self.thermal_grid_model.ders:
der_name = der[1]
der_thermal_power_vector.loc[:, der] = self.der_model_set.der_models[
der_name
].thermal_power_nominal_timeseries
# Solve power flow.
mesmo.utils.log_time("power flow solution")
if self.electric_grid_model is not None:
power_flow_solutions = mesmo.utils.starmap(
mesmo.electric_grid_models.PowerFlowSolutionFixedPoint,
zip(itertools.repeat(self.electric_grid_model), der_power_vector.values),
)
power_flow_solutions = dict(zip(self.timesteps, power_flow_solutions))
if self.thermal_grid_model is not None:
thermal_power_flow_solutions = mesmo.utils.starmap(
mesmo.thermal_grid_models.ThermalPowerFlowSolution,
[(self.thermal_grid_model, row) for row in der_thermal_power_vector.values],
)
thermal_power_flow_solutions = dict(zip(self.timesteps, thermal_power_flow_solutions))
mesmo.utils.log_time("power flow solution")
# Obtain results.
if self.electric_grid_model is not None:
for timestep in self.timesteps:
power_flow_solution = power_flow_solutions[timestep]
# TODO: Flatten power flow solution arrays.
node_voltage_vector.loc[timestep, :] = power_flow_solution.node_voltage_vector
branch_power_vector_1.loc[timestep, :] = power_flow_solution.branch_power_vector_1
branch_power_vector_2.loc[timestep, :] = power_flow_solution.branch_power_vector_2
loss.loc[timestep, :] = power_flow_solution.loss
der_active_power_vector = der_power_vector.apply(np.real)
der_reactive_power_vector = der_power_vector.apply(np.imag)
node_voltage_magnitude_vector = np.abs(node_voltage_vector)
node_voltage_angle_vector = np.angle(node_voltage_vector)
branch_power_magnitude_vector_1 = np.abs(branch_power_vector_1)
branch_active_power_vector_1 = np.real(branch_power_vector_1)
branch_reactive_power_vector_1 = np.imag(branch_power_vector_1)
branch_power_magnitude_vector_2 = np.abs(branch_power_vector_2)
branch_active_power_vector_2 = np.real(branch_power_vector_2)
branch_reactive_power_vector_2 = np.imag(branch_power_vector_2)
loss_active = loss.apply(np.real)
loss_reactive = loss.apply(np.imag)
if self.thermal_grid_model is not None:
for timestep in self.timesteps:
thermal_power_flow_solution = thermal_power_flow_solutions[timestep]
node_head_vector.loc[timestep, :] = thermal_power_flow_solution.node_head_vector
branch_flow_vector.loc[timestep, :] = thermal_power_flow_solution.branch_flow_vector
pump_power.loc[timestep, :] = thermal_power_flow_solution.pump_power
# Obtain per-unit values.
if self.electric_grid_model is not None:
der_active_power_vector_per_unit = der_active_power_vector * mesmo.utils.get_inverse_with_zeros(
np.real(self.electric_grid_model.der_power_vector_reference)
)
der_reactive_power_vector_per_unit = der_reactive_power_vector * mesmo.utils.get_inverse_with_zeros(
np.imag(self.electric_grid_model.der_power_vector_reference)
)
node_voltage_magnitude_vector_per_unit = node_voltage_magnitude_vector * mesmo.utils.get_inverse_with_zeros(
np.abs(self.electric_grid_model.node_voltage_vector_reference)
)
branch_power_magnitude_vector_1_per_unit = (
branch_power_magnitude_vector_1
* mesmo.utils.get_inverse_with_zeros(self.electric_grid_model.branch_power_vector_magnitude_reference)
)
branch_active_power_vector_1_per_unit = branch_active_power_vector_1 * mesmo.utils.get_inverse_with_zeros(
self.electric_grid_model.branch_power_vector_magnitude_reference
)
branch_reactive_power_vector_1_per_unit = (
branch_reactive_power_vector_1
* mesmo.utils.get_inverse_with_zeros(self.electric_grid_model.branch_power_vector_magnitude_reference)
)
branch_power_magnitude_vector_2_per_unit = (
branch_power_magnitude_vector_2
* mesmo.utils.get_inverse_with_zeros(self.electric_grid_model.branch_power_vector_magnitude_reference)
)
branch_active_power_vector_2_per_unit = branch_active_power_vector_2 * mesmo.utils.get_inverse_with_zeros(
self.electric_grid_model.branch_power_vector_magnitude_reference
)
branch_reactive_power_vector_2_per_unit = (
branch_reactive_power_vector_2
* mesmo.utils.get_inverse_with_zeros(self.electric_grid_model.branch_power_vector_magnitude_reference)
)
if self.thermal_grid_model is not None:
der_thermal_power_vector_per_unit = der_thermal_power_vector * mesmo.utils.get_inverse_with_zeros(
self.thermal_grid_model.der_thermal_power_vector_reference
)
node_head_vector_per_unit = node_head_vector * mesmo.utils.get_inverse_with_zeros(
self.thermal_grid_model.node_head_vector_reference
)
branch_flow_vector_per_unit = branch_flow_vector * mesmo.utils.get_inverse_with_zeros(
self.thermal_grid_model.branch_flow_vector_reference
)
# Store results.
self.results = Results(price_data=self.price_data, der_model_set=self.der_model_set)
if self.electric_grid_model is not None:
self.results.update(
Results(
electric_grid_model=self.electric_grid_model,
der_active_power_vector=der_active_power_vector,
der_active_power_vector_per_unit=der_active_power_vector_per_unit,
der_reactive_power_vector=der_reactive_power_vector,
der_reactive_power_vector_per_unit=der_reactive_power_vector_per_unit,
node_voltage_magnitude_vector=node_voltage_magnitude_vector,
node_voltage_magnitude_vector_per_unit=node_voltage_magnitude_vector_per_unit,
node_voltage_angle_vector=node_voltage_angle_vector,
branch_power_magnitude_vector_1=branch_power_magnitude_vector_1,
branch_power_magnitude_vector_1_per_unit=branch_power_magnitude_vector_1_per_unit,
branch_active_power_vector_1=branch_active_power_vector_1,
branch_active_power_vector_1_per_unit=branch_active_power_vector_1_per_unit,
branch_reactive_power_vector_1=branch_reactive_power_vector_1,
branch_reactive_power_vector_1_per_unit=branch_reactive_power_vector_1_per_unit,
branch_power_magnitude_vector_2=branch_power_magnitude_vector_2,
branch_power_magnitude_vector_2_per_unit=branch_power_magnitude_vector_2_per_unit,
branch_active_power_vector_2=branch_active_power_vector_2,
branch_active_power_vector_2_per_unit=branch_active_power_vector_2_per_unit,
branch_reactive_power_vector_2=branch_reactive_power_vector_2,
branch_reactive_power_vector_2_per_unit=branch_reactive_power_vector_2_per_unit,
loss_active=loss_active,
loss_reactive=loss_reactive,
)
)
if self.thermal_grid_model is not None:
self.results.update(
Results(
thermal_grid_model=self.thermal_grid_model,
der_thermal_power_vector=der_thermal_power_vector,
der_thermal_power_vector_per_unit=der_thermal_power_vector_per_unit,
node_head_vector=node_head_vector,
node_head_vector_per_unit=node_head_vector_per_unit,
branch_flow_vector=branch_flow_vector,
branch_flow_vector_per_unit=branch_flow_vector_per_unit,
pump_power=pump_power,
)
)
def get_results(self):
return self.results
class OptimalOperationProblem(ProblemBase):
"""Optimal operation problem object, consisting of an optimization problem as well as the corresponding
electric / thermal grid models, reference power flow solutions, linear grid models and DER model set
for the given scenario.
- The optimal operation problem (alias: optimal dispatch problem, optimal power flow problem)
formulates the optimization problem for minimizing the objective functions of DERs and grid operators
subject to the model constraints of all DERs and grids.
- The problem formulation is able to consider combined as well as individual operation of
thermal and electric grids.
Keyword Arguments:
solve_method (str): Solve method for the optimization problem. If `None` or 'default', it will use the default
method of solving a single-shot optimization using the global approximation method. If 'trust_region', it
will solve iteratively via trust-region method using the local approximation method.
Choices: 'default', 'trust_region', `None`. Default: `None`.
"""
solve_method: str
scenario_name: str
scenario_data: mesmo.data_interface.ScenarioData
timesteps: pd.Index
price_data: mesmo.data_interface.PriceData
electric_grid_model: mesmo.electric_grid_models.ElectricGridModel = None
power_flow_solution_reference: mesmo.electric_grid_models.PowerFlowSolutionBase = None
linear_electric_grid_model_set: mesmo.electric_grid_models.LinearElectricGridModelSet = None
thermal_grid_model: mesmo.thermal_grid_models.ThermalGridModel = None
thermal_power_flow_solution_reference: mesmo.thermal_grid_models.ThermalPowerFlowSolution = None
linear_thermal_grid_model_set: mesmo.thermal_grid_models.LinearThermalGridModelSet = None
der_model_set: mesmo.der_models.DERModelSet
optimization_problem: mesmo.solutions.OptimizationProblem
results: Results
@multimethod
def __init__(
self,
scenario_name: str,
electric_grid_model: mesmo.electric_grid_models.ElectricGridModel = None,
thermal_grid_model: mesmo.thermal_grid_models.ThermalGridModel = None,
der_model_set: mesmo.der_models.DERModelSet = None,
solve_method: str = None,
):
# Obtain solve method.
if solve_method in [None, "default"]:
self.solve_method = "default"
elif solve_method == "trust_region":
self.solve_method = "trust_region"
else:
raise ValueError(f"Unknown solve method for optimal operation problem: {solve_method}")
# Obtain and store data.
self.scenario_name = scenario_name
self.scenario_data = mesmo.data_interface.ScenarioData(scenario_name)
self.timesteps = self.scenario_data.timesteps
self.price_data = mesmo.data_interface.PriceData(scenario_name)
# Obtain electric grid model, power flow solution and linear model, if defined.
if pd.notnull(self.scenario_data.scenario.at["electric_grid_name"]):
mesmo.utils.log_time("electric grid model instantiation")
if electric_grid_model is not None:
self.electric_grid_model = electric_grid_model
else:
self.electric_grid_model = mesmo.electric_grid_models.ElectricGridModel(scenario_name)
self.power_flow_solution_reference = mesmo.electric_grid_models.PowerFlowSolutionFixedPoint(
self.electric_grid_model
)
self.linear_electric_grid_model_set = mesmo.electric_grid_models.LinearElectricGridModelSet(
self.electric_grid_model,
self.power_flow_solution_reference,
linear_electric_grid_model_method=mesmo.electric_grid_models.LinearElectricGridModelGlobal,
)
mesmo.utils.log_time("electric grid model instantiation")
# Obtain thermal grid model, power flow solution and linear model, if defined.
if | pd.notnull(self.scenario_data.scenario.at["thermal_grid_name"]) | pandas.notnull |
import pytest
import inspect
try:
import pandas as pd
import test_aide.pandas as ph
has_pandas = True
except ModuleNotFoundError:
has_pandas = False
@pytest.mark.skipif(not has_pandas, reason="pandas not installed")
def test_arguments():
"""Test arguments for arguments of test_aide.pandas._check_dfs_passed."""
expected_arguments = ["df_1", "df_2"]
arg_spec = inspect.getfullargspec(ph._check_dfs_passed)
arguments = arg_spec.args
assert len(expected_arguments) == len(
arguments
), f"Incorrect number of arguments -\n Expected: {len(expected_arguments)}\n Actual: {len(arguments)}"
assert (
expected_arguments == arguments
), f"Incorrect arguments -\n Expected: {expected_arguments}\n Actual: {arguments}"
default_values = arg_spec.defaults
assert (
default_values is None
), f"Unexpected default values -\n Expected: None\n Actual: {default_values}"
@pytest.mark.skipif(not has_pandas, reason="pandas not installed")
def test_exceptions_raised():
"""Test that the expected exceptions are raised by test_aide.pandas._check_dfs_passed."""
with pytest.raises(
TypeError, match=r"expecting first positional arg to be a pd.DataFrame.*"
):
ph._check_dfs_passed(1, pd.DataFrame())
with pytest.raises(
TypeError, match=r"expecting second positional arg to be a pd.DataFrame.*"
):
ph._check_dfs_passed(pd.DataFrame(), 1)
with pytest.raises(
ValueError,
match=r"expecting first positional arg and second positional arg to have equal number of rows but got\n 1\n 0",
):
ph._check_dfs_passed(pd.DataFrame({"a": 1}, index=[0]), | pd.DataFrame() | pandas.DataFrame |
import sys,os
#os.chdir("/Users/utkarshvirendranigam/Desktop/Homework/Project")
# required_packages=["PyQt5","re", "scipy","itertools","random","matplotlib","pandas","numpy","sklearn","pydotplus","collections","warnings","seaborn"]
#print(os.getcwd())
# for my_package in required_packages:
# try:
# command_string="conda install "+ my_package+ " --yes"
# os.system(command_string)
# except:
# count=1
from PyQt5.QtWidgets import (QMainWindow, QApplication, QWidget, QPushButton, QAction, QComboBox, QLabel,
QGridLayout, QCheckBox, QGroupBox, QVBoxLayout, QHBoxLayout, QLineEdit, QPlainTextEdit)
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot, QRect
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtCore import Qt
# from scipy import interp
from itertools import cycle, combinations
import random
from PyQt5.QtWidgets import QDialog, QVBoxLayout, QSizePolicy, QFormLayout, QRadioButton, QScrollArea, QMessageBox
from PyQt5.QtGui import QPixmap
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
import pandas as pd
import numpy as np
import pickle
from numpy.polynomial.polynomial import polyfit
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.compose import make_column_transformer
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve, auc, log_loss, brier_score_loss
from sklearn.calibration import calibration_curve
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import feature_selection
from sklearn import metrics
from sklearn.preprocessing import label_binarize
from sklearn.model_selection import cross_val_predict
# Libraries to display decision tree
from pydotplus import graph_from_dot_data
import collections
from sklearn.tree import export_graphviz
import webbrowser
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
from Preprocessing import PreProcessing
import random
import seaborn as sns
#%%-----------------------------------------------------------------------
import os
os.environ["PATH"] += os.pathsep + 'C:\\Program Files (x86)\\graphviz-2.38\\release\\bin'
#%%-----------------------------------------------------------------------
#::--------------------------------
# Default font size for all the windows
#::--------------------------------
font_size_window = 'font-size:18px'
class DecisionTree(QMainWindow):
#::--------------------------------------------------------------------------------
# Implementation of Decision Tree Classifier using the happiness dataset
# the methods in this class are
# _init_ : initialize the class
# initUi : creates the canvas and all the elements in the canvas
# update : populates the elements of the canvas based on the parameters
# chosen by the user
#::---------------------------------------------------------------------------------
send_fig = pyqtSignal(str)
def __init__(self):
super(DecisionTree, self).__init__()
self.Title = "Decision Tree Classifier"
self.initUi()
def initUi(self):
#::-----------------------------------------------------------------
# Create the canvas and all the elements to create a dashboard with
# all the necessary elements to present the results from the algorithm
# The canvas is divided using a grid layout to facilitate the drawing
# of the elements
#::-----------------------------------------------------------------
self.setWindowTitle(self.Title)
self.setStyleSheet(font_size_window)
self.main_widget = QWidget(self)
self.layout = QGridLayout(self.main_widget)
self.groupBox1 = QGroupBox('Decision Tree Features')
self.groupBox1Layout= QGridLayout()
self.groupBox1.setLayout(self.groupBox1Layout)
self.feature0 = QCheckBox(features_list[0],self)
self.feature1 = QCheckBox(features_list[1],self)
self.feature2 = QCheckBox(features_list[2], self)
self.feature3 = QCheckBox(features_list[3], self)
self.feature4 = QCheckBox(features_list[4],self)
self.feature5 = QCheckBox(features_list[5],self)
self.feature6 = QCheckBox(features_list[6], self)
self.feature7 = QCheckBox(features_list[7], self)
self.feature8 = QCheckBox(features_list[8], self)
self.feature9 = QCheckBox(features_list[9], self)
self.feature10 = QCheckBox(features_list[10], self)
self.feature11 = QCheckBox(features_list[11], self)
self.feature12 = QCheckBox(features_list[12], self)
self.feature13 = QCheckBox(features_list[13], self)
self.feature14 = QCheckBox(features_list[14], self)
self.feature15 = QCheckBox(features_list[15], self)
self.feature16 = QCheckBox(features_list[16], self)
self.feature17 = QCheckBox(features_list[17], self)
self.feature18 = QCheckBox(features_list[18], self)
self.feature19 = QCheckBox(features_list[19], self)
self.feature20 = QCheckBox(features_list[20], self)
self.feature21 = QCheckBox(features_list[21], self)
self.feature22 = QCheckBox(features_list[22], self)
self.feature23 = QCheckBox(features_list[23], self)
self.feature24 = QCheckBox(features_list[24], self)
self.feature0.setChecked(True)
self.feature1.setChecked(True)
self.feature2.setChecked(True)
self.feature3.setChecked(True)
self.feature4.setChecked(True)
self.feature5.setChecked(True)
self.feature6.setChecked(True)
self.feature7.setChecked(True)
self.feature8.setChecked(True)
self.feature9.setChecked(True)
self.feature10.setChecked(True)
self.feature11.setChecked(True)
self.feature12.setChecked(True)
self.feature13.setChecked(True)
self.feature14.setChecked(True)
self.feature15.setChecked(True)
self.feature16.setChecked(True)
self.feature17.setChecked(True)
self.feature18.setChecked(True)
self.feature19.setChecked(True)
self.feature20.setChecked(True)
self.feature21.setChecked(True)
self.feature22.setChecked(True)
self.feature23.setChecked(True)
self.feature24.setChecked(True)
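# Note: a less repetitive (hypothetical) construction would build these widgets in a loop, e.g.
# self.feature_boxes = [QCheckBox(name, self) for name in features_list]
# followed by setChecked(True) on each box; the explicit attributes are kept here as written.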
self.lblPercentTest = QLabel('Percentage for Test :')
self.lblPercentTest.adjustSize()
self.txtPercentTest = QLineEdit(self)
self.txtPercentTest.setText("30")
self.lblMaxDepth = QLabel('Maximum Depth :')
self.txtMaxDepth = QLineEdit(self)
self.txtMaxDepth.setText("3")
self.btnExecute = QPushButton("Run Model")
self.btnExecute.setGeometry(QRect(60, 500, 75, 23))
self.btnExecute.clicked.connect(self.update)
self.btnDTFigure = QPushButton("View Tree")
self.btnDTFigure.setGeometry(QRect(60, 500, 75, 23))
self.btnDTFigure.clicked.connect(self.view_tree)
# We create a checkbox for each feature
self.groupBox1Layout.addWidget(self.feature0, 0, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature1, 0, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature2, 1, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature3, 1, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature4, 2, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature5, 2, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature6, 3, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature7, 3, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature8, 4, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature9, 4, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature10, 5, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature11, 5, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature12, 6, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature13, 6, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature14, 7, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature15, 7, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature16, 8, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature17, 8, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature18, 9, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature19, 9, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature20, 10, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature21, 10, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature22, 11, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature23, 11, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature24, 12, 0, 1, 1)
self.groupBox1Layout.addWidget(self.lblPercentTest, 19, 0, 1, 1)
self.groupBox1Layout.addWidget(self.txtPercentTest, 19, 1, 1, 1)
self.groupBox1Layout.addWidget(self.lblMaxDepth, 20, 0, 1, 1)
self.groupBox1Layout.addWidget(self.txtMaxDepth, 20, 1, 1, 1)
self.groupBox1Layout.addWidget(self.btnExecute, 21, 0, 1, 1)
self.groupBox1Layout.addWidget(self.btnDTFigure, 21, 1, 1, 1)
self.groupBox2 = QGroupBox('Measurements:')
self.groupBox2Layout = QVBoxLayout()
self.groupBox2.setLayout(self.groupBox2Layout)
# self.groupBox2.setMinimumSize(400, 100)
self.current_model_summary = QWidget(self)
self.current_model_summary.layout = QFormLayout(self.current_model_summary)
self.txtCurrentAccuracy = QLineEdit()
self.txtCurrentPrecision = QLineEdit()
self.txtCurrentRecall = QLineEdit()
self.txtCurrentF1score = QLineEdit()
self.current_model_summary.layout.addRow('Accuracy:', self.txtCurrentAccuracy)
self.current_model_summary.layout.addRow('Precision:', self.txtCurrentPrecision)
self.current_model_summary.layout.addRow('Recall:', self.txtCurrentRecall)
self.current_model_summary.layout.addRow('F1 Score:', self.txtCurrentF1score)
self.groupBox2Layout.addWidget(self.current_model_summary)
self.groupBox3 = QGroupBox('Other Models Accuracy:')
self.groupBox3Layout = QVBoxLayout()
self.groupBox3.setLayout(self.groupBox3Layout)
self.other_models = QWidget(self)
self.other_models.layout = QFormLayout(self.other_models)
self.txtAccuracy_lr = QLineEdit()
self.txtAccuracy_gb = QLineEdit()
self.txtAccuracy_rf = QLineEdit()
self.other_models.layout.addRow('Logistic:', self.txtAccuracy_lr)
self.other_models.layout.addRow('Random Forest:', self.txtAccuracy_rf)
self.other_models.layout.addRow('Gradient Boosting:', self.txtAccuracy_gb)
self.groupBox3Layout.addWidget(self.other_models)
#::-------------------------------------
# Graphic 1 : Confusion Matrix
#::-------------------------------------
self.fig = Figure()
self.ax1 = self.fig.add_subplot(111)
self.axes=[self.ax1]
self.canvas = FigureCanvas(self.fig)
self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas.updateGeometry()
self.groupBoxG1 = QGroupBox('Confusion Matrix')
self.groupBoxG1Layout= QVBoxLayout()
self.groupBoxG1.setLayout(self.groupBoxG1Layout)
self.groupBoxG1Layout.addWidget(self.canvas)
#::---------------------------------------------
# Graphic 2 : ROC Curve
#::---------------------------------------------
self.fig2 = Figure()
self.ax2 = self.fig2.add_subplot(111)
self.axes2 = [self.ax2]
self.canvas2 = FigureCanvas(self.fig2)
self.canvas2.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas2.updateGeometry()
self.groupBoxG2 = QGroupBox('ROC Curve')
self.groupBoxG2Layout = QVBoxLayout()
self.groupBoxG2.setLayout(self.groupBoxG2Layout)
self.groupBoxG2Layout.addWidget(self.canvas2)
#::-------------------------------------------
# Graphic 3 : Importance of Features
#::-------------------------------------------
self.fig3 = Figure()
self.ax3 = self.fig3.add_subplot(111)
self.axes3 = [self.ax3]
self.canvas3 = FigureCanvas(self.fig3)
self.canvas3.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas3.updateGeometry()
self.groupBoxG3 = QGroupBox('Importance of Features')
self.groupBoxG3Layout = QVBoxLayout()
self.groupBoxG3.setLayout(self.groupBoxG3Layout)
self.groupBoxG3Layout.addWidget(self.canvas3)
#::--------------------------------------------
# Graphic 4 : ROC Curve by class
#::--------------------------------------------
self.fig4 = Figure()
self.ax4 = self.fig4.add_subplot(111)
self.axes4 = [self.ax4]
self.canvas4 = FigureCanvas(self.fig4)
self.canvas4.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas4.updateGeometry()
self.groupBoxG4 = QGroupBox('ROC Curve by Class')
self.groupBoxG4Layout = QVBoxLayout()
self.groupBoxG4.setLayout(self.groupBoxG4Layout)
self.groupBoxG4Layout.addWidget(self.canvas4)
#::-------------------------------------------------
# End of graphs
#::-------------------------------------------------
self.layout.addWidget(self.groupBox1, 0, 0, 3, 2)
self.layout.addWidget(self.groupBoxG1, 0, 2, 1, 1)
self.layout.addWidget(self.groupBoxG3, 0, 3, 1, 1)
self.layout.addWidget(self.groupBoxG2, 1, 2, 1, 1)
self.layout.addWidget(self.groupBoxG4, 1, 3, 1, 1)
self.layout.addWidget(self.groupBox2, 2, 2, 1, 1)
self.layout.addWidget(self.groupBox3, 2, 3, 1, 1)
self.setCentralWidget(self.main_widget)
self.resize(1800, 1200)
self.show()
def update(self):
'''
Decision Tree Classifier
We populate the dashboard using the parameters chosen by the user.
The parameters are processed to execute the scikit-learn Decision Tree algorithm,
then the results are presented in graphics and reports in the canvas
:return:None
'''
# processing the parameters
self.list_corr_features = pd.DataFrame([])
if self.feature0.isChecked():
if len(self.list_corr_features)==0:
self.list_corr_features = df[features_list[0]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[0]]],axis=1)
if self.feature1.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[1]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[1]]],axis=1)
if self.feature2.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[2]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[2]]],axis=1)
if self.feature3.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[3]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[3]]],axis=1)
if self.feature4.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[4]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[4]]],axis=1)
if self.feature5.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[5]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[5]]],axis=1)
if self.feature6.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[6]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[6]]],axis=1)
if self.feature7.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[7]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[7]]],axis=1)
if self.feature8.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[8]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[8]]],axis=1)
if self.feature9.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[9]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[9]]],axis=1)
if self.feature10.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[10]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[10]]], axis=1)
if self.feature11.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[11]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[11]]], axis=1)
if self.feature12.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[12]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[12]]], axis=1)
if self.feature13.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[13]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[13]]], axis=1)
if self.feature14.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[14]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[14]]], axis=1)
if self.feature15.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[15]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[15]]], axis=1)
if self.feature16.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[16]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[16]]], axis=1)
if self.feature17.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[17]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[17]]], axis=1)
if self.feature18.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[18]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[18]]], axis=1)
if self.feature19.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[19]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[19]]], axis=1)
if self.feature20.isChecked():
if len(self.list_corr_features)==0:
self.list_corr_features = df[features_list[20]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[20]]],axis=1)
if self.feature21.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[21]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[21]]],axis=1)
if self.feature22.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[22]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[22]]],axis=1)
if self.feature23.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[23]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[23]]],axis=1)
if self.feature24.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[24]]
else:
self.list_corr_features = | pd.concat([self.list_corr_features, df[features_list[24]]],axis=1) | pandas.concat |
import bt
import pandas as pd
import yfinance as yf
import matplotlib.pyplot as plt
# Disable SettingWithCopyWarning
pd.options.mode.chained_assignment = None
###### Fetching Data ######
# The tickers we're interested in
tickers = [
'SPY',
'VIRT',
'QQQ',
'TLT',
'GLD',
'UVXY',
'^VIX'
]
# Get maximum historical data for a single ticker
def get_yf_hist(ticker):
data = yf.Ticker(ticker).history(period='max')['Close']
data.rename(ticker, inplace=True)
return data
# For each ticker, get the historical data and add it to a list
def get_data_for_tickers(tickers):
data = []
for ticker in tickers:
data.append(get_yf_hist(ticker))
df = pd.concat(data, axis=1)
return df
data = get_data_for_tickers(tickers)
print(data)
###### Benchmark ######
# The name of our strategy
name = 'long_spy'
# Defining the actual strategy
benchmark_strat = bt.Strategy(
name,
[
bt.algos.RunOnce(),
bt.algos.SelectAll(),
bt.algos.WeighEqually(),
bt.algos.Rebalance()
]
)
# Make sure we're only running on the SPY data by selecting it out,
# and dropping the rows for which we have no data
spy_data = data[['SPY']]
spy_data.dropna(inplace=True)
# Generate the backtest using the defined strategy and data and run it
benchmark_test = bt.Backtest(benchmark_strat, spy_data)
res = bt.run(benchmark_test)
# Print the summary and plot our equity progression
res.plot()
res.display()
plt.show()
###### Strategy 1 ######
name = 'spy_virt_7030'
strategy_1 = bt.Strategy(
name,
[
bt.algos.RunDaily(),
bt.algos.SelectAll(),
bt.algos.WeighSpecified(SPY=0.7, VIRT=0.3),
bt.algos.Rebalance()
]
)
spy_virt_data = data[['SPY', 'VIRT']]
spy_virt_data.dropna(inplace=True)
backtest_1 = bt.Backtest(strategy_1, spy_virt_data)
res = bt.run(backtest_1, benchmark_test)
res.plot()
res.display()
plt.show()
###### Strategy 2 ######
name = 'eq_wt_monthly'
strategy_2 = bt.Strategy(
name,
[
bt.algos.RunMonthly(run_on_end_of_period=True),
bt.algos.SelectAll(),
bt.algos.WeighEqually(),
bt.algos.Rebalance()
]
)
eq_wt_data = data[['SPY', 'QQQ', 'TLT', 'GLD']]
eq_wt_data.dropna(inplace=True)
backtest_2 = bt.Backtest(strategy_2, eq_wt_data)
res = bt.run(backtest_2, benchmark_test)
res.plot()
res.display()
plt.show()
###### Strategy 3 ######
# We're going to isolate our data here first, and then drop nulls,
# since we need this series for the weights with the proper dates
spy_hl_data = data[['SPY']]
spy_hl_data.dropna(inplace=True)
# Generate returns and isolate the return series for simplicity
spy_ret = (spy_hl_data['SPY']/spy_hl_data['SPY'].shift(1)) - 1
# Rename for validation later
spy_ret.rename('SPY_returns', inplace=True)
# Create our weights series for SPY by copying the SPY price series
target_weights = spy_hl_data['SPY'].copy()
# Let's clear it and set all of them to None (you'll see why)
target_weights[:] = None
# We're going to start our strategy on day 1 with 100% SPY, so let's set the first weight to 1.0 (100%)
target_weights.iloc[0] = 1.0
# Now we need to fill in the dates where we know we want to make a change:
target_weights[spy_ret < 0.02] = 1.0
target_weights[spy_ret >= 0.02] = 0.5
# Weights need to be a DataFrame, not a series
target_weights = pd.DataFrame(target_weights)
# Now we want to fill each value forward to keep its previous allocation until we get an update
# That is, since we initially set every day's weight to have no value,
# and we only filled in day 1 at 100%, and filled in the days when we drop or gain,
# we need to maintain the previous day's allocation until we get a change.
# So, we use ffill to forward-fill our weights, which will fill in our nulls in order using
# the most recent value seen that's not None/null.
target_weights.ffill(inplace=True)
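# A minimal illustration of the ffill behaviour relied on here (hypothetical values):
# a weights series of [1.0, NaN, 0.5, NaN, NaN] becomes [1.0, 1.0, 0.5, 0.5, 0.5],
# i.e. each day keeps the most recent explicitly assigned target weight.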
# Let's make sure our prices, returns, and weights look ok
validation = pd.concat([spy_hl_data, spy_ret, target_weights], axis=1)
print(validation.tail(50))
name = 'spy_high_low'
strategy_3 = bt.Strategy(
name,
[
bt.algos.RunDaily(),
bt.algos.SelectAll(),
bt.algos.WeighTarget(target_weights),
bt.algos.Rebalance()
]
)
backtest_3 = bt.Backtest(strategy_3, spy_hl_data)
res = bt.run(backtest_3, benchmark_test)
res.plot()
res.plot_weights() # Let's also plot our weights this time
res.display()
plt.show()
# How many days are our target weights at 0.5 divided by the total number of days
print(target_weights[target_weights == 0.5].count()/len(target_weights))
###### Strategy 4 ######
# Load in our VX continuous futures stream from our CSV,
# specifying the 'Date' column as the index and converting it to datetime (so we can concat)
vx_cont = | pd.read_csv('vx_cont.csv', index_col='Date') | pandas.read_csv |
# -*- coding: utf-8 -*-
import pdb, importlib, inspect, time, datetime, json
# from PyFin.api import advanceDateByCalendar
# from data.polymerize import DBPolymerize
from data.storage_engine import StorageEngine
import time
import pandas as pd
import numpy as np
from datetime import timedelta, datetime
from financial import factor_per_share_indicators
from data.model import BalanceMRQ, BalanceTTM, BalanceReport
from data.model import CashFlowTTM, CashFlowReport
from data.model import IndicatorReport
from data.model import IncomeReport, IncomeTTM
from vision.table.valuation import Valuation
from vision.db.signletion_engine import *
from data.sqlengine import sqlEngine
# pd.set_option('display.max_columns', None)
# pd.set_option('display.max_rows', None)
# from ultron.cluster.invoke.cache_data import cache_data
class CalcEngine(object):
def __init__(self, name, url,
methods=[{'packet': 'financial.factor_per_share_indicators', 'class': 'FactorPerShareIndicators'}, ]):
self._name = name
self._methods = methods
self._url = url
def get_trade_date(self, trade_date, n, days=365):
"""
Get the trading day n years before the given trade date; if the resulting date is
not a trading day, step back to the most recent preceding trading day.
:param days: number of days counted as one year (default 365)
:param trade_date: current trading day
:param n: number of years to go back
:return:
"""
syn_util = SyncUtil()
trade_date_sets = syn_util.get_all_trades('001002', '19900101', trade_date)
trade_date_sets = trade_date_sets['TRADEDATE'].values
time_array = datetime.strptime(str(trade_date), "%Y%m%d")
time_array = time_array - timedelta(days=days) * n
date_time = int(datetime.strftime(time_array, "%Y%m%d"))
if str(date_time) < min(trade_date_sets):
# print('date_time %s is out of trade_date_sets' % date_time)
return str(date_time)
else:
while str(date_time) not in trade_date_sets:
date_time = date_time - 1
# print('trade_date pre %s year %s' % (n, date_time))
return str(date_time)
def _func_sets(self, method):
# Filter out private and protected functions
return list(filter(lambda x: not x.startswith('_') and callable(getattr(method, x)), dir(method)))
def loading_data(self, trade_date):
"""
Fetch fundamental data.
Fetch the fundamental data of all stocks for the given trading day.
:param trade_date: trading day
:return:
"""
# Convert the date format
time_array = datetime.strptime(trade_date, "%Y-%m-%d")
trade_date = datetime.strftime(time_array, '%Y%m%d')
# Read the factors currently involved
engine = sqlEngine()
columns = ['COMPCODE', 'PUBLISHDATE', 'ENDDATE', 'symbol', 'company_id', 'trade_date']
# Report data
cash_flow_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowReport,
[CashFlowReport.FINALCASHBALA, # cash and cash equivalents at end of period
], dates=[trade_date])
for col in columns:
if col in list(cash_flow_sets.keys()):
cash_flow_sets = cash_flow_sets.drop(col, axis=1)
cash_flow_sets = cash_flow_sets.rename(columns={'FINALCASHBALA': 'cash_and_equivalents_at_end', # cash and cash equivalents at end of period
})
income_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport,
[IncomeReport.BIZINCO, # operating revenue
IncomeReport.BIZTOTINCO, # total operating revenue
IncomeReport.PERPROFIT, # operating profit
IncomeReport.DILUTEDEPS, # diluted earnings per share
], dates=[trade_date])
for col in columns:
if col in list(income_sets.keys()):
income_sets = income_sets.drop(col, axis=1)
income_sets = income_sets.rename(columns={'BIZINCO': 'operating_revenue', # operating revenue
'BIZTOTINCO': 'total_operating_revenue', # total operating revenue
'PERPROFIT': 'operating_profit', # operating profit
'DILUTEDEPS': 'diluted_eps', # diluted earnings per share
})
balance_sets = engine.fetch_fundamentals_pit_extend_company_id(BalanceReport,
[BalanceReport.PARESHARRIGH, # equity attributable to owners of the parent company
BalanceReport.CAPISURP,
BalanceReport.RESE,
BalanceReport.UNDIPROF,
], dates=[trade_date])
for col in columns:
if col in list(balance_sets.keys()):
balance_sets = balance_sets.drop(col, axis=1)
balance_sets = balance_sets.rename(columns={'PARESHARRIGH': 'total_owner_equities', # equity attributable to owners of the parent company
'CAPISURP': 'capital_reserve_fund', # capital reserve
'RESE': 'surplus_reserve_fund', # surplus reserve
'UNDIPROF': 'retained_profit', # retained (undistributed) profit
})
indicator_sets = engine.fetch_fundamentals_pit_extend_company_id(IndicatorReport,
[IndicatorReport.FCFE, # free cash flow to equity
IndicatorReport.FCFF, # free cash flow to the firm
IndicatorReport.EPSBASIC, # basic earnings per share
IndicatorReport.DPS, # dividend per share (pre-tax)
], dates=[trade_date])
for col in columns:
if col in list(indicator_sets.keys()):
indicator_sets = indicator_sets.drop(col, axis=1)
indicator_sets = indicator_sets.rename(columns={'FCFE': 'shareholder_fcfps', # free cash flow to equity
'FCFF': 'enterprise_fcfps', # free cash flow to the firm
'EPSBASIC': 'basic_eps', # basic earnings per share
'DPS': 'dividend_receivable', # dividend per share (pre-tax)
})
# TTM data
cash_flow_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowTTM,
[CashFlowTTM.CASHNETI, # net increase in cash and cash equivalents
CashFlowTTM.MANANETR, # net cash flow from operating activities
], dates=[trade_date])
for col in columns:
if col in list(cash_flow_ttm_sets.keys()):
cash_flow_ttm_sets = cash_flow_ttm_sets.drop(col, axis=1)
cash_flow_ttm_sets = cash_flow_ttm_sets.rename(
columns={'CASHNETI': 'cash_equivalent_increase_ttm', # net increase in cash and cash equivalents
'MANANETR': 'net_operate_cash_flow_ttm', # net cash flow from operating activities
})
income_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeTTM,
[IncomeTTM.PARENETP, # net profit attributable to owners of the parent company
IncomeTTM.PERPROFIT, # operating profit
IncomeTTM.BIZINCO, # operating revenue
IncomeTTM.BIZTOTINCO, # total operating revenue
], dates=[trade_date])
for col in columns:
if col in list(income_ttm_sets.keys()):
income_ttm_sets = income_ttm_sets.drop(col, axis=1)
income_ttm_sets = income_ttm_sets.rename(columns={'PARENETP': 'np_parent_company_owners_ttm', # net profit attributable to owners of the parent company
'PERPROFIT': 'operating_profit_ttm', # operating profit
'BIZINCO': 'operating_revenue_ttm', # operating revenue
'BIZTOTINCO': 'total_operating_revenue_ttm', # total operating revenue
})
column = ['trade_date']
valuation_data = get_fundamentals(query(Valuation.security_code,
Valuation.trade_date,
Valuation.capitalization,
).filter(Valuation.trade_date.in_([trade_date])))
for col in column:
if col in list(valuation_data.keys()):
valuation_data = valuation_data.drop(col, axis=1)
valuation_sets = | pd.merge(cash_flow_sets, income_sets, on='security_code') | pandas.merge |
import numpy as np
import pandas as pd
def fourier(s: pd.Series) -> pd.Series:
# TODO need to resample.. (fill with avgs?) warn about large gaps?
ts = list(range(len(s)))
# FIXME assert index is continuous? otherwise it's not properly sampled
f = ts[1] - ts[0]
vals = list(s)
ft = np.abs(np.fft.rfft(vals))
freqs = np.fft.rfftfreq(len(vals), f)
mags = abs(ft)
periods = 1 / freqs
# todo hmm. plotting with periods results in basically 'exponential' axis... not sure what to do
return pd.Series(mags, index=freqs)
# TODO strip away .iloc[1:]? it's always big
def periods(s: pd.Series, *, n: int=3):
from scipy.signal import find_peaks # type: ignore
f = fourier(s)
f.index = 1 / f.index
pidx, _ = find_peaks(list(f))
# todo yield instead?
for _, p in list(reversed(sorted((f.iloc[p], p) for p in pidx)))[:n]:
print(f'{p:5d} {f.index[p]:6.2f} {f.iloc[p]:7.2f}')
def deseasonalize(df):
from statsmodels.tsa.seasonal import seasonal_decompose
# TODO meh, not sure if should be here..
df = df.resample('D').interpolate('linear')
dec = seasonal_decompose(df) # (can pass period=)
# todo wonder which periods is it guessing..
# TODO wonder how it calculates seasonal? also it's not catching yearly trends?
# ddd = dec.seasonal + dec.trend + dec.resid
return df - dec.seasonal
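# A minimal usage sketch for deseasonalize (names are hypothetical; assumes a DataFrame
# with a DatetimeIndex and one numeric column):
# raw = pd.DataFrame({'value': vals}, index=pd.date_range('2020-01-01', periods=len(vals)))
# adjusted = deseasonalize(raw)  # seasonal component removed; trend and residual remain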
def test_periods():
ts = np.arange(0, 1500, 1)
vals = [
60 + \
10 * np.sin(6.29 / 365 * i) + \
20 * np.sin(6.29 / 7 * i) + \
5 * np.sin(6.29 / 28 * i)
for i in ts]
s = | pd.Series(vals, index=ts) | pandas.Series |
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier, BaggingClassifier
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.model_selection._split import _BaseKFold
from sklearn.metrics import roc_curve, classification_report, log_loss, accuracy_score, recall_score, auc, average_precision_score, f1_score
def getTrainTimes(t1, testTimes):
'''
Given testTimes, find the times of the training observations.
There are three conditions that would cause a sample to be dropped. Let i be
the index of a train sample and j the index of a test sample. Let 0,1 be the
start and end of a sample, then:
- t_{j,0} <= t_{i,0} <= t_{j,1} --> train starts between test
- t_{j,0} <= t_{i,1} <= t_{j,1} --> train ends between test
- t_{i,0} <= t_{j,0} <= t_{j,1} <= t_{i,1} --> test is contained in train
See Advances in Financial Machine Learning, snippet 7.1, page 106.
@param t1 A pandas Series where the index tells when the observation started
and the value when it ended.
@param testTimes Times of testing observations.
@return A purged t1.
'''
trn = t1.copy(deep=True)
for i, j in testTimes.iteritems():
# Train starts within test
df0 = trn[(i <= trn.index) & (trn.index <= j)].index
# Train ends within test
df1 = trn[(i <= trn) & (trn <= j)].index
# Train envelops test
df2 = trn[(trn.index <= i) & (j <= trn)].index
# Removes the union of the previous three data frames.
trn = trn.drop(df0.union(df1).union(df2))
return trn
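def _demo_getTrainTimes():
    '''A minimal sketch, using hypothetical data, of how getTrainTimes purges overlapping samples.'''
    idx = pd.date_range('2020-01-01', periods=6, freq='D')
    t1 = pd.Series(idx + pd.Timedelta(days=1), index=idx)  # each observation spans one day
    # One test observation covering days 2 through 4; training samples overlapping it get purged.
    test_times = pd.Series([idx[3] + pd.Timedelta(days=1)], index=[idx[2]])
    return getTrainTimes(t1, test_times)  # only the non-overlapping samples (days 0 and 5) remain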
def getEmbargoTimes(times, pctEmbargo):
'''
Builds an embargo mapping for the observation times: each time in times[:-step] is
mapped to the time `step` observations later, where step = int(len(times) * pctEmbargo).
Extending test intervals up to these embargo times further prevents leakage.
See Advances in Financial Machine Learning, snippet 7.2, page 108.
@param times A data series of observation times.
@param pctEmbargo The fraction of the series length to use as the embargo step.
@return A pandas Series mapping each observation time to the end of its embargo period.
'''
step = int(times.shape[0] * pctEmbargo)
if step == 0:
mbrg = pd.Series(times, index=times)
else:
mbrg = | pd.Series(times[step:], index=times[:-step]) | pandas.Series |
import os
from pprint import pprint
import pandas as pd
import requests
from utils import load_json, save_json
class TwitterApi:
def __init__(self, timeline_params_path="timeline_params.json"):
self.bearer_token = self._auth()
self.headers = self._create_headers()
self.timeline_params = load_json(timeline_params_path)
def build_user_dataset(self, user_name, params=None, data_dir="data"):
filepath = os.path.join(
data_dir, f"{user_name}.json")
tweets = self.load_storred_tweets(filepath)
latest_stored_tweet = self.get_latest_stored_tweet(tweets)
if latest_stored_tweet:
latest_stored_tweet_id = latest_stored_tweet["id"]
if params:
params["since_id"] = latest_stored_tweet_id
else:
params = {"since_id": latest_stored_tweet_id}
user_id = self.query_user_data_by_name(
user_name, params={"user.fields": "id"})["id"]
new_tweets = self.get_user_tweets(user_id, params)
tweets += new_tweets
save_json(tweets, filepath)
print(f"{len(new_tweets)} tweets queried and stored.")
def get_latest_stored_tweet(self, tweets):
if tweets:
latest_stored_tweet = | pd.DataFrame(tweets) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
# mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
# mat: 2d matrix with shape (2, 3) to input. empty - makes sized
# objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
# collections.Sequence like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
def __len__(self):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series({})])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(range(len(data)), data))
idx = Index(['a', 'b', 'c'])
data2 = [Series([1.5, 3, 4], idx, dtype='O'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series_aligned_index(self):
series = [pd.Series(i, index=['b', 'a', 'c'], name=str(i))
for i in range(3)]
result = pd.DataFrame(series)
expected = pd.DataFrame({'b': [0, 1, 2],
'a': [0, 1, 2],
'c': [0, 1, 2]},
columns=['b', 'a', 'c'],
index=['0', '1', '2'])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {'a': 1.5, 'b': 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
tm.assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
data = {'A': randn(10),
'B': randn(8)}
with pytest.raises(ValueError, match='arrays must all be same length'):
DataFrame(data)
def test_constructor_scalar(self):
idx = Index(lrange(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_constructor_Series_copy_bug(self):
df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
df.copy()
def test_constructor_mixed_dict_and_Series(self):
data = {}
data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}
data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])
result = DataFrame(data)
assert result.index.is_monotonic
# ordering ambiguous, raise exception
with pytest.raises(ValueError, match='ambiguous ordering'):
DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})
# this is OK though
result = DataFrame({'A': ['a', 'b'],
'B': Series(['a', 'b'], index=['a', 'b'])})
expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},
index=['a', 'b'])
tm.assert_frame_equal(result, expected)
def test_constructor_tuples(self):
result = DataFrame({'A': [(1, 2), (3, 4)]})
expected = DataFrame({'A': Series([(1, 2), (3, 4)])})
tm.assert_frame_equal(result, expected)
def test_constructor_namedtuples(self):
# GH11181
from collections import namedtuple
named_tuple = namedtuple("Pandas", list('ab'))
tuples = [named_tuple(1, 3), named_tuple(2, 4)]
expected = DataFrame({'a': [1, 2], 'b': [3, 4]})
result = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
# with columns
expected = DataFrame({'y': [1, 2], 'z': [3, 4]})
result = DataFrame(tuples, columns=['y', 'z'])
tm.assert_frame_equal(result, expected)
def test_constructor_orient(self):
data_dict = self.mixed_frame.T._series
recons = DataFrame.from_dict(data_dict, orient='index')
expected = self.mixed_frame.sort_index()
tm.assert_frame_equal(recons, expected)
# dict of sequence
a = {'hi': [32, 3, 3],
'there': [3, 5, 3]}
rs = DataFrame.from_dict(a, orient='index')
xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
tm.assert_frame_equal(rs, xp)
def test_from_dict_columns_parameter(self):
# GH 18529
# Test new columns parameter for from_dict that was added to make
# from_items(..., orient='index', columns=[...]) easier to replicate
result = DataFrame.from_dict(OrderedDict([('A', [1, 2]),
('B', [4, 5])]),
orient='index', columns=['one', 'two'])
expected = DataFrame([[1, 2], [4, 5]], index=['A', 'B'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
msg = "cannot use columns parameter with orient='columns'"
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
orient='columns', columns=['one', 'two'])
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
columns=['one', 'two'])
def test_constructor_Series_named(self):
a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
tm.assert_index_equal(df.index, a.index)
# ndarray like
arr = np.random.randn(10)
s = Series(arr, name='x')
df = DataFrame(s)
expected = DataFrame(dict(x=s))
tm.assert_frame_equal(df, expected)
s = Series(arr, index=range(3, 13))
df = DataFrame(s)
expected = DataFrame({0: s})
tm.assert_frame_equal(df, expected)
pytest.raises(ValueError, DataFrame, s, columns=[1, 2])
# #2234
a = Series([], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
# series with name and w/o
s1 = Series(arr, name='x')
df = DataFrame([s1, arr]).T
expected = DataFrame({'x': s1, 'Unnamed 0': arr},
columns=['x', 'Unnamed 0'])
tm.assert_frame_equal(df, expected)
# this is a bit non-intuitive here; the series collapse down to arrays
df = DataFrame([arr, s1]).T
expected = DataFrame({1: s1, 0: arr}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
def test_constructor_Series_named_and_columns(self):
# GH 9232 validation
s0 = Series(range(5), name=0)
s1 = Series(range(5), name=1)
# matching name and column gives standard frame
tm.assert_frame_equal(pd.DataFrame(s0, columns=[0]),
s0.to_frame())
tm.assert_frame_equal(pd.DataFrame(s1, columns=[1]),
s1.to_frame())
# non-matching produces empty frame
assert pd.DataFrame(s0, columns=[1]).empty
assert pd.DataFrame(s1, columns=[0]).empty
def test_constructor_Series_differently_indexed(self):
# name
s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
# no name
s2 = Series([1, 2, 3], index=['a', 'b', 'c'])
other_index = Index(['a', 'b'])
df1 = DataFrame(s1, index=other_index)
exp1 = DataFrame(s1.reindex(other_index))
assert df1.columns[0] == 'x'
tm.assert_frame_equal(df1, exp1)
df2 = DataFrame(s2, index=other_index)
exp2 = DataFrame(s2.reindex(other_index))
assert df2.columns[0] == 0
tm.assert_index_equal(df2.index, other_index)
tm.assert_frame_equal(df2, exp2)
def test_constructor_manager_resize(self):
index = list(self.frame.index[:5])
columns = list(self.frame.columns[:3])
result = DataFrame(self.frame._data, index=index,
columns=columns)
tm.assert_index_equal(result.index, Index(index))
tm.assert_index_equal(result.columns, Index(columns))
def test_constructor_from_items(self):
items = [(c, self.frame[c]) for c in self.frame.columns]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items)
tm.assert_frame_equal(recons, self.frame)
# pass some columns
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])
tm.assert_frame_equal(recons, self.frame.loc[:, ['C', 'B', 'A']])
# orient='index'
row_items = [(idx, self.mixed_frame.xs(idx))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert recons['A'].dtype == np.float64
msg = "Must pass columns with orient='index'"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items(row_items, orient='index')
# orient='index', but thar be tuples
arr = construct_1d_object_array_from_listlike(
[('bar', 'baz')] * len(self.mixed_frame))
self.mixed_frame['foo'] = arr
row_items = [(idx, list(self.mixed_frame.xs(idx)))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert isinstance(recons['foo'][0], tuple)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
orient='index',
columns=['one', 'two', 'three'])
xp = DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'],
columns=['one', 'two', 'three'])
tm.assert_frame_equal(rs, xp)
def test_constructor_from_items_scalars(self):
# GH 17312
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 4)])
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 2)], columns=['col1'],
orient='index')
def test_from_items_deprecation(self):
# GH 17320
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
columns=['col1', 'col2', 'col3'],
orient='index')
def test_constructor_mix_series_nonseries(self):
df = DataFrame({'A': self.frame['A'],
'B': list(self.frame['B'])}, columns=['A', 'B'])
tm.assert_frame_equal(df, self.frame.loc[:, ['A', 'B']])
msg = 'does not match index length'
with pytest.raises(ValueError, match=msg):
DataFrame({'A': self.frame['A'], 'B': list(self.frame['B'])[:-2]})
def test_constructor_miscast_na_int_dtype(self):
df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)
expected = DataFrame([[np.nan, 1], [1, 0]])
tm.assert_frame_equal(df, expected)
def test_constructor_column_duplicates(self):
# it works! #2079
df = DataFrame([[8, 5]], columns=['a', 'a'])
edf = DataFrame([[8, 5]])
edf.columns = ['a', 'a']
tm.assert_frame_equal(df, edf)
idf = DataFrame.from_records([(8, 5)],
columns=['a', 'a'])
tm.assert_frame_equal(idf, edf)
pytest.raises(ValueError, DataFrame.from_dict,
OrderedDict([('b', 8), ('a', 5), ('a', 6)]))
def test_constructor_empty_with_string_dtype(self):
# GH 9428
expected = DataFrame(index=[0, 1], columns=[0, 1], dtype=object)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=str)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.str_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.unicode_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype='U5')
tm.assert_frame_equal(df, expected)
def test_constructor_single_value(self):
# expecting single value upcasting here
df = DataFrame(0., index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df,
DataFrame(np.zeros(df.shape).astype('float64'),
df.index, df.columns))
df = DataFrame(0, index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('int64'),
df.index, df.columns))
df = DataFrame('a', index=[1, 2], columns=['a', 'c'])
tm.assert_frame_equal(df, DataFrame(np.array([['a', 'a'], ['a', 'a']],
dtype=object),
index=[1, 2], columns=['a', 'c']))
pytest.raises(ValueError, DataFrame, 'a', [1, 2])
pytest.raises(ValueError, DataFrame, 'a', columns=['a', 'c'])
msg = 'incompatible data and dtype'
with pytest.raises(TypeError, match=msg):
DataFrame('a', [1, 2], ['a', 'c'], float)
def test_constructor_with_datetimes(self):
intname = np.dtype(np.int_).name
floatname = np.dtype(np.float_).name
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# single item
df = DataFrame({'A': 1, 'B': 'foo', 'C': 'bar',
'D': Timestamp("20010101"),
'E': datetime(2001, 1, 2, 0, 0)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = Series({'int64': 1, datetime64name: 2, objectname: 2})
result.sort_index()
expected.sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim==0 (e.g. we are passing a ndim 0
# ndarray with a dtype specified)
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array(1., dtype=floatname),
intname: np.array(1, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = {objectname: 1}
if intname == 'int64':
expected['int64'] = 2
else:
expected['int64'] = 1
expected[intname] = 1
if floatname == 'float64':
expected['float64'] = 2
else:
expected['float64'] = 1
expected[floatname] = 1
result = result.sort_index()
expected = Series(expected).sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim>0
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array([1.] * 10, dtype=floatname),
intname: np.array([1] * 10, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
result = result.sort_index()
tm.assert_series_equal(result, expected)
# GH 2809
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
datetime_s = Series(datetimes)
assert datetime_s.dtype == 'M8[ns]'
df = DataFrame({'datetime_s': datetime_s})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 2810
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
dates = [ts.date() for ts in ind]
df = DataFrame({'datetimes': datetimes, 'dates': dates})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 7594
# don't coerce tz-aware
import pytz
tz = pytz.timezone('US/Eastern')
dt = tz.localize(datetime(2012, 1, 1))
df = DataFrame({'End Date': dt}, index=[0])
assert df.iat[0, 0] == dt
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
df = DataFrame([{'End Date': dt}])
assert df.iat[0, 0] == dt
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range('20130101', periods=3)
df = DataFrame({'value': dr})
assert df.iat[0, 0].tz is None
dr = date_range('20130101', periods=3, tz='UTC')
df = DataFrame({'value': dr})
assert str(df.iat[0, 0].tz) == 'UTC'
dr = date_range('20130101', periods=3, tz='US/Eastern')
df = DataFrame({'value': dr})
assert str(df.iat[0, 0].tz) == 'US/Eastern'
# GH 7822
# preserve an index with a tz on dict construction
i = date_range('1/1/2011', periods=5, freq='10s', tz='US/Eastern')
expected = DataFrame(
{'a': i.to_series(keep_tz=True).reset_index(drop=True)})
df = DataFrame()
df['a'] = i
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': i})
tm.assert_frame_equal(df, expected)
# multiples
i_no_tz = date_range('1/1/2011', periods=5, freq='10s')
df = DataFrame({'a': i, 'b': i_no_tz})
expected = DataFrame({'a': i.to_series(keep_tz=True)
.reset_index(drop=True), 'b': i_no_tz})
tm.assert_frame_equal(df, expected)
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [np.array([None, None, None, None,
datetime.now(), None]),
np.array([None, None, datetime.now(), None])]:
result = DataFrame(arr).get_dtype_counts()
expected = Series({'datetime64[ns]': 1})
tm.assert_series_equal(result, expected)
def test_constructor_for_list_with_dtypes(self):
# TODO(wesm): unused
intname = np.dtype(np.int_).name # noqa
floatname = np.dtype(np.float_).name # noqa
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# test list of lists/ndarrays
df = DataFrame([np.arange(5) for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int64': 5})
df = DataFrame([np.array(np.arange(5), dtype='int32')
for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int32': 5})
# overflow issue? (we always expect int64 upcasting here)
df = DataFrame({'a': [2 ** 31, 2 ** 31 + 1]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
# GH #2751 (construction with no index specified), make sure we cast to
# platform values
df = DataFrame([1, 2])
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame([1., 2.])
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1, 2]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1., 2.]})
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1.}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
# with object list
df = DataFrame({'a': [1, 2, 4, 7], 'b': [1.2, 2.3, 5.1, 6.3],
'c': list('abcd'),
'd': [datetime(2000, 1, 1) for i in range(4)],
'e': [1., 2, 4., 7]})
result = df.get_dtype_counts()
expected = Series(
{'int64': 1, 'float64': 2, datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_constructor_frame_copy(self):
cop = DataFrame(self.frame, copy=True)
cop['A'] = 5
assert (cop['A'] == 5).all()
assert not (self.frame['A'] == 5).all()
def test_constructor_ndarray_copy(self):
df = DataFrame(self.frame.values)
self.frame.values[5] = 5
assert (df.values[5] == 5).all()
df = DataFrame(self.frame.values, copy=True)
self.frame.values[6] = 6
assert not (df.values[6] == 6).all()
def test_constructor_series_copy(self):
series = self.frame._series
df = DataFrame({'A': series['A']})
df['A'][:] = 5
assert not (series['A'] == 5).all()
def test_constructor_with_nas(self):
# GH 5016
# na's in indices
def check(df):
for i in range(len(df.columns)):
df.iloc[:, i]
indexer = np.arange(len(df.columns))[isna(df.columns)]
# No NaN found -> error
if len(indexer) == 0:
def f():
df.loc[:, np.nan]
pytest.raises(TypeError, f)
# single nan should result in Series
elif len(indexer) == 1:
tm.assert_series_equal(df.iloc[:, indexer[0]],
df.loc[:, np.nan])
# multiple nans should result in DataFrame
else:
tm.assert_frame_equal(df.iloc[:, indexer],
df.loc[:, np.nan])
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[1, np.nan])
check(df)
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0, 1, 2, 3], [4, 5, 6, 7]],
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]],
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
# GH 21428 (non-unique columns)
df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]],
columns=[np.nan, 1, 2, 2])
check(df)
def test_constructor_lists_to_object_dtype(self):
# from #1074
d = DataFrame({'a': [np.nan, False]})
assert d['a'].dtype == np.object_
assert not d['a'][1]
def test_constructor_categorical(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([Categorical(list('abc')), Categorical(list('abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
pytest.raises(ValueError,
lambda: DataFrame([Categorical(list('abc')),
Categorical(list('abdefg'))]))
# ndim > 1
pytest.raises(NotImplementedError,
lambda: Categorical(np.array([list('abcd')])))
def test_constructor_categorical_series(self):
items = [1, 2, 3, 1]
exp = Series(items).astype('category')
res = Series(items, dtype='category')
tm.assert_series_equal(res, exp)
items = ["a", "b", "c", "a"]
exp = Series(items).astype('category')
res = Series(items, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_from_records_to_records(self):
# from numpy documentation
arr = np.zeros((2,), dtype=('i4,f4,a10'))
arr[:] = [(1, 2., 'Hello'), (2, 3., "World")]
# TODO(wesm): unused
frame = DataFrame.from_records(arr) # noqa
index = pd.Index(np.arange(len(arr))[::-1])
indexed_frame = DataFrame.from_records(arr, index=index)
tm.assert_index_equal(indexed_frame.index, index)
# without names, it should go to last ditch
arr2 = np.zeros((2, 3))
tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2))
# wrong length
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame.from_records(arr, index=index[:-1])
indexed_frame = DataFrame.from_records(arr, index='f1')
# what to do?
records = indexed_frame.to_records()
assert len(records.dtype.names) == 3
records = indexed_frame.to_records(index=False)
assert len(records.dtype.names) == 2
assert 'index' not in records.dtype.names
def test_from_records_nones(self):
tuples = [(1, 2, None, 3),
(1, 2, None, 3),
(None, 2, 5, 3)]
df = DataFrame.from_records(tuples, columns=['a', 'b', 'c', 'd'])
assert np.isnan(df['c'][0])
def test_from_records_iterator(self):
arr = np.array([(1.0, 1.0, 2, 2), (3.0, 3.0, 4, 4), (5., 5., 6, 6),
(7., 7., 8, 8)],
dtype=[('x', np.float64), ('u', np.float32),
('y', np.int64), ('z', np.int32)])
df = DataFrame.from_records(iter(arr), nrows=2)
xp = DataFrame({'x': np.array([1.0, 3.0], dtype=np.float64),
'u': np.array([1.0, 3.0], dtype=np.float32),
'y': np.array([2, 4], dtype=np.int64),
'z': np.array([2, 4], dtype=np.int32)})
tm.assert_frame_equal(df.reindex_like(xp), xp)
# no dtypes specified here, so just compare with the default
arr = [(1.0, 2), (3.0, 4), (5., 6), (7., 8)]
df = DataFrame.from_records(iter(arr), columns=['x', 'y'],
nrows=2)
tm.assert_frame_equal(df, xp.reindex(columns=['x', 'y']),
check_dtype=False)
def test_from_records_tuples_generator(self):
def tuple_generator(length):
for i in range(length):
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
yield (i, letters[i % len(letters)], i / length)
columns_names = ['Integer', 'String', 'Float']
columns = [[i[j] for i in tuple_generator(
10)] for j in range(len(columns_names))]
data = {'Integer': columns[0],
'String': columns[1], 'Float': columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = tuple_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
tm.assert_frame_equal(result, expected)
def test_from_records_lists_generator(self):
def list_generator(length):
for i in range(length):
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
yield [i, letters[i % len(letters)], i / length]
columns_names = ['Integer', 'String', 'Float']
columns = [[i[j] for i in list_generator(
10)] for j in range(len(columns_names))]
data = {'Integer': columns[0],
'String': columns[1], 'Float': columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = list_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
tm.assert_frame_equal(result, expected)
def test_from_records_columns_not_modified(self):
tuples = [(1, 2, 3),
(1, 2, 3),
(2, 5, 3)]
columns = ['a', 'b', 'c']
original_columns = list(columns)
df = DataFrame.from_records(tuples, columns=columns, index='a') # noqa
assert columns == original_columns
def test_from_records_decimal(self):
from decimal import Decimal
tuples = [(Decimal('1.5'),), (Decimal('2.5'),), (None,)]
df = DataFrame.from_records(tuples, columns=['a'])
assert df['a'].dtype == object
df = DataFrame.from_records(tuples, columns=['a'], coerce_float=True)
assert df['a'].dtype == np.float64
assert np.isnan(df['a'].values[-1])
def test_from_records_duplicates(self):
result = DataFrame.from_records([(1, 2, 3), (4, 5, 6)],
columns=['a', 'b', 'a'])
expected = DataFrame([(1, 2, 3), (4, 5, 6)],
columns=['a', 'b', 'a'])
tm.assert_frame_equal(result, expected)
def test_from_records_set_index_name(self):
def create_dict(order_id):
return {'order_id': order_id, 'quantity': np.random.randint(1, 10),
'price': np.random.randint(1, 10)}
documents = [create_dict(i) for i in range(10)]
# demo missing data
documents.append({'order_id': 10, 'quantity': 5})
result = DataFrame.from_records(documents, index='order_id')
assert result.index.name == 'order_id'
# MultiIndex
result = DataFrame.from_records(documents,
index=['order_id', 'quantity'])
assert result.index.names == ('order_id', 'quantity')
def test_from_records_misc_brokenness(self):
# #2179
data = {1: ['foo'], 2: ['bar']}
result = DataFrame.from_records(data, columns=['a', 'b'])
exp = DataFrame(data, columns=['a', 'b'])
tm.assert_frame_equal(result, exp)
# overlap in index/index_names
data = {'a': [1, 2, 3], 'b': [4, 5, 6]}
result = DataFrame.from_records(data, index=['a', 'b', 'c'])
exp = DataFrame(data, index=['a', 'b', 'c'])
tm.assert_frame_equal(result, exp)
# GH 2623
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi']) # test col upconverts to obj
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
results = df2_obj.get_dtype_counts()
expected = Series({'datetime64[ns]': 1, 'object': 1})
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 1])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
results = df2_obj.get_dtype_counts().sort_index()
expected = Series({'datetime64[ns]': 1, 'int64': 1})
tm.assert_series_equal(results, expected)
def test_from_records_empty(self):
# 3562
result = DataFrame.from_records([], columns=['a', 'b', 'c'])
expected = DataFrame(columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
result = DataFrame.from_records([], columns=['a', 'b', 'b'])
expected = DataFrame(columns=['a', 'b', 'b'])
tm.assert_frame_equal(result, expected)
def test_from_records_empty_with_nonempty_fields_gh3682(self):
a = np.array([(1, 2)], dtype=[('id', np.int64), ('value', np.int64)])
df = DataFrame.from_records(a, index='id')
tm.assert_index_equal(df.index, Index([1], name='id'))
assert df.index.name == 'id'
tm.assert_index_equal(df.columns, Index(['value']))
b = np.array([], dtype=[('id', np.int64), ('value', np.int64)])
df = DataFrame.from_records(b, index='id')
tm.assert_index_equal(df.index, Index([], name='id'))
assert df.index.name == 'id'
def test_from_records_with_datetimes(self):
# this may fail on certain platforms because of a numpy issue
# related GH6140
if not is_platform_little_endian():
pytest.skip("known failure of test on non-little endian")
# construction with a null in a recarray
# GH 6140
expected = DataFrame({'EXPIRY': [datetime(2005, 3, 1, 0, 0), None]})
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [('EXPIRY', '<M8[ns]')]
try:
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
except (ValueError):
pytest.skip("known failure of numpy rec array creation")
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
# coercion should work too
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [('EXPIRY', '<M8[m]')]
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
def test_from_records_sequencelike(self):
df = DataFrame({'A': np.array(np.random.randn(6), dtype=np.float64),
'A1': np.array(np.random.randn(6), dtype=np.float64),
'B': np.array(np.arange(6), dtype=np.int64),
'C': ['foo'] * 6,
'D': np.array([True, False] * 3, dtype=bool),
'E': np.array(np.random.randn(6), dtype=np.float32),
'E1': np.array(np.random.randn(6), dtype=np.float32),
'F': np.array(np.arange(6), dtype=np.int32)})
# this is actually tricky to create the recordlike arrays and
# have the dtypes be intact
blocks = df._to_dict_of_blocks()
tuples = []
columns = []
dtypes = []
for dtype, b in compat.iteritems(blocks):
columns.extend(b.columns)
dtypes.extend([(c, np.dtype(dtype).descr[0][1])
for c in b.columns])
for i in range(len(df.index)):
tup = []
for _, b in compat.iteritems(blocks):
tup.extend(b.iloc[i].values)
tuples.append(tuple(tup))
recarray = np.array(tuples, dtype=dtypes).view(np.recarray)
recarray2 = df.to_records()
lists = [list(x) for x in tuples]
# tuples (lose the dtype info)
result = (DataFrame.from_records(tuples, columns=columns)
.reindex(columns=df.columns))
# created recarray and with to_records recarray (have dtype info)
result2 = (DataFrame.from_records(recarray, columns=columns)
.reindex(columns=df.columns))
result3 = (DataFrame.from_records(recarray2, columns=columns)
.reindex(columns=df.columns))
# list of tuples (no dtype info)
result4 = (DataFrame.from_records(lists, columns=columns)
.reindex(columns=df.columns))
tm.assert_frame_equal(result, df, check_dtype=False)
tm.assert_frame_equal(result2, df)
tm.assert_frame_equal(result3, df)
tm.assert_frame_equal(result4, df, check_dtype=False)
# tuples is in the order of the columns
result = DataFrame.from_records(tuples)
tm.assert_index_equal(result.columns, pd.Index(lrange(8)))
# test exclude parameter & we are casting the results here (as we don't
# have dtype info to recover)
columns_to_test = [columns.index('C'), columns.index('E1')]
exclude = list(set(range(8)) - set(columns_to_test))
result = DataFrame.from_records(tuples, exclude=exclude)
result.columns = [columns[i] for i in sorted(columns_to_test)]
tm.assert_series_equal(result['C'], df['C'])
tm.assert_series_equal(result['E1'], df['E1'].astype('float64'))
# empty case
result = DataFrame.from_records([], columns=['foo', 'bar', 'baz'])
assert len(result) == 0
tm.assert_index_equal(result.columns,
pd.Index(['foo', 'bar', 'baz']))
result = DataFrame.from_records([])
assert len(result) == 0
assert len(result.columns) == 0
def test_from_records_dictlike(self):
# test the dict methods
df = DataFrame({'A': np.array(np.random.randn(6), dtype=np.float64),
'A1': np.array(np.random.randn(6), dtype=np.float64),
'B': np.array(np.arange(6), dtype=np.int64),
'C': ['foo'] * 6,
'D': np.array([True, False] * 3, dtype=bool),
'E': np.array(np.random.randn(6), dtype=np.float32),
'E1': np.array(np.random.randn(6), dtype=np.float32),
'F': np.array(np.arange(6), dtype=np.int32)})
# columns is in a different order here than the actual items iterated
# from the dict
blocks = df._to_dict_of_blocks()
columns = []
for dtype, b in compat.iteritems(blocks):
columns.extend(b.columns)
asdict = {x: y for x, y in compat.iteritems(df)}
asdict2 = {x: y.values for x, y in compat.iteritems(df)}
# dict of series & dict of ndarrays (have dtype info)
results = []
results.append(DataFrame.from_records(
asdict).reindex(columns=df.columns))
results.append(DataFrame.from_records(asdict, columns=columns)
.reindex(columns=df.columns))
results.append(DataFrame.from_records(asdict2, columns=columns)
.reindex(columns=df.columns))
for r in results:
tm.assert_frame_equal(r, df)
def test_from_records_with_index_data(self):
df = DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
data = np.random.randn(10)
df1 = DataFrame.from_records(df, index=data)
tm.assert_index_equal(df1.index, Index(data))
def test_from_records_bad_index_column(self):
df = DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
# should pass
df1 = DataFrame.from_records(df, index=['C'])
tm.assert_index_equal(df1.index, Index(df.C))
df1 = DataFrame.from_records(df, index='C')
tm.assert_index_equal(df1.index, Index(df.C))
# should fail
pytest.raises(ValueError, DataFrame.from_records, df, index=[2])
pytest.raises(KeyError, DataFrame.from_records, df, index=2)
def test_from_records_non_tuple(self):
class Record(object):
def __init__(self, *args):
self.args = args
def __getitem__(self, i):
return self.args[i]
def __iter__(self):
return iter(self.args)
recs = [Record(1, 2, 3), Record(4, 5, 6), Record(7, 8, 9)]
tups = lmap(tuple, recs)
result = DataFrame.from_records(recs)
expected = DataFrame.from_records(tups)
tm.assert_frame_equal(result, expected)
def test_from_records_len0_with_columns(self):
# #2633
result = DataFrame.from_records([], index='foo',
columns=['foo', 'bar'])
expected = Index(['bar'])
assert len(result) == 0
assert result.index.name == 'foo'
tm.assert_index_equal(result.columns, expected)
def test_to_frame_with_falsey_names(self):
# GH 16114
result = Series(name=0).to_frame().dtypes
expected = Series({0: np.float64})
tm.assert_series_equal(result, expected)
result = DataFrame(Series(name=0)).dtypes
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, 'uint8', 'category'])
def test_constructor_range_dtype(self, dtype):
# GH 16804
expected = DataFrame({'A': [0, 1, 2, 3, 4]}, dtype=dtype or 'int64')
result = DataFrame({'A': range(5)}, dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_frame_from_list_subclass(self):
# GH21226
class List(list):
pass
expected = DataFrame([[1, 2, 3], [4, 5, 6]])
result = DataFrame(List([List([1, 2, 3]), List([4, 5, 6])]))
tm.assert_frame_equal(result, expected)
class TestDataFrameConstructorWithDatetimeTZ(TestData):
def test_from_dict(self):
# 8260
# support datetime64 with tz
idx = Index(date_range('20130101', periods=3, tz='US/Eastern'),
name='foo')
dr = date_range('20130110', periods=3)
# construction
df = DataFrame({'A': idx, 'B': dr})
assert df['A'].dtype == 'datetime64[ns, US/Eastern]'
assert df['A'].name == 'A'
tm.assert_series_equal(df['A'], Series(idx, name='A'))
tm.assert_series_equal(df['B'], Series(dr, name='B'))
def test_from_index(self):
# from index
idx2 = date_range('20130101', periods=3, tz='US/Eastern', name='foo')
df2 = DataFrame(idx2)
tm.assert_series_equal(df2['foo'], Series(idx2, name='foo'))
df2 = DataFrame(Series(idx2))
tm.assert_series_equal(df2['foo'], Series(idx2, name='foo'))
idx2 = date_range('20130101', periods=3, tz='US/Eastern')
df2 = DataFrame(idx2)
tm.assert_series_equal(df2[0], Series(idx2, name=0))
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 12 12:29:19 2019
@author: sdenaro
"""
import matplotlib.pyplot as plt
import pandas as pd
from datetime import datetime as dt
from datetime import timedelta
import numpy as np
import numpy.matlib as matlib
import seaborn as sns
from sklearn import linear_model
#from sklearn.metrics import mean_squared_error, r2_score
from scipy import stats
def r2(x, y):
return stats.pearsonr(x, y)[0] ** 2
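# Quick illustrative check of r2 (the numbers below are made up, not BPA data):
# >>> r2(np.array([1.0, 2.0, 3.0, 4.0]), np.array([1.1, 1.9, 3.2, 3.9]))
# ~0.99 # squared Pearson correlation; close to 1 for nearly linear data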
#Set Preference Customers reduction percent (number)
custom_redux=0
# Yearly firm loads (aMW)
# read BPA firm load column from file
df_load=pd.read_excel('../DATA/net_rev_data.xlsx',sheet_name=0,skiprows=[0,1], usecols=[9])
#Save as Preference Firm (PF), Industrial Firm (IP) and Export (ET)
PF_load_y=df_load.loc[[13]].values - custom_redux*df_load.loc[[13]].values
IP_load_y=df_load.loc[[3]].values - custom_redux* df_load.loc[[3]].values
ET_load_y=df_load.loc[[14]]
# Hourly hydro generation from FCRPS stochastic simulation
#df_hydro=pd.read_csv('../../CAPOW/CAPOW_SD/Stochastic_engine/PNW_hydro/FCRPS/BPA_owned_dams.csv', header=None)
df_hydro=pd.read_csv('new_BPA_hydro_daily.csv', usecols=([1]))
BPA_hydro=pd.DataFrame(data=df_hydro.loc[0:365*1200-1,:].sum(axis=1)/24, columns=['hydro'])
BPA_hydro[BPA_hydro>45000]=45000
#Remove CAISO bad_years
BPA_hydro=pd.DataFrame(np.reshape(BPA_hydro.values, (365,1200), order='F'))
BPA_hydro.drop([82, 150, 374, 377, 540, 616, 928, 940, 974, 980, 1129, 1191],axis=1, inplace=True)
#reshuffle
#BPA_hydro[[1, 122, 364, 543]]=BPA_hydro[[16, 126, 368, 547]]
BPA_hydro=pd.DataFrame(np.reshape(BPA_hydro.values, (365*1188), order='F'))
# Yearly resources other than hydro (aMW)
df_resources=pd.read_excel('../DATA/net_rev_data.xlsx',sheet_name=1,skiprows=[0,1], usecols=[9])
# -*- coding: utf-8 -*-
"""
Tools for calculating open-water evaporation using the aerodynamic mass-transfer approach.
"""
import cmath as cm
import numpy as np
import math as m
import pandas as pd
import multiprocessing as mp
class Aero(object):
"""
Manages meteorological time series input/output for aerodynamic
mass-transfer evaporation calculation and contains methods for batch and
single calculations.
An :obj:`Aero` object allows the aerodynamic mass-transfer evaporation
estimation to be calculated from meteorological data that is stored in a
:obj:`pandas.DataFrame` with a date or datetime-like index. The
:attr:`Aero.df` can be assigned on initialization or later, it can also be
reassigned at any time.
The :meth:`Aero.single_calc` static method calculates evaporation for a
single measurement set and can be used without creating an :obj:`Aero`
object, e.g. in another module. For calculating evaporation for a time
series of input meteorological data use the :meth:`Aero.run` method which
uses multiple processors (if they are available).
"""
def __init__(self, df=None):
if df is not None and not isinstance(df, pd.DataFrame):
raise TypeError("Must assign a pandas.DataFrame object")
self._df = df
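# Minimal usage sketch (names and values here are only examples: `met_df` is
# assumed to be a pandas.DataFrame with a datetime-like index and columns
# 'WS', 'P', 'T_air', 'T_skin', 'RH'; the 2 m sensor height and 1800 s
# timestep are arbitrary):
#
# aero = Aero(met_df)
# aero.run(sensor_height=2.0, timestep=1800)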
def run(self, sensor_height, timestep, variable_names=None, nproc=None):
"""
Run aerodynamic mass-transfer evaporation routine on time series data
that contains necessary input in-place and in parallel.
Arguments:
sensor_height (float): height of sensor in meters.
timestep (float or int): sensor sampling interval (timestep) in seconds.
Keyword Arguments:
variable_names (None or dict): default None. Dictionary with user
variable names as keys and variable names needed for
:mod:`aeroevap` as values. If None, the needed input variables
must be named correctly in the :attr:`Aero.df` dataframe: 'WS',
'P', 'T_air', 'T_skin', and 'RH' for windspeed, air pressure,
air temperature, skin temperature, and relative humidity
respectively.
nproc (None or int): default None. If None, use half of the available
cores for parallel calculations.
Returns:
None
Hint:
A :obj:`pandas.DataFrame` must be assigned to the :attr:`Aero.df`
instance property before calling :meth:`Aero.run`. If the names of
the required meteorological variables in the dataframe are not
named correctly you may pass a dictionary to the ``variable_names``
argument which maps your names to those used by ``AeroEvap``. For
example if your surface temperature column is named 'surface_temp'
then
>>> variable_names = {'surface_temp' : 'T_skin'}
"""
if not isinstance(self._df, pd.DataFrame):
print(
'ERROR: no pandas.DataFrame assigned to Aero.df, please '
'assign first.'
)
return
if variable_names is not None:
df = self._df.rename(columns=variable_names)
else:
df = self._df
df['date'] = df.index
df['SH'] = sensor_height
df['dt'] = timestep
input_vars = ['date', 'WS', 'P', 'T_air', 'T_skin', 'RH', 'SH', 'dt']
if not set(input_vars).issubset(df.columns):
print(
'ERROR: missing one or more needed columns for calculation:\n'
'{}'.format(', '.join(input_vars))
)
return
numeric_vars = ['WS', 'P', 'T_air', 'T_skin', 'RH', 'SH']
df[numeric_vars] = df[numeric_vars].astype(float)
# run each input using n processors
inputs = df[input_vars].values.tolist()
if not nproc:
nproc = mp.cpu_count() // 2 # use half cores
pool = mp.Pool(processes=nproc)
results = pool.map(_calc,inputs)
pool.close()
pool.join()
results_df = pd.concat(results)
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
@pytest.mark.parametrize("align_axis", [0, 1, "index", "columns"])
def test_compare_axis(align_axis):
# GH#30429
s1 = pd.Series(["a", "b", "c"])
s2 = pd.Series(["x", "b", "z"])
result = s1.compare(s2, align_axis=align_axis)
if align_axis in (1, "columns"):
indices = pd.Index([0, 2])
columns = pd.Index(["self", "other"])
expected = pd.DataFrame(
[["a", "x"], ["c", "z"]], index=indices, columns=columns
)
tm.assert_frame_equal(result, expected)
else:
indices = pd.MultiIndex.from_product([[0, 2], ["self", "other"]])
expected = pd.Series(["a", "x", "c", "z"], index=indices)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"keep_shape, keep_equal",
[
(True, False),
(False, True),
(True, True),
# False, False case is already covered in test_compare_axis
],
)
def test_compare_various_formats(keep_shape, keep_equal):
s1 = pd.Series(["a", "b", "c"])
s2 = pd.Series(["x", "b", "z"])
result = s1.compare(s2, keep_shape=keep_shape, keep_equal=keep_equal)
if keep_shape:
indices = pd.Index([0, 1, 2])
import gc as _gc
import pandas as _pd
import numpy as _np
from . import databases as _databases
from . import profiles as _profiles
from . import errors
class Columns(_databases.Columns):
"""
Container for the column names defined in this module.
"""
SPLIT_SUF = '_SPLIT'
REF = 'REF'
QRY = 'QRY'
REF_SPLIT = '{}{}'.format(REF, SPLIT_SUF)
QRY_SPLIT = '{}{}'.format(QRY, SPLIT_SUF)
PROF_Q = _databases.Columns.PROF_Q
PROF_A = _databases.Columns.PROF_A
STR_SEP = '|'
def get_IDs_names(
species,
):
"""
Returns dict of KEGG Organism IDs as keys and biological names as values.
Parameters
-------
species: list of str
List of full biological names to convert into KEGG Organism IDs.
Returns
------
dict
"""
kegg_db = _databases.KEGG('Orthology')
kegg_db.parse_organism_info(
organism=None,
reference_species=species,
IDs=None,
X_ref=None,
KOs=None,
IDs_only=True,
)
return {k.lower(): v for k, v in kegg_db.ID_name.items()}
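# Illustrative call (species names are arbitrary examples and the output below
# is hypothetical; actual keys/values come from the live KEGG database):
# >>> get_IDs_names(['Saccharomyces cerevisiae', 'Escherichia coli'])
# {'sce': 'Saccharomyces cerevisiae', 'eco': 'Escherichia coli'}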
def profilize_organism(*args, **kwargs):
"""
Returns pandas.DataFrame with Phylogenetic Profile for each ORF name of an
organism.
Parameters
-------
organism: str
Full biological name of the organism.
reference_species: list of str
List of full biological names to build the Phylogenetic Profile.
IDs: str, path
Filename of the KEGG Organism IDs. Downloaded to a temporary file if
<None>.
X_ref: str, path
Filename of the ORF-KEGG Orthology Group cross-reference.
Downloaded to a temporary file if <None>.
KOs: str, path
Filename of the KEGG Orthology Group-Organism cross-reference.
Downloaded to a temporary file if <None>.
threads: int
Number of threads to utilize when downloading from KEGG. More means
faster but can make KEGG block the download temporarily. Default: <2>
Returns
------
pandas.DataFrame
"""
kegg_db = _databases.KEGG('Orthology')
kegg_db.parse_organism_info(*args, **kwargs)
return kegg_db.organism_info.drop(columns=_databases.Columns.KEGG_ID)
def read_sga(
filename,
version=2,
):
"""
    Returns pandas.DataFrame with the Genetic Interaction Network from
    Costanzo's SGA experiment, either version 1 or 2.
Parameters
-------
filename: str, path
Filename of the SGA.
version: int
        Version number of Costanzo's SGA experiment; 1 or 2 available.
Returns
-------
pandas.DataFrame
"""
if version == 1:
sga = _databases.SGA1()
elif version == 2:
sga = _databases.SGA2()
else:
raise errors.ParserError("Only versions 1 and 2 of Costanzo's SGA experiment are supported.")
sga.parse(filename=filename)
return sga.sga
def read_profiles(
filename,
**kwargs
):
"""
Returns pandas.Series with prwlr.profiles.Profile objects from CSV file.
Together with prwlr.core.save_profiles provides a convenient way of
saving/reading-in prwlr.profiles.Profile objects to/from a flat text file.
Parameters
-------
filename: str, path
CSV file name.
Returns
------
pandas.Series
"""
ref_qry_df = _pd.read_csv(filename, **kwargs)
ref_qry_df[Columns.REF_SPLIT] = ref_qry_df[Columns.REF].str.split(Columns.STR_SEP)
ref_qry_df[Columns.QRY_SPLIT] = ref_qry_df[Columns.QRY].str.split(Columns.STR_SEP)
return ref_qry_df[[Columns.REF_SPLIT, Columns.QRY_SPLIT]].apply(
lambda x: _profiles.Profile(
reference=x[Columns.REF_SPLIT],
query=x[Columns.QRY_SPLIT],
),
axis=1,
)
def save_profiles(
series,
filename,
**kwargs
):
"""
Writes pandas.Series with prwlr.profiles.Profile objects to CSV file.
Together with prwlr.core.read_profiles provides a convenient way of
saving/reading-in prwlr.profiles.Profile objects to/from a flat text file.
    Parameters
    -------
    series: pandas.Series
        Series of prwlr.profiles.Profile objects to write.
    filename: str, path
        CSV file name.
"""
_pd.DataFrame(
{
Columns.REF: series.apply(lambda x: x.reference).str.join(Columns.STR_SEP),
Columns.QRY: series.apply(lambda x: x.query).str.join(Columns.STR_SEP),
},
).to_csv(filename, **kwargs)
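# Round-trip sketch (assumes `profiles` is a pandas.Series of
# prwlr.profiles.Profile objects; the filename is illustrative):
# save_profiles(profiles, 'profiles.csv', index=False)
# restored = read_profiles('profiles.csv')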
def read_network(
filename,
**kwargs
):
"""
Returns pandas.DataFrame representing a Genetic Interaction Network with
prwlr.profiles.Profile objects from CSV file. Together with
prwlr.core.save_profiles provides a convenient way of saving/reading-in
prwlr.profiles.Profile objects to/from a flat text file.
Parameters
-------
filename: str, path
CSV file name.
Returns
-------
pandas.DataFrame
"""
qry_ref_col = '{}_{}'.format(Columns.PROF_Q, Columns.REF)
qry_qry_col = '{}_{}'.format(Columns.PROF_Q, Columns.QRY)
arr_ref_col = '{}_{}'.format(Columns.PROF_A, Columns.REF)
arr_qry_col = '{}_{}'.format(Columns.PROF_A, Columns.QRY)
df = | _pd.read_csv(filename, **kwargs) | pandas.read_csv |
import matplotlib
import pandas as pd
matplotlib.use('agg')
import matplotlib.pyplot as plt
import matplotlib.colors as clr
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import seaborn as sns
import numpy as np
import scipy
import string
import glob
cadd = pd.read_csv("DeepSEA_CADD_GERP/gRNA_all_A.CADD.vcf",sep="\t")
deepsea = pd.read_csv("DeepSEA_CADD_GERP/DeepSEA.out.funsig",index_col=0)
gerp = pd.read_csv("DeepSEA_CADD_GERP/gRNA_all_GERP.tsv", sep="\t")
gerp.index = gerp['chrom']+":"+gerp['start'].astype(str)+"-"+gerp['end'].astype(str)
deepsea['name'] = deepsea['chr']+":"+(deepsea['pos']-1).astype(str)+"-"+deepsea['pos'].astype(str)
deepsea.index = deepsea['name']
cadd['name'] = "chr"+cadd['#Chrom'].astype(str)+":"+(cadd['Pos']-1).astype(str)+"-"+cadd['Pos'].astype(str)
cadd.index = cadd['name']
df = pd.read_csv("Editable_A_scores.tsv",sep="\t",index_col=0)
df['CADD'] = cadd['PHRED']
df['DeepSEA'] = deepsea['Functional significance score']
df['GERP'] = gerp['gerp_bp_score']
df['DeepSEA'] = df['DeepSEA'].apply(lambda x:-np.log10(x))
df.index = df.coord
df[['CADD','DeepSEA','GERP','HbFBase']].to_csv("Editable_A_scores.combined.scores.csv")
df = pd.read_csv("Editable_A_scores.combined.scores.csv",index_col=0)
# deepsea violin
from decimal import Decimal
sns.set_style("whitegrid")
plt.figure()
top_n = df[df['HbFBase']>=50]['DeepSEA'].tolist()
bot_n = df[df['HbFBase']==0]['DeepSEA'].tolist()
plot_df = | pd.DataFrame([top_n,bot_n]) | pandas.DataFrame |
#!/usr/bin/env python
from __future__ import print_function
import pandas as pd
import numpy as np
import random
import progressbar
import argparse
import json
import os
import sys
import glob
import csv
import re
import pickle
ap = argparse.ArgumentParser()
ap.add_argument("-o", "--outdir", required=True,
help="path to the output directory")
ap.add_argument("-d", "--dataset", required=True,
help="path to directory containing images")
ap.add_argument("-v", "--validfrac", type=float, default=0.02,
help="fraction of data to use for validation [0.02]")
ap.add_argument("-l", "--labels", default=[], nargs='+',
help="list of allowed class labels []")
ap.add_argument("-m", "--merge", action='append', nargs='+', default=[],
help="list of classes to merge []")
ap.add_argument("-c", "--clip", type=int, default=0,
help="clip the total number of images in each class [no clip]")
ap.add_argument("-s", "--sample", type=float, default=0.0,
help="oversample class if < frac of overlaps [no oversample]")
ap.add_argument("-r", "--dateRange", action='append', nargs=2, default=[],
help='filter for data between dates [no filter]')
ap.add_argument("-nD", "--notDate", action="store_true",
help="treat date selection as exclusion mask")
ap.add_argument("-p", "--places", default=[], nargs='+', metavar="beachname",
help="list of allowed places []")
ap.add_argument("-nP", "--notPlace", action="store_true",
help="treat place selection as exclusion mask")
ap.add_argument("-D", "--debug", action="store_true",
help="print debug information")
ap.add_argument("-b", "--batch", action="store_false",
help="batch mode (do not pause)")
args = vars(ap.parse_args())
def parse_daterange(dateRange):
startDateStr = dateRange[0]
endDateStr = dateRange[1]
dateRe = re.compile(r".*(\d{4})-(\d{2})-(\d{2}).*")
if dateRe.match(startDateStr) and dateRe.match(endDateStr):
return startDateStr, endDateStr
else:
return None, None
startDateLst = []
endDateLst = []
for e in args["dateRange"]:
startDateStr, endDateStr = parse_daterange(e)
startDateLst.append(startDateStr)
endDateLst.append(endDateStr)
annFile = os.path.join(args['dataset'], "BOXES.csv")
print("[INFO] loading {}".format(annFile))
annTab = pd.read_csv(annFile, header=None, skipinitialspace=True,
names=["imageName","x1","y1","x2","y2","label",
"nXpix", "nYpix", "date", "location", "qual"])
annTab.drop(columns=["nXpix", "nYpix"], inplace=True)
s = annTab[annTab.label == "NEG"].duplicated(subset=["imageName"])
indices = s[s].index
annTab.drop(indices, inplace=True)
print("[INFO] dropped {:d} duplicate empty (NEG) images".format(len(indices)))
allowedLabels = args["labels"]
if len(allowedLabels) > 0:
print("[INFO] restricting labels to {}".format(allowedLabels))
annTab = annTab[annTab["label"].isin(allowedLabels)]
annTab["date"] = pd.to_datetime(annTab["date"])
maskLst = []
for startDateStr, endDateStr in zip(startDateLst, endDateLst):
if startDateStr is not None and endDateStr is not None:
print("[INFO] filtering between dates {} and {}".format(startDateStr,
endDateStr))
mask = (annTab["date"] >= startDateStr) & (annTab["date"] <= endDateStr)
maskLst.append(mask)
for mask in maskLst[1:]:
maskLst[0] = maskLst[0] | mask
if len(maskLst) > 0:
if args["notDate"]:
print("[INFO] treating date mask as exclusion")
maskLst[0] = ~maskLst[0]
print("[INFO] applying date mask accepting {:d} / {:d} boxes"
.format(maskLst[0].sum(), len(maskLst[0])))
annTab = annTab[maskLst[0]]
if len(args["places"]) > 0:
mask = annTab["location"].isin(args["places"])
if args["notPlace"]:
print("[INFO] treating place mask as exclusion")
mask = ~mask
print("[INFO] applying place mask accepting {:d} / {:d} boxes"
.format(mask.sum(), len(mask)))
annTab = annTab[mask]
if len(annTab) == 0:
exit("[ERR] table has no entries - check selection criteria. Exiting ...")
else:
print("[INFO] processing data from {}".format(annTab["location"].unique()))
annTab.drop(columns=["date", "location"], inplace=True)
uniqueLabels = annTab["label"].unique().tolist()
uniqueLabels.sort()
labLookup = dict(zip(uniqueLabels, uniqueLabels))
for mergeLst in args["merge"]:
if len(mergeLst) < 2:
exit("[ERR] list of keys to be merged is too short: "
"{}".format(mergeLst))
print("[INFO] merging labels ['{}']<-{}".format(mergeLst[0], mergeLst[1:]))
for k in mergeLst[1:]:
labLookup[k] = mergeLst[0]
for k, v in labLookup.items():
annTab.loc[annTab.label == k, "label"] = v
uniqueLabels = annTab["label"].unique().tolist()
uniqueLabels.sort()
nClasses = len(uniqueLabels)
print("[INFO] labels in dataset: {}".format(uniqueLabels))
labGroups = annTab.groupby("label")
boxCntSer = labGroups.count()["imageName"]
nTargetGlobal = boxCntSer.max()
if args["clip"] > 0:
nTargetGlobal = args["clip"]
print("[INFO] aiming for {:d} samples in each class".format(nTargetGlobal))
posCntLst = []
olapCntLst = []
for label in uniqueLabels:
imgLst = annTab[annTab.label == label]["imageName"].unique().tolist()
posCntLst.append(len(imgLst))
tmpTab = annTab[annTab["imageName"].isin(imgLst)]
imgLst = tmpTab[tmpTab.label != label]["imageName"].unique().tolist()
olapCntLst.append(len(imgLst))
imgCntSer = pd.Series(posCntLst, uniqueLabels)
olapCntSer = pd.Series(olapCntLst, uniqueLabels)
olapFracSer = olapCntSer / imgCntSer
olapFracSer.sort_values(ascending=False, inplace=True)
overlapTab = np.zeros((nClasses, nClasses), dtype=int)
for i in range(nClasses):
labX = uniqueLabels[i]
imgXlst = set(labGroups["imageName"].get_group(labX))
for j in range(i+1, nClasses):
labY = uniqueLabels[j]
imgYlst = set(labGroups["imageName"].get_group(labY))
overlapTab[j, i] = len(imgXlst & imgYlst)
print("[INFO] image overlap matrix:\n")
for i in range(nClasses):
print(uniqueLabels[i], end=" ")
for j in range(0, i+1):
print("{:6d}".format(overlapTab[i, j]), end=" ")
print(" "*(nClasses - j -1), end="")
print(" {:9d}".format(boxCntSer[uniqueLabels[i]]), end="")
print(" {:9d}".format(imgCntSer[uniqueLabels[i]]))
print(" " + " ".join(uniqueLabels), end="")
print(" #BOXES #IMAGES")
print("\nPercentage of images with overlaps in class:")
print(" ", end=" ")
for i in range(nClasses):
print(" {:3.0f}".format(olapFracSer[uniqueLabels[i]]*100), end="")
print("\n")
if args["batch"]:
input("\nPress <Return> to continue ...")
splitSer = pd.Series(np.zeros(len(uniqueLabels), dtype="i8"), uniqueLabels,
name="splitCount")
splitSer.index.name = "label"
validGrpLst = []
testGrpLst = []
trainGrpLst = []
print("[INFO] splitting off validation set {:2.2f}%".format(
args["validfrac"]*100))
for label, olapFracCnt in olapFracSer.items():
nTarget = int(min(nTargetGlobal, boxCntSer[label]) * args["validfrac"]
- splitSer[label])
if nTarget <=0:
continue
imgGroups = annTab[annTab["label"] == label].groupby("imageName")
imgCntTab = imgGroups.count()["label"].to_frame("nCurrentObjects")
imgCntTab.reset_index(inplace=True)
imgCntTab = imgCntTab.sample(frac=1).reset_index(drop=True)
cumSumArr = imgCntTab["nCurrentObjects"].cumsum().to_numpy()
indx = np.argwhere(cumSumArr >= nTarget)
imgSel = imgCntTab["imageName"][:indx[0, 0]+1]
selAnnTab = annTab[annTab["imageName"].isin(imgSel)]
indices = annTab[annTab["imageName"].isin(imgSel)].index
annTab.drop(indices, inplace=True)
selGrpLst = [df for _, df in selAnnTab.groupby("imageName")]
random.shuffle(selGrpLst)
validGrpLst += selGrpLst
selTab = pd.concat(selGrpLst, ignore_index=True)
labSelGroups = selTab.groupby("label")
splitCntSer = labSelGroups.count()["imageName"]
splitCntSer.name = "splitCount"
splitSer = splitSer.add(splitCntSer, fill_value=0)
random.shuffle(validGrpLst)
print("[INFO] validation split (number of boxes):")
print(splitSer)
masterImgGroups = annTab.groupby("imageName")
masterImgCountTab= masterImgGroups.count()["label"].to_frame("nObjects")
masterImgCountTab.reset_index(inplace=True)
masterImgCountTab["splitCount"] = 0
splitSer[:] = 0
for label, olapFracCnt in olapFracSer.items():
print("[INFO] processing the '{}' class with {:02.2f}% overlap". format(label, olapFracCnt*100))
nTarget = max(0, nTargetGlobal - splitSer[label])
print("[INFO] > previously selected boxes {:d}". format(int(splitSer[label])))
print("[INFO] > target boxes to select is {:d}".format(int(nTarget)))
if nTarget == 0:
print("[INFO] > target number of boxes already reached")
continue
imgPosGroups = annTab[annTab["label"] == label].groupby("imageName")
imgPosCntTab = imgPosGroups.count()["label"].to_frame("nCurrentObjects")
imgPosCntTab.reset_index(inplace=True)
imgLst = imgPosGroups.groups.keys()
annLocTab = annTab[annTab["imageName"].isin(imgLst)]
imgNegGroups = annLocTab[annLocTab["label"] != label].groupby("imageName")
imgNegCntTab = imgNegGroups.count()["label"].to_frame("nOtherObjects")
imgNegCntTab.reset_index(inplace=True)
imgCntTab = imgPosCntTab.merge(imgNegCntTab, how="left",
left_on="imageName", right_on="imageName")
imgCntTab.fillna(0, inplace=True)
if False:
print("Percentage free = {:02.2f} ({:d})".format(
np.sum(imgCntTab["nOtherObjects"] == 0) *100 / len(imgCntTab),
len(imgCntTab)))
print(imgCntTab.head())
imgCntTab = imgCntTab.merge(masterImgCountTab, how="left",
left_on="imageName", right_on="imageName")
imgCntTab = imgCntTab[imgCntTab["splitCount"] == 0]
if len(imgCntTab) == 0:
print("[INFO] > all images in this class have already been split")
continue
imgCntTab = imgCntTab.sample(frac=1).reset_index(drop=True)
cumSumArr = imgCntTab["nCurrentObjects"].cumsum().to_numpy()
indx = np.argwhere(cumSumArr >= nTarget)
if len(indx) == 0:
imgSel = imgCntTab["imageName"]
else:
imgSel = imgCntTab["imageName"][:indx[0, 0]+1]
selAnnTab = annLocTab[annLocTab["imageName"].isin(imgSel)]
selGrpLst = [df for _, df in selAnnTab.groupby("imageName")]
random.shuffle(selGrpLst)
tmp = pd.concat(selGrpLst).reset_index(drop=True)
curBoxCnt = len(tmp[tmp.label == label])
print("[INFO] > currently available boxes: {:d}".format(curBoxCnt))
if args["sample"] > 0 and args["sample"] < 1 and curBoxCnt < nTarget:
if olapFracCnt <= args["sample"]:
imgCntTab = imgCntTab[imgCntTab["nOtherObjects"] == 0]
cumSumArr = imgCntTab["nCurrentObjects"].cumsum().to_numpy()
print("[INFO] > replicating using {:d} images ". format(len(imgCntTab)), end="")
print("containing {:d} boxes".format(cumSumArr[-1]))
n = int((nTarget - curBoxCnt) / cumSumArr[-1])
r = nTarget - cumSumArr[-1] * (1 + n)
if r <= 0:
r = 0
indx = np.argwhere(cumSumArr >= r)
if len(indx) == 0:
indx = 0
else:
indx = indx[0, 0]
repLst = []
if n > 0:
imgSel = imgCntTab["imageName"]
selAnnTab = annLocTab[annLocTab["imageName"].isin(imgSel)] .reset_index(drop=True)
tmpGrpLst = [df for _, df in selAnnTab.groupby("imageName")]
print("[INFO] > replicating whole table x {:d}".format(n))
repLst = tmpGrpLst * n
random.shuffle(repLst)
if indx > 0:
imgSel = imgCntTab["imageName"].iloc[:indx+1]
selAnnTab = annLocTab[annLocTab["imageName"].isin(imgSel)] .reset_index(drop=True)
tmpGrpLst = [df for _, df in selAnnTab.groupby("imageName")]
print("[INFO] > replicating to index {:d}".format(indx))
repLst += tmpGrpLst
random.shuffle(repLst)
if len(repLst)>1:
selGrpLst += repLst
trainGrpLst += selGrpLst
selTab = pd.concat(selGrpLst, ignore_index=True)
labSelGroups = selTab.groupby("label")
splitCntSer = labSelGroups.count()["imageName"]
splitCntSer.name = "splitCount"
splitSer = splitSer.add(splitCntSer, fill_value=0)
if args["debug"]:
print(splitSer)
random.shuffle(validGrpLst)
random.shuffle(trainGrpLst)
print("[INFO] merging master tables (make take a little while) ... ")
trainTab = | pd.concat(trainGrpLst) | pandas.concat |
# -*- coding: utf-8 -*-
"""Data_Analysis.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/106zvCx_5_p0TlKI3zkCcEb0VbnWwdahx
"""
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import SGDRegressor
import xgboost as xgb
from sklearn.metrics import mean_squared_error, r2_score
import re
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
"""
1. Preprocessing Functions:
"""
def calc_change_sentiment(data):
change_in_sent = []
change_in_sent.append(data['compound'][0])
for i in range(1,len(data['compound'])):
if data['compound'][i] == 0:
change_in_sent.append(0)
elif data['compound'][i] < 0 or data['compound'][i] > 0:
dif = data['compound'][i] - data['compound'][(i-1)]
change_in_sent.append(dif)
return change_in_sent
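# Worked example (illustrative values): for compound scores [0.2, 0.0, -0.3]
# the function returns [0.2, 0, -0.3] - the first score, then 0 for the neutral
# step, then the difference to the previous period (-0.3 - 0.0).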
def remove_pattern(input_txt, pattern):
r = re.findall(pattern, input_txt)
for i in r:
input_txt = re.sub(i, '', input_txt)
return input_txt
def clean_tweets(tweets):
tweets = np.vectorize(remove_pattern)(tweets, "RT @[\w]*:")
tweets = np.vectorize(remove_pattern)(tweets, "@[\w]*")
tweets = np.vectorize(remove_pattern)(tweets, "https?://[A-Za-z0-9./]*")
tweets = np.core.defchararray.replace(tweets, "[^a-zA-Z]", " ")
return tweets
def classify_news(dataframe):
day23, day24, day25, day26, day27, day28, day29, day30, day31, day32, day33, day34, day35, day36, day37, day38 = [],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]
for i in range(len(dataframe['timestamp'])):
if dataframe['timestamp'][i].day == 23 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day23.append(i)
elif dataframe['timestamp'][i].day == 24 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day24.append(i)
elif dataframe['timestamp'][i].day == 25 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day25.append(i)
elif dataframe['timestamp'][i].day == 26 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day26.append(i)
elif dataframe['timestamp'][i].day == 27 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day27.append(i)
elif dataframe['timestamp'][i].day == 28 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day28.append(i)
elif dataframe['timestamp'][i].day == 29 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day29.append(i)
elif dataframe['timestamp'][i].day == 30 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day30.append(i)
elif dataframe['timestamp'][i].day == 1 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day31.append(i)
elif dataframe['timestamp'][i].day == 2 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day32.append(i)
elif dataframe['timestamp'][i].day == 3 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day33.append(i)
elif dataframe['timestamp'][i].day == 4 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day34.append(i)
elif dataframe['timestamp'][i].day == 5 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day35.append(i)
elif dataframe['timestamp'][i].day == 6 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day36.append(i)
elif dataframe['timestamp'][i].day == 7 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day37.append(i)
elif dataframe['timestamp'][i].day == 8 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day38.append(i)
else:
pass
news_d23,news_d24,news_d25,news_d26,news_d27,news_d28,news_d29,news_d30,news_d31,news_d32,news_d33,news_d34,news_d35,news_d36,news_d37,news_d38 = dataframe.iloc[day23],dataframe.iloc[day24],dataframe.iloc[day25], dataframe.iloc[day26], dataframe.iloc[day27],dataframe.iloc[day28],dataframe.iloc[day29],dataframe.iloc[day30],dataframe.iloc[day31], dataframe.iloc[day32],dataframe.iloc[day33],dataframe.iloc[day34],dataframe.iloc[day35],dataframe.iloc[day36],dataframe.iloc[day37],dataframe.iloc[day38]
return news_d23,news_d24,news_d25,news_d26,news_d27,news_d28,news_d29,news_d30,news_d31,news_d32,news_d33,news_d34,news_d35,news_d36,news_d37,news_d38
def preprocess_headlines(data):
data.drop_duplicates(subset='headline',keep=False, inplace=True)
data.drop(['ticker','neg','neu','pos'], axis=1, inplace=True)
data.rename(columns={'date_time':'timestamp'},inplace=True)
data.set_index('timestamp', inplace=True)
data_30m = data.resample('30min').median().ffill().reset_index()
headline_sma = data_30m['compound'].rolling(3).mean()
data_30m['Compound SMA(3) Headlines'] = headline_sma
change_in_sent=calc_change_sentiment(data_30m)
data_30m['change in sentiment headlines'] = change_in_sent
data_30m['change in sentiment headlines (t-1)'] = data_30m['change in sentiment headlines'].shift(1)
news_d23,news_d24,news_d25,news_d26,news_d27,news_d28,news_d29,news_d30,news_d31,news_d32,news_d33,news_d34,news_d35,news_d36,news_d37,news_d38 = classify_news(data_30m)
news_d23_red,news_d24_red, news_d25_red, news_d28_red,news_d29_red,news_d30_red,news_d31_red,news_d32_red,news_d35_red,news_d36_red,news_d37_red,news_d38_red = news_d23.iloc[4:],news_d24.iloc[1:],news_d25.iloc[1:],news_d28.iloc[1:],news_d29.iloc[1:],news_d30.iloc[1:],news_d31.iloc[1:],news_d32.iloc[1:],news_d35.iloc[1:],news_d36.iloc[1:],news_d37.iloc[1:],news_d38.iloc[1:]
frames_news = [news_d23_red,news_d24_red, news_d25_red, news_d28_red,news_d29_red,news_d30_red,news_d31_red,news_d32_red,news_d35_red,news_d36_red,news_d37_red,news_d38_red]
processed_headlines = pd.concat(frames_news)
return processed_headlines
def preprocess_posts(dataframe):
dataframe.drop(['neg','neu','pos','followers_count'],axis=1,inplace=True)
dataframe['timestamp'] = dataframe['timestamp'].dt.tz_localize('UTC').dt.tz_convert('America/Montreal').dt.tz_localize(None)
dataframe.set_index('timestamp', inplace=True)
twitter_df_30m = dataframe.resample('30min').median().ffill().reset_index()
change_in_sent = calc_change_sentiment(twitter_df_30m)
twitter_sma = twitter_df_30m['compound'].rolling(3).mean()
twitter_df_30m['Compound SMA(3) Twitter'] = twitter_sma
twitter_df_30m['change in sentiment twitter'] = change_in_sent
twitter_df_30m['change in sentiment twitter (t-1)'] = twitter_df_30m['change in sentiment twitter'].shift(1)
tw_news_d23,tw_news_d24,tw_news_d25,tw_news_d26,tw_news_d27,tw_news_d28,tw_news_d29,tw_news_d30,tw_news_d31,tw_news_d32,tw_news_d33,tw_news_d34,tw_news_d35,tw_news_d36,tw_news_d37,tw_news_d38 = classify_news(twitter_df_30m)
tw_news_d23_30m,tw_news_d24_30m,tw_news_d25_30m, tw_news_d28_30m,tw_news_d29_30m,tw_news_d30_30m,tw_news_d31_30m,tw_news_d32_30m,tw_news_d35_30m,tw_news_d36_30m,tw_news_d37_30m,tw_news_d38_30m = tw_news_d23.iloc[4:],tw_news_d24.iloc[1:],tw_news_d25.iloc[1:],tw_news_d28.iloc[1:],tw_news_d29.iloc[1:],tw_news_d30.iloc[1:],tw_news_d31.iloc[1:],tw_news_d32.iloc[1:],tw_news_d35.iloc[1:],tw_news_d36.iloc[1:],tw_news_d37.iloc[1:],tw_news_d38.iloc[1:]
frames = [tw_news_d23_30m,tw_news_d24_30m,tw_news_d25_30m,tw_news_d28_30m,tw_news_d29_30m,tw_news_d30_30m,tw_news_d31_30m,tw_news_d32_30m,tw_news_d35_30m,tw_news_d36_30m,tw_news_d37_30m,tw_news_d38_30m]
processed_tweets = pd.concat(frames)
return processed_tweets
"""2 Modeling Functions:"""
def baseline_model(data):
pred = data['SMA(3)'][3:]
actu = data['Adj Close'][3:]
rmse = np.sqrt(mean_squared_error(actu,pred))
r2_sco = r2_score(actu,pred)
return rmse, r2_sco
def linear_modeling_no_sentiment(dataframe):
x_var = ['Adj Close','Scaled Volume','SMA(3)']
i = len(dataframe['Percent Price Change Within Period (t+1)'])-4
y_train, y_test = dataframe['Percent Price Change Within Period (t+1)'][3:i], dataframe['Percent Price Change Within Period (t+1)'][i:-1]
X_train, X_test = dataframe[x_var][3:i], dataframe[x_var][i:-1]
lm = LinearRegression()
lm.fit(X_train,y_train)
predictions = lm.predict(X_test)
rmse = np.sqrt(mean_squared_error(y_test,predictions))
r2_sco = r2_score(y_test,predictions)
reg = SGDRegressor(random_state=42)
reg.fit(X_train, y_train)
predictions2 = reg.predict(X_test)
rmse2 = np.sqrt(mean_squared_error(y_test,predictions2))
r2_sco2 = r2_score(y_test,predictions2)
return rmse,r2_sco,rmse2,r2_sco2
def linear_modeling_headlines(dataframe):
x_var = ['Adj Close','Scaled Volume','compound','Compound SMA(3) Headlines','SMA(3)','change in sentiment headlines','change in sentiment headlines (t-1)']
i = len(dataframe['Percent Price Change Within Period (t+1)'])-4
y_train, y_test = dataframe['Percent Price Change Within Period (t+1)'][:i], dataframe['Percent Price Change Within Period (t+1)'][i:-1]
X_train, X_test = dataframe[x_var][:i], dataframe[x_var][i:-1]
lm = LinearRegression()
lm.fit(X_train,y_train)
predictions = lm.predict(X_test)
rmse = np.sqrt(mean_squared_error(y_test,predictions))
r2_sco = r2_score(y_test,predictions)
reg = SGDRegressor(random_state=42)
reg.fit(X_train, y_train)
predictions2 = reg.predict(X_test)
rmse2 = np.sqrt(mean_squared_error(y_test,predictions2))
r2_sco2 = r2_score(y_test,predictions2)
xg_reg = xgb.XGBRegressor(colsample_bytree= 0.3, gamma= 0.0, learning_rate= 0.2, max_depth= 5, n_estimators= 20000)
# xg_reg = xgb.XGBRegressor(colsample_bytree= 0.4, gamma= 0.4, learning_rate= 0.05, max_depth= 4, n_estimators= 10000)
xg_reg.fit(X_train,y_train)
preds3 = xg_reg.predict(X_test)
rmse3 = np.sqrt(mean_squared_error(y_test, preds3))
r2_sco3 = r2_score(y_test,preds3)
svr = SVR(kernel='rbf', C=0.01, epsilon=0.001)
svr.fit(X_train,y_train)
preds4 = svr.predict(X_test)
rmse4 = np.sqrt(mean_squared_error(y_test,preds4))
r2_sco4 = r2_score(y_test,preds4)
return rmse,r2_sco,rmse2,r2_sco2,rmse3,r2_sco3,rmse4,r2_sco4
def linear_model_twitter(dataframe):
x_var = ['Adj Close','Scaled Volume','compound','Compound SMA(3) Twitter','SMA(3)','change in sentiment twitter','change in sentiment twitter (t-1)']
i = len(dataframe['Percent Price Change Within Period (t+1)'])-4
y_train, y_test = dataframe['Percent Price Change Within Period (t+1)'][:i], dataframe['Percent Price Change Within Period (t+1)'][i:-1]
X_train, X_test = dataframe[x_var][:i], dataframe[x_var][i:-1]
lm = LinearRegression()
lm.fit(X_train,y_train)
predictions = lm.predict(X_test)
rmse = np.sqrt(mean_squared_error(y_test,predictions))
r2_sco = r2_score(y_test,predictions)
reg = SGDRegressor(random_state=42)
reg.fit(X_train, y_train)
predictions2 = reg.predict(X_test)
rmse2 = np.sqrt(mean_squared_error(y_test,predictions2))
r2_sco2 = r2_score(y_test,predictions2)
xg_reg = xgb.XGBRegressor(colsample_bytree= 0.3, gamma= 0.0, learning_rate= 0.2, max_depth= 5, n_estimators= 20000)
# xg_reg = xgb.XGBRegressor(colsample_bytree= 0.4, gamma= 0.4, learning_rate= 0.05, max_depth= 4, n_estimators= 10000)
xg_reg.fit(X_train,y_train)
preds3 = xg_reg.predict(X_test)
rmse3 = np.sqrt(mean_squared_error(y_test, preds3))
r2_sco3 = r2_score(y_test,preds3)
svr = SVR(kernel='rbf', C=0.01, epsilon=0.001)
svr.fit(X_train,y_train)
preds4 = svr.predict(X_test)
rmse4 = np.sqrt(mean_squared_error(y_test,preds4))
r2_sco4 = r2_score(y_test,preds4)
return rmse,r2_sco,rmse2,r2_sco2,rmse3,r2_sco3,rmse4,r2_sco4
def multi_model_full(dataframe):
x_var = ['Adj Close','Scaled Volume','compound_y','compound_x','Compound SMA(3) Headlines','Compound SMA(3) Twitter','SMA(3)','change in sentiment headlines','change in sentiment headlines (t-1)','change in sentiment twitter','change in sentiment twitter (t-1)']
i = len(dataframe['Percent Price Change Within Period (t+1)'])-4
y_train, y_test = dataframe['Percent Price Change Within Period (t+1)'][:i], dataframe['Percent Price Change Within Period (t+1)'][i:-1]
X_train, X_test = dataframe[x_var][:i], dataframe[x_var][i:-1]
lm = LinearRegression()
lm.fit(X_train,y_train)
predictions = lm.predict(X_test)
rmse = np.sqrt(mean_squared_error(y_test,predictions))
r2_sco = r2_score(y_test,predictions)
reg = SGDRegressor(random_state=42)
reg.fit(X_train, y_train)
predictions2 = reg.predict(X_test)
rmse2 = np.sqrt(mean_squared_error(y_test,predictions2))
r2_sco2 = r2_score(y_test,predictions2)
xg_reg = xgb.XGBRegressor(colsample_bytree= 0.3, gamma= 0.0, learning_rate= 0.2, max_depth= 5, n_estimators= 20000)
xg_reg.fit(X_train,y_train)
preds3 = xg_reg.predict(X_test)
rmse3 = np.sqrt(mean_squared_error(y_test, preds3))
r2_sco3 = r2_score(y_test,preds3)
rf_regr = RandomForestRegressor(n_estimators=20, max_depth=600, random_state=42)
rf_regr.fit(X_train,y_train)
preds4 = rf_regr.predict(X_test)
rmse4 = np.sqrt(mean_squared_error(y_test, preds4))
r2_sco4 = r2_score(y_test,preds4)
svr = SVR(kernel='rbf', C=0.01, epsilon=0.001)
svr.fit(X_train,y_train)
preds5 = svr.predict(X_test)
rmse5 = np.sqrt(mean_squared_error(y_test,preds5))
r2_sco5 = r2_score(y_test,preds5)
return rmse,r2_sco,rmse2,r2_sco2,rmse3,r2_sco3,rmse4,r2_sco4,rmse5,r2_sco5
"""## 2. Evaluate Model with Individual Stocks:"""
def import_data(ticker):
# 1. Historical Stock Data:
stock_df = pd.read_csv('Dataset/1.Stock_Data/'+ticker+'_data.csv', index_col=0, parse_dates=['Datetime'])
stock_df['Percent Price Change Within Period (t+1)'] = stock_df['Percent Price Change Within Period'].shift(-1)
# 2. Headline Data:
headlines1 = pd.read_csv('Dataset/2.FinViz_Headline_Data/'+ticker+'_2020-09-23_2020-10-07.csv', index_col=0, parse_dates=['date_time'])
frames = [headlines1]
headlines_df = pd.concat(frames)
headlines_df.drop_duplicates(subset='headline',keep='first',inplace=True)
# 3. Twitter Data:
twitter1 = pd.read_csv('Dataset/3.Twitter_Data/'+ticker+'_2020-09-23_2020-10-07.csv', index_col=0, parse_dates=['timestamp'])
# twitter2 = pd.read_csv('3.Twitter_Data/'+ticker+'_2020-10-07.csv',index_col=0, parse_dates=['timestamp'])
# twitter3 = pd.read_csv('Dataset/3.Twitter_Data/'+ticker+'_2020-10-07_2.csv',index_col=0, parse_dates=['timestamp'])
frames = [twitter1]
twitter_df = pd.concat(frames)
twitter_df.drop_duplicates(subset='tweet_text',keep='first', inplace=True)
twitter_df.sort_values('timestamp',ascending=False,inplace=True)
twitter_df.reset_index(drop=True)
# twitter_df.to_csv('Dataset/3.Twitter_Data/'+ticker+'_2020-09-23_2020-10-07.csv')
return stock_df,headlines_df,twitter_df
def evaluate_models(baseline_df, headline_df, twitter_df):
#1. Baseline:
baseline_rmse, baseline_r2 = baseline_model(baseline_df)
baseline_df2 = baseline_df
baseline_df2['Percent Price Change Within Period (t+1)'] = baseline_df2['Percent Price Change Within Period'].shift(-1)
lm_baseline_rmse, lm_baseline_r2, sgd_baseline_rmse, sgd_baseline_r2 = linear_modeling_no_sentiment(baseline_df2)
#2. Headline Final Merge:
headlines_final = preprocess_headlines(headline_df)
    with_headlines_df = baseline_df.merge(headlines_final, left_on='Datetime', right_on='timestamp').drop('timestamp',axis=1)
with_headlines_df['Percent Price Change Within Period (t+1)'] = with_headlines_df['Percent Price Change Within Period'].shift(-1)
#3. Twitter Final Merge:
final_twitter = preprocess_posts(twitter_df)
    with_twitter_df = baseline_df.merge(final_twitter, left_on='Datetime', right_on='timestamp').drop('timestamp',axis=1)
with_twitter_df['Percent Price Change Within Period (t+1)'] = with_twitter_df['Percent Price Change Within Period'].shift(-1)
full_df = with_twitter_df.merge(headlines_final, left_on='Datetime', right_on='timestamp').drop('timestamp',axis=1)
full_df['Percent Price Change Within Period (t+1)'] = full_df['Percent Price Change Within Period'].shift(-1)
#5. Evaluating Models:
lm_headlines_rmse, lm_headlines_r2, sgd_headlines_rmse, sgd_headlines_r2,xgb_headlines_rmse,xgb_headlines_r2,svr_headlines_rmse,svr_headlines_r2 = linear_modeling_headlines(with_headlines_df)
lm_twitter_rmse, lm_twitter_r2, sgd_twitter_rmse, sgd_twitter_r2,xgb_twitter_rmse,xgb_twitter_r2,svr_twitter_rmse,svr_twitter_r2 = linear_model_twitter(with_twitter_df)
lm_all_rmse, lm_all_r2, sgd_all_rmse, sgd_all_r2, xgb_all_rmse, xgb_all_r2, rf_all_rmse, rf_all_r2,svr_all_rmse,svr_all_r2 = multi_model_full(full_df)
result_dict = {
'RMSE - Baseline':baseline_rmse, 'R2 - Baseline':baseline_r2, 'Linear RMSE - Baseline':lm_baseline_rmse, 'SGD RMSE - Baseline':sgd_baseline_rmse,
'Linear RMSE - Only Headlines': lm_headlines_rmse, 'SGD RMSE - Only Headlines':sgd_headlines_rmse, 'XGB RMSE - Only Headlines':xgb_headlines_rmse, 'SVR RMSE - Only Headlines':svr_headlines_rmse,
'Linear RMSE - Only Twitter':lm_twitter_rmse, 'SGD RMSE - Only Twitter':sgd_twitter_rmse, 'XGB RMSE - Only Twitter':xgb_twitter_rmse, 'SVR RMSE - Only Twitter':svr_twitter_rmse,
'Linear RMSE - All':lm_all_rmse, 'SGD RMSE - All':sgd_all_rmse, 'XGB RMSE - All':xgb_all_rmse, 'RF RMSE - All':rf_all_rmse,'SVR RMSE - All':svr_all_rmse
}
#7. Convert to DataFrame:
result_df = pd.DataFrame.from_dict(result_dict, orient='index', columns=['Values'])
#result_df.to_csv('~/LighthouseLabs-Final/Report_Analysis/AAPL_complete_analysis.csv')
return result_df, full_df
stock_df,headlines_df,twitter_df = import_data('AAPL')
result_df, full_df = evaluate_models(stock_df,headlines_df,twitter_df)
import seaborn as sn
from matplotlib.pyplot import figure
corrMatrix = full_df.corr()
plt.figure(figsize=(20,15))
sn.heatmap(corrMatrix, annot=True)
plt.show()
i = round(len(full_df['Percent Price Change Within Period (t+1)'])*0.6)
x_var_base=['Adj Close','Scaled Volume','SMA(3)']
x_var_headlines=['Adj Close','Scaled Volume','compound_y','Compound SMA(3) Headlines','SMA(3)','change in sentiment headlines','change in sentiment headlines (t-1)']
x_var_twitter=['Adj Close','Scaled Volume','compound_x','Compound SMA(3) Twitter','SMA(3)','change in sentiment twitter','change in sentiment twitter (t-1)']
x_var_full=['Adj Close','Scaled Volume','compound_y','compound_x','Compound SMA(3) Headlines','Compound SMA(3) Twitter','SMA(3)','change in sentiment headlines','change in sentiment headlines (t-1)','change in sentiment twitter','change in sentiment twitter (t-1)']
X_train_base,X_test_base=full_df[x_var_base][:i],full_df[x_var_base][i:-1]
X_predic_base = full_df[x_var_base][:-1]
X_train_headlines,X_test_headlines=full_df[x_var_headlines][:i],full_df[x_var_headlines][i:-1]
X_predic_headlines = full_df[x_var_headlines][:-1]
X_train_twitter,X_test_twitter=full_df[x_var_twitter][:i],full_df[x_var_twitter][i:-1]
X_predic_twitter = full_df[x_var_twitter][:-1]
X_train_full, X_test_full = full_df[x_var_full][:i], full_df[x_var_full][i:-1]
X_predic_full = full_df[x_var_full][:-1]
y_train, y_test = full_df['Percent Price Change Within Period (t+1)'][:i], full_df['Percent Price Change Within Period (t+1)'][i:-1]
lm = LinearRegression()
lm.fit(X_train_base,y_train)
preds1 = lm.predict(X_predic_base)
preds1 = np.append(preds1,np.NaN)
full_df['base price predictions linear'] = ((preds1/100) * full_df['Adj Close']) + full_df['Adj Close']
svr = SVR(kernel='rbf', C=0.2, epsilon=0.0001)
svr.fit(X_train_headlines,y_train)
preds3 = svr.predict(X_predic_headlines)
preds3 = np.append(preds3,np.NaN)
full_df['headlines price predictions svr'] = ((preds3/100) * full_df['Adj Close']) + full_df['Adj Close']
svr = SVR(kernel='rbf', C=0.2, epsilon=0.0001)
svr.fit(X_train_twitter,y_train)
preds4 = svr.predict(X_predic_twitter)
preds4 = np.append(preds4,np.NaN)
full_df['twitter price predictions svr'] = ((preds4/100) * full_df['Adj Close']) + full_df['Adj Close']
# lm = LinearRegression()
# lm.fit(X_train_full,y_train)
# preds5 = lm.predict(X_predic_full)
svr = SVR(kernel='rbf', C=0.2, epsilon=0.0001)
svr.fit(X_train_full,y_train)
preds5 = svr.predict(X_predic_full)
preds5 = np.append(preds5,np.NaN)
full_df['full price predictions linear'] = ((preds5/100) * full_df['Adj Close']) + full_df['Adj Close']
fig = plt.figure(figsize=(20,30))
price_ax = plt.subplot(2,1,1)
price_ax.plot(full_df.index[:-1], full_df['Adj Close'][:-1], label='Adj Close')
price_ax.plot(full_df.index[:-1], full_df['SMA(3)'][:-1], label='SMA(3)')
price_ax.plot(full_df.index[:-1], full_df['base price predictions linear'][:-1], label='Predictions (base)',linewidth=2)
# price_ax.plot(full_df.index[:-1], full_df['full price predictions svr'][:-1], label='Full SVR training fit')
price_ax.plot(full_df.index[:-1], full_df['full price predictions linear'][:-1], label='Predictions (Full)',linewidth=2)
# price_ax.plot(full_df.index[:-1], full_df['headlines price predictions svr'][:-1], label='Headlines SVR training fit')
# price_ax.plot(full_df.index[i:-1], full_df['headlines price predictions svr'][i:-1], label='Predictions (Headlines)',linewidth=2)
# price_ax.plot(full_df.index[:-1], full_df['twitter price predictions svr'][:-1], label='Twitter SVR training fit')
# price_ax.plot(full_df.index[i:-1], full_df['twitter price predictions svr'][i:-1], label='Predictions (Twitter)',linewidth=2)
price_ax.set_ylabel('Price ($)')
price_ax.grid(which='major', color='k', linestyle='-.', linewidth=0.5)
price_ax.minorticks_on()
price_ax.grid(which='minor', color='k', linestyle=':', linewidth=0.3)
price_ax.legend()
roc_ax = plt.subplot(2,1,2, sharex=price_ax)
roc_ax.plot(full_df.index, full_df['Compound SMA(3) Headlines'],label='Headline')
roc_ax.plot(full_df.index, full_df['Compound SMA(3) Twitter'],label='Twitter')
roc_ax.set_xlabel('Time Period (30 minutes)')
roc_ax.set_ylabel('Sentiment SMA(3)')
roc_ax.grid(which="major", color='k', linestyle='-.', linewidth=0.5)
roc_ax.minorticks_on()
roc_ax.grid(which='minor', color='k', linestyle=':', linewidth=0.3)
roc_ax.legend()
fig.subplots_adjust(hspace=0.1)
"""## 3. Evaluate Model with Multiple Stocks:"""
def import_data2(ticker,ticker2,ticker3,ticker4,ticker5,ticker6,ticker7,ticker8,ticker9,ticker10,ticker11,ticker12,ticker13):
stock_path = 'Dataset/1.Stock_Data/'
headline_path = 'Dataset/2.FinViz_Headline_Data/'
twitter_path = '3.Twitter_Data/'
latest_headlines='10-07'
# 1. Historical Stock Data:------------------------------------------------------------------------------------------
stock_df1 = pd.read_csv(stock_path+ticker+'_data.csv', index_col=0,parse_dates=['Datetime'])
stock_df2 = pd.read_csv(stock_path+ticker2+'_data.csv',index_col=0, parse_dates=['Datetime'])
stock_df3 = pd.read_csv(stock_path+ticker3+'_data.csv',index_col=0, parse_dates=['Datetime'])
stock_df4 = pd.read_csv(stock_path+ticker4+'_data.csv',index_col=0, parse_dates=['Datetime'])
    stock_df5 = pd.read_csv(stock_path+ticker5+'_data.csv',index_col=0, parse_dates=['Datetime'])
    stock_df6 = pd.read_csv(stock_path+ticker6+'_data.csv',index_col=0, parse_dates=['Datetime'])
    stock_df7 = pd.read_csv(stock_path+ticker7+'_data.csv',index_col=0, parse_dates=['Datetime'])
    stock_df8 = pd.read_csv(stock_path+ticker8+'_data.csv',index_col=0, parse_dates=['Datetime'])
stock_df9 = | pd.read_csv(stock_path+ticker4+'_data.csv',index_col=0, parse_dates=['Datetime']) | pandas.read_csv |
#!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
__author__ = ["<NAME>"]
__all__ = []
import numpy as np
import pandas as pd
from sklearn.utils import check_random_state
def _make_series(
n_timepoints=50,
n_columns=1,
all_positive=True,
index_type=None,
return_numpy=False,
random_state=None,
add_nan=False,
):
"""Helper function to generate univariate or multivariate time series"""
rng = check_random_state(random_state)
data = rng.normal(size=(n_timepoints, n_columns))
if add_nan:
# add some nan values
data[int(len(data) / 2)] = np.nan
data[0] = np.nan
data[-1] = np.nan
if all_positive:
data -= np.min(data, axis=0) - 1
if return_numpy:
if n_columns == 1:
data = data.ravel()
return data
else:
index = _make_index(n_timepoints, index_type)
if n_columns == 1:
return pd.Series(data.ravel(), index)
else:
return pd.DataFrame(data, index)
def _make_index(n_timepoints, index_type=None):
"""Helper function to make indices for unit testing"""
if index_type == "period":
start = "2000-01"
freq = "M"
return pd.period_range(start=start, periods=n_timepoints, freq=freq)
elif index_type == "datetime" or index_type is None:
start = "2000-01-01"
freq = "D"
return | pd.date_range(start=start, periods=n_timepoints, freq=freq) | pandas.date_range |
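# Usage sketch (illustrative): _make_series(n_timepoints=10, n_columns=2,
# index_type="period") returns a (10, 2) DataFrame indexed by a monthly
# PeriodIndex starting at 2000-01; with n_columns=1 a Series is returned instead.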
import pandas as pd
import numpy as np
from dateutil.relativedelta import relativedelta
#### Utilities
def get_first_visit_date(data_patient):
''' Determines the first visit for a given patient'''
#IDEA Could be parallelized in Dask
data_patient['first_visit_date'] = min(data_patient.visit_date)
return data_patient
def subset_analysis_data(data, date_analysis):
''' Function that subsets the full dataset to only the data available for a certain analysis date'''
if type(data.date_entered.iloc[0]) is str :
data.date_entered = pd.to_datetime(data.date_entered)
data = data[data.date_entered < date_analysis]
return data
def subset_cohort(data, horizon_date, horizon_time, bandwidth):
    ''' Function that subsets data to the cohort that initiated care between horizon_time + bandwidth and horizon_time days before horizon_date'''
horizon_date = pd.to_datetime(horizon_date)
data['first_visit_date'] = pd.to_datetime(data['first_visit_date'])
cohort_data = data[(data['first_visit_date'] >= horizon_date - relativedelta(days=horizon_time + bandwidth)) &
(data['first_visit_date'] < horizon_date - relativedelta(days=horizon_time))]
return cohort_data
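# Example of the selection window (illustrative dates): with
# horizon_date='2020-01-01', horizon_time=365 and bandwidth=30, patients whose
# first_visit_date falls in [2018-12-02, 2019-01-01) are kept.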
#### Standard reporting
def status_patient(data_patient, reference_date, grace_period):
''' Determines the status of a patient at a given reference_date, given the data available at a given analysis_date
TODO Also select the available data for Death and Transfer and other outcomes based on data entry time
'''
#IDEA Could be parallelized in Dask
data_patient = get_first_visit_date(data_patient)
date_out = pd.NaT
date_last_appointment = pd.to_datetime(max(data_patient.next_visit_date))
late_time = reference_date - date_last_appointment
if late_time.days > grace_period:
status = 'LTFU'
date_out = date_last_appointment
if late_time.days <= grace_period:
status = 'Followed'
    if pd.notna(data_patient.reasonDescEn.iloc[0]) and (pd.to_datetime(data_patient.discDate.iloc[0]) < reference_date):
status = data_patient.reasonDescEn.iloc[0]
date_out = | pd.to_datetime(data_patient.discDate.iloc[0]) | pandas.to_datetime |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import copy
import sys
import pickle
from collections import defaultdict
from itertools import islice, combinations
from datetime import datetime as dt
import warnings
warnings.filterwarnings("ignore")
from utils import timer_func
@timer_func
def merge_attributes(df: pd.DataFrame, *args: str) -> None:
"""
dtype df: dataframe
dtype *args: strings (attribute names that want to be combined)
"""
iterables = [df[arg].astype(str) for arg in args]
columnName = '&'.join([*args])
fs = [''.join([v for v in var]) for var in zip(*iterables)]
df.loc[:, columnName] = fs
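# Minimal sketch of the intended behaviour (column names and values are
# illustrative):
# df = pd.DataFrame({'office.id': ['A', 'B'], 'country': ['KR', 'US']})
# merge_attributes(df, 'office.id', 'country')
# df['office.id&country']  ->  ['AKR', 'BUS']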
class Import_declarations():
""" Class for dataset engineering """
def __init__(self, path):
self.path = path
self.df = pd.read_csv(self.path, encoding = "ISO-8859-1")
self.profile_candidates = None
def firstCheck(self):
""" Sorting and indexing necessary for data preparation """
self.df = self.df.dropna(subset=["illicit"])
        self.df = self.df[~self.df.isin({'quantity': [0], 'gross.weight': [0], 'cif.value': [0]}).any(axis=1)]
self.df = self.df.sort_values("sgd.date")
self.df = self.df.reset_index(drop=True)
@timer_func
def mask_labels(self, df: pd.DataFrame, ir_init: float, initial_masking: str) -> pd.DataFrame:
"""
Masking certain amount of data for semi-supervised learning by specific strategy - This function is used for masking initial training set.
initial_masking = importer
Masking certain amount of importer_id, to mimic the situation that not all imports are inspected.
        initial_masking = random
Masking transactions by random sampling.
ir_init is the inspection ratio at the beginning.
"""
print('Before masking:\n', df['illicit'].value_counts())
# To do: For more consistent results, we can control the random seed while selecting inspected_id.
if initial_masking == "importer":
if self.args.data in ['synthetic-k']:
importer_id = '신고인부호'
else:
importer_id = 'importer.id'
inspected_id = {}
train_id = list(set(df[importer_id]))
inspected_id[ir_init] = np.random.choice(train_id, size= int(len(train_id) * ir_init / 100), replace=False)
d = {}
for id in train_id:
d[id] = float('nan')
for id in inspected_id[ir_init]:
d[id] = 1
df['illicit'] = df[importer_id].apply(lambda x: d[x]) * df['illicit']
df['revenue'] = df[importer_id].apply(lambda x: d[x]) * df['revenue']
elif initial_masking == "random":
sampled_idx = list(df.sample(frac=1 - ir_init / 100, replace=False).index)
df.loc[sampled_idx,"illicit"] = df.loc[sampled_idx,"illicit"]* np.nan
df.loc[sampled_idx,"revenue"] = df.loc[sampled_idx,"revenue"]* np.nan
else:
return df
print('After masking:\n', df['illicit'].value_counts())
return df
@timer_func
def split(self, train_start_day, valid_start_day, test_start_day, test_end_day, valid_length, test_length, args):
""" Split data into train / valid / test """
self.train_start_day = train_start_day.strftime('%y-%m-%d')
self.valid_start_day = valid_start_day.strftime('%y-%m-%d')
self.test_start_day = test_start_day.strftime('%y-%m-%d')
self.test_end_day = test_end_day.strftime('%y-%m-%d')
self.valid_length = valid_length
self.test_length = test_length
self.args = args
self.train = self.df[(self.df["sgd.date"] >= self.train_start_day) & (self.df["sgd.date"] < self.valid_start_day)]
self.valid = self.df[(self.df["sgd.date"] >= self.valid_start_day) & (self.df["sgd.date"] < self.test_start_day)]
self.test = self.df[(self.df["sgd.date"] >= self.test_start_day) & (self.df["sgd.date"] < self.test_end_day)]
if len(self.train) == 0:
print('Training data is unavailable - Set the parameter \'train_from\' according to the dataset you are using and running main.py')
sys.exit()
# Intentionally masking datasets to simulate partially labeled scenario, note that our dataset is 100% inspected.
# If your dataset is partially labeled already, does not need this procedure.
if args.data in ['synthetic', 'synthetic-k', 'real-n', 'real-m', 'real-t', 'real-c']:
self.train = self.mask_labels(self.train, args.initial_inspection_rate, args.initial_masking)
self.train_lab = self.train[self.train['illicit'].notna()]
self.valid_lab = self.valid[self.valid['illicit'].notna()]
if self.args.semi_supervised == 1:
self.train_unlab = self.train[self.train['illicit'].isna()]
self.valid_unlab = self.valid[self.valid['illicit'].isna()]
# save labels
self.train_cls_label = self.train_lab["illicit"].values
self.valid_cls_label = self.valid_lab["illicit"].values
self.test_cls_label = self.test["illicit"].values
self.train_reg_label = self.train_lab['revenue'].values
self.valid_reg_label = self.valid_lab['revenue'].values
self.test_reg_label = self.test['revenue'].values
# Normalize revenue labels for later model fitting
self.norm_revenue_train, self.norm_revenue_valid, self.norm_revenue_test = np.log(self.train_reg_label+1), np.log(self.valid_reg_label+1), np.log(self.test_reg_label+1)
global_max = max(self.norm_revenue_train)
self.norm_revenue_train = self.norm_revenue_train/global_max
self.norm_revenue_valid = self.norm_revenue_valid/global_max
self.norm_revenue_test = self.norm_revenue_test/global_max
self.train_valid_lab = pd.concat([self.train_lab, self.valid_lab])
if self.args.semi_supervised == 1:
self.train_valid_unlab = pd.concat([self.train_unlab, self.valid_unlab])
@timer_func
def find_risk_profile(self, df: pd.DataFrame, feature: str, topk_ratio: float, adj: float, option: str) -> list or dict:
"""
dtype feature: str
dtype topk_ratio: float (range: 0-1)
dtype adj: float (to modify the mean)
dtype option: str ('topk', 'ratio')
rtype: list(option='topk') or dict(option='ratio')
The option topk is usually better than the ratio because of overfitting.
"""
# Top-k suspicious item flagging
if option == 'topk':
total_cnt = df.groupby([feature])['illicit']
nrisky_profile = int(topk_ratio*len(total_cnt))+1
# prob_illicit = total_cnt.mean() # Simple mean
adj_prob_illicit = total_cnt.sum() / (total_cnt.count()+adj) # Smoothed mean
return list(adj_prob_illicit.sort_values(ascending=False).head(nrisky_profile).index)
# Illicit-ratio encoding (Mean target encoding)
# Refer: http://docs.h2o.ai/h2o/latest-stable/h2o-docs/data-munging/target-encoding.html
# Refer: https://towardsdatascience.com/why-you-should-try-mean-encoding-17057262cd0
elif option == 'ratio':
# For target encoding, we just use 70% of train data to avoid overfitting (otherwise, test AUC drops significantly)
total_cnt = df.sample(frac=0.7).groupby([feature])['illicit']
nrisky_profile = int(topk_ratio*len(total_cnt))+1
# prob_illicit = total_cnt.mean() # Simple mean
adj_prob_illicit = total_cnt.sum() / (total_cnt.count()+adj) # Smoothed mean
return adj_prob_illicit.to_dict()
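    # Worked example of the smoothed mean above (numbers are illustrative): a
    # profile value with 3 illicit cases out of 5 declarations and adj=10 is
    # scored 3 / (5 + 10) = 0.2 instead of the raw 3 / 5 = 0.6, which damps
    # high ratios that come from very few observations.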
@timer_func
def tag_risky_profiles(self, df: pd.DataFrame, profile: str, profiles: list or dict, option: str) -> pd.DataFrame:
"""
dtype df: dataframe
dtype profile: str
dtype profiles: list(option='topk') or dictionary(option='ratio')
dtype option: str ('topk', 'ratio')
rtype: dataframe
The option topk is usually better than the ratio because of overfitting.
"""
if len(df) == 0:
return df
# Top-k suspicious item flagging
if option == 'topk':
d = defaultdict(int)
for id in profiles:
d[id] = 1
# print(list(islice(d.items(), 10))) # For debugging
df.loc[:, 'RiskH.'+profile] = df[profile].apply(lambda x: d[x])
# Illicit-ratio encoding
elif option == 'ratio':
# overall_ratio_train = 0
overall_ratio_train = np.mean(self.train_lab.illicit) # When scripting, saving it as a class variable is clearer.
            df.loc[:, 'RiskH.'+profile] = df[profile].apply(lambda x: profiles.get(x, overall_ratio_train))
return df
@timer_func
def preprocess(self, df: pd.DataFrame) -> pd.DataFrame:
"""
dtype df: dataframe
rtype df: dataframe
"""
if len(df) == 0:
return df
if self.args.data in ['synthetic', 'real-n', 'real-m', 'real-t', 'real-c']:
df = df.dropna(subset=['cif.value', 'total.taxes', 'quantity'])
df.loc[:, ['cif.value', 'total.taxes', 'quantity', 'gross.weight']] = np.log(df.loc[:, ['cif.value', 'total.taxes', 'quantity', 'gross.weight']] + 1)
df.loc[:, 'Unitprice'] = df['cif.value']/df['quantity']
df.loc[:, 'WUnitprice'] = df['cif.value']/df['gross.weight']
df.loc[:, 'TaxRatio'] = df['total.taxes'] / df['cif.value']
df.loc[:, 'TaxUnitquantity'] = df['total.taxes'] / df['quantity']
df.loc[:, 'HS6'] = df['tariff.code'] // 10000
df.loc[:, 'HS4'] = df['HS6'] // 100
df.loc[:, 'HS2'] = df['HS4'] // 100
# candFeaturesCombine = ['office.id','importer.id','country','HS6','declarant.id']
# for subset in combinations(candFeaturesCombine, 2):
# merge_attributes(df, *subset)
# for subset in combinations(candFeaturesCombine, 3):
# merge_attributes(df, *subset)
merge_attributes(df, 'office.id','importer.id')
merge_attributes(df, 'office.id','HS6')
merge_attributes(df, 'office.id','country')
merge_attributes(df, 'HS6','country')
df['sgd.date'] = df['sgd.date'].apply(lambda x: dt.strptime(x, '%y-%m-%d'))
df.loc[:, 'SGD.DayofYear'] = df['sgd.date'].dt.dayofyear
            df.loc[:, 'SGD.WeekofYear'] = df['sgd.date'].dt.isocalendar().week.astype(int)
df.loc[:, 'SGD.MonthofYear'] = df['sgd.date'].dt.month
elif self.args.data in ['synthetic-k', 'synthetic-k-partial', 'real-k']:
df.loc[:, 'WUnitprice'] = df['과세가격원화금액']/df['신고중량(KG)']
df.loc[:, 'HS6'] = df['HS10단위부호'] // 10000
df.loc[:, 'HS4'] = df['HS6'] // 100
df.loc[:, 'HS2'] = df['HS4'] // 100
return df
@timer_func
def featureEngineering(self):
""" Feature engineering, """
try:
self.offset = self.test.index[0]
except IndexError:
pass
# If sampler requires preprocessing for unlabeled data
if self.args.semi_supervised == 1:
# Preprocessing
self.train_lab = self.preprocess(self.train_lab)
self.train_unlab = self.preprocess(self.train_unlab)
self.valid_lab = self.preprocess(self.valid_lab)
self.valid_unlab = self.preprocess(self.valid_unlab)
self.test = self.preprocess(self.test)
# Add a few more risky profiles
risk_profiles = {}
profile_candidates = self.profile_candidates + [col for col in self.train_lab.columns if '&' in col]
for profile in profile_candidates:
option = self.args.risk_profile # topk or ratio
risk_profiles[profile] = self.find_risk_profile(self.train_lab, profile, 0.1, 10, option=option)
self.train_lab = self.tag_risky_profiles(self.train_lab, profile, risk_profiles[profile], option=option)
self.train_unlab = self.tag_risky_profiles(self.train_unlab, profile, risk_profiles[profile], option=option)
self.valid_lab = self.tag_risky_profiles(self.valid_lab, profile, risk_profiles[profile], option=option)
self.valid_unlab = self.tag_risky_profiles(self.valid_unlab, profile, risk_profiles[profile], option=option)
self.test = self.tag_risky_profiles(self.test, profile, risk_profiles[profile], option=option)
# If sampler does not require preprocessing for unlabeled data
elif self.args.semi_supervised == 0:
self.train_lab = self.preprocess(self.train_lab)
self.valid_lab = self.preprocess(self.valid_lab)
self.test = self.preprocess(self.test)
risk_profiles = {}
profile_candidates = self.profile_candidates + [col for col in self.train_lab.columns if '&' in col]
for profile in profile_candidates:
option = self.args.risk_profile # topk or ratio
risk_profiles[profile] = self.find_risk_profile(self.train_lab, profile, 0.1, 10, option=option)
self.train_lab = self.tag_risky_profiles(self.train_lab, profile, risk_profiles[profile], option=option)
self.valid_lab = self.tag_risky_profiles(self.valid_lab, profile, risk_profiles[profile], option=option)
self.test = self.tag_risky_profiles(self.test, profile, risk_profiles[profile], option=option)
# Features to use in a classifier
numeric_variables = ['cif.value', 'total.taxes', 'gross.weight', 'quantity', 'Unitprice', 'WUnitprice', 'TaxRatio', 'TaxUnitquantity', 'tariff.code', 'HS6', 'HS4', 'HS2', 'SGD.DayofYear', 'SGD.WeekofYear', 'SGD.MonthofYear']
flagged_variables = [col for col in self.train_lab.columns if 'RiskH' in col]
if self.args.data in ['synthetic-k', 'synthetic-k-partial', 'real-k']:
numeric_variables = ['신고중량(KG)', '관세율']
self.column_to_use = numeric_variables + flagged_variables
self.X_train_lab = self.train_lab[self.column_to_use].values
if not self.valid_lab.empty:
self.X_valid_lab = self.valid_lab[self.column_to_use].values
else:
self.X_valid_lab = np.asarray([])
if not self.test.empty:
self.X_test = self.test[self.column_to_use].values
else:
self.X_test = np.asarray([])
if self.args.semi_supervised == 1:
if not self.train_unlab.empty:
self.X_train_unlab = self.train_unlab[self.column_to_use].values
else:
self.X_train_unlab = np.asarray([])
if not self.valid_unlab.empty:
self.X_valid_unlab = self.valid_unlab[self.column_to_use].values
else:
self.X_valid_unlab = np.asarray([])
if self.args.semi_supervised == 1:
print(f'Data size - Train labeled: {self.train_lab.shape}, Train unlabeled: {self.train_unlab.shape}, Valid labeled: {self.valid_lab.shape}, Valid unlabeled: {self.valid_unlab.shape}, Test: {self.test.shape}')
elif self.args.semi_supervised == 0:
print(f'Data size - Train labeled: {self.train_lab.shape}, Valid labeled: {self.valid_lab.shape}, Test: {self.test.shape}')
# impute nan
self.X_train_lab = np.nan_to_num(self.X_train_lab, 0)
self.X_valid_lab = np.nan_to_num(self.X_valid_lab, 0)
self.X_test = np.nan_to_num(self.X_test, 0)
if self.args.semi_supervised == 1:
self.X_train_unlab = np.nan_to_num(self.X_train_unlab, 0)
self.X_valid_unlab = np.nan_to_num(self.X_valid_unlab, 0)
# from collections import Counter
# print("Checking illicit rate: ")
# cnt = Counter(self.train_cls_label)
# print("Training:",round(cnt[1]/(cnt[0]+cnt[1]), 3))
# cnt = Counter(self.valid_cls_label)
# try:
# print("Validation:",round(cnt[1]/(cnt[0]+cnt[1]), 3))
# except ZeroDivisionError:
# print("No validation set")
# cnt = Counter(self.test_cls_label)
# try:
# print("Testing:", round(cnt[1]/(cnt[0]+cnt[1]), 3))
# except ZeroDivisionError:
# print("No test set")
self.dftrainx_lab = | pd.DataFrame(self.X_train_lab,columns=self.column_to_use) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
High level functions for multiresolution analysis of spectrograms
Code licensed under both GPL and BSD licenses
Authors: <NAME> <<EMAIL>>
<NAME> <<EMAIL>>
"""
# Load required modules
from __future__ import print_function
import numpy as np
import pandas as pd
from scipy import ndimage as ndi
import itertools as it
import matplotlib.pyplot as plt
from skimage.io import imsave
from skimage import transform, measure
from scipy import ndimage
from maad import sound
from skimage.filters import gaussian
from maad.util import format_rois, rois_to_imblobs, normalize_2d
def _sigma_prefactor(bandwidth):
"""
    Function from skimage.
    Parameters
    ----------
    bandwidth : float
        Bandwidth of the filter, in octaves.
    Returns
    -------
    float
        Prefactor used to convert the bandwidth into a Gaussian standard deviation.
"""
b = bandwidth
# See http://www.cs.rug.nl/~imaging/simplecell.html
return 1.0 / np.pi * np.sqrt(np.log(2) / 2.0) * \
(2.0 ** b + 1) / (2.0 ** b - 1)
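# Illustrative check (not part of the original module): for a bandwidth of 1 octave the
# prefactor is 1/pi * sqrt(ln(2)/2) * 3 ~= 0.562, so a filter at spatial frequency f
# would get sigma ~= 0.562 / f.
#   _sigma_prefactor(1)   # -> approximately 0.562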
def gabor_kernel_nodc(frequency, theta=0, bandwidth=1, gamma=1,
n_stds=3, offset=0):
"""
Return complex 2D Gabor filter kernel with no DC offset.
This function is a modification of the gabor_kernel function of scikit-image
Gabor kernel is a Gaussian kernel modulated by a complex harmonic function.
Harmonic function consists of an imaginary sine function and a real
cosine function. Spatial frequency is inversely proportional to the
wavelength of the harmonic and to the standard deviation of a Gaussian
kernel. The bandwidth is also inversely proportional to the standard
deviation.
Parameters
----------
frequency : float
Spatial frequency of the harmonic function. Specified in pixels.
theta : float, optional
Orientation in radians. If 0, the harmonic is in the x-direction.
bandwidth : float, optional
The bandwidth captured by the filter. For fixed bandwidth, `sigma_x`
and `sigma_y` will decrease with increasing frequency. This value is
ignored if `sigma_x` and `sigma_y` are set by the user.
gamma : float, optional
gamma changes the aspect ratio (ellipsoidal) of the gabor filter.
By default, gamma=1 which means no aspect ratio (circle)
if gamma>1, the filter is larger (x-dir)
if gamma<1, the filter is higher (y-dir)
This value is ignored if `sigma_x` and `sigma_y` are set by the user.
sigma_x, sigma_y : float, optional
Standard deviation in x- and y-directions. These directions apply to
the kernel *before* rotation. If `theta = pi/2`, then the kernel is
rotated 90 degrees so that `sigma_x` controls the *vertical* direction.
n_stds : scalar, optional
The linear size of the kernel is n_stds (3 by default) standard
deviations
offset : float, optional
Phase offset of harmonic function in radians.
Returns
-------
g_nodc : complex 2d array
A single gabor kernel (complex) with no DC offset
References
----------
.. [1] http://en.wikipedia.org/wiki/Gabor_filter
.. [2] http://mplab.ucsd.edu/tutorials/gabor.pdf
Examples
--------
>>> from skimage.filters import gabor_kernel
>>> from skimage import io
>>> from matplotlib import pyplot as plt # doctest: +SKIP
>>> gk = gabor_kernel(frequency=0.2)
>>> plt.figure() # doctest: +SKIP
>>> io.imshow(gk.real) # doctest: +SKIP
>>> io.show() # doctest: +SKIP
>>> # more ripples (equivalent to increasing the size of the
>>> # Gaussian spread)
>>> gk = gabor_kernel(frequency=0.2, bandwidth=0.1)
>>> plt.figure() # doctest: +SKIP
>>> io.imshow(gk.real) # doctest: +SKIP
>>> io.show() # doctest: +SKIP
"""
# set gaussian parameters
b = bandwidth
sigma_pref = 1.0 / np.pi * np.sqrt(np.log(2) / 2.0) * (2.0 ** b + 1) / (2.0 ** b - 1)
sigma_y = sigma_pref / frequency
sigma_x = sigma_y/gamma
# meshgrid
x0 = np.ceil(max(np.abs(n_stds * sigma_x * np.cos(theta)),
np.abs(n_stds * sigma_y * np.sin(theta)), 1))
y0 = np.ceil(max(np.abs(n_stds * sigma_y * np.cos(theta)),
np.abs(n_stds * sigma_x * np.sin(theta)), 1))
y, x = np.mgrid[-y0:y0 + 1, -x0:x0 + 1]
# rotation matrix
rotx = x * np.cos(theta) + y * np.sin(theta)
roty = -x * np.sin(theta) + y * np.cos(theta)
# combine gaussian envelope and complex oscillation into the Gabor kernel (with DC)
g = np.zeros(y.shape, dtype=complex)
g[:] = np.exp(-0.5 * (rotx ** 2 / sigma_x ** 2 + roty ** 2 / sigma_y ** 2))
g /= 2 * np.pi * sigma_x * sigma_y # gaussian envelope
oscil = np.exp(1j * (2 * np.pi * frequency * rotx + offset)) # harmonic / oscillatory function
g_dc = g*oscil
# remove dc component by subtracting the envelope weighted by K
K = np.sum(g_dc)/np.sum(g)
g_nodc = g_dc - K*g
return g_nodc
def _plot_filter_bank(kernels, frequency, ntheta, bandwidth, gamma, **kwargs):
"""
Display filter bank
Parameters
----------
kernels: list
List of kernels from filter_bank_2d_nodc()
frequency: 1d ndarray of scalars
Spatial frequencies used to built the Gabor filters. Values should be
in [0;1]
ntheta: int
Number of angular steps between 0° to 90°
bandwidth: scalar, optional, default is 1
This parameter modifies the frequency of the Gabor filter
gamma: scalar, optional, default is 1
This parameter change the Gaussian window that modulates the continuous
sine.
1 => same gaussian window in x and y direction (circle)
<1 => elongation of the filter size in the y direction (elipsoid)
>1 => reduction of the filter size in the y direction (elipsoid)
**kwargs, optional. This parameter is used by plt.plot and savefig functions
figsize : tuple of integers, optional, default: (13,13)
width, height in inches.
dpi : integer, optional
Dot per inch.
For printed version, choose high dpi (i.e. dpi=300) => slow
For screen version, choose low dpi (i.e. dpi=96) => fast
interpolation : string, optional, default is 'nearest'
Pixels interpolation
aspect : string, optional, default is 'auto'
fontsize : scalar, optional, default is 8/0.22*hmax*100/dpi
size of the font use to print the parameters of each filter
... and more, see matplotlib
Returns
-------
fig : Figure
The Figure instance
ax : Axis
The Axis instance
"""
params = []
for theta in range(ntheta):
theta = theta/ntheta * np.pi
for freq in frequency:
params.append([freq, theta, bandwidth, gamma])
w = []
h = []
for kernel in kernels:
ylen, xlen = kernel.shape
w.append(xlen)
h.append(ylen)
plt.gray()
fig = plt.figure()
dpi =kwargs.pop('dpi',fig.get_dpi())
figsize =kwargs.pop('figsize',(13,13))
interpolation =kwargs.pop('interpolation','nearest')
aspect =kwargs.pop('aspect','auto')
fig.set_figwidth(figsize[0])
fig.set_figheight(figsize[1])
w = np.asarray(w)/dpi
h = np.asarray(h)/dpi
wmax = np.max(w)*1.25
hmax = np.max(h)*1.05
fontsize =kwargs.pop('fontsize',8/0.22*hmax*100/dpi)
params_label = []
for param in params:
params_label.append('theta=%d f=%.2f \n bandwidth=%.1f \n gamma=%.1f'
% (param[1] * 180 / np.pi, param[0], param[2],
param[3]))
n = len(frequency)
for ii, kernel in enumerate(kernels):
ax = plt.axes([(ii%n)*wmax + (wmax-w[ii])/2,(ii//n)*hmax + (hmax-h[ii])/2,w[ii],h[ii]])
ax.imshow(np.real(kernel),interpolation=interpolation, aspect =aspect, **kwargs)
ax.set_xticks([])
ax.set_yticks([])
ax.set_ylabel(params_label[ii],fontsize=fontsize)
ax.axis('tight')
plt.show()
return ax, fig
def _plot_filter_results(im_ref, im_list, kernels, params, m, n):
"""
Display the result after filtering
Parameters
----------
im_ref : 2D array
Reference image
im_list : list
List of filtered images
kernels: list
List of kernels from filter_bank_2d_nodc()
m: int
number of columns
n: int
number of rows
Returns
-------
fig : Figure
The Figure instance
ax : Axis
The Axis instance
"""
ncols = m
nrows = n
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(15, 5))
plt.gray()
fig.suptitle('Image responses for Gabor filter kernels', fontsize=12)
axes[0][0].axis('off')
# Plot original images
axes[0][1].imshow(im_ref, origin='lower')
axes[0][1].set_title('spectrogram', fontsize=9)
axes[0][1].axis('off')
plt.tight_layout()
params_label = []
for param in params:
params_label.append('theta=%d,\nf=%.2f' % (param[1] * 180 / np.pi, param[0]))
ii = 0
for ax_row in axes[1:]:
plotGabor = True
for ax in ax_row:
if plotGabor == True:
# Plot Gabor kernel
print(params_label[ii])
ax.imshow(np.real(kernels[ii]), interpolation='nearest')
ax.set_ylabel(params_label[ii], fontsize=7)
ax.set_xticks([])
ax.set_yticks([])
plotGabor = False
else:
im_filtered = im_list[ii]
ax.imshow(im_filtered, origin='lower')
ax.axis('off')
plotGabor = True
ii=ii+1
plt.show()
return ax, fig
def filter_mag(im, kernel):
"""
Normalizes the image and computes im and real part of filter response using
the complex kernel and the modulus operation
Parameters
----------
im: 2D array
Input image to process
kernel: 2D array
Complex kernel (or filter)
Returns
-------
im_out: Modulus operand on filtered image
"""
im = (im - im.mean()) / im.std()
im_out = np.sqrt(ndi.convolve(im, np.real(kernel), mode='reflect')**2 +
ndi.convolve(im, np.imag(kernel), mode='reflect')**2)
return im_out
def filter_multires(im_in, kernels, npyr=4, rescale=True):
"""
Computes 2D wavelet coefficients at multiple octaves/pyramids
Parameters
----------
im_in: list of 2D arrays
List of input images to process
kernels: list of 2D arrays
List of 2D wavelets to filter the images
npyr: int
Number of pyramids to compute
rescale: boolean
Indicates if the reduced images should be rescaled
Returns
-------
im_out: list of 2D arrays
List of images filtered by each 2D kernel
"""
# Downscale image using gaussian pyramid
if npyr<2:
print('Warning: npyr should be an integer >= 2 for multiresolution analysis')
im_pyr = tuple(transform.pyramid_gaussian(im_in, downscale=2,
max_layer=1, multichannel=False))
else:
im_pyr = tuple(transform.pyramid_gaussian(im_in, downscale=2,
max_layer=npyr-1, multichannel=False))
# filter 2d array at multiple resolutions using gabor kernels
im_filt=[]
for im in im_pyr: # for each pyramid
for kernel, param in kernels: # for each kernel
im_filt.append(filter_mag(im, kernel)) # magnitude response of filter
# Rescale image using gaussian pyramid
if rescale:
dims_raw = im_in.shape
im_out=[]
for im in im_filt:
ratio = np.array(dims_raw)/np.array(im.shape)
if ratio[0] > 1:
im = transform.rescale(im, scale = ratio, mode='reflect',
multichannel=False, anti_aliasing=True)
else:
pass
im_out.append(im)
else:
pass
return im_out
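# Usage sketch (illustrative, not part of the original module): `im` stands for a 2D
# spectrogram and `kernels` for a bank built with filter_bank_2d_nodc() defined below.
#   im_filt = filter_multires(im, kernels, npyr=3)
#   len(im_filt)   # -> npyr * len(kernels) magnitude responses, rescaled to im.shape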
def filter_bank_2d_nodc(frequency, ntheta, bandwidth=1, gamma=1, display=False,
savefig=None, **kwargs):
"""
Build a Gabor filter bank with no offset component
Parameters
----------
frequency: 1d ndarray of scalars
Spatial frequencies used to built the Gabor filters. Values should be
in [0;1]
ntheta: int
Number of angular steps between 0° to 90°
bandwidth: scalar, optional, default is 1
This parameter modifies the frequency of the Gabor filter
gamma: scalar, optional, default is 1
This parameter change the Gaussian window that modulates the continuous
sine.
1 => same gaussian window in x and y direction (circle)
<1 => elongation of the filter size in the y direction (elipsoid)
>1 => reduction of the filter size in the y direction (elipsoid)
Returns
-------
params: 2d structured array
Parameters used to calculate 2D gabor kernels.
Params array has 4 fields (theta, freq, bandwidth, gamma)
kernels: 2d ndarray of scalars
Gabor kernels
"""
theta = np.arange(ntheta)
theta = theta / ntheta * np.pi
params=[i for i in it.product(theta,frequency)]
kernels = []
for param in params:
kernel = gabor_kernel_nodc(frequency=param[1],
theta=param[0],
bandwidth=bandwidth,
gamma=gamma,
offset=0,
n_stds=3)
kernels.append((kernel, param))
if display:
_, fig = _plot_filter_bank(kernels, frequency, ntheta, bandwidth,
gamma, **kwargs)
if savefig is not None :
dpi =kwargs.pop('dpi',96)
format=kwargs.pop('format','png')
filename = savefig+'_filter_bank2D.'+format
fig.savefig(filename, bbox_inches='tight', dpi=dpi, format=format,
**kwargs)
return params, kernels
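# Usage sketch (illustrative): build a small bank of 2 orientations x 2 frequencies;
# the parameter values are arbitrary choices for demonstration.
#   params, kernels = filter_bank_2d_nodc(frequency=[0.1, 0.2], ntheta=2, display=False)
#   len(kernels)         # -> 4; each element is a (complex_kernel, (theta, freq)) tuple
#   kernels[0][0].shape  # -> odd-sized 2D complex array centred on the kernel origin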
def shape_features(im, im_blobs=None, resolution='low', opt_shape=None):
"""
Computes shape of 2D signal (image or spectrogram) at multiple resolutions
using 2D Gabor filters
Parameters
----------
im: 2D array
Input image to process
im_blobs: 2D array, optional
Optional binary array with '1' on the region of interest and '0' otherwise
opt: dictionary
options for the filter bank (kbank_opt) and the number of scales (npyr)
Returns
-------
shape: 1D array
Shape coeficients of each filter
params: 2D numpy structured array
Corresponding parameters of the 2D fileters used to calculate the
shape coefficient. Params has 4 fields (theta, freq, pyr_level, scale)
bbox:
If im_blobs provided, corresponding bounding box
"""
# unpack settings
opt_shape = opt_shape_presets(resolution, opt_shape)
npyr = opt_shape['npyr']
# build filterbank
params, kernels = filter_bank_2d_nodc(ntheta=opt_shape['ntheta'],
bandwidth=opt_shape['bandwidth'],
frequency=opt_shape['frequency'],
gamma=opt_shape['gamma'])
# filter images
im_rs = filter_multires(im, kernels, npyr, rescale=True)
# Get mean intensity
shape = []
if im_blobs is None:
for im in im_rs:
shape.append(np.mean(im))
rois_bbox=None
shape = [shape] # for dataframe formating below
else:
for im in im_rs:
labels = measure.label(im_blobs)
rprops = measure.regionprops(labels, intensity_image=im)
roi_mean = [roi.mean_intensity for roi in rprops]
shape.append(roi_mean)
rois_bbox = [roi.bbox for roi in rprops]
shape = list(map(list, zip(*shape))) # transpose shape
# organise parameters
params = np.asarray(params)
orient = params[:,0]*180/np.pi
orient = orient.tolist()*npyr
pyr_level = np.sort(np.arange(npyr).tolist()*len(params))+1
freq = params[:,1].tolist()*npyr
#params_multires = np.vstack((np.asarray(orient), freq, pyr_level))
nparams = len(params)*npyr
params_multires = np.zeros(nparams, dtype={'names':('theta', 'freq', 'pyr_level','scale'),
'formats':('f8', 'f8', 'f8','f8')})
params_multires['theta'] = orient
params_multires['freq'] = freq
params_multires['scale'] = 1/np.asarray(freq)
params_multires['pyr_level'] = pyr_level
params_multires = pd.DataFrame(params_multires)
# format shape into dataframe
cols=['shp_' + str(idx).zfill(3) for idx in range(1,len(shape[0])+1)]
shape = pd.DataFrame(data=np.asarray(shape),columns=cols)
# format rois into dataframe
rois_bbox = pd.DataFrame(rois_bbox, columns=['min_y','min_x',
'max_y','max_x'])
# compensate half-open interval of bbox from skimage
rois_bbox.max_y = rois_bbox.max_y - 1
rois_bbox.max_x = rois_bbox.max_x - 1
return rois_bbox, params_multires, shape
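# Usage sketch (illustrative; `Sxx` stands for a spectrogram computed elsewhere, e.g. with
# maad.sound.spectrogram, and is not defined in this module):
#   rois_bbox, params_multires, shape = shape_features(Sxx, resolution='low')
#   shape            # -> DataFrame with one 'shp_xxx' column per Gabor filter and pyramid level
#   params_multires  # -> DataFrame with the theta, freq, pyr_level and scale of each filter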
def centroid(im, im_blobs=None):
"""
Computes the intensity centroid of a 2D signal (usually a time-frequency representation).
Parameters
----------
im: 2D array
Input image to process
im_blobs: 2D array, optional
Optional binary array with '1' on the region of interest and '0' otherwise
Returns
-------
centroid: 1D array
centroid of image. If im_blobs provided, centroid for each region of interest
"""
centroid=[]
rois_bbox=[]
if im_blobs is None:
centroid = ndimage.center_of_mass(im)
else:
labels = measure.label(im_blobs)
rprops = measure.regionprops(labels, intensity_image=im)
centroid = [roi.weighted_centroid for roi in rprops]
rois_bbox = [roi.bbox for roi in rprops]
# variables to dataframes
centroid = | pd.DataFrame(centroid, columns=['y', 'x']) | pandas.DataFrame |
from __future__ import print_function, unicode_literals
from PyInquirer import style_from_dict, Token, prompt, Separator
from pprint import pprint
import os
import tarfile
from art import *
from colorama import Fore, Back, Style
from colorama import init
import re
import pandas as pd
import openpyxl
from itertools import groupby
import matplotlib.pyplot as plt
from pathlib import Path
import shutil
import easygui
#################################################################
init(convert=True)
#pint program name
tprint('<<<Tar Analysis 2.0>>>')
#ask for folder name
print(Fore.CYAN)
#name = input("Please enter the Tar folder name:")
print(Style.RESET_ALL)
name = easygui.fileopenbox()
name = name[-12:-7]
## thammarak: get the user's home directory
home = str(Path.home())
download_path = os.path.join(home, "Downloads\\")
#so we don't have to type .tar.gz
namet = name + ".tar.gz"
path = os.path.join(download_path, namet)
nameU = name+'unzip'
#open and unzip tar folder
tar = tarfile.open(path,"r:gz")
tar.extractall()
tar.close()
os.mkdir(nameU)
#### unzip the tar files inside the extracted folder
for i in os.listdir(name):
foldername = os.path.join(name, i)
os.makedirs(i)
y = tarfile.open(foldername,"r:gz")
y.extractall(i)
y.close()
shutil.move(i,nameU)
################################################################
#CLI to ask what errors to look for
style = style_from_dict({
Token.Separator: '#cc5454',
Token.QuestionMark: '#673ab7',
Token.Selected: '#cc5454', # default
Token.Pointer: '#673ab7',
Token.Instruction: '', # default
Token.Answer: '#f44336',
Token.Question: '',
})
questions = [
{
'type': 'checkbox',
'message': 'Please select errors to look for',
'name' : 'variables',
'choices': [
Separator('= Errors to look for ='),
{
'name': 'FAILED VALIDATION!!',
'checked': True
},
{
'name': 'FAILED VALIDATION while executing command',
},
{
'name': 'FAILED VALIDATION - Reported',
},
{
'name': 'FAILED - COMMAND TIMED OUT',
},
{
'name': 'Test(s) failed:',
},
{
'name': '***err',
},
{
'name': 'FAIL**',
},
{
'name': 'err-disable',
},
{
'name': 'Write your own',
},
{
'name': 'RegEx query',
},
{
'name': '->Make Excel report',
},
Separator('= Enter a Command and print switch log for it ='),
{
'name': 'Enter Command :',
},
],
'validate': lambda answer: 'You must choose at least one error to look for.' \
if len(answer) == 0 else True
}
]
answers = prompt(questions, style=style)
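# `answers["variables"]` holds the names of the ticked choices, e.g.
# ['FAILED VALIDATION!!', '->Make Excel report'] (illustrative example).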
###################################################################
#reads for new entry "Write your own"
if 'Write your own' in answers["variables"]:
own = input("Please enter the error you are looking for: ")
answers["variables"] = [own if i=='Write your own' else i for i in answers["variables"]]
###################################################################
#reads for new entry "RegEx query"
if 'RegEx query' in answers["variables"]:
regex = input("Please enter the regex pattern to search for: ")
regex = r"{}".format(regex)
answers["variables"] = [regex if i=='RegEx query' else i for i in answers["variables"]]
regex_flag = True
else: regex_flag = False
###################################################################
#opens all the files and writes line by line in log.txt document
logs = open('logs.txt',"w+")
for path,subdirs,files in os.walk(nameU):
for i in files:
filename = os.path.join(path, i)
### writing to text file
with open(filename) as infile:
for line in infile:
logs.write(line)
####################################################################
#reads text file to find corners
cornerCount1 = 0
corner1=[]
with open("logs.txt") as L:
for line in L:
if 'Corner Name :' in line and 'PST' not in line and 'PDT' not in line:
cornerCount1 +=1
###################################################################
#reads for new entry "Enter Command"
cmdDo = 0
if 'Enter Command :' in answers["variables"]:
cmd = input("Please enter the command to output logs for:")
print('There are', cornerCount1, 'corners. Example entry: 1 2 3')
cornerPrint1 = input("Write the corner numbers separated by a space (type 0 for all):")
cornerPrint = cornerPrint1.split()
#makes list int
for i in range(0, len(cornerPrint)):
cornerPrint[i] = int(cornerPrint[i])
##looks for the next command to know where to stop
with open("logs.txt") as C:
for line in C:
line = line.split(",",1)
if cmdDo == 1:
cmdEnd = line[0]
cmdDo = 0
if cmd in line:
cmdDo = 1
###################################################################
#looks for errors line by line
#variables
count = 0
corner = ''
fails = []
switch = []
switchNumber = 'first777#$'
#opens text file to read
with open("logs.txt") as L:
for line in L:
# look for corner
if 'Corner Name :' in line and 'PST' not in line and 'PDT' not in line and line not in corner:
corner = line
count = 0
print(Fore.BLACK)
print (Back.WHITE+' > '+corner)
print(Style.RESET_ALL)
#Looks for Testcase number
if 'TESTCASE START -' in line :
testn = line
# using re.py to search for switch number
if 'TESTCASE START -' in line and switchNumber not in line:
count = 0
switchNumber = re.search(r'\w\w\w\w\w\w\d(\d)?', line).group(0)
print (Fore.GREEN+(switchNumber))
print(Style.RESET_ALL)
#adding to list with errors
for i in answers["variables"]:
if i in line and i not in fails:
fails.append(line)
elif regex_flag and re.search(i, line):
fails.append(line)
if len(fails) > 0 and 'TESTCASE END -' in line:
count += 1
print(Fore.YELLOW +str(count)+"--"+(testn))
print(Style.RESET_ALL)
print (*fails, sep = "\n")
#clearing error list
if 'TESTCASE END -' in line:
fails.clear()
#################################################################
#looking for command output log
cmdLog = []
cmdStart = 10000000
ii = 0
full = 0
cornerCount = 0
switchNumber1 = 'first777#$'
if 'Enter Command :' in answers["variables"]:
corner = ''
cmd = cmd + ' '
print(Fore.BLACK)
print (Back.RED+'Command Log Output')
print(Style.RESET_ALL)
with open("logs.txt") as B:
for line in B:
ii += 1
# look for corner
if 'Corner Name :' in line and 'PST' not in line and 'PDT' not in line and line not in corner:
corner = line
print(Fore.BLACK)
print (Back.WHITE+' > '+corner)
print(Style.RESET_ALL)
cornerCount += 1
#Looks for Testcase number
if 'TESTCASE START -' in line :
testn = line
# using re.py to search for switch number
if 'TESTCASE START -' in line and switchNumber1 not in line:
switchNumber1 = re.search(r'\w\w\w\w\w\w\d(\d)?', line).group(0)
print (Fore.GREEN+(switchNumber1))
print(Style.RESET_ALL)
# looking for command output
if line.startswith(cmd) and line not in cmdLog:
cmdLog.append(line)
cmdStart = ii
cmdStop = 10000000
full = 1
if ii >= cmdStart and ii < cmdStop and line not in cmdLog:
cmdLog.append(line)
if cmdEnd in line or 'Done executing all the given commands' in line:
cmdStop = ii
#print command output
if 'TESTCASE END' in line and full == 1:
for i in cornerPrint:
if i == cornerCount or i == 0:
cmdLog = list(filter(None, cmdLog))
print(*cmdLog, sep = "\n")
cmdLog.clear()
full = 0
#################################################################
#this part is to make an excel report on test
##variables
testName = []
jobID = []
nameEx = name + ".xlsx"
one = []
two = []
three = []
testExcel = []
switchExcel = 'first777#$'
failsE = []
cornerE = ''
####
four = []
fourCorner = ''
fourSwitch = 'first777#$'
fourFails = 0
do = 0
sfp = []
sfpee = str("sfpee ")
sfpSwitch = 'first777#$'
InfoSwitch = 'first777#$'
sfpCorner = 0
sfpCount2 = 0
switch_count = []
countE=0
#makes first sheet in excel - Test info
if '->Make Excel report' in answers["variables"]:
with open("logs.txt") as E:
for line in E:
if "Starting Job Id " in line and len(one)<1:
one.append(line)
if "job_name" in line and len(one)<2:
one.append(line)
if 'TESTCASE START -' in line and InfoSwitch not in line:
InfoSwitch = re.search(r'\w\w\w\w\w\w\d(\d)?', line).group(0)
InfoSwitch = '---------------------->' + InfoSwitch
if InfoSwitch not in one:
one.append(InfoSwitch)
if "MODEL_NUM" in line and line not in one:
one.append(line)
if "MOTHERBOARD_SERIAL_NUM" in line and line not in one:
one.append(line)
if "SYSTEM_SERIAL_NUM" in line and line not in one:
one.append(line)
# look for corner
############ add sfp info to SFP sheet in excel
if 'TESTCASE START -' in line and sfpSwitch not in line:
sfpSwitch = re.search(r'\w\w\w\w\w\w\d(\d)?', line).group(0)
if sfpSwitch not in switch_count:
switch_count.append(sfpSwitch)
if sfpee in line:
sfpCount2 += 1
if sfpCount2 <= len(switch_count):
sfp.append('*************************************************')
sfp.append(sfpSwitch)
sfp.append('*************************************************')
if "EEPROM in port" in line and sfpCount2 <= len(switch_count):
sfp.append(line)
if " Transceiver" in line and sfpCount2 <= len(switch_count):
sfp.append(line)
if " Vendor PN" in line and sfpCount2 <= len(switch_count):
sfp.append(line)
if " Vendor SN" in line and sfpCount2 <= len(switch_count):
sfp.append(line)
if " Extended ID" in line and sfpCount2 <= len(switch_count):
sfp.append(line)
############ add errors to second sheet in excel
if 'Corner Name :' in line and 'PST' not in line and 'PDT' not in line and line not in corner:
cornerE = line
countE = 0
two.append(cornerE)
fourCorner = re.search(r"\{.*?}", line).group(0)
#Looks for Testcase number
if 'TESTCASE START -' in line :
testExcel = line
# using re.py to search for switch number
if 'TESTCASE START -' in line and switchExcel not in line:
switchExcel = re.search(r'\w\w\w\w\w\w\d(\d)?', line).group(0)
fourSwitch = switchExcel
two.append(switchExcel)
#adding to list with errors
for i in answers["variables"]:
if i in line and i not in failsE:
failsE.append(line)
fourFails += 1
elif regex_flag and re.search(i, line):
failsE.append(line)
fourFails += 1
if len(failsE) > 0 and 'TESTCASE END -' in line:
countE += 1
testExcel = str(countE) + "--" + testExcel
two.append(testExcel)
two.extend(failsE)
### to make a graph
four.append(fourCorner)
four.append(fourSwitch)
four.append(countE)
four.append(fourFails)
#clearing error list
if 'TESTCASE END -' in line:
failsE.clear()
###########################################################
#### graph to excel
#varibles
count_graph = 0
corner = ''
fails = []
switch = []
testcase_graph = 'first777#$'
switchNumber_graph = 'first777#$'
switch_graph1 = []
group_graph = []
#opens text file to read
with open("logs.txt") as L:
for line in L:
# look for corner
if 'TESTCASE START ' in line and 'Testcase' in line and ('PDT' in line or 'PST' in line):
if count_graph >= 1:
switch_graph1 = [switchNumber_graph +" - "+ testcase_graph,'Failed']
else:
switch_graph1 = [switchNumber_graph +" - "+ testcase_graph,'Passed']
switchNumber_graph = re.search(r'\w\w\w\w\w\w\d(\d)?', line).group(0)
testcase_graph = line[line.index('{') + len('{'):]
testcase_graph = testcase_graph.replace('}\n',"")
switchNumber_graph = re.sub("[^0123456789\.]","",switchNumber_graph)
count_graph = 0
group_graph.append(switch_graph1)
for i in answers["variables"]:
if i in line:
count_graph += 1
#################################################################
## makes the data for the graph nicer
group_graph.pop(0)
group_graphD = pd.DataFrame(group_graph, columns = ['switch - Testcase','error'])
group_graph_count = group_graphD.pivot_table(index=['switch - Testcase','error'], aggfunc='size')
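# `group_graph_count` ends up as a Series indexed by ('switch - Testcase', 'Passed'/'Failed')
# holding how many corners fell into each state (illustrative description of the pivot).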
###########################################################
############add commannd log if any to third sheet in excel
#variables
cmdLogE = []
cmdStartE = 10000000
iE = 0
fullE = 0
cornerLE = ''
switchNumber1E = 'first777#$'
three = []
cornerCountE = 0
if 'Enter Command :' in answers["variables"]:
with open("logs.txt") as B:
for line in B:
iE += 1
# look for corner
if 'Corner Name :' in line and 'PST' not in line and 'PDT' not in line and line not in cornerLE:
cornerLE = line
cornerLE = '---------------'+ cornerLE
countLogE = 0
cornerCountE += 1
three.append(cornerLE)
#Looks for Testcase number
if 'TESTCASE START -' in line :
testLog = line
# using re.py to search for switch number
if 'TESTCASE START -' in line and switchNumber1E not in line:
countLogE = 0
switchNumber1E = re.search(r'\w\w\w\w\w\w\d(\d)?', line).group(0)
three.append(switchNumber1E)
# looking for comand output
if line.startswith(cmd) and line not in cmdLogE:
cmdLogE.append(line)
cmdStartE = iE
cmdStopE = 10000000
fullE = 1
if iE >= cmdStartE and iE < cmdStopE and line not in cmdLogE:
cmdLogE.append(line)
if cmdEnd in line or 'Done executing all the given commands' in line:
cmdStopE = iE
#print command output
if 'TESTCASE END' in line and fullE == 1:
for i in cornerPrint:
if i == cornerCountE or i == 0:
cmdLogE = list(filter(None, cmdLogE))
three.extend(cmdLogE)
cmdLogE.clear()
fullE = 0
################################################################
#print to excel
if len(one)>0:
one = pd.Series(one)
if len(two)>0:
two = pd.Series(two)
if 'Enter Command :' in answers["variables"]:
three = pd.Series(three)
if len(four)>0:
four = pd.Series(four)
if len(sfp)>0:
sfp = pd.Series(sfp)
w = | pd.ExcelWriter(nameEx) | pandas.ExcelWriter |
#
# Copyright (c) 2020 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
import os
import tempfile
import unittest
# noinspection PyPackageRequirements
import pytest
from pandas.tests.extension import base
from text_extensions_for_pandas.array.test_span import ArrayTestBase
from text_extensions_for_pandas.array.span import *
from text_extensions_for_pandas.array.token_span import *
class TokenSpanTest(ArrayTestBase):
def test_create(self):
toks = self._make_spans_of_tokens()
s1 = TokenSpan(toks, 0, 1)
self.assertEqual(s1.covered_text, "This")
# Begin too small
with self.assertRaises(ValueError):
TokenSpan(toks, -2, 4)
# End too small
with self.assertRaises(ValueError):
TokenSpan(toks, 1, -1)
# End too big
with self.assertRaises(ValueError):
TokenSpan(toks, 1, 10)
# Begin null, end not null
with self.assertRaises(ValueError):
TokenSpan(toks, TokenSpan.NULL_OFFSET_VALUE, 0)
def test_repr(self):
toks = self._make_spans_of_tokens()
s1 = TokenSpan(toks, 0, 2)
self.assertEqual(repr(s1), "[0, 7): 'This is'")
toks2 = SpanArray(
"This is a really really really really really really really really "
"really long string.",
np.array([0, 5, 8, 10, 17, 24, 31, 38, 45, 52, 59, 66, 73, 78, 84]),
np.array([4, 7, 9, 16, 23, 30, 37, 44, 51, 58, 65, 72, 77, 84, 85]),
)
self._assertArrayEquals(
toks2.covered_text,
[
"This",
"is",
"a",
"really",
"really",
"really",
"really",
"really",
"really",
"really",
"really",
"really",
"long",
"string",
".",
],
)
s2 = TokenSpan(toks2, 0, 4)
self.assertEqual(repr(s2), "[0, 16): 'This is a really'")
s2 = TokenSpan(toks2, 0, 15)
self.assertEqual(
repr(s2),
"[0, 85): 'This is a really really really really really really "
"really really really [...]'"
)
def test_equals(self):
toks = self._make_spans_of_tokens()
other_toks = toks[:-1].copy()
s1 = TokenSpan(toks, 0, 2)
s2 = TokenSpan(toks, 0, 2)
s3 = TokenSpan(toks, 0, 3)
s4 = TokenSpan(other_toks, 0, 2)
s5 = Span(toks.target_text, s4.begin, s4.end)
s6 = Span(toks.target_text, s4.begin, s4.end + 1)
self.assertEqual(s1, s2)
self.assertNotEqual(s1, s3)
self.assertEqual(s1, s4)
self.assertEqual(s1, s5)
self.assertEqual(s5, s1)
self.assertNotEqual(s1, s6)
def test_less_than(self):
toks = self._make_spans_of_tokens()
s1 = TokenSpan(toks, 0, 3)
s2 = TokenSpan(toks, 2, 3)
s3 = TokenSpan(toks, 3, 4)
self.assertLess(s1, s3)
self.assertLessEqual(s1, s3)
self.assertFalse(s1 < s2)
def test_add(self):
toks = self._make_spans_of_tokens()
s1 = TokenSpan(toks, 0, 3)
s2 = TokenSpan(toks, 2, 3)
s3 = TokenSpan(toks, 3, 4)
char_s1 = Span(s1.target_text, s1.begin, s1.end)
char_s2 = Span(s2.target_text, s2.begin, s2.end)
self.assertEqual(s1 + s2, s1)
self.assertEqual(char_s1 + s2, char_s1)
self.assertEqual(s2 + char_s1, char_s1)
self.assertEqual(char_s2 + char_s1, char_s1)
self.assertEqual(s2 + s3, TokenSpan(toks, 2, 4))
def test_hash(self):
toks = self._make_spans_of_tokens()
s1 = TokenSpan(toks, 0, 3)
s2 = TokenSpan(toks, 0, 3)
s3 = TokenSpan(toks, 3, 4)
d = {s1: "foo"}
self.assertEqual(d[s1], "foo")
self.assertEqual(d[s2], "foo")
d[s2] = "bar"
d[s3] = "fab"
self.assertEqual(d[s1], "bar")
self.assertEqual(d[s2], "bar")
self.assertEqual(d[s3], "fab")
class TokenSpanArrayTest(ArrayTestBase):
def _make_spans(self):
toks = self._make_spans_of_tokens()
return TokenSpanArray(toks, [0, 1, 2, 3, 0, 2, 0], [1, 2, 3, 4, 2, 4, 4])
def test_create(self):
arr = self._make_spans()
self._assertArrayEquals(
arr.covered_text,
["This", "is", "a", "test", "This is", "a test", "This is a test"],
)
with self.assertRaises(TypeError):
TokenSpanArray(self._make_spans_of_tokens(), "Not a valid begins list", [42])
def test_dtype(self):
arr = self._make_spans()
self.assertTrue(isinstance(arr.dtype, TokenSpanDtype))
def test_len(self):
self.assertEqual(len(self._make_spans()), 7)
def test_getitem(self):
arr = self._make_spans()
self.assertEqual(arr[2].covered_text, "a")
self._assertArrayEquals(arr[2:4].covered_text, ["a", "test"])
def test_setitem(self):
arr = self._make_spans()
arr[1] = arr[2]
self._assertArrayEquals(arr.covered_text[0:4], ["This", "a", "a", "test"])
arr[3] = None
self._assertArrayEquals(arr.covered_text[0:4], ["This", "a", "a", None])
with self.assertRaises(ValueError):
arr[0] = "Invalid argument for __setitem__()"
arr[0:2] = arr[0]
self._assertArrayEquals(arr.covered_text[0:4], ["This", "This", "a", None])
arr[[0, 1, 3]] = None
self._assertArrayEquals(arr.covered_text[0:4], [None, None, "a", None])
arr[[2, 1, 3]] = arr[[4, 5, 6]]
self._assertArrayEquals(
arr.covered_text[0:4], [None, "a test", "This is", "This is a test"]
)
def test_equals(self):
arr = self._make_spans()
self._assertArrayEquals(arr[0:4] == arr[1], [False, True, False, False])
arr2 = self._make_spans()
self._assertArrayEquals(arr == arr, [True] * 7)
self._assertArrayEquals(arr == arr2, [True] * 7)
self._assertArrayEquals(arr[0:3] == arr[3:6], [False, False, False])
arr3 = SpanArray(arr.target_text, arr.begin, arr.end)
self._assertArrayEquals(arr == arr3, [True] * 7)
self._assertArrayEquals(arr3 == arr, [True] * 7)
def test_not_equals(self):
arr = self._make_spans()
arr2 = self._make_spans()
self._assertArrayEquals(arr[0:4] != arr[1], [True, False, True, True])
self._assertArrayEquals(arr != arr2, [False] * 7)
self._assertArrayEquals(arr[0:3] != arr[3:6], [True, True, True])
def test_concat_same_type(self):
arr = self._make_spans()
arr2 = self._make_spans()
# Type: TokenSpanArray
arr3 = TokenSpanArray._concat_same_type((arr, arr2))
self._assertArrayEquals(arr3.covered_text, np.tile(arr2.covered_text, 2))
def test_from_factorized(self):
arr = self._make_spans()
spans_list = [arr[i] for i in range(len(arr))]
arr2 = TokenSpanArray._from_factorized(spans_list, arr)
self._assertArrayEquals(arr.covered_text, arr2.covered_text)
def test_from_sequence(self):
arr = self._make_spans()
spans_list = [arr[i] for i in range(len(arr))]
arr2 = TokenSpanArray._from_sequence(spans_list)
self._assertArrayEquals(arr.covered_text, arr2.covered_text)
def test_nulls(self):
arr = self._make_spans()
self._assertArrayEquals(arr.isna(), [False] * 7)
self.assertFalse(arr.have_nulls)
arr[2] = TokenSpan.make_null(arr.tokens)
self.assertIsNone(arr.covered_text[2])
self._assertArrayEquals(arr[0:4].covered_text, ["This", "is", None, "test"])
self._assertArrayEquals(arr[0:4].isna(), [False, False, True, False])
self.assertTrue(arr.have_nulls)
def test_copy(self):
arr = self._make_spans()
arr2 = arr.copy()
self._assertArrayEquals(arr.covered_text, arr2.covered_text)
self.assertEqual(arr[1], arr2[1])
arr[1] = TokenSpan.make_null(arr.tokens)
self.assertNotEqual(arr[1], arr2[1])
# Exercise the ExtensionArray take() interface
def test_take(self):
arr = self._make_spans()
arr2 = arr.take([1, 1, 2, 3, 5, -1])
self._assertArrayEquals(
arr2.covered_text, ["is", "is", "a", "test", "a test", "This is a test"]
)
arr3 = arr.take([1, 1, 2, 3, 5, -1], allow_fill=True)
self._assertArrayEquals(
arr3.covered_text, ["is", "is", "a", "test", "a test", None]
)
def test_less_than(self):
tokens = self._make_spans_of_tokens()
arr1 = TokenSpanArray(tokens, [0, 2], [4, 3])
s1 = TokenSpan(tokens, 0, 1)
s2 = TokenSpan(tokens, 3, 4)
arr2 = TokenSpanArray(tokens, [0, 3], [0, 4])
self._assertArrayEquals(s1 < arr1, [False, True])
self._assertArrayEquals(s2 > arr1, [False, True])
self._assertArrayEquals(arr1 < s1, [False, False])
self._assertArrayEquals(arr1 < arr2, [False, True])
def test_add(self):
toks = self._make_spans_of_tokens()
s1 = TokenSpan(toks, 0, 3)
s2 = TokenSpan(toks, 2, 3)
s3 = TokenSpan(toks, 3, 4)
s4 = TokenSpan(toks, 2, 4)
s5 = TokenSpan(toks, 0, 3)
char_s1 = Span(s1.target_text, s1.begin, s1.end)
char_s2 = Span(s2.target_text, s2.begin, s2.end)
char_s3 = Span(s3.target_text, s3.begin, s3.end)
char_s4 = Span(s4.target_text, s4.begin, s4.end)
char_s5 = Span(s5.target_text, s5.begin, s5.end)
# TokenSpanArray + TokenSpanArray
self._assertArrayEquals(
TokenSpanArray._from_sequence([s1, s2, s3])
+ TokenSpanArray._from_sequence([s2, s3, s3]),
TokenSpanArray._from_sequence([s1, s4, s3]),
)
# SpanArray + TokenSpanArray
self._assertArrayEquals(
SpanArray._from_sequence([char_s1, char_s2, char_s3])
+ TokenSpanArray._from_sequence([s2, s3, s3]),
SpanArray._from_sequence([char_s1, char_s4, char_s3]),
)
# TokenSpanArray + SpanArray
self._assertArrayEquals(
TokenSpanArray._from_sequence([s1, s2, s3])
+ SpanArray._from_sequence([char_s2, char_s3, char_s3]),
SpanArray._from_sequence([char_s1, char_s4, char_s3]),
)
# TokenSpanArray + TokenSpan
self._assertArrayEquals(
TokenSpanArray._from_sequence([s1, s2, s3]) + s2,
TokenSpanArray._from_sequence([s5, s2, s4]),
)
# TokenSpan + TokenSpanArray
self._assertArrayEquals(
s2 + TokenSpanArray._from_sequence([s1, s2, s3]),
TokenSpanArray._from_sequence([s5, s2, s4]),
)
# TokenSpanArray + Span
self._assertArrayEquals(
TokenSpanArray._from_sequence([s1, s2, s3]) + char_s2,
SpanArray._from_sequence([char_s5, char_s2, char_s4]),
)
# Span + SpanArray
self._assertArrayEquals(
char_s2 + SpanArray._from_sequence([char_s1, char_s2, char_s3]),
SpanArray._from_sequence([char_s5, char_s2, char_s4]),
)
def test_reduce(self):
arr = self._make_spans()
self.assertEqual(arr._reduce("sum"), TokenSpan(arr.tokens, 0, 4))
# Remind ourselves to modify this test after implementing min and max
with self.assertRaises(TypeError):
arr._reduce("min")
def test_make_array(self):
arr = self._make_spans()
arr_series = pd.Series(arr)
toks_list = [arr[0], arr[1], arr[2], arr[3]]
self._assertArrayEquals(
TokenSpanArray.make_array(arr).covered_text,
["This", "is", "a", "test", "This is", "a test", "This is a test"],
)
self._assertArrayEquals(
TokenSpanArray.make_array(arr_series).covered_text,
["This", "is", "a", "test", "This is", "a test", "This is a test"],
)
self._assertArrayEquals(
TokenSpanArray.make_array(toks_list).covered_text,
["This", "is", "a", "test"],
)
def test_begin_and_end(self):
arr = self._make_spans()
self._assertArrayEquals(arr.begin, [0, 5, 8, 10, 0, 8, 0])
self._assertArrayEquals(arr.end, [4, 7, 9, 14, 7, 14, 14])
def test_normalized_covered_text(self):
arr = self._make_spans()
self._assertArrayEquals(
arr.normalized_covered_text,
["this", "is", "a", "test", "this is", "a test", "this is a test"],
)
def test_as_frame(self):
arr = self._make_spans()
df = arr.as_frame()
self._assertArrayEquals(
df.columns, ["begin", "end", "begin_token", "end_token", "covered_text"]
)
self.assertEqual(len(df), len(arr))
class TokenSpanArrayIOTests(ArrayTestBase):
def do_roundtrip(self, df):
with tempfile.TemporaryDirectory() as dirpath:
filename = os.path.join(dirpath, 'token_span_array_test.feather')
df.to_feather(filename)
df_read = pd.read_feather(filename)
pd.testing.assert_frame_equal(df, df_read)
def test_feather(self):
toks = self._make_spans_of_tokens()
# Equal token spans to tokens
ts1 = TokenSpanArray(toks, np.arange(len(toks)), np.arange(len(toks)) + 1)
df1 = pd.DataFrame({"ts1": ts1})
self.do_roundtrip(df1)
# More token spans than tokens
ts2 = TokenSpanArray(toks, [0, 1, 2, 3, 0, 2, 0], [1, 2, 3, 4, 2, 4, 4])
df2 = pd.DataFrame({"ts2": ts2})
self.do_roundtrip(df2)
# Less token spans than tokens, 2 splits no padding
ts3 = TokenSpanArray(toks, [0, 3], [3, 4])
df3 = | pd.DataFrame({"ts3": ts3}) | pandas.DataFrame |
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 mouse=a
import matplotlib
matplotlib.rcParams['figure.facecolor'] = '1.'
matplotlib.use('Agg')
import ants
import numpy as np
import pandas as pd
import os
import imageio
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as niu
import nibabel as nib
import shutil
import ntpath
import nipype.interfaces.io as nio
import matplotlib.pyplot as plt
import seaborn as sns
import inspect
import json
import re
import time
import matplotlib.animation as animation
from skimage.feature import canny
from nibabel.processing import resample_to_output
from sklearn.metrics import normalized_mutual_info_score
from sklearn.ensemble import IsolationForest
from sklearn.cluster import DBSCAN
from sklearn.neighbors import LocalOutlierFactor
from sklearn.svm import OneClassSVM
from skimage.filters import threshold_otsu
from math import sqrt, log, ceil
from os import getcwd
from os.path import basename
from sys import argv, exit
from glob import glob
from src.outlier import kde, MAD
from src.utils import concat_df
from nipype.interfaces.base import (TraitedSpec, File, traits, InputMultiPath,
BaseInterface, OutputMultiPath, BaseInterfaceInputSpec, isdefined)
from scipy.ndimage.filters import gaussian_filter
from nipype.utils.filemanip import (load_json, save_json, split_filename, fname_presuffix, copyfile)
_file_dir, fn =os.path.split( os.path.abspath(__file__) )
def load_3d(fn, t=0):
print('Reading Frame %d'%t,'from', fn)
img = nib.load(fn)
vol = img.get_fdata()
hd = img.header
if len(vol.shape) == 4 :
vol = vol[:,:,:,t]
vol = vol.reshape(vol.shape[0:3] )
img = nib.Nifti1Image(vol, img.affine)
return img, vol
def get_spacing(aff, i) :
return aff[i, np.argmax(np.abs(aff[i,0:3]))]
######################
# Group-level QC #
######################
#datasink for dist metrics
#check how the calc outlier measure node is implemented, may need to be reimplemented
final_dir="qc"
def group_level_qc(opts, args):
#build the node-name suffix from the optional label names before it is used below
qc_err=''
if opts.pvc_label_name != None :
qc_err += "_"+opts.pvc_label_name
if opts.quant_label_name != None :
qc_err += "_"+opts.quant_label_name
if opts.results_label_name != None :
qc_err += "_"+opts.results_label_name
qc_err += "_"
#setup workflow
workflow = pe.Workflow(name=qc_err+opts.preproc_dir)
workflow.base_dir = opts.targetDir
#Datasink
datasink=pe.Node(interface=nio.DataSink(), name=qc_err+"output")
datasink.inputs.base_directory= opts.targetDir +os.sep +"qc"
datasink.inputs.substitutions = [('_cid_', ''), ('sid_', '')]
outfields=['coreg_metrics','tka_metrics','pvc_metrics']
paths={'coreg_metrics':"*/coreg_qc_metrics/*_metric.csv", 'tka_metrics':"*/results_tka/*_3d.csv",'pvc_metrics':"*/pvc_qc_metrics/*qc_metric.csv"}
#If any one of the sets of metrics does not exist because it has not been run at the scan level, then
#remove it from the list of outfields and paths that the datagrabber will look for.
for outfield, path in list(paths.items()): # iterate over a copy so entries can be removed below
full_path = opts.targetDir + os.sep + opts.preproc_dir + os.sep + path
print(full_path)
if len(glob(full_path)) == 0 :
outfields.remove(outfield)
paths.pop(outfield)
#Datagrabber
datasource = pe.Node( interface=nio.DataGrabber( outfields=outfields, raise_on_empty=True, sort_filelist=False), name=qc_err+"datasource")
datasource.inputs.base_directory = opts.targetDir + os.sep +opts.preproc_dir
datasource.inputs.template = '*'
datasource.inputs.field_template = paths
#datasource.inputs.template_args = dict( coreg_metrics = [['preproc_dir']] )
##################
# Coregistration #
##################
if 'coreg_metrics' in outfields:
#Concatenate distance metrics
concat_coreg_metricsNode=pe.Node(interface=concat_df(), name=qc_err+"concat_coreg_metrics")
concat_coreg_metricsNode.inputs.out_file="coreg_qc_metrics.csv"
workflow.connect(datasource, 'coreg_metrics', concat_coreg_metricsNode, 'in_list')
workflow.connect(concat_coreg_metricsNode, "out_file", datasink, 'coreg/metrics')
#Plot Coregistration Metrics
plot_coreg_metricsNode=pe.Node(interface=plot_qcCommand(), name=qc_err+"plot_coreg_metrics")
workflow.connect(concat_coreg_metricsNode, "out_file", plot_coreg_metricsNode, 'in_file')
workflow.connect(plot_coreg_metricsNode, "out_file", datasink, 'coreg/metrics_plot')
#Calculate Coregistration outlier measures
outlier_measureNode = pe.Node(interface=outlier_measuresCommand(), name=qc_err+"coregistration_outlier_measure")
workflow.connect(concat_coreg_metricsNode, 'out_file', outlier_measureNode, 'in_file')
workflow.connect(outlier_measureNode, "out_file", datasink, 'coreg/outlier')
#Plot coregistration outlier measures
plot_coreg_measuresNode=pe.Node(interface=plot_qcCommand(),name=qc_err+"plot_coreg_measures")
workflow.connect(outlier_measureNode,"out_file",plot_coreg_measuresNode,'in_file')
workflow.connect(plot_coreg_measuresNode,"out_file",datasink,'coreg/measures_plot')
#######
# PVC #
#######
if 'pvc_metrics' in outfields:
#Concatenate PVC metrics
concat_pvc_metricsNode=pe.Node(interface=concat_df(), name=qc_err+"concat_pvc_metrics")
concat_pvc_metricsNode.inputs.out_file="pvc_qc_metrics.csv"
workflow.connect(datasource, 'pvc_metrics', concat_pvc_metricsNode, 'in_list')
workflow.connect(concat_pvc_metricsNode, "out_file", datasink, 'pvc/metrics')
#Plot PVC Metrics
plot_pvc_metricsNode=pe.Node(interface=plot_qcCommand(), name=qc_err+"plot_pvc_metrics")
workflow.connect(concat_pvc_metricsNode, "out_file", plot_pvc_metricsNode, 'in_file')
workflow.connect(plot_pvc_metricsNode, "out_file", datasink, 'pvc/metrics_plot')
#Calculate PVC outlier measures
pvc_outlier_measureNode = pe.Node(interface=outlier_measuresCommand(), name=qc_err+"pvc_outlier_measure")
workflow.connect(concat_pvc_metricsNode, 'out_file', pvc_outlier_measureNode, 'in_file')
workflow.connect(pvc_outlier_measureNode, "out_file", datasink, 'pvc/outlier')
#Plot PVC outlier measures
plot_pvc_measuresNode=pe.Node(interface=plot_qcCommand(), name=qc_err+"plot_pvc_measures")
workflow.connect(pvc_outlier_measureNode,"out_file",plot_pvc_measuresNode,'in_file')
workflow.connect(plot_pvc_measuresNode, "out_file", datasink, 'pvc/measures_plot')
#######
# TKA #
#######
if 'tka_metrics' in outfields:
#Concatenate TKA metrics
concat_tka_metricsNode=pe.Node(interface=concat_df(), name=qc_err+"concat_tka_metrics")
concat_tka_metricsNode.inputs.out_file="tka_qc_metrics.csv"
workflow.connect(datasource, 'tka_metrics', concat_tka_metricsNode, 'in_list')
workflow.connect(concat_tka_metricsNode, "out_file", datasink, 'tka/metrics')
#Plot TKA Metrics
plot_tka_metricsNode=pe.Node(interface=plot_qcCommand(), name=qc_err+"plot_tka_metrics")
workflow.connect(concat_tka_metricsNode, "out_file", plot_tka_metricsNode, 'in_file')
workflow.connect(plot_tka_metricsNode, "out_file", datasink, 'tka/metrics_plot')
#Calculate TKA outlier measures
tka_outlier_measureNode = pe.Node(interface=outlier_measuresCommand(), name=qc_err+"tka_outlier_measure")
workflow.connect(concat_tka_metricsNode, 'out_file', tka_outlier_measureNode, 'in_file')
workflow.connect(tka_outlier_measureNode, "out_file", datasink, 'tka/outlier')
#Plot TKA outlier measures
plot_tka_measuresNode=pe.Node(interface=plot_qcCommand(), name=qc_err+"plot_tka_measures")
workflow.connect(tka_outlier_measureNode,"out_file",plot_tka_measuresNode,'in_file')
workflow.connect(plot_tka_measuresNode, "out_file", datasink, 'tka/measures_plot')
workflow.run()
####################
# Distance Metrics #
####################
__NBINS=-1
import copy
def pvc_mse(pvc_fn, pve_fn, fwhm):
pvc = nib.load(pvc_fn)
pvc.data = pvc.get_data()
pve = nib.load(pve_fn)
pve.data = pve.get_data()
mse = 0
if len(pvc.data.shape) > 3 :#if volume has more than 3 dimensions
t = int(pvc.data.shape[3]/2)
#for t in range(pvc.sizes[0]):
pve_frame = pve.data[:,:,:,t]
pvc_frame = pvc.data[:,:,:,t]
n = np.sum(pve.data[:,:,:,t]) # np.prod(pve.data.shape[0:4])
pvc_blur = gaussian_filter(pvc_frame,fwhm)
m = np.sum(np.sqrt((pve_frame - pvc_blur)**2))
mse += m
print(t, m)
else : #volume has 3 dimensions
n = np.sum(pve.data) # np.prod(pve.data.shape[0:3])
pvc_blur = gaussian_filter(pvc.data,fwhm)
m = np.sum(np.sqrt((pve.data - pvc_blur)**2))
mse += m
mse = -mse / n #np.sum(pve.data)
print("PVC MSE:", mse)
return mse
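# Usage sketch (illustrative; the file names below are hypothetical):
#   mse = pvc_mse('sub-01_pet_pvc.nii.gz', 'sub-01_pet_pve.nii.gz', 2.5)
# The result is the negated, normalised sum of voxel-wise differences between the PVE
# image and the smoothed PVC image, so values closer to 0 mean a better match.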
####################
# Outlier Measures #
####################
def _IsolationForest(X):
X = np.array(X)
if len(X.shape) == 1 :
X=X.reshape(-1,1)
rng = np.random.RandomState(42)
clf = IsolationForest(max_samples=X.shape[0], random_state=rng)
return clf.fit(X).predict(X)
def _LocalOutlierFactor(X):
X = np.array(X)
if len(X.shape) == 1 :
X=X.reshape(-1,1)
n=int(round(X.shape[0]*0.2))
clf = LocalOutlierFactor(n_neighbors=n)
clf.fit_predict(X)
return clf.negative_outlier_factor_
def _OneClassSVM(X):
clf = OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X)
return clf.predict(X)
def _dbscan(X):
db = DBSCAN(eps=0.3)
return db.fit_predict(X)
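# Usage sketch (illustrative): each helper maps a feature matrix (or a 1D list) to
# per-sample outlier labels or scores; exact values depend on the scikit-learn version.
#   labels = _IsolationForest([0.70, 0.72, 0.69, 0.15])    # -> array of 1 (inlier) / -1 (outlier)
#   scores = _LocalOutlierFactor([0.70, 0.72, 0.69, 0.15]) # -> negative outlier factors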
###########
# Globals #
###########
global distance_metrics
global outlier_measures
global metric_columns
global outlier_columns
outlier_measures={"KDE":kde, "LOF": _LocalOutlierFactor, "IsolationForest":_IsolationForest, "MAD":MAD} #, "DBSCAN":_dbscan, "OneClassSVM":_OneClassSVM }
metric_columns = ['analysis', 'sub','ses','task','run','acq','rec','roi','metric','value']
outlier_columns = ['analysis', 'sub','ses','task','roi','metric','measure','value']
#######################
### Outlier Metrics ###
#######################
### PVC Metrics
class pvc_qc_metricsOutput(TraitedSpec):
out_file = traits.File(desc="Output file")
class pvc_qc_metricsInput(BaseInterfaceInputSpec):
pve = traits.File(exists=True, mandatory=True, desc="Input PVE PET image")
pvc = traits.File(exists=True, mandatory=True, desc="Input PVC PET")
fwhm = traits.List(desc='FWHM of the scanner')
sub = traits.Str("Subject ID")
task = traits.Str("Task")
ses = traits.Str("Ses")
run = traits.Str("Run")
rec = traits.Str("Reconstruction")
acq = traits.Str("Acquisition")
out_file = traits.File(desc="Output file")
class pvc_qc_metrics(BaseInterface):
input_spec = pvc_qc_metricsInput
output_spec = pvc_qc_metricsOutput
def _gen_output(self, sid, ses, task,run,acq,rec, fname ="pvc_qc_metric.csv"):
dname = os.getcwd()
fn = dname+os.sep+'sub-'+sid+'_ses-'+ses+'_task-'+task
if isdefined(run) :
fn += '_run-'+str(run)
fn += "_acq-"+str(acq)+"_rec-"+str(rec)+fname
return fn
def _run_interface(self, runtime):
sub = self.inputs.sub
ses = self.inputs.ses
task = self.inputs.task
fwhm = self.inputs.fwhm
run = self.inputs.run
rec = self.inputs.rec
acq = self.inputs.acq
df = pd.DataFrame([], columns=metric_columns)
pvc_metrics={'mse':pvc_mse }
for metric_name, metric_function in pvc_metrics.items():
mse = pvc_mse(self.inputs.pvc, self.inputs.pve, fwhm)
temp = pd.DataFrame([['pvc', sub,ses,task,run,acq,rec,'02',metric_name,mse]], columns=metric_columns)
df = pd.concat([df, temp])
df.fillna(0, inplace=True)
if not isdefined(self.inputs.out_file):
self.inputs.out_file = self._gen_output(self.inputs.sub, self.inputs.ses, self.inputs.task, self.inputs.run, self.inputs.acq, self.inputs.rec)
df.to_csv(self.inputs.out_file, index=False)
return runtime
def _list_outputs(self):
outputs = self.output_spec().get()
if not isdefined(self.inputs.out_file):
self.inputs.out_file = self._gen_output(self.inputs.sub, self.inputs.ses, self.inputs.task, self.inputs.run, self.inputs.acq, self.inputs.rec)
outputs["out_file"] = self.inputs.out_file
return outputs
### Coregistration Metrics
class coreg_qc_metricsOutput(TraitedSpec):
out_file = traits.File(desc="Output file")
class coreg_qc_metricsInput(BaseInterfaceInputSpec):
pet = traits.File(exists=True, mandatory=True, desc="Input PET image")
t1 = traits.File(exists=True, mandatory=True, desc="Input T1 MRI")
brain_mask_space_mri = traits.File(exists=True, mandatory=True, desc="Input T1 MRI")
pet_brain_mask = traits.File(exists=True, mandatory=True, desc="Input T1 MRI")
sid = traits.Str(desc="Subject")
ses = traits.Str(desc="Session")
task = traits.Str(desc="Task")
run = traits.Str(desc="Run")
rec = traits.Str(desc="Reconstruction")
acq = traits.Str(desc="Acquisition")
study_prefix = traits.Str(desc="Study Prefix")
out_file = traits.File(desc="Output file")
clobber = traits.Bool(desc="Overwrite output file", default=False)
class coreg_qc_metricsCommand(BaseInterface):
input_spec = coreg_qc_metricsInput
output_spec = coreg_qc_metricsOutput
def _gen_output(self, sid, ses, task, run, rec, acq, fname ="distance_metric.csv"):
dname = os.getcwd()
fn = dname+os.sep+'sub-'+sid+'_ses-'+ses+'_task-'+task
if isdefined(run) :
fn += '_run-'+str(run)
fn += "_acq-"+str(acq)+"_rec-"+str(rec)+fname
return fn
def _run_interface(self, runtime):
sub_df=pd.DataFrame(columns=metric_columns )
pet = self.inputs.pet
t1 = self.inputs.t1
sid = self.inputs.sid
ses = self.inputs.ses
task = self.inputs.task
run = self.inputs.run
rec = self.inputs.rec
acq = self.inputs.acq
brain_mask_space_mri = self.inputs.brain_mask_space_mri
pet_brain_mask = self.inputs.pet_brain_mask
coreg_metrics=['MattesMutualInformation']
path, ext = os.path.splitext(pet)
base=basename(path)
param=base.split('_')[-1]
param_type=base.split('_')[-2]
df=pd.DataFrame(columns=metric_columns )
def image_read(fn) :
img, vol = load_3d(fn)
vol = vol.astype(float)
aff = img.affine
origin = [ aff[0,3], aff[1,3], aff[2,3]]
spacing = [ get_spacing(aff, 0), get_spacing(aff, 1), get_spacing(aff, 2) ]
return ants.from_numpy( vol, origin=origin, spacing=spacing )
for metric in coreg_metrics :
print("t1 ",t1)
fixed = image_read( t1 )
moving = image_read( pet )
try :
metric_val = ants.create_ants_metric(
fixed = fixed,
moving= moving,
fixed_mask=ants.image_read( brain_mask_space_mri ),
moving_mask=ants.image_read( pet_brain_mask ),
metric_type=metric ).get_value()
except RuntimeError :
metric_val = np.NaN
temp = pd.DataFrame([['coreg',sid,ses,task,run,acq,rec,'01',metric,metric_val]],columns=df.columns )
sub_df = pd.concat([sub_df, temp])
if not isdefined( self.inputs.out_file) :
self.inputs.out_file = self._gen_output(self.inputs.sid, self.inputs.ses, self.inputs.task,self.inputs.run,self.inputs.rec,self.inputs.acq)
sub_df.to_csv(self.inputs.out_file, index=False)
return runtime
def _list_outputs(self):
outputs = self.output_spec().get()
if not isdefined( self.inputs.out_file) :
self.inputs.out_file = self._gen_output(self.inputs.sid, self.inputs.ses, self.inputs.task,self.inputs.run,self.inputs.rec,self.inputs.acq)
outputs["out_file"] = self.inputs.out_file
return outputs
### Plot Metrics
# analysis sub ses task metric roi value
# 0 coreg 19 F 1 CC 1 0.717873
class plot_qcOutput(TraitedSpec):
out_file = traits.File(desc="Output file")
class plot_qcInput(BaseInterfaceInputSpec):
in_file = traits.File(desc="Input file")
out_file = traits.File(desc="Output file")
class plot_qcCommand (BaseInterface):
input_spec = plot_qcInput
output_spec = plot_qcOutput
def _gen_output(self, basefile="metrics.png"):
fname = ntpath.basename(basefile)
dname = os.getcwd()
return dname+ os.sep+fname
def _parse_inputs(self, skip=None):
if skip is None:
skip = []
if not isdefined(self.inputs.out_file):
self.inputs.out_file = self._gen_output(self.inputs.in_file)
return super(plot_qcCommand, self)._parse_inputs(skip=skip)
def _run_interface(self, runtime):
df = pd.read_csv( self.inputs.in_file )
if "measure" in df.columns:
plot_type="measure"
elif "metric" in df.columns :
plot_type = "metric"
else:
print("Unrecognized data frame")
exit(1)
df["sub"]="sub: "+df["sub"].map(str)+" task: "+df["task"].map(str)+" ses: "+df["ses"].map(str)
print(df)
plt.clf()
fig, ax = plt.subplots()
plt.figure(1)
nROI = len(np.unique(df.roi))
if plot_type == "measure" :
unique_measure =np.unique(df.measure)
nMeasure = np.unique(unique_measure)
unique_metric = np.unique(df.metric)
nMetric = len(unique_metric)
for roi, i in zip(np.unique(df.roi), range(nROI)):
df0=df[ (df.roi==roi) ]
for metric in unique_metric :
x=df0.value[df.metric == metric]
if plot_type == "measure" :
sns.factorplot(x="metric", col="measure", y="value", kind="swarm", data=df0, legend=False, hue="sub")
else :
sns.factorplot(x="metric", y="value", data=df0, kind="swarm", hue="sub")
plt.ylabel('')
plt.xlabel('')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.ylim([-0.05,1.05])
plt.legend(bbox_to_anchor=(1.05, 1), loc="upper right", ncol=1, prop={'size': 6})
if not isdefined( self.inputs.out_file) :
self.inputs.out_file = self._gen_output()
print('Out file:', self.inputs.out_file)
#plt.tight_layout()
plt.savefig(self.inputs.out_file, bbox_inches="tight", dpi=300, width=2000)
plt.clf()
return runtime
def _list_outputs(self):
outputs = self.output_spec().get()
if not isdefined( self.inputs.out_file) :
self.inputs.out_file = self._gen_output()
outputs["out_file"] = self.inputs.out_file
return outputs
#########################
### Outlier measures ###
#########################
class outlier_measuresOutput(TraitedSpec):
out_file = traits.File(desc="Output file")
class outlier_measuresInput(BaseInterfaceInputSpec):
in_file = traits.File(desc="Input file")
out_file = traits.File(desc="Output file")
clobber = traits.Bool(desc="Overwrite output file", default=False)
class outlier_measuresCommand(BaseInterface):
input_spec = outlier_measuresInput
output_spec = outlier_measuresOutput
def _gen_output(self, fname ="measures.csv"):
dname = os.getcwd() + os.sep + fname
return dname
def _run_interface(self, runtime):
df = pd.read_csv( self.inputs.in_file )
out_columns=['sub','ses','task','roi','metric','measure', 'value']
df_out = | pd.DataFrame(columns=out_columns) | pandas.DataFrame |
"""
MicroGridsPy - Multi-year capacity-expansion (MYCE)
Linear Programming framework for microgrids least-cost sizing,
able to account for time-variable load demand evolution and capacity expansion.
Authors:
<NAME> - Department of Energy, Politecnico di Milano
<NAME> - Department of Energy, Politecnico di Milano
<NAME> - Department of Energy, Politecnico di Milano / Fondazione Eni Enrico Mattei
<NAME> - Department of Energy, Politecnico di Milano
<NAME> - Department of Energy, Politecnico di Milano
Based on the original model by:
<NAME> - Department of Mechanical and Aerospace Engineering, University of Liège / San Simon University, Centro Universitario de Investigacion en Energia
<NAME> - Department of Mechanical Engineering Technology, KU Leuven
"""
import pandas as pd
import re
#%% This section extracts the values of Scenarios, Periods, Years from data.dat and creates ranges for them
Data_file = "Inputs/data.dat"
Data_import = open(Data_file).readlines()
for i in range(len(Data_import)):
if "param: Scenarios" in Data_import[i]:
n_scenarios = int((re.findall('\d+',Data_import[i])[0]))
if "param: Years" in Data_import[i]:
n_years = int((re.findall('\d+',Data_import[i])[0]))
if "param: Periods" in Data_import[i]:
n_periods = int((re.findall('\d+',Data_import[i])[0]))
if "param: Generator_Types" in Data_import[i]:
n_generators = int((re.findall('\d+',Data_import[i])[0]))
scenario = [i for i in range(1,n_scenarios+1)]
year = [i for i in range(1,n_years+1)]
period = [i for i in range(1,n_periods+1)]
generator = [i for i in range(1,n_generators+1)]
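# A minimal sketch of the parameter lines the regex loop above expects to find in
# Inputs/data.dat (the values are illustrative only, not taken from a real project file):
#   param: Scenarios := 1;
#   param: Years := 20;
#   param: Periods := 8760;
#   param: Generator_Types := 2;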
#%% This section is useful to define the number of investment steps as well as to assign each year to its corresponding step
def Initialize_Upgrades_Number(model):
Data_file = "Inputs/data.dat"
Data_import = open(Data_file).readlines()
for i in range(len(Data_import)):
if "param: Years" in Data_import[i]:
n_years = int((re.findall('\d+',Data_import[i])[0]))
if "param: Step_Duration" in Data_import[i]:
step_duration = int((re.findall('\d+',Data_import[i])[0]))
if "param: Min_Last_Step_Duration" in Data_import[i]:
min_last_step_duration = int((re.findall('\d+',Data_import[i])[0]))
if n_years % step_duration == 0:
n_upgrades = n_years/step_duration
        return int(n_upgrades)
else:
n_upgrades = 1
for y in range(1, n_years + 1):
if y % step_duration == 0 and n_years - y > min_last_step_duration:
n_upgrades += 1
return int(n_upgrades)
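# Worked example (illustrative): with Years = 20 and Step_Duration = 5 the first branch
# gives 20/5 = 4 investment steps; with Years = 22, Step_Duration = 5 and
# Min_Last_Step_Duration = 3 the loop counts extra steps at years 5, 10 and 15 (year 20 is
# skipped because only 2 years would remain), again giving 4 steps, the last one covering
# the remaining 7 years.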
def Initialize_YearUpgrade_Tuples(model):
upgrade_years_list = [1 for i in range(len(model.steps))]
s_dur = model.Step_Duration
for i in range(1, len(model.steps)):
upgrade_years_list[i] = upgrade_years_list[i-1] + s_dur
yu_tuples_list = [0 for i in model.years]
if model.Steps_Number == 1:
for y in model.years:
yu_tuples_list[y-1] = (y, 1)
else:
for y in model.years:
for i in range(len(upgrade_years_list)-1):
if y >= upgrade_years_list[i] and y < upgrade_years_list[i+1]:
yu_tuples_list[y-1] = (y, model.steps[i+1])
elif y >= upgrade_years_list[-1]:
yu_tuples_list[y-1] = (y, len(model.steps))
print('\nTime horizon (year,investment-step): ' + str(yu_tuples_list))
return yu_tuples_list
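# Example (assuming model.steps is the 1-based set {1, 2} and Step_Duration = 3): for a
# 6-year horizon the printed list is [(1, 1), (2, 1), (3, 1), (4, 2), (5, 2), (6, 2)].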
#%% This section imports the multi-year Demand and Renewable-Energy output and creates a Multi-indexed DataFrame for it
Demand = pd.read_excel('Inputs/Demand.xls')
Energy_Demand_Series = pd.Series(dtype=float)
for i in range(1,n_years*n_scenarios+1):
dum = Demand[i][:]
    Energy_Demand_Series = pd.concat([Energy_Demand_Series, dum])
import pandas as pd
from scripts.python.routines.manifest import get_manifest
import numpy as np
import os
from scripts.python.pheno.datasets.filter import filter_pheno, get_passed_fields
from scipy.stats import spearmanr
import matplotlib.pyplot as plt
from scripts.python.pheno.datasets.features import get_column_name, get_status_dict, get_sex_dict
from matplotlib import colors
from scipy.stats import mannwhitneyu
import plotly.graph_objects as go
from scripts.python.routines.plot.save import save_figure
from scripts.python.routines.plot.violin import add_violin_trace
from scripts.python.routines.plot.box import add_box_trace
from scripts.python.routines.plot.layout import add_layout
import pathlib
import seaborn as sns
dataset = "GSEUNN"
path = f"E:/YandexDisk/Work/pydnameth/datasets"
datasets_info = pd.read_excel(f"{path}/datasets.xlsx", index_col='dataset')
platform = datasets_info.loc[dataset, 'platform']
manifest = get_manifest(platform)
features = {
'biomarkers3_milli_Age_Control_Acc': 'EstimatedAgeAcc',
'FGF21_milli': 'FGF21',
'GDF15_milli': 'GDF15',
'CXCL9_milli': 'CXCL9',
'biomarkers3_milli_Age_Control': 'EstimatedAge',
}
feat_ranges = {
'biomarkers3_milli_Age_Control_Acc': [-40, 400],
'FGF21_milli': [0, 1.2],
'GDF15_milli': [0, 7],
'CXCL9_milli': [-2, 35],
'biomarkers3_milli_Age_Control': [0, 400],
}
status_col = get_column_name(dataset, 'Status').replace(' ','_')
age_col = get_column_name(dataset, 'Age').replace(' ','_')
sex_col = get_column_name(dataset, 'Sex').replace(' ','_')
status_dict = get_status_dict(dataset)
status_passed_fields = status_dict['Control'] + status_dict['Case']
sex_dict = get_sex_dict(dataset)
path_save = f"{path}/{platform}/{dataset}/special/002_disease_groups_statistic"
if not os.path.exists(f"{path_save}/figs/vio"):
    pathlib.Path(f"{path_save}/figs/vio").mkdir(parents=True, exist_ok=True)
if not os.path.exists(f"{path_save}/figs/box"):
    pathlib.Path(f"{path_save}/figs/box").mkdir(parents=True, exist_ok=True)
continuous_vars = {v: k for k, v in features.items()}
categorical_vars = {status_col: [x.column for x in status_passed_fields], sex_col: list(sex_dict.values())}
pheno = pd.read_pickle(f"{path}/{platform}/{dataset}/pheno_xtd.pkl")
# random forest regression tutorial at:
# https://github.com/WillKoehrsen/Data-Analysis/blob/master/random_forest_explained/Random%20Forest%20Explained.ipynb
import argparse
import os
import sys
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.tree import export_graphviz
import numpy as np
import pandas as pd
import pydot
# args
parser = argparse.ArgumentParser()
parser.add_argument("filename", help="CSV file")
args = parser.parse_args()
# data
df = pd.read_csv(args.filename)
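# A minimal sketch (assumptions, not the original script) of how the pieces imported above
# are typically wired together in the style of the linked tutorial; the "target" column
# name and all parameter values below are placeholders.
labels = df["target"].values                    # hypothetical target column
features = df.drop(columns=["target"]).values   # remaining columns as predictors
train_x, test_x, train_y, test_y = train_test_split(
    features, labels, test_size=0.25, random_state=42
)
rf = RandomForestRegressor(n_estimators=1000, random_state=42)
rf.fit(train_x, train_y)
predictions = rf.predict(test_x)
print("mean absolute error:", np.mean(np.abs(predictions - test_y)))
# export_graphviz / pydot (imported above) would then be used to render a single tree.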
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# <NAME>
# Modified and updated to process finngen dataset by <NAME> <EMAIL>
#
#
import sys
import os
import pandas as pd
import json
import subprocess as sp
def main():
#
# Args --------------------------------------------------------------------
#
study_prefix = 'FINNGEN_R5_'
# Manifest files from Finngen R5
in_finngen = 'inputs/r5_finngen.json'
in_snp_path_list = 'inputs/input_paths_finngen.txt'
# Path to write main manifest file
out_manifest = 'finngen.manifest.json'
# Output directory for individual study fine-mapping results
output_path = 'output/'
keep_columns = [
'code',
'trait',
'trait_category',
'n_cases',
'n_controls'
]
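    # r5_finngen.json is read as JSON Lines (one phenotype object per line); only the five
    # fields listed above are kept, after renaming them to the manifest's column names.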
finngen = (
pd.read_json(path_or_buf=in_finngen, lines=True)
.rename(
columns={
'phenocode': 'code',
'phenostring': 'trait',
'category': 'trait_category',
'num_cases': 'n_cases',
'num_controls': 'n_controls'
}
)
)
finngen = finngen[keep_columns]
finngen['code'] = study_prefix + finngen['code']
finngen['n_total'] = finngen['n_cases'] + finngen['n_controls']
    gcs = pd.read_csv(in_snp_path_list, sep='\t', header=None, names=['in_path'])
import re
import sys
import numpy as np
import pytest
from pandas.compat import PYPY
from pandas import Categorical, Index, NaT, Series, date_range
import pandas._testing as tm
from pandas.api.types import is_scalar
class TestCategoricalAnalytics:
@pytest.mark.parametrize("aggregation", ["min", "max"])
def test_min_max_not_ordered_raises(self, aggregation):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
msg = f"Categorical is not ordered for operation {aggregation}"
agg_func = getattr(cat, aggregation)
with pytest.raises(TypeError, match=msg):
agg_func()
def test_min_max_ordered(self):
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
assert _min == "a"
assert _max == "d"
cat = Categorical(
["a", "b", "c", "d"], categories=["d", "c", "b", "a"], ordered=True
)
_min = cat.min()
_max = cat.max()
assert _min == "d"
assert _max == "a"
@pytest.mark.parametrize(
"categories,expected",
[
(list("ABC"), np.NaN),
([1, 2, 3], np.NaN),
pytest.param(
Series(date_range("2020-01-01", periods=3), dtype="category"),
NaT,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/29962"
),
),
],
)
@pytest.mark.parametrize("aggregation", ["min", "max"])
def test_min_max_ordered_empty(self, categories, expected, aggregation):
# GH 30227
cat = Categorical([], categories=categories, ordered=True)
agg_func = getattr(cat, aggregation)
result = agg_func()
assert result is expected
@pytest.mark.parametrize(
"values, categories",
[(["a", "b", "c", np.nan], list("cba")), ([1, 2, 3, np.nan], [3, 2, 1])],
)
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("function", ["min", "max"])
def test_min_max_with_nan(self, values, categories, function, skipna):
# GH 25303
cat = Categorical(values, categories=categories, ordered=True)
result = getattr(cat, function)(skipna=skipna)
if skipna is False:
assert result is np.nan
else:
expected = categories[0] if function == "min" else categories[2]
assert result == expected
@pytest.mark.parametrize("function", ["min", "max"])
@pytest.mark.parametrize("skipna", [True, False])
def test_min_max_only_nan(self, function, skipna):
# https://github.com/pandas-dev/pandas/issues/33450
cat = Categorical([np.nan], categories=[1, 2], ordered=True)
result = getattr(cat, function)(skipna=skipna)
assert result is np.nan
@pytest.mark.parametrize("method", ["min", "max"])
def test_deprecate_numeric_only_min_max(self, method):
# GH 25303
cat = Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True
)
with tm.assert_produces_warning(expected_warning=FutureWarning):
getattr(cat, method)(numeric_only=True)
@pytest.mark.parametrize("method", ["min", "max"])
def test_numpy_min_max_raises(self, method):
cat = Categorical(["a", "b", "c", "b"], ordered=False)
msg = (
f"Categorical is not ordered for operation {method}\n"
"you can use .as_ordered() to change the Categorical to an ordered one"
)
method = getattr(np, method)
with pytest.raises(TypeError, match=re.escape(msg)):
method(cat)
@pytest.mark.parametrize("kwarg", ["axis", "out", "keepdims"])
@pytest.mark.parametrize("method", ["min", "max"])
def test_numpy_min_max_unsupported_kwargs_raises(self, method, kwarg):
cat = Categorical(["a", "b", "c", "b"], ordered=True)
msg = (
f"the '{kwarg}' parameter is not supported in the pandas implementation "
f"of {method}"
)
if kwarg == "axis":
msg = r"`axis` must be fewer than the number of dimensions \(1\)"
kwargs = {kwarg: 42}
method = getattr(np, method)
with pytest.raises(ValueError, match=msg):
method(cat, **kwargs)
@pytest.mark.parametrize("method, expected", [("min", "a"), ("max", "c")])
def test_numpy_min_max_axis_equals_none(self, method, expected):
cat = Categorical(["a", "b", "c", "b"], ordered=True)
method = getattr(np, method)
result = method(cat, axis=None)
assert result == expected
@pytest.mark.parametrize(
"values,categories,exp_mode",
[
([1, 1, 2, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5]),
([1, 1, 1, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5, 1]),
([1, 2, 3, 4, 5], [5, 4, 3, 2, 1], [5, 4, 3, 2, 1]),
([np.nan, np.nan, np.nan, 4, 5], [5, 4, 3, 2, 1], [5, 4]),
([np.nan, np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4]),
([np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4]),
],
)
def test_mode(self, values, categories, exp_mode):
s = Categorical(values, categories=categories, ordered=True)
res = s.mode()
exp = Categorical(exp_mode, categories=categories, ordered=True)
tm.assert_categorical_equal(res, exp)
def test_searchsorted(self, ordered):
# https://github.com/pandas-dev/pandas/issues/8420
# https://github.com/pandas-dev/pandas/issues/14522
cat = Categorical(
["cheese", "milk", "apple", "bread", "bread"],
categories=["cheese", "milk", "apple", "bread"],
ordered=ordered,
)
ser = Series(cat)
# Searching for single item argument, side='left' (default)
res_cat = cat.searchsorted("apple")
assert res_cat == 2
assert is_scalar(res_cat)
res_ser = ser.searchsorted("apple")
assert res_ser == 2
assert is_scalar(res_ser)
# Searching for single item array, side='left' (default)
res_cat = cat.searchsorted(["bread"])
res_ser = ser.searchsorted(["bread"])
exp = np.array([3], dtype=np.intp)
tm.assert_numpy_array_equal(res_cat, exp)
tm.assert_numpy_array_equal(res_ser, exp)
# Searching for several items array, side='right'
res_cat = cat.searchsorted(["apple", "bread"], side="right")
res_ser = ser.searchsorted(["apple", "bread"], side="right")
exp = np.array([3, 5], dtype=np.intp)
tm.assert_numpy_array_equal(res_cat, exp)
tm.assert_numpy_array_equal(res_ser, exp)
# Searching for a single value that is not from the Categorical
with pytest.raises(KeyError, match="cucumber"):
cat.searchsorted("cucumber")
with pytest.raises(KeyError, match="cucumber"):
ser.searchsorted("cucumber")
# Searching for multiple values one of each is not from the Categorical
with pytest.raises(KeyError, match="cucumber"):
cat.searchsorted(["bread", "cucumber"])
with pytest.raises(KeyError, match="cucumber"):
ser.searchsorted(["bread", "cucumber"])
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = Index(["a", "b"])
res = cat.unique()
tm.assert_index_equal(res.categories, exp)
tm.assert_categorical_equal(res, cat)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
tm.assert_index_equal(res.categories, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"], categories=["a", "b", "c"])
exp = Index(["c", "a", "b"])
res = cat.unique()
tm.assert_index_equal(res.categories, exp)
exp_cat = Categorical(exp, categories=["c", "a", "b"])
tm.assert_categorical_equal(res, exp_cat)
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"], categories=["a", "b", "c"])
res = cat.unique()
exp = Index(["b", "a"])
tm.assert_index_equal(res.categories, exp)
exp_cat = Categorical(["b", np.nan, "a"], categories=["b", "a"])
tm.assert_categorical_equal(res, exp_cat)
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(["b", "a", "b"], categories=["a", "b"], ordered=True)
res = cat.unique()
exp_cat = Categorical(["b", "a"], categories=["a", "b"], ordered=True)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(
["c", "b", "a", "a"], categories=["a", "b", "c"], ordered=True
)
res = cat.unique()
exp_cat = Categorical(["c", "b", "a"], categories=["a", "b", "c"], ordered=True)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(["b", "a", "a"], categories=["a", "b", "c"], ordered=True)
res = cat.unique()
exp_cat = Categorical(["b", "a"], categories=["a", "b"], ordered=True)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(
["b", "b", np.nan, "a"], categories=["a", "b", "c"], ordered=True
)
res = cat.unique()
exp_cat = Categorical(["b", np.nan, "a"], categories=["a", "b"], ordered=True)
tm.assert_categorical_equal(res, exp_cat)
def test_unique_index_series(self):
c = Categorical([3, 1, 2, 2, 1], categories=[3, 2, 1])
# Categorical.unique sorts categories by appearance order
# if ordered=False
exp = Categorical([3, 1, 2], categories=[3, 1, 2])
tm.assert_categorical_equal(c.unique(), exp)
tm.assert_index_equal(Index(c).unique(), Index(exp))
tm.assert_categorical_equal(Series(c).unique(), exp)
c = Categorical([1, 1, 2, 2], categories=[3, 2, 1])
exp = Categorical([1, 2], categories=[1, 2])
tm.assert_categorical_equal(c.unique(), exp)
tm.assert_index_equal(Index(c).unique(), Index(exp))
tm.assert_categorical_equal(Series(c).unique(), exp)
c = Categorical([3, 1, 2, 2, 1], categories=[3, 2, 1], ordered=True)
# Categorical.unique keeps categories order if ordered=True
exp = Categorical([3, 1, 2], categories=[3, 2, 1], ordered=True)
tm.assert_categorical_equal(c.unique(), exp)
tm.assert_index_equal(Index(c).unique(), Index(exp))
tm.assert_categorical_equal(Series(c).unique(), exp)
def test_shift(self):
# GH 9416
cat = Categorical(["a", "b", "c", "d", "a"])
# shift forward
sp1 = cat.shift(1)
xp1 = Categorical([np.nan, "a", "b", "c", "d"])
tm.assert_categorical_equal(sp1, xp1)
        tm.assert_categorical_equal(cat[:-1], sp1[1:])
import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB, MultinomialNB
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from openpyxl import load_workbook
def proccesClassification(allData,split_tt):
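    # Trains a Gaussian Naive Bayes model for the selected study programme
    # (ASAL SEKOLAH, PROVINSI, KUANT. MATE -> STATUS KELULUSAN), shows the train/test split,
    # confusion matrix and classification report, and draws a pie chart of on-time ("TEPAT")
    # vs late ("TIDAK TEPAT") graduation predictions.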
for index, row in allData.iterrows():
if row ['PRODI'] == "TEKNIK ELEKTRO":
rs = 15
piechartName = "TEKNIK ELEKTRO"
elif row ['PRODI'] == "TEKNIK INDUSTRI":
rs = 37
piechartName = "TEKNIK INDUSTRI"
elif row ['PRODI'] == "TEKNIK INFORMATIKA":
rs = 32
piechartName = "TEKNIK INFORMATIKA"
elif row ['PRODI'] == "TEKNIK KIMIA":
rs = 13
piechartName = "TEKNIK KIMIA"
model = GaussianNB()
st.write("***Data Training dan Testing***")
x = allData[['ASAL SEKOLAH', 'PROVINSI', 'KUANT. MATE']]
st.write(x)
st.write("***Data Target***")
y = allData['STATUS KELULUSAN']
st.write(y)
st.write(y.shape)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = split_tt, random_state=rs) # 52019, 1230, 7, 12/13/14/15, industri = 37, elektro = 15, informatika = 32, kimia = 13, test = 230
nbtrain = model.fit(x_train, y_train)
st.markdown('***DATA TRAINING : ***')
st.write(x_train)
st.write(x_train.shape)
st.markdown('***TARGET TRAINING : ***')
st.write(y_train)
st.write(y_train.shape)
st.markdown('***DATA TESTING : ***')
st.write(x_test)
st.write(x_test.shape)
st.markdown('***TARGET TESTING : ***')
st.write(y_test)
st.write(y_test.shape)
y_pred = nbtrain.predict(x_test)
st.write("***Hasil Prediksi***")
st.write(y_pred)
st.write(y_pred.shape)
st.write("***Data Porbabilitas Prediksi***")
predic_prob = nbtrain.predict_proba(x_test)
st.write(predic_prob)
# Confusion matrix
st.write("***Confusion Matrix***")
df_confusion = pd.crosstab(y_test, y_pred)
st.write(df_confusion)
report = classification_report(y_test, y_pred)
st.text(report)
hasilTest = pd.DataFrame(y_pred)
testTepat = hasilTest.apply(lambda x: True if x[0] == "TEPAT" else False , axis=1)
testTidakTepat = hasilTest.apply(lambda x: True if x[0] == "TIDAK TEPAT" else False , axis=1)
jumlahTestTepat = len(testTepat[testTepat == True].index)
jumlahTestTidakTepat = len(testTidakTepat[testTidakTepat == True].index)
Data = {piechartName: [jumlahTestTepat,jumlahTestTidakTepat]}
df = pd.DataFrame(Data,columns=[piechartName],index = ['Tepat','Tidak Tepat'])
df.plot.pie(y=piechartName,figsize=(10,6), autopct='%1.0f%%', startangle=60)
TestChart = plt.show()
st.pyplot(TestChart)
    uploadDataUji = st.file_uploader("Choose an Excel file", type=['csv','xlsx'], key = 'b')
if uploadDataUji is not None:
st.write("***Data Mahasiswa***")
wb = load_workbook(uploadDataUji)
sheet_ranges = wb["Sheet1"]
model = GaussianNB()
datamhs = pd.DataFrame(sheet_ranges.values)
datamhs = datamhs[datamhs != 0]
jml_row = datamhs[0].count()
cleaning_mhs = datamhs[1:jml_row][[1,2,3,4,5,6]]
cleaning_mhs.columns = ['NIM', 'NAMA', 'ASAL SEKOLAH', 'PRODI', 'PROVINSI', 'RATA MATE']
        # drop rows with any missing values (noise)
cleaning_mhs = cleaning_mhs.dropna(axis=0, how='any')
        # convert the column dtype from object to float
cleaning_mhs['RATA MATE'] = cleaning_mhs['RATA MATE'].apply(str)
cleaning_mhs['RATA MATE'] = cleaning_mhs['RATA MATE'].str.replace(',','.').apply(float)
for index, row in cleaning_mhs.iterrows():
            # mathematics average (RATA MATE)
if row['RATA MATE'] >= 93 and row['RATA MATE'] <= 100:
cleaning_mhs.loc[index,'KUANT. MATE'] = 'SANGAT BAIK'
elif row['RATA MATE'] >= 84 and row['RATA MATE'] <= 92:
cleaning_mhs.loc[index,'KUANT. MATE'] = 'BAIK'
elif row['RATA MATE'] >= 75 and row['RATA MATE'] <= 83:
cleaning_mhs.loc[index,'KUANT. MATE'] = 'CUKUP'
elif row['RATA MATE'] >= 0 and row['RATA MATE'] <= 74:
cleaning_mhs.loc[index,'KUANT. MATE'] = 'PERLU DIMAKSIMALKAN'
cleaning_mhs
transformasi_mhs = cleaning_mhs[['NIM', 'NAMA','ASAL SEKOLAH', 'PROVINSI', 'KUANT. MATE']]
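        # The loop below encodes the categorical inputs as ordinal strings: provinces into
        # three groups ('1'-'3'), school type into SMA/SMK/MA ('1'-'3') and the maths grade
        # band into 4..1, presumably mirroring the encoding used for the training data.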
for index, row in transformasi_mhs.iterrows():
if 'Maluku Utara' in row['PROVINSI']:
transformasi_mhs.loc[index, 'PROVINSI'] = '1'
elif 'Kalimantan Tengah' in row['PROVINSI']:
transformasi_mhs.loc[index, 'PROVINSI'] = '1'
elif 'Banten' in row['PROVINSI']:
transformasi_mhs.loc[index, 'PROVINSI'] = '2'
elif 'Yogyakarta' in row['PROVINSI']:
transformasi_mhs.loc[index, 'PROVINSI'] = '2'
elif 'Gorontalo' in row['PROVINSI']:
transformasi_mhs.loc[index, 'PROVINSI'] = '2'
elif 'Bengkulu'in row['PROVINSI']:
transformasi_mhs.loc[index, 'PROVINSI'] = '2'
elif 'Kalimantan Selatan' in row['PROVINSI']:
transformasi_mhs.loc[index, 'PROVINSI'] = '2'
elif 'Lampung' in row['PROVINSI']:
transformasi_mhs.loc[index, 'PROVINSI'] = '2'
elif 'Sumatera' in row['PROVINSI']:
transformasi_mhs.loc[index, 'PROVINSI'] = '2'
elif 'Riau' in row['PROVINSI']:
transformasi_mhs.loc[index, 'PROVINSI'] = '2'
elif 'Sulawesi' in row['PROVINSI']:
transformasi_mhs.loc[index, 'PROVINSI'] = '2'
elif 'Nusa Tenggara' in row['PROVINSI']:
transformasi_mhs.loc[index, 'PROVINSI'] = '2'
elif 'Aceh' in row['PROVINSI']:
transformasi_mhs.loc[index, 'PROVINSI'] = '2'
elif 'Bangka' in row['PROVINSI']:
transformasi_mhs.loc[index, 'PROVINSI'] = '2'
elif 'Kalimantan Barat' in row['PROVINSI']:
transformasi_mhs.loc[index, 'PROVINSI'] = '2'
elif 'Jawa' in row['PROVINSI']:
transformasi_mhs.loc[index, 'PROVINSI'] = '2'
elif 'jawa' in row['PROVINSI']:
transformasi_mhs.loc[index, 'PROVINSI'] = '2'
elif 'Jambi' in row['PROVINSI']:
transformasi_mhs.loc[index, 'PROVINSI'] = '2'
elif 'Jakarta' in row['PROVINSI']:
transformasi_mhs.loc[index, 'PROVINSI'] = '2'
elif 'Bali' in row['PROVINSI']:
transformasi_mhs.loc[index, 'PROVINSI'] = '2'
elif 'Kalimantan Utara' in row['PROVINSI']:
transformasi_mhs.loc[index, 'PROVINSI'] = '2'
elif 'Papua' in row['PROVINSI']:
transformasi_mhs.loc[index, 'PROVINSI'] = '3'
elif 'Kalimantan Timur' in row['PROVINSI']:
transformasi_mhs.loc[index, 'PROVINSI'] = '3'
elif 'Maluku' in row['PROVINSI']:
transformasi_mhs.loc[index, 'PROVINSI'] = '3'
elif 'lain' in row['PROVINSI']:
transformasi_mhs.loc[index, 'PROVINSI'] = '3'
            # school of origin (ASAL SEKOLAH)
if 'SMA' in row['ASAL SEKOLAH'] or 'sma' in row['ASAL SEKOLAH'] or 'Sma' in row['ASAL SEKOLAH'] or 'SMTA' in row['ASAL SEKOLAH']:
transformasi_mhs.loc[index, 'ASAL SEKOLAH'] = '1'
elif 'SMK' in row['ASAL SEKOLAH'] or 'smk' in row['ASAL SEKOLAH'] or 'Smk' in row['ASAL SEKOLAH'] or 'STM' in row['ASAL SEKOLAH'] or 'SMF' in row['ASAL SEKOLAH']:
transformasi_mhs.loc[index, 'ASAL SEKOLAH'] = '2'
elif 'MA' in row['ASAL SEKOLAH'] or 'Ma' in row['ASAL SEKOLAH']:
transformasi_mhs.loc[index, 'ASAL SEKOLAH'] = '3'
            # mathematics average (RATA MATE)
if row['KUANT. MATE'] == "SANGAT BAIK":
transformasi_mhs.loc[index,'KUANT. MATE'] = 4
elif row['KUANT. MATE'] == "BAIK":
transformasi_mhs.loc[index,'KUANT. MATE'] = 3
elif row['KUANT. MATE'] == "CUKUP":
transformasi_mhs.loc[index,'KUANT. MATE'] = 2
elif row['KUANT. MATE'] == "PERLU DIMAKSIMALKAN":
transformasi_mhs.loc[index,'KUANT. MATE'] = 1
dataPrediksi = transformasi_mhs[['ASAL SEKOLAH', 'PROVINSI', 'KUANT. MATE']]
ass = cleaning_mhs[['NIM', 'NAMA','ASAL SEKOLAH', 'PROVINSI', 'KUANT. MATE']]
model.fit(x_train, y_train)
nbtrain = model.fit(x_train, y_train)
st.write(pd.DataFrame(transformasi_mhs))
dataPrediksi = pd.DataFrame(dataPrediksi)
# lbr = len(ddd.columns)
# st.write(lbr)
pjg = len(dataPrediksi)
dindex = []
for i in range(pjg):
dindex.append(int(i+1))
# ddd.set_axis(dindex,axis='index')
# st.write(ddd)
prediksi = model.predict(dataPrediksi)
prediksi = pd.DataFrame(prediksi)
        prediksi = prediksi.set_axis(dindex, axis='index')
prediksi_prob = nbtrain.predict_proba(dataPrediksi)
prediksi_prob = pd.DataFrame(prediksi_prob)
        prediksi_prob = prediksi_prob.set_axis(dindex, axis='index')
        df_index = pd.merge(ass, prediksi_prob, right_index=True, left_index=True)
"""
Test output formatting for Series/DataFrame, including to_string & reprs
"""
from datetime import datetime
from io import StringIO
import itertools
from operator import methodcaller
import os
from pathlib import Path
import re
from shutil import get_terminal_size
import sys
import textwrap
import dateutil
import numpy as np
import pytest
import pytz
from pandas.compat import (
IS64,
is_platform_windows,
)
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
get_option,
option_context,
read_csv,
reset_option,
set_option,
)
import pandas._testing as tm
import pandas.io.formats.format as fmt
import pandas.io.formats.printing as printing
use_32bit_repr = is_platform_windows() or not IS64
@pytest.fixture(params=["string", "pathlike", "buffer"])
def filepath_or_buffer_id(request):
"""
A fixture yielding test ids for filepath_or_buffer testing.
"""
return request.param
@pytest.fixture
def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
"""
A fixture yielding a string representing a filepath, a path-like object
and a StringIO buffer. Also checks that buffer is not closed.
"""
if filepath_or_buffer_id == "buffer":
buf = StringIO()
yield buf
assert not buf.closed
else:
assert isinstance(tmp_path, Path)
if filepath_or_buffer_id == "pathlike":
yield tmp_path / "foo"
else:
yield str(tmp_path / "foo")
@pytest.fixture
def assert_filepath_or_buffer_equals(
filepath_or_buffer, filepath_or_buffer_id, encoding
):
"""
Assertion helper for checking filepath_or_buffer.
"""
def _assert_filepath_or_buffer_equals(expected):
if filepath_or_buffer_id == "string":
with open(filepath_or_buffer, encoding=encoding) as f:
result = f.read()
elif filepath_or_buffer_id == "pathlike":
result = filepath_or_buffer.read_text(encoding=encoding)
elif filepath_or_buffer_id == "buffer":
result = filepath_or_buffer.getvalue()
assert result == expected
return _assert_filepath_or_buffer_equals
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def has_info_repr(df):
r = repr(df)
c1 = r.split("\n")[0].startswith("<class")
    c2 = r.split("\n")[0].startswith(r"&lt;class")  # _repr_html_
return c1 or c2
def has_non_verbose_info_repr(df):
has_info = has_info_repr(df)
r = repr(df)
# 1. <class>
# 2. Index
# 3. Columns
# 4. dtype
# 5. memory usage
# 6. trailing newline
nv = len(r.split("\n")) == 6
return has_info and nv
def has_horizontally_truncated_repr(df):
try: # Check header row
fst_line = np.array(repr(df).splitlines()[0].split())
cand_col = np.where(fst_line == "...")[0][0]
except IndexError:
return False
# Make sure each row has this ... in the same place
r = repr(df)
for ix, l in enumerate(r.splitlines()):
if not r.split()[cand_col] == "...":
return False
return True
def has_vertically_truncated_repr(df):
r = repr(df)
only_dot_row = False
for row in r.splitlines():
if re.match(r"^[\.\ ]+$", row):
only_dot_row = True
return only_dot_row
def has_truncated_repr(df):
return has_horizontally_truncated_repr(df) or has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
return has_horizontally_truncated_repr(df) and has_vertically_truncated_repr(df)
def has_expanded_repr(df):
r = repr(df)
for line in r.split("\n"):
if line.endswith("\\"):
return True
return False
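# An "expanded" repr is the block-wrapped layout pandas uses for wide frames: each wrapped
# line ends with a backslash continuation, which is what has_expanded_repr() checks for.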
@pytest.mark.filterwarnings("ignore::FutureWarning:.*format")
class TestDataFrameFormatting:
def test_eng_float_formatter(self, float_frame):
df = float_frame
df.loc[5] = 0
fmt.set_eng_float_format()
repr(df)
fmt.set_eng_float_format(use_eng_prefix=True)
repr(df)
fmt.set_eng_float_format(accuracy=0)
repr(df)
tm.reset_display_options()
def test_show_null_counts(self):
df = DataFrame(1, columns=range(10), index=range(10))
df.iloc[1, 1] = np.nan
def check(show_counts, result):
buf = StringIO()
df.info(buf=buf, show_counts=show_counts)
assert ("non-null" in buf.getvalue()) is result
with option_context(
"display.max_info_rows", 20, "display.max_info_columns", 20
):
check(None, True)
check(True, True)
check(False, False)
with option_context("display.max_info_rows", 5, "display.max_info_columns", 5):
check(None, False)
check(True, False)
check(False, False)
# GH37999
with tm.assert_produces_warning(
FutureWarning, match="null_counts is deprecated.+"
):
buf = StringIO()
df.info(buf=buf, null_counts=True)
assert "non-null" in buf.getvalue()
# GH37999
with pytest.raises(ValueError, match=r"null_counts used with show_counts.+"):
df.info(null_counts=True, show_counts=True)
def test_repr_truncation(self):
max_len = 20
with option_context("display.max_colwidth", max_len):
df = DataFrame(
{
"A": np.random.randn(10),
"B": [
tm.rands(np.random.randint(max_len - 1, max_len + 1))
for i in range(10)
],
}
)
r = repr(df)
r = r[r.find("\n") + 1 :]
adj = fmt.get_adjustment()
for line, value in zip(r.split("\n"), df["B"]):
if adj.len(value) + 1 > max_len:
assert "..." in line
else:
assert "..." not in line
with option_context("display.max_colwidth", 999999):
assert "..." not in repr(df)
with option_context("display.max_colwidth", max_len + 2):
assert "..." not in repr(df)
def test_repr_deprecation_negative_int(self):
# TODO(2.0): remove in future version after deprecation cycle
# Non-regression test for:
# https://github.com/pandas-dev/pandas/issues/31532
width = get_option("display.max_colwidth")
with tm.assert_produces_warning(FutureWarning):
set_option("display.max_colwidth", -1)
set_option("display.max_colwidth", width)
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
reset_option("display.chop_threshold") # default None
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
with option_context("display.chop_threshold", 0.2):
assert repr(df) == " 0 1\n0 0.0 0.5\n1 0.5 0.0"
with option_context("display.chop_threshold", 0.6):
assert repr(df) == " 0 1\n0 0.0 0.0\n1 0.0 0.0"
with option_context("display.chop_threshold", None):
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
def test_repr_chop_threshold_column_below(self):
# GH 6839: validation case
df = DataFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T
with option_context("display.chop_threshold", 0):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 -1.000000e-11\n"
"2 30.0 2.000000e-09\n"
"3 40.0 -2.000000e-11"
)
with option_context("display.chop_threshold", 1e-8):
assert repr(df) == (
" 0 1\n"
"0 10.0 0.000000e+00\n"
"1 20.0 0.000000e+00\n"
"2 30.0 0.000000e+00\n"
"3 40.0 0.000000e+00"
)
with option_context("display.chop_threshold", 5e-11):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 0.000000e+00\n"
"2 30.0 2.000000e-09\n"
"3 40.0 0.000000e+00"
)
def test_repr_obeys_max_seq_limit(self):
with option_context("display.max_seq_items", 2000):
assert len(printing.pprint_thing(list(range(1000)))) > 1000
with option_context("display.max_seq_items", 5):
assert len(printing.pprint_thing(list(range(1000)))) < 100
with option_context("display.max_seq_items", 1):
assert len(printing.pprint_thing(list(range(1000)))) < 9
def test_repr_set(self):
assert printing.pprint_thing({1}) == "{1}"
def test_repr_is_valid_construction_code(self):
# for the case of Index, where the repr is traditional rather than
# stylized
idx = Index(["a", "b"])
res = eval("pd." + repr(idx))
tm.assert_series_equal(Series(res), Series(idx))
def test_repr_should_return_str(self):
# https://docs.python.org/3/reference/datamodel.html#object.__repr__
# "...The return value must be a string object."
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = ["\u03c3", "\u03c4", "\u03c5", "\u03c6"]
cols = ["\u03c8"]
df = DataFrame(data, columns=cols, index=index1)
assert type(df.__repr__()) == str # both py2 / 3
def test_repr_no_backslash(self):
with option_context("mode.sim_interactive", True):
df = DataFrame(np.random.randn(10, 4))
assert "\\" not in repr(df)
def test_expand_frame_repr(self):
df_small = DataFrame("hello", index=[0], columns=[0])
df_wide = DataFrame("hello", index=[0], columns=range(10))
df_tall = DataFrame("hello", index=range(30), columns=range(5))
with option_context("mode.sim_interactive", True):
with option_context(
"display.max_columns",
10,
"display.width",
20,
"display.max_rows",
20,
"display.show_dimensions",
True,
):
with option_context("display.expand_frame_repr", True):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_truncated_repr(df_wide)
assert has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert has_expanded_repr(df_tall)
with option_context("display.expand_frame_repr", False):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_horizontally_truncated_repr(df_wide)
assert not has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert not has_expanded_repr(df_tall)
def test_repr_non_interactive(self):
# in non interactive mode, there can be no dependency on the
# result of terminal auto size detection
df = DataFrame("hello", index=range(1000), columns=range(5))
with option_context(
"mode.sim_interactive", False, "display.width", 0, "display.max_rows", 5000
):
assert not has_truncated_repr(df)
assert not has_expanded_repr(df)
def test_repr_truncates_terminal_size(self, monkeypatch):
# see gh-21180
terminal_size = (118, 96)
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
index = range(5)
columns = MultiIndex.from_tuples(
[
("This is a long title with > 37 chars.", "cat"),
("This is a loooooonger title with > 43 chars.", "dog"),
]
)
df = DataFrame(1, index=index, columns=columns)
result = repr(df)
h1, h2 = result.split("\n")[:2]
assert "long" in h1
assert "loooooonger" in h1
assert "cat" in h2
assert "dog" in h2
# regular columns
df2 = DataFrame({"A" * 41: [1, 2], "B" * 41: [1, 2]})
result = repr(df2)
assert df2.columns[0] in result.split("\n")[0]
def test_repr_truncates_terminal_size_full(self, monkeypatch):
# GH 22984 ensure entire window is filled
terminal_size = (80, 24)
df = DataFrame(np.random.rand(1, 7))
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
assert "..." not in str(df)
def test_repr_truncation_column_size(self):
# dataframe with last column very wide -> check it is not used to
# determine size of truncation (...) column
df = DataFrame(
{
"a": [108480, 30830],
"b": [12345, 12345],
"c": [12345, 12345],
"d": [12345, 12345],
"e": ["a" * 50] * 2,
}
)
assert "..." in str(df)
assert " ... " not in str(df)
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
pytest.skip(f"terminal size too small, {term_width} x {term_height}")
def mkframe(n):
index = [f"{i:05d}" for i in range(n)]
return DataFrame(0, index, index)
df6 = mkframe(6)
df10 = mkframe(10)
with option_context("mode.sim_interactive", True):
with option_context("display.width", term_width * 2):
with option_context("display.max_rows", 5, "display.max_columns", 5):
assert not has_expanded_repr(mkframe(4))
assert not has_expanded_repr(mkframe(5))
assert not has_expanded_repr(df6)
assert has_doubly_truncated_repr(df6)
with option_context("display.max_rows", 20, "display.max_columns", 10):
# Out off max_columns boundary, but no extending
# since not exceeding width
assert not has_expanded_repr(df6)
assert not has_truncated_repr(df6)
with option_context("display.max_rows", 9, "display.max_columns", 10):
# out vertical bounds can not result in expanded repr
assert not has_expanded_repr(df10)
assert has_vertically_truncated_repr(df10)
# width=None in terminal, auto detection
with option_context(
"display.max_columns",
100,
"display.max_rows",
term_width * 20,
"display.width",
None,
):
df = mkframe((term_width // 7) - 2)
assert not has_expanded_repr(df)
df = mkframe((term_width // 7) + 2)
printing.pprint_thing(df._repr_fits_horizontal_())
assert has_expanded_repr(df)
def test_repr_min_rows(self):
df = DataFrame({"a": range(20)})
# default setting no truncation even if above min_rows
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
df = DataFrame({"a": range(61)})
# default of max_rows 60 triggers truncation if above
assert ".." in repr(df)
assert ".." in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(df)
assert "2 " not in repr(df)
assert "..." in df._repr_html_()
assert "<td>2</td>" not in df._repr_html_()
with option_context("display.max_rows", 12, "display.min_rows", None):
# when set to None, follow value of max_rows
assert "5 5" in repr(df)
assert "<td>5</td>" in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 12):
# when set value higher as max_rows, use the minimum
assert "5 5" not in repr(df)
assert "<td>5</td>" not in df._repr_html_()
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
def test_str_max_colwidth(self):
# GH 7856
df = DataFrame(
[
{
"a": "foo",
"b": "bar",
"c": "uncomfortably long line with lots of stuff",
"d": 1,
},
{"a": "foo", "b": "bar", "c": "stuff", "d": 1},
]
)
df.set_index(["a", "b", "c"])
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably long line with lots of stuff 1\n"
"1 foo bar stuff 1"
)
with option_context("max_colwidth", 20):
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably lo... 1\n"
"1 foo bar stuff 1"
)
def test_auto_detect(self):
term_width, term_height = get_terminal_size()
fac = 1.05 # Arbitrary large factor to exceed term width
cols = range(int(term_width * fac))
index = range(10)
df = DataFrame(index=index, columns=cols)
with option_context("mode.sim_interactive", True):
with option_context("display.max_rows", None):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", 0):
# Truncate with auto detection.
assert has_horizontally_truncated_repr(df)
index = range(int(term_height * fac))
df = DataFrame(index=index, columns=cols)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
# Truncate vertically
assert has_vertically_truncated_repr(df)
with option_context("display.max_rows", None):
with option_context("display.max_columns", 0):
assert has_horizontally_truncated_repr(df)
def test_to_string_repr_unicode(self):
buf = StringIO()
unicode_values = ["\u03c3"] * 10
unicode_values = np.array(unicode_values, dtype=object)
df = DataFrame({"unicode": unicode_values})
df.to_string(col_space=10, buf=buf)
# it works!
repr(df)
idx = Index(["abc", "\u03c3a", "aegdvg"])
ser = Series(np.random.randn(len(idx)), idx)
rs = repr(ser).split("\n")
line_len = len(rs[0])
for line in rs[1:]:
try:
line = line.decode(get_option("display.encoding"))
except AttributeError:
pass
if not line.startswith("dtype:"):
assert len(line) == line_len
# it works even if sys.stdin in None
_stdin = sys.stdin
try:
sys.stdin = None
repr(df)
finally:
sys.stdin = _stdin
def test_east_asian_unicode_false(self):
# not aligned properly because of east asian width
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あああああ あ\n"
"bb い いいい\nc う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\nあああ あああああ あ\n"
"いいいいいい い いいい\nうう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n0 あああああ ... さ\n"
".. ... ... ...\n3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\nあああ あああああ ... さ\n"
".. ... ... ...\naaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
def test_east_asian_unicode_true(self):
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\n"
"a あああああ あ\n"
"bb い いいい\n"
"c う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\n"
"あああ あああああ あ\n"
"いいいいいい い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n"
"0 あああああ ... さ\n"
".. ... ... ...\n"
"3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\n"
"あああ あああああ ... さ\n"
"... ... ... ...\n"
"aaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
# ambiguous unicode
df = DataFrame(
{"b": ["あ", "いいい", "¡¡", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "¡¡¡"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c ¡¡ 33333\n"
"¡¡¡ ええええええ 4"
)
assert repr(df) == expected
def test_to_string_buffer_all_unicode(self):
buf = StringIO()
empty = DataFrame({"c/\u03c3": Series(dtype=object)})
nonempty = DataFrame({"c/\u03c3": Series([1, 2, 3])})
print(empty, file=buf)
print(nonempty, file=buf)
# this should work
buf.getvalue()
def test_to_string_with_col_space(self):
df = DataFrame(np.random.random(size=(1, 3)))
c10 = len(df.to_string(col_space=10).split("\n")[1])
c20 = len(df.to_string(col_space=20).split("\n")[1])
c30 = len(df.to_string(col_space=30).split("\n")[1])
assert c10 < c20 < c30
# GH 8230
# col_space wasn't being applied with header=False
with_header = df.to_string(col_space=20)
with_header_row1 = with_header.splitlines()[1]
no_header = df.to_string(col_space=20, header=False)
assert len(with_header_row1) == len(no_header)
def test_to_string_with_column_specific_col_space_raises(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
msg = (
"Col_space length\\(\\d+\\) should match "
"DataFrame number of columns\\(\\d+\\)"
)
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40])
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40, 50, 60])
msg = "unknown column"
with pytest.raises(ValueError, match=msg):
df.to_string(col_space={"a": "foo", "b": 23, "d": 34})
def test_to_string_with_column_specific_col_space(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
result = df.to_string(col_space={"a": 10, "b": 11, "c": 12})
# 3 separating space + each col_space for (id, a, b, c)
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
result = df.to_string(col_space=[10, 11, 12])
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
def test_to_string_truncate_indices(self):
for index in [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeIntIndex,
tm.makeDateIndex,
tm.makePeriodIndex,
]:
for column in [tm.makeStringIndex]:
for h in [10, 20]:
for w in [10, 20]:
with option_context("display.expand_frame_repr", False):
df = DataFrame(index=index(h), columns=column(w))
with option_context("display.max_rows", 15):
if h == 20:
assert has_vertically_truncated_repr(df)
else:
assert not has_vertically_truncated_repr(df)
with option_context("display.max_columns", 15):
if w == 20:
assert has_horizontally_truncated_repr(df)
else:
assert not (has_horizontally_truncated_repr(df))
with option_context(
"display.max_rows", 15, "display.max_columns", 15
):
if h == 20 and w == 20:
assert has_doubly_truncated_repr(df)
else:
assert not has_doubly_truncated_repr(df)
def test_to_string_truncate_multilevel(self):
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
df = DataFrame(index=arrays, columns=arrays)
with option_context("display.max_rows", 7, "display.max_columns", 7):
assert has_doubly_truncated_repr(df)
def test_truncate_with_different_dtypes(self):
# 11594, 12045
# when truncated the dtypes of the splits can differ
# 11594
import datetime
s = Series(
[datetime.datetime(2012, 1, 1)] * 10
+ [datetime.datetime(1012, 1, 2)]
+ [datetime.datetime(2012, 1, 3)] * 10
)
with option_context("display.max_rows", 8):
result = str(s)
assert "object" in result
# 12045
df = DataFrame({"text": ["some words"] + [None] * 9})
with option_context("display.max_rows", 8, "display.max_columns", 3):
result = str(df)
assert "None" in result
assert "NaN" not in result
def test_truncate_with_different_dtypes_multiindex(self):
# GH#13000
df = DataFrame({"Vals": range(100)})
frame = pd.concat([df], keys=["Sweep"], names=["Sweep", "Index"])
result = repr(frame)
result2 = repr(frame.iloc[:5])
assert result.startswith(result2)
def test_datetimelike_frame(self):
# GH 12211
df = DataFrame({"date": [Timestamp("20130101").tz_localize("UTC")] + [NaT] * 5})
with option_context("display.max_rows", 5):
result = str(df)
assert "2013-01-01 00:00:00+00:00" in result
assert "NaT" in result
assert "..." in result
assert "[6 rows x 1 columns]" in result
dts = [Timestamp("2011-01-01", tz="US/Eastern")] * 5 + [NaT] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00-05:00 1\n"
"1 2011-01-01 00:00:00-05:00 2\n"
".. ... ..\n"
"8 NaT 9\n"
"9 NaT 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [NaT] * 5 + [Timestamp("2011-01-01", tz="US/Eastern")] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 NaT 1\n"
"1 NaT 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [Timestamp("2011-01-01", tz="Asia/Tokyo")] * 5 + [
Timestamp("2011-01-01", tz="US/Eastern")
] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00+09:00 1\n"
"1 2011-01-01 00:00:00+09:00 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
@pytest.mark.parametrize(
"start_date",
[
"2017-01-01 23:59:59.999999999",
"2017-01-01 23:59:59.99999999",
"2017-01-01 23:59:59.9999999",
"2017-01-01 23:59:59.999999",
"2017-01-01 23:59:59.99999",
"2017-01-01 23:59:59.9999",
],
)
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
df = DataFrame({"A": date_range(start=start_date, freq="D", periods=5)})
result = str(df)
assert start_date in result
dti = date_range(start=start_date, freq="D", periods=5)
df = DataFrame({"A": range(5)}, index=dti)
result = str(df.index)
assert start_date in result
def test_nonunicode_nonascii_alignment(self):
df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]])
rep_str = df.to_string()
lines = rep_str.split("\n")
assert len(lines[1]) == len(lines[2])
def test_unicode_problem_decoding_as_ascii(self):
dm = DataFrame({"c/\u03c3": Series({"test": np.nan})})
str(dm.to_string())
def test_string_repr_encoding(self, datapath):
filepath = datapath("io", "parser", "data", "unicode_series.csv")
df = read_csv(filepath, header=None, encoding="latin1")
repr(df)
repr(df[1])
def test_repr_corner(self):
# representing infs poses no problems
df = DataFrame({"foo": [-np.inf, np.inf]})
repr(df)
def test_frame_info_encoding(self):
index = ["'Til There Was You (1997)", "ldum klaka (Cold Fever) (1994)"]
fmt.set_option("display.max_rows", 1)
df = DataFrame(columns=["a", "b", "c"], index=index)
repr(df)
repr(df.T)
fmt.set_option("display.max_rows", 200)
def test_wide_repr(self):
with option_context(
"mode.sim_interactive",
True,
"display.show_dimensions",
True,
"display.max_columns",
20,
):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
assert f"10 rows x {max_cols - 1} columns" in rep_str
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 120):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_columns(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
df = DataFrame(
np.random.randn(5, 3), columns=["a" * 90, "b" * 90, "c" * 90]
)
rep_str = repr(df)
assert len(rep_str.splitlines()) == 20
def test_wide_repr_named(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
df.index.name = "DataFrame Index"
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "DataFrame Index" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)), index=midx)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "Level 0 Level 1" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex_cols(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
mcols = MultiIndex.from_arrays(tm.rands_array(3, size=(2, max_cols - 1)))
df = DataFrame(
tm.rands_array(25, (10, max_cols - 1)), index=midx, columns=mcols
)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150, "display.max_columns", 20):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_unicode(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_long_columns(self):
with option_context("mode.sim_interactive", True):
df = DataFrame({"a": ["a" * 30, "b" * 30], "b": ["c" * 70, "d" * 80]})
result = repr(df)
assert "ccccc" in result
assert "ddddd" in result
def test_long_series(self):
n = 1000
s = Series(
np.random.randint(-50, 50, n),
index=[f"s{x:04d}" for x in range(n)],
dtype="int64",
)
import re
str_rep = str(s)
nmatches = len(re.findall("dtype", str_rep))
assert nmatches == 1
def test_index_with_nan(self):
# GH 2850
df = DataFrame(
{
"id1": {0: "1a3", 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: "78d", 1: "79d"},
"value": {0: 123, 1: 64},
}
)
# multi-index
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# index
y = df.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nd67 9h4 79d 64"
)
assert result == expected
# with append (this failed in 0.12)
y = df.set_index(["id1", "id2"]).set_index("id3", append=True)
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# all-nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nNaN 9h4 79d 64"
)
assert result == expected
# partial nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index(["id2", "id3"])
result = y.to_string()
expected = (
" id1 value\nid2 id3 \n"
"NaN 78d 1a3 123\n 79d 9h4 64"
)
assert result == expected
df = DataFrame(
{
"id1": {0: np.nan, 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: np.nan, 1: "79d"},
"value": {0: 123, 1: 64},
}
)
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"NaN NaN NaN 123\n9h4 d67 79d 64"
)
assert result == expected
def test_to_string(self):
# big mixed
biggie = DataFrame(
{"A": np.random.randn(200), "B": tm.makeStringIndex(200)},
index=np.arange(200),
)
biggie.loc[:20, "A"] = np.nan
biggie.loc[:20, "B"] = np.nan
s = biggie.to_string()
buf = StringIO()
retval = biggie.to_string(buf=buf)
assert retval is None
assert buf.getvalue() == s
assert isinstance(s, str)
# print in right order
result = biggie.to_string(
columns=["B", "A"], col_space=17, float_format="%.5f".__mod__
)
lines = result.split("\n")
header = lines[0].strip().split()
joined = "\n".join([re.sub(r"\s+", " ", x).strip() for x in lines[1:]])
recons = read_csv(StringIO(joined), names=header, header=None, sep=" ")
tm.assert_series_equal(recons["B"], biggie["B"])
assert recons["A"].count() == biggie["A"].count()
assert (np.abs(recons["A"].dropna() - biggie["A"].dropna()) < 0.1).all()
# expected = ['B', 'A']
# assert header == expected
result = biggie.to_string(columns=["A"], col_space=17)
header = result.split("\n")[0].strip().split()
expected = ["A"]
assert header == expected
biggie.to_string(columns=["B", "A"], formatters={"A": lambda x: f"{x:.1f}"})
biggie.to_string(columns=["B", "A"], float_format=str)
biggie.to_string(columns=["B", "A"], col_space=12, float_format=str)
frame = DataFrame(index=np.arange(200))
frame.to_string()
def test_to_string_no_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=False)
expected = "0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
def test_to_string_specified_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=["X", "Y"])
expected = " X Y\n0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
msg = "Writing 2 cols but got 1 aliases"
with pytest.raises(ValueError, match=msg):
df.to_string(header=["X"])
def test_to_string_no_index(self):
# GH 16839, GH 13032
df = DataFrame({"x": [11, 22], "y": [33, -44], "z": ["AAA", " "]})
df_s = df.to_string(index=False)
# Leading space is expected for positive numbers.
expected = " x y z\n11 33 AAA\n22 -44 "
assert df_s == expected
df_s = df[["y", "x", "z"]].to_string(index=False)
expected = " y x z\n 33 11 AAA\n-44 22 "
assert df_s == expected
def test_to_string_line_width_no_index(self):
# GH 13998, GH 22505
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 1 \n 2 \n 3 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n11 \n22 \n33 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 11 \n 22 \n-33 \n\n y \n 4 \n 5 \n-6 "
assert df_s == expected
def test_to_string_float_formatting(self):
tm.reset_display_options()
fmt.set_option(
"display.precision",
5,
"display.column_space",
12,
"display.notebook_repr_html",
False,
)
df = DataFrame(
{"x": [0, 0.25, 3456.000, 12e45, 1.64e6, 1.7e8, 1.253456, np.pi, -1e6]}
)
df_s = df.to_string()
if _three_digit_exp():
expected = (
" x\n0 0.00000e+000\n1 2.50000e-001\n"
"2 3.45600e+003\n3 1.20000e+046\n4 1.64000e+006\n"
"5 1.70000e+008\n6 1.25346e+000\n7 3.14159e+000\n"
"8 -1.00000e+006"
)
else:
expected = (
" x\n0 0.00000e+00\n1 2.50000e-01\n"
"2 3.45600e+03\n3 1.20000e+46\n4 1.64000e+06\n"
"5 1.70000e+08\n6 1.25346e+00\n7 3.14159e+00\n"
"8 -1.00000e+06"
)
assert df_s == expected
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string()
expected = " x\n0 3234.000\n1 0.253"
assert df_s == expected
tm.reset_display_options()
assert get_option("display.precision") == 6
df = DataFrame({"x": [1e9, 0.2512]})
df_s = df.to_string()
if _three_digit_exp():
expected = " x\n0 1.000000e+009\n1 2.512000e-001"
else:
expected = " x\n0 1.000000e+09\n1 2.512000e-01"
assert df_s == expected
def test_to_string_float_format_no_fixed_width(self):
# GH 21625
df = DataFrame({"x": [0.19999]})
expected = " x\n0 0.200"
assert df.to_string(float_format="%.3f") == expected
# GH 22270
df = DataFrame({"x": [100.0]})
expected = " x\n0 100"
assert df.to_string(float_format="%.0f") == expected
def test_to_string_small_float_values(self):
df = DataFrame({"a": [1.5, 1e-17, -5.5e-7]})
result = df.to_string()
# sadness per above
if _three_digit_exp():
expected = (
" a\n"
"0 1.500000e+000\n"
"1 1.000000e-017\n"
"2 -5.500000e-007"
)
else:
expected = (
" a\n"
"0 1.500000e+00\n"
"1 1.000000e-17\n"
"2 -5.500000e-07"
)
assert result == expected
# but not all exactly zero
df = df * 0
result = df.to_string()
expected = " 0\n0 0\n1 0\n2 -0"
def test_to_string_float_index(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.arange(5), index=index)
result = df.to_string()
expected = " 0\n1.5 0\n2.0 1\n3.0 2\n4.0 3\n5.0 4"
assert result == expected
def test_to_string_complex_float_formatting(self):
# GH #25514, 25745
with option_context("display.precision", 5):
df = DataFrame(
{
"x": [
(0.4467846931321966 + 0.0715185102060818j),
(0.2739442392974528 + 0.23515228785438969j),
(0.26974928742135185 + 0.3250604054898979j),
(-1j),
]
}
)
result = df.to_string()
expected = (
" x\n0 0.44678+0.07152j\n"
"1 0.27394+0.23515j\n"
"2 0.26975+0.32506j\n"
"3 -0.00000-1.00000j"
)
assert result == expected
def test_to_string_ascii_error(self):
data = [
(
"0 ",
" .gitignore ",
" 5 ",
" \xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2",
)
]
df = DataFrame(data)
# it works!
repr(df)
def test_to_string_int_formatting(self):
df = DataFrame({"x": [-15, 20, 25, -35]})
assert issubclass(df["x"].dtype.type, np.integer)
output = df.to_string()
expected = " x\n0 -15\n1 20\n2 25\n3 -35"
assert output == expected
def test_to_string_index_formatter(self):
df = DataFrame([range(5), range(5, 10), range(10, 15)])
rs = df.to_string(formatters={"__index__": lambda x: "abc"[x]})
xp = """\
0 1 2 3 4
a 0 1 2 3 4
b 5 6 7 8 9
c 10 11 12 13 14\
"""
assert rs == xp
def test_to_string_left_justify_cols(self):
tm.reset_display_options()
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string(justify="left")
expected = " x \n0 3234.000\n1 0.253"
assert df_s == expected
def test_to_string_format_na(self):
tm.reset_display_options()
df = DataFrame(
{
"A": [np.nan, -1, -2.1234, 3, 4],
"B": [np.nan, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 NaN NaN\n"
"1 -1.0000 foo\n"
"2 -2.1234 foooo\n"
"3 3.0000 fooooo\n"
"4 4.0000 bar"
)
assert result == expected
df = DataFrame(
{
"A": [np.nan, -1.0, -2.0, 3.0, 4.0],
"B": [np.nan, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 NaN NaN\n"
"1 -1.0 foo\n"
"2 -2.0 foooo\n"
"3 3.0 fooooo\n"
"4 4.0 bar"
)
assert result == expected
def test_to_string_format_inf(self):
# Issue #24861
tm.reset_display_options()
df = DataFrame(
{
"A": [-np.inf, np.inf, -1, -2.1234, 3, 4],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0000 foo\n"
"3 -2.1234 foooo\n"
"4 3.0000 fooooo\n"
"5 4.0000 bar"
)
assert result == expected
df = DataFrame(
{
"A": [-np.inf, np.inf, -1.0, -2.0, 3.0, 4.0],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0 foo\n"
"3 -2.0 foooo\n"
"4 3.0 fooooo\n"
"5 4.0 bar"
)
assert result == expected
def test_to_string_decimal(self):
# Issue #23614
df = DataFrame({"A": [6.0, 3.1, 2.2]})
expected = " A\n0 6,0\n1 3,1\n2 2,2"
assert df.to_string(decimal=",") == expected
def test_to_string_line_width(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
s = df.to_string(line_width=80)
assert max(len(line) for line in s.split("\n")) == 80
def test_show_dimensions(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
True,
):
assert "5 rows" in str(df)
assert "5 rows" in df._repr_html_()
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
False,
):
assert "5 rows" not in str(df)
assert "5 rows" not in df._repr_html_()
with option_context(
"display.max_rows",
2,
"display.max_columns",
2,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" in str(df)
assert "5 rows" in df._repr_html_()
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" not in str(df)
assert "5 rows" not in df._repr_html_()
def test_repr_html(self, float_frame):
df = float_frame
df._repr_html_()
fmt.set_option("display.max_rows", 1, "display.max_columns", 1)
df._repr_html_()
fmt.set_option("display.notebook_repr_html", False)
df._repr_html_()
tm.reset_display_options()
df = DataFrame([[1, 2], [3, 4]])
fmt.set_option("display.show_dimensions", True)
assert "2 rows" in df._repr_html_()
fmt.set_option("display.show_dimensions", False)
assert "2 rows" not in df._repr_html_()
tm.reset_display_options()
def test_repr_html_mathjax(self):
df = DataFrame([[1, 2], [3, 4]])
assert "tex2jax_ignore" not in df._repr_html_()
with option_context("display.html.use_mathjax", False):
assert "tex2jax_ignore" in df._repr_html_()
def test_repr_html_wide(self):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." not in df._repr_html_()
wide_df = DataFrame(tm.rands_array(25, size=(10, max_cols + 1)))
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in wide_df._repr_html_()
def test_repr_html_wide_multiindex_cols(self):
max_cols = 20
mcols = MultiIndex.from_product(
[np.arange(max_cols // 2), ["foo", "bar"]], names=["first", "second"]
)
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols)
reg_repr = df._repr_html_()
assert "..." not in reg_repr
mcols = MultiIndex.from_product(
(np.arange(1 + (max_cols // 2)), ["foo", "bar"]), names=["first", "second"]
)
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols)
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in df._repr_html_()
def test_repr_html_long(self):
with option_context("display.max_rows", 60):
max_rows = get_option("display.max_rows")
h = max_rows - 1
df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
reg_repr = df._repr_html_()
assert ".." not in reg_repr
assert str(41 + max_rows // 2) in reg_repr
h = max_rows + 1
df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
long_repr = df._repr_html_()
assert ".." in long_repr
assert str(41 + max_rows // 2) not in long_repr
assert f"{h} rows " in long_repr
assert "2 columns" in long_repr
def test_repr_html_float(self):
with option_context("display.max_rows", 60):
max_rows = get_option("display.max_rows")
h = max_rows - 1
df = DataFrame(
{
"idx": np.linspace(-10, 10, h),
"A": np.arange(1, 1 + h),
"B": np.arange(41, 41 + h),
}
).set_index("idx")
reg_repr = df._repr_html_()
assert ".." not in reg_repr
assert f"<td>{40 + h}</td>" in reg_repr
h = max_rows + 1
df = DataFrame(
{
"idx": np.linspace(-10, 10, h),
"A": np.arange(1, 1 + h),
"B": np.arange(41, 41 + h),
}
).set_index("idx")
long_repr = df._repr_html_()
assert ".." in long_repr
assert "<td>31</td>" not in long_repr
assert f"{h} rows " in long_repr
assert "2 columns" in long_repr
def test_repr_html_long_multiindex(self):
max_rows = 60
max_L1 = max_rows // 2
tuples = list(itertools.product(np.arange(max_L1), ["foo", "bar"]))
idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(np.random.randn(max_L1 * 2, 2), index=idx, columns=["A", "B"])
with option_context("display.max_rows", 60, "display.max_columns", 20):
reg_repr = df._repr_html_()
assert "..." not in reg_repr
tuples = list(itertools.product(np.arange(max_L1 + 1), ["foo", "bar"]))
idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(
np.random.randn((max_L1 + 1) * 2, 2), index=idx, columns=["A", "B"]
)
long_repr = df._repr_html_()
assert "..." in long_repr
def test_repr_html_long_and_wide(self):
max_cols = 20
max_rows = 60
h, w = max_rows - 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." not in df._repr_html_()
h, w = max_rows + 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in df._repr_html_()
def test_info_repr(self):
# GH#21746 For tests inside a terminal (i.e. not CI) we need to detect
# the terminal size to ensure that we try to print something "too big"
term_width, term_height = get_terminal_size()
max_rows = 60
max_cols = 20 + (max(term_width, 80) - 80) // 4
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_vertically_truncated_repr(df)
with option_context("display.large_repr", "info"):
assert has_info_repr(df)
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_horizontally_truncated_repr(df)
with option_context(
"display.large_repr", "info", "display.max_columns", max_cols
):
assert has_info_repr(df)
def test_info_repr_max_cols(self):
# GH #6939
df = DataFrame(np.random.randn(10, 5))
with option_context(
"display.large_repr",
"info",
"display.max_columns",
1,
"display.max_info_columns",
4,
):
assert has_non_verbose_info_repr(df)
with option_context(
"display.large_repr",
"info",
"display.max_columns",
1,
"display.max_info_columns",
5,
):
assert not has_non_verbose_info_repr(df)
# test verbose overrides
# fmt.set_option('display.max_info_columns', 4) # exceeded
def test_info_repr_html(self):
max_rows = 60
max_cols = 20
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert r"<class" not in df._repr_html_()
with option_context("display.large_repr", "info"):
assert r"<class" in df._repr_html_()
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert "<class" not in df._repr_html_()
with option_context(
"display.large_repr", "info", "display.max_columns", max_cols
):
assert "<class" in df._repr_html_()
def test_fake_qtconsole_repr_html(self, float_frame):
df = float_frame
def get_ipython():
return {"config": {"KernelApp": {"parent_appname": "ipython-qtconsole"}}}
repstr = df._repr_html_()
assert repstr is not None
fmt.set_option("display.max_rows", 5, "display.max_columns", 2)
repstr = df._repr_html_()
assert "class" in repstr # info fallback
tm.reset_display_options()
def test_pprint_pathological_object(self):
"""
If the test fails, it at least won't hang.
"""
class A:
def __getitem__(self, key):
return 3 # obviously simplified
df = DataFrame([A()])
repr(df) # just don't die
def test_float_trim_zeros(self):
vals = [
2.08430917305e10,
3.52205017305e10,
2.30674817305e10,
2.03954217305e10,
5.59897817305e10,
]
skip = True
for line in repr(DataFrame({"A": vals})).split("\n")[:-2]:
if line.startswith("dtype:"):
continue
if _three_digit_exp():
assert ("+010" in line) or skip
else:
assert ("+10" in line) or skip
skip = False
@pytest.mark.parametrize(
"data, expected",
[
(["3.50"], "0 3.50\ndtype: object"),
([1.20, "1.00"], "0 1.2\n1 1.00\ndtype: object"),
([np.nan], "0 NaN\ndtype: float64"),
([None], "0 None\ndtype: object"),
(["3.50", np.nan], "0 3.50\n1 NaN\ndtype: object"),
([3.50, np.nan], "0 3.5\n1 NaN\ndtype: float64"),
([3.50, np.nan, "3.50"], "0 3.5\n1 NaN\n2 3.50\ndtype: object"),
([3.50, None, "3.50"], "0 3.5\n1 None\n2 3.50\ndtype: object"),
],
)
def test_repr_str_float_truncation(self, data, expected):
# GH#38708
series = Series(data)
result = repr(series)
assert result == expected
@pytest.mark.parametrize(
"float_format,expected",
[
("{:,.0f}".format, "0 1,000\n1 test\ndtype: object"),
("{:.4f}".format, "0 1000.0000\n1 test\ndtype: object"),
],
)
def test_repr_float_format_in_object_col(self, float_format, expected):
# GH#40024
df = Series([1000.0, "test"])
with option_context("display.float_format", float_format):
result = repr(df)
assert result == expected
def test_dict_entries(self):
df = DataFrame({"A": [{"a": 1, "b": 2}]})
val = df.to_string()
assert "'a': 1" in val
assert "'b': 2" in val
def test_categorical_columns(self):
# GH35439
data = [[4, 2], [3, 2], [4, 3]]
cols = ["aaaaaaaaa", "b"]
df = DataFrame(data, columns=cols)
df_cat_cols = DataFrame(data, columns=pd.CategoricalIndex(cols))
assert df.to_string() == df_cat_cols.to_string()
def test_period(self):
# GH 12615
df = DataFrame(
{
"A": pd.period_range("2013-01", periods=4, freq="M"),
"B": [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02-01", freq="D"),
pd.Period("2011-03-01 09:00", freq="H"),
pd.Period("2011-04", freq="M"),
],
"C": list("abcd"),
}
)
exp = (
" A B C\n"
"0 2013-01 2011-01 a\n"
"1 2013-02 2011-02-01 b\n"
"2 2013-03 2011-03-01 09:00 c\n"
"3 2013-04 2011-04 d"
)
assert str(df) == exp
@pytest.mark.parametrize(
"length, max_rows, min_rows, expected",
[
(10, 10, 10, 10),
(10, 10, None, 10),
(10, 8, None, 8),
(20, 30, 10, 30), # max_rows > len(frame), hence max_rows
(50, 30, 10, 10), # max_rows < len(frame), hence min_rows
(100, 60, 10, 10), # same
(60, 60, 10, 60), # edge case
(61, 60, 10, 10), # edge case
],
)
def test_max_rows_fitted(self, length, min_rows, max_rows, expected):
"""Check that display logic is correct.
GH #37359
See description here:
https://pandas.pydata.org/docs/dev/user_guide/options.html#frequently-used-options
"""
formatter = fmt.DataFrameFormatter(
DataFrame(np.random.rand(length, 3)),
max_rows=max_rows,
min_rows=min_rows,
)
result = formatter.max_rows_fitted
assert result == expected
def gen_series_formatting():
s1 = Series(["a"] * 100)
s2 = Series(["ab"] * 100)
s3 = Series(["a", "ab", "abc", "abcd", "abcde", "abcdef"])
s4 = s3[::-1]
test_sers = {"onel": s1, "twol": s2, "asc": s3, "desc": s4}
return test_sers
class TestSeriesFormatting:
def setup_method(self, method):
self.ts = tm.makeTimeSeries()
def test_repr_unicode(self):
s = Series(["\u03c3"] * 10)
repr(s)
a = Series(["\u05d0"] * 1000)
a.name = "title1"
repr(a)
def test_to_string(self):
buf = StringIO()
s = self.ts.to_string()
retval = self.ts.to_string(buf=buf)
assert retval is None
assert buf.getvalue().strip() == s
# pass float_format
format = "%.4f".__mod__
result = self.ts.to_string(float_format=format)
result = [x.split()[1] for x in result.split("\n")[:-1]]
expected = [format(x) for x in self.ts]
assert result == expected
# empty string
result = self.ts[:0].to_string()
assert result == "Series([], Freq: B)"
result = self.ts[:0].to_string(length=0)
assert result == "Series([], Freq: B)"
# name and length
cp = self.ts.copy()
cp.name = "foo"
result = cp.to_string(length=True, name=True, dtype=True)
last_line = result.split("\n")[-1].strip()
assert last_line == (f"Freq: B, Name: foo, Length: {len(cp)}, dtype: float64")
def test_freq_name_separation(self):
s = Series(
np.random.randn(10), index=date_range("1/1/2000", periods=10), name=0
)
result = repr(s)
assert "Freq: D, Name: 0" in result
def test_to_string_mixed(self):
s = Series(["foo", np.nan, -1.23, 4.56])
result = s.to_string()
expected = "0 foo\n" + "1 NaN\n" + "2 -1.23\n" + "3 4.56"
assert result == expected
# but don't count NAs as floats
s = Series(["foo", np.nan, "bar", "baz"])
result = s.to_string()
expected = "0 foo\n" + "1 NaN\n" + "2 bar\n" + "3 baz"
assert result == expected
s = Series(["foo", 5, "bar", "baz"])
result = s.to_string()
expected = "0 foo\n" + "1 5\n" + "2 bar\n" + "3 baz"
assert result == expected
def test_to_string_float_na_spacing(self):
s = Series([0.0, 1.5678, 2.0, -3.0, 4.0])
s[::2] = np.nan
result = s.to_string()
expected = (
"0 NaN\n"
+ "1 1.5678\n"
+ "2 NaN\n"
+ "3 -3.0000\n"
+ "4 NaN"
)
assert result == expected
def test_to_string_without_index(self):
# GH 11729 Test index=False option
s = Series([1, 2, 3, 4])
result = s.to_string(index=False)
expected = "1\n" + "2\n" + "3\n" + "4"
assert result == expected
def test_unicode_name_in_footer(self):
s = Series([1, 2], name="\u05e2\u05d1\u05e8\u05d9\u05ea")
sf = fmt.SeriesFormatter(s, name="\u05e2\u05d1\u05e8\u05d9\u05ea")
sf._get_footer() # should not raise exception
def test_east_asian_unicode_series(self):
# not aligned properly because of east asian width
# unicode index
s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
expected = "あ a\nいい bb\nううう CCC\nええええ D\ndtype: object"
assert repr(s) == expected
# unicode values
s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
expected = "a あ\nbb いい\nc ううう\nddd ええええ\ndtype: object"
assert repr(s) == expected
# both
s = Series(["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"])
expected = (
"ああ あ\nいいいい いい\nう ううう\nえええ ええええ\ndtype: object"
)
assert repr(s) == expected
# unicode footer
s = Series(
["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"], name="おおおおおおお"
)
expected = (
"ああ あ\nいいいい いい\nう ううう\n"
"えええ ええええ\nName: おおおおおおお, dtype: object"
)
assert repr(s) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
s = Series([1, 22, 3333, 44444], index=idx)
expected = (
"あ いい 1\n"
"う え 22\n"
"おおお かかかか 3333\n"
"き くく 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
expected = (
"1 1\nAB 22\nNaN 3333\nあああ 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, longer than unicode repr
s = Series(
[1, 22, 3333, 44444], index=[1, "AB", Timestamp("2011-01-01"), "あああ"]
)
expected = (
"1 1\n"
"AB 22\n"
"2011-01-01 00:00:00 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# truncate
with option_context("display.max_rows", 3):
s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")
expected = (
"0 あ\n ... \n"
"3 ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
s.index = ["ああ", "いいいい", "う", "えええ"]
expected = (
"ああ あ\n ... \n"
"えええ ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# unicode index
s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
expected = (
"あ a\nいい bb\nううう CCC\n"
"ええええ D\ndtype: object"
)
assert repr(s) == expected
# unicode values
s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
expected = (
"a あ\nbb いい\nc ううう\n"
"ddd ええええ\ndtype: object"
)
assert repr(s) == expected
# both
s = Series(["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"])
expected = (
"ああ あ\n"
"いいいい いい\n"
"う ううう\n"
"えええ ええええ\ndtype: object"
)
assert repr(s) == expected
# unicode footer
s = Series(
["あ", "いい", "ううう", "ええええ"],
index=["ああ", "いいいい", "う", "えええ"],
name="おおおおおおお",
)
expected = (
"ああ あ\n"
"いいいい いい\n"
"う ううう\n"
"えええ ええええ\n"
"Name: おおおおおおお, dtype: object"
)
assert repr(s) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
s = Series([1, 22, 3333, 44444], index=idx)
expected = (
"あ いい 1\n"
"う え 22\n"
"おおお かかかか 3333\n"
"き くく 44444\n"
"dtype: int64"
)
assert repr(s) == expected
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
expected = (
"1 1\nAB 22\nNaN 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, longer than unicode repr
s = Series(
[1, 22, 3333, 44444],
index=[1, "AB", Timestamp("2011-01-01"), "あああ"],
)
expected = (
"1 1\n"
"AB 22\n"
"2011-01-01 00:00:00 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# truncate
with option_context("display.max_rows", 3):
s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")
expected = (
"0 あ\n ... \n"
"3 ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
s.index = ["ああ", "いいいい", "う", "えええ"]
expected = (
"ああ あ\n"
" ... \n"
"えええ ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
# ambiguous unicode
s = Series(
["¡¡", "い¡¡", "ううう", "ええええ"], index=["ああ", "¡¡¡¡いい", "¡¡", "えええ"]
)
expected = (
"ああ ¡¡\n"
"¡¡¡¡いい い¡¡\n"
"¡¡ ううう\n"
"えええ ええええ\ndtype: object"
)
assert repr(s) == expected
def test_float_trim_zeros(self):
vals = [
2.08430917305e10,
3.52205017305e10,
2.30674817305e10,
2.03954217305e10,
5.59897817305e10,
]
for line in repr(Series(vals)).split("\n"):
if line.startswith("dtype:"):
continue
if _three_digit_exp():
assert "+010" in line
else:
assert "+10" in line
def test_datetimeindex(self):
index = date_range("20130102", periods=6)
s = Series(1, index=index)
result = s.to_string()
assert "2013-01-02" in result
# nat in index
s2 = Series(2, index=[Timestamp("20130111"), NaT])
s = s2.append(s)
result = s.to_string()
assert "NaT" in result
# nat in summary
result = str(s2.index)
assert "NaT" in result
@pytest.mark.parametrize(
"start_date",
[
"2017-01-01 23:59:59.999999999",
"2017-01-01 23:59:59.99999999",
"2017-01-01 23:59:59.9999999",
"2017-01-01 23:59:59.999999",
"2017-01-01 23:59:59.99999",
"2017-01-01 23:59:59.9999",
],
)
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
s1 = Series(date_range(start=start_date, freq="D", periods=5))
result = str(s1)
assert start_date in result
dti = date_range(start=start_date, freq="D", periods=5)
s2 = Series(3, index=dti)
result = str(s2.index)
assert start_date in result
def test_timedelta64(self):
from datetime import (
datetime,
timedelta,
)
Series(np.array([1100, 20], dtype="timedelta64[ns]")).to_string()
s = Series(date_range("2012-1-1", periods=3, freq="D"))
# GH2146
# adding NaTs
y = s - s.shift(1)
result = y.to_string()
assert "1 days" in result
assert "00:00:00" not in result
assert "NaT" in result
# with frac seconds
o = Series([datetime(2012, 1, 1, microsecond=150)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +23:59:59.999850" in result
# rounding?
o = Series([datetime(2012, 1, 1, 1)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +23:00:00" in result
assert "1 days 23:00:00" in result
o = Series([datetime(2012, 1, 1, 1, 1)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +22:59:00" in result
assert "1 days 22:59:00" in result
o = Series([datetime(2012, 1, 1, 1, 1, microsecond=150)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +22:58:59.999850" in result
assert "0 days 22:58:59.999850" in result
# neg time
td = timedelta(minutes=5, seconds=3)
s2 = Series(date_range("2012-1-1", periods=3, freq="D")) + td
y = s - s2
result = y.to_string()
assert "-1 days +23:54:57" in result
td = timedelta(microseconds=550)
s2 = Series(date_range("2012-1-1", periods=3, freq="D")) + td
y = s - td
result = y.to_string()
assert "2012-01-01 23:59:59.999450" in result
# no boxing of the actual elements
td = Series(pd.timedelta_range("1 days", periods=3))
result = td.to_string()
assert result == "0 1 days\n1 2 days\n2 3 days"
def test_mixed_datetime64(self):
df = DataFrame({"A": [1, 2], "B": ["2012-01-01", "2012-01-02"]})
df["B"] = pd.to_datetime(df.B)
result = repr(df.loc[0])
assert "2012-01-01" in result
def test_period(self):
# GH 12615
index = pd.period_range("2013-01", periods=6, freq="M")
s = Series(np.arange(6, dtype="int64"), index=index)
exp = (
"2013-01 0\n"
"2013-02 1\n"
"2013-03 2\n"
"2013-04 3\n"
"2013-05 4\n"
"2013-06 5\n"
"Freq: M, dtype: int64"
)
assert str(s) == exp
s = Series(index)
exp = (
"0 2013-01\n"
"1 2013-02\n"
"2 2013-03\n"
"3 2013-04\n"
"4 2013-05\n"
"5 2013-06\n"
"dtype: period[M]"
)
assert str(s) == exp
# periods with mixed freq
s = Series(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02-01", freq="D"),
pd.Period("2011-03-01 09:00", freq="H"),
]
)
exp = (
"0 2011-01\n1 2011-02-01\n"
"2 2011-03-01 09:00\ndtype: object"
)
assert str(s) == exp
def test_max_multi_index_display(self):
# GH 7101
# doc example (indexing.rst)
# multi-index
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
tuples = list(zip(*arrays))
index = MultiIndex.from_tuples(tuples, names=["first", "second"])
s = Series(np.random.randn(8), index=index)
with option_context("display.max_rows", 10):
assert len(str(s).split("\n")) == 10
with option_context("display.max_rows", 3):
assert len(str(s).split("\n")) == 5
with option_context("display.max_rows", 2):
assert len(str(s).split("\n")) == 5
with option_context("display.max_rows", 1):
assert len(str(s).split("\n")) == 4
with option_context("display.max_rows", 0):
assert len(str(s).split("\n")) == 10
# index
s = Series(np.random.randn(8), None)
with option_context("display.max_rows", 10):
assert len(str(s).split("\n")) == 9
with option_context("display.max_rows", 3):
assert len(str(s).split("\n")) == 4
with option_context("display.max_rows", 2):
assert len(str(s).split("\n")) == 4
with option_context("display.max_rows", 1):
assert len(str(s).split("\n")) == 3
with option_context("display.max_rows", 0):
assert len(str(s).split("\n")) == 9
# Make sure #8532 is fixed
def test_consistent_format(self):
s = Series([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9999, 1, 1] * 10)
with option_context("display.max_rows", 10, "display.show_dimensions", False):
res = repr(s)
exp = (
"0 1.0000\n1 1.0000\n2 1.0000\n3 "
"1.0000\n4 1.0000\n ... \n125 "
"1.0000\n126 1.0000\n127 0.9999\n128 "
"1.0000\n129 1.0000\ndtype: float64"
)
assert res == exp
def chck_ncols(self, s):
with option_context("display.max_rows", 10):
res = repr(s)
lines = res.split("\n")
lines = [
line for line in repr(s).split("\n") if not re.match(r"[^\.]*\.+", line)
][:-1]
ncolsizes = len({len(line.strip()) for line in lines})
assert ncolsizes == 1
def test_format_explicit(self):
test_sers = gen_series_formatting()
with option_context("display.max_rows", 4, "display.show_dimensions", False):
res = repr(test_sers["onel"])
exp = "0 a\n1 a\n ..\n98 a\n99 a\ndtype: object"
assert exp == res
res = repr(test_sers["twol"])
exp = "0 ab\n1 ab\n ..\n98 ab\n99 ab\ndtype: object"
assert exp == res
res = repr(test_sers["asc"])
exp = (
"0 a\n1 ab\n ... \n4 abcde\n5 "
"abcdef\ndtype: object"
)
assert exp == res
res = repr(test_sers["desc"])
exp = (
"5 abcdef\n4 abcde\n ... \n1 ab\n0 "
"a\ndtype: object"
)
assert exp == res
def test_ncols(self):
test_sers = gen_series_formatting()
for s in test_sers.values():
self.chck_ncols(s)
def test_max_rows_eq_one(self):
s = Series(range(10), dtype="int64")
with option_context("display.max_rows", 1):
strrepr = repr(s).split("\n")
exp1 = ["0", "0"]
res1 = strrepr[0].split()
assert exp1 == res1
exp2 = [".."]
res2 = strrepr[1].split()
assert exp2 == res2
def test_truncate_ndots(self):
def getndots(s):
return len(re.match(r"[^\.]*(\.*)", s).groups()[0])
s = Series([0, 2, 3, 6])
with option_context("display.max_rows", 2):
strrepr = repr(s).replace("\n", "")
assert getndots(strrepr) == 2
s = Series([0, 100, 200, 400])
with option_context("display.max_rows", 2):
strrepr = repr(s).replace("\n", "")
assert getndots(strrepr) == 3
def test_show_dimensions(self):
# gh-7117
s = Series(range(5))
assert "Length" not in repr(s)
with option_context("display.max_rows", 4):
assert "Length" in repr(s)
with option_context("display.show_dimensions", True):
assert "Length" in repr(s)
with option_context("display.max_rows", 4, "display.show_dimensions", False):
assert "Length" not in repr(s)
def test_repr_min_rows(self):
s = Series(range(20))
# default setting no truncation even if above min_rows
assert ".." not in repr(s)
s = Series(range(61))
# default of max_rows 60 triggers truncation if above
assert ".." in repr(s)
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(s)
assert "2 " not in repr(s)
with option_context("display.max_rows", 12, "display.min_rows", None):
# when set to None, follow value of max_rows
assert "5 5" in repr(s)
with option_context("display.max_rows", 10, "display.min_rows", 12):
# when min_rows is set higher than max_rows, the smaller of the two (max_rows) is used
assert "5 5" not in repr(s)
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(s)
def test_to_string_name(self):
s = Series(range(100), dtype="int64")
s.name = "myser"
res = s.to_string(max_rows=2, name=True)
exp = "0 0\n ..\n99 99\nName: myser"
assert res == exp
res = s.to_string(max_rows=2, name=False)
exp = "0 0\n ..\n99 99"
assert res == exp
def test_to_string_dtype(self):
s = Series(range(100), dtype="int64")
res = s.to_string(max_rows=2, dtype=True)
exp = "0 0\n ..\n99 99\ndtype: int64"
assert res == exp
res = s.to_string(max_rows=2, dtype=False)
exp = "0 0\n ..\n99 99"
assert res == exp
def test_to_string_length(self):
s = Series(range(100), dtype="int64")
res = s.to_string(max_rows=2, length=True)
exp = "0 0\n ..\n99 99\nLength: 100"
assert res == exp
def test_to_string_na_rep(self):
s = Series(index=range(100), dtype=np.float64)
res = s.to_string(na_rep="foo", max_rows=2)
exp = "0 foo\n ..\n99 foo"
assert res == exp
def test_to_string_float_format(self):
s = Series(range(10), dtype="float64")
res = s.to_string(float_format=lambda x: f"{x:2.1f}", max_rows=2)
exp = "0 0.0\n ..\n9 9.0"
assert res == exp
def test_to_string_header(self):
s = Series(range(10), dtype="int64")
s.index.name = "foo"
res = s.to_string(header=True, max_rows=2)
exp = "foo\n0 0\n ..\n9 9"
assert res == exp
res = s.to_string(header=False, max_rows=2)
exp = "0 0\n ..\n9 9"
assert res == exp
def test_to_string_multindex_header(self):
# GH 16718
df = DataFrame({"a": [0], "b": [1], "c": [2], "d": [3]}).set_index(["a", "b"])
res = df.to_string(header=["r1", "r2"])
exp = " r1 r2\na b \n0 1 2 3"
assert res == exp
def test_to_string_empty_col(self):
# GH 13653
s = Series(["", "Hello", "World", "", "", "Mooooo", "", ""])
res = s.to_string(index=False)
exp = " \n Hello\n World\n \n \nMooooo\n \n "
assert re.match(exp, res)
class TestGenericArrayFormatter:
def test_1d_array(self):
# GenericArrayFormatter is used on types for which there isn't a dedicated
# formatter. np.bool_ is one of those types.
obj = fmt.GenericArrayFormatter(np.array([True, False]))
res = obj.get_result()
assert len(res) == 2
# Results should be right-justified.
assert res[0] == " True"
assert res[1] == " False"
def test_2d_array(self):
obj = fmt.GenericArrayFormatter(np.array([[True, False], [False, True]]))
res = obj.get_result()
assert len(res) == 2
assert res[0] == " [True, False]"
assert res[1] == " [False, True]"
def test_3d_array(self):
obj = fmt.GenericArrayFormatter(
np.array([[[True, True], [False, False]], [[False, True], [True, False]]])
)
res = obj.get_result()
assert len(res) == 2
assert res[0] == " [[True, True], [False, False]]"
assert res[1] == " [[False, True], [True, False]]"
def test_2d_extension_type(self):
# GH 33770
# Define a stub extension type with just enough code to run Series.__repr__()
class DtypeStub(pd.api.extensions.ExtensionDtype):
@property
def type(self):
return np.ndarray
@property
def name(self):
return "DtypeStub"
class ExtTypeStub(pd.api.extensions.ExtensionArray):
def __len__(self):
return 2
def __getitem__(self, ix):
return [ix == 1, ix == 0]
@property
def dtype(self):
return DtypeStub()
series = Series(ExtTypeStub())
res = repr(series) # This line crashed before #33770 was fixed.
expected = "0 [False True]\n" + "1 [ True False]\n" + "dtype: DtypeStub"
assert res == expected
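# Helper used throughout these formatting tests: some platforms (e.g. Windows
# builds of Python) render exponents with three digits ("1.7e+008" rather than
# "1.7e+08"), and many of the expected strings above and below branch on this.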
def _three_digit_exp():
return f"{1.7e8:.4g}" == "1.7e+008"
class TestFloatArrayFormatter:
def test_misc(self):
obj = fmt.FloatArrayFormatter(np.array([], dtype=np.float64))
result = obj.get_result()
assert len(result) == 0
def test_format(self):
obj = fmt.FloatArrayFormatter(np.array([12, 0], dtype=np.float64))
result = obj.get_result()
assert result[0] == " 12.0"
assert result[1] == " 0.0"
def test_output_display_precision_trailing_zeroes(self):
# Issue #20359: trimming zeros while there is no decimal point
# Happens when display precision is set to zero
with option_context("display.precision", 0):
s = Series([840.0, 4200.0])
expected_output = "0 840\n1 4200\ndtype: float64"
assert str(s) == expected_output
def test_output_significant_digits(self):
# Issue #9764
# In case default display precision changes:
with option_context("display.precision", 6):
# DataFrame example from issue #9764
d = DataFrame(
{
"col1": [
9.999e-8,
1e-7,
1.0001e-7,
2e-7,
4.999e-7,
5e-7,
5.0001e-7,
6e-7,
9.999e-7,
1e-6,
1.0001e-6,
2e-6,
4.999e-6,
5e-6,
5.0001e-6,
6e-6,
]
}
)
expected_output = {
(0, 6): " col1\n"
"0 9.999000e-08\n"
"1 1.000000e-07\n"
"2 1.000100e-07\n"
"3 2.000000e-07\n"
"4 4.999000e-07\n"
"5 5.000000e-07",
(1, 6): " col1\n"
"1 1.000000e-07\n"
"2 1.000100e-07\n"
"3 2.000000e-07\n"
"4 4.999000e-07\n"
"5 5.000000e-07",
(1, 8): " col1\n"
"1 1.000000e-07\n"
"2 1.000100e-07\n"
"3 2.000000e-07\n"
"4 4.999000e-07\n"
"5 5.000000e-07\n"
"6 5.000100e-07\n"
"7 6.000000e-07",
(8, 16): " col1\n"
"8 9.999000e-07\n"
"9 1.000000e-06\n"
"10 1.000100e-06\n"
"11 2.000000e-06\n"
"12 4.999000e-06\n"
"13 5.000000e-06\n"
"14 5.000100e-06\n"
"15 6.000000e-06",
(9, 16): " col1\n"
"9 0.000001\n"
"10 0.000001\n"
"11 0.000002\n"
"12 0.000005\n"
"13 0.000005\n"
"14 0.000005\n"
"15 0.000006",
}
for (start, stop), v in expected_output.items():
assert str(d[start:stop]) == v
def test_too_long(self):
# GH 10451
with option_context("display.precision", 4):
# need both a number > 1e6 and something that normally formats to
# having length > display.precision + 6
df = DataFrame({"x": [12345.6789]})
assert str(df) == " x\n0 12345.6789"
df = DataFrame({"x": [2e6]})
assert str(df) == " x\n0 2000000.0"
df = DataFrame({"x": [12345.6789, 2e6]})
assert str(df) == " x\n0 1.2346e+04\n1 2.0000e+06"
class TestRepr_timedelta64:
def test_none(self):
delta_1d = pd.to_timedelta(1, unit="D")
delta_0d = pd.to_timedelta(0, unit="D")
delta_1s = pd.to_timedelta(1, unit="s")
delta_500ms = pd.to_timedelta(500, unit="ms")
drepr = lambda x: x._repr_base()
assert drepr(delta_1d) == "1 days"
assert drepr(-delta_1d) == "-1 days"
assert drepr(delta_0d) == "0 days"
assert drepr(delta_1s) == "0 days 00:00:01"
assert drepr(delta_500ms) == "0 days 00:00:00.500000"
assert drepr(delta_1d + delta_1s) == "1 days 00:00:01"
assert drepr(-delta_1d + delta_1s) == "-1 days +00:00:01"
assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000"
assert drepr(-delta_1d + delta_500ms) == "-1 days +00:00:00.500000"
def test_sub_day(self):
delta_1d = pd.to_timedelta(1, unit="D")
delta_0d = pd.to_timedelta(0, unit="D")
delta_1s = pd.to_timedelta(1, unit="s")
delta_500ms = pd.to_timedelta(500, unit="ms")
drepr = lambda x: x._repr_base(format="sub_day")
assert drepr(delta_1d) == "1 days"
assert drepr(-delta_1d) == "-1 days"
assert drepr(delta_0d) == "00:00:00"
assert drepr(delta_1s) == "00:00:01"
assert drepr(delta_500ms) == "00:00:00.500000"
assert drepr(delta_1d + delta_1s) == "1 days 00:00:01"
assert drepr(-delta_1d + delta_1s) == "-1 days +00:00:01"
assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000"
assert drepr(-delta_1d + delta_500ms) == "-1 days +00:00:00.500000"
def test_long(self):
delta_1d = pd.to_timedelta(1, unit="D")
delta_0d = pd.to_timedelta(0, unit="D")
delta_1s = pd.to_timedelta(1, unit="s")
delta_500ms = pd.to_timedelta(500, unit="ms")
drepr = lambda x: x._repr_base(format="long")
assert drepr(delta_1d) == "1 days 00:00:00"
assert drepr(-delta_1d) == "-1 days +00:00:00"
assert drepr(delta_0d) == "0 days 00:00:00"
assert drepr(delta_1s) == "0 days 00:00:01"
assert drepr(delta_500ms) == "0 days 00:00:00.500000"
assert drepr(delta_1d + delta_1s) == "1 days 00:00:01"
assert drepr(-delta_1d + delta_1s) == "-1 days +00:00:01"
assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000"
assert drepr(-delta_1d + delta_500ms) == "-1 days +00:00:00.500000"
def test_all(self):
delta_1d = pd.to_timedelta(1, unit="D")
delta_0d = pd.to_timedelta(0, unit="D")
delta_1ns = pd.to_timedelta(1, unit="ns")
drepr = lambda x: x._repr_base(format="all")
assert drepr(delta_1d) == "1 days 00:00:00.000000000"
assert drepr(-delta_1d) == "-1 days +00:00:00.000000000"
assert drepr(delta_0d) == "0 days 00:00:00.000000000"
assert drepr(delta_1ns) == "0 days 00:00:00.000000001"
assert drepr(-delta_1d + delta_1ns) == "-1 days +00:00:00.000000001"
class TestTimedelta64Formatter:
def test_days(self):
x = pd.to_timedelta(list(range(5)) + [NaT], unit="D")
result = fmt.Timedelta64Formatter(x, box=True).get_result()
assert result[0].strip() == "'0 days'"
assert result[1].strip() == "'1 days'"
result = fmt.Timedelta64Formatter(x[1:2], box=True).get_result()
assert result[0].strip() == "'1 days'"
result = fmt.Timedelta64Formatter(x, box=False).get_result()
assert result[0].strip() == "0 days"
assert result[1].strip() == "1 days"
result = fmt.Timedelta64Formatter(x[1:2], box=False).get_result()
assert result[0].strip() == "1 days"
def test_days_neg(self):
x = pd.to_timedelta(list(range(5)) + [NaT], unit="D")
result = fmt.Timedelta64Formatter(-x, box=True).get_result()
assert result[0].strip() == "'0 days'"
assert result[1].strip() == "'-1 days'"
def test_subdays(self):
y = pd.to_timedelta(list(range(5)) + [NaT], unit="s")
result = fmt.Timedelta64Formatter(y, box=True).get_result()
assert result[0].strip() == "'0 days 00:00:00'"
assert result[1].strip() == "'0 days 00:00:01'"
def test_subdays_neg(self):
y = pd.to_timedelta(list(range(5)) + [NaT], unit="s")
result = fmt.Timedelta64Formatter(-y, box=True).get_result()
assert result[0].strip() == "'0 days 00:00:00'"
assert result[1].strip() == "'-1 days +23:59:59'"
def test_zero(self):
x = pd.to_timedelta(list(range(1)) + [NaT], unit="D")
result = fmt.Timedelta64Formatter(x, box=True).get_result()
assert result[0].strip() == "'0 days'"
x = pd.to_timedelta(list(range(1)), unit="D")
result = fmt.Timedelta64Formatter(x, box=True).get_result()
assert result[0].strip() == "'0 days'"
class TestDatetime64Formatter:
def test_mixed(self):
x = Series([datetime(2013, 1, 1), datetime(2013, 1, 1, 12), NaT])
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 00:00:00"
assert result[1].strip() == "2013-01-01 12:00:00"
def test_dates(self):
x = Series([datetime(2013, 1, 1), datetime(2013, 1, 2), NaT])
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01"
assert result[1].strip() == "2013-01-02"
def test_date_nanos(self):
x = Series([Timestamp(200)])
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "1970-01-01 00:00:00.000000200"
def test_dates_display(self):
# 10170
# make sure that we consistently display date formatting
x = Series(date_range("20130101 09:00:00", periods=5, freq="D"))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-05 09:00:00"
x = Series(date_range("20130101 09:00:00", periods=5, freq="s"))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-01 09:00:04"
x = Series(date_range("20130101 09:00:00", periods=5, freq="ms"))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00.000"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-01 09:00:00.004"
x = Series(date_range("20130101 09:00:00", periods=5, freq="us"))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00.000000"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-01 09:00:00.000004"
x = Series(date_range("20130101 09:00:00", periods=5, freq="N"))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00.000000000"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-01 09:00:00.000000004"
def test_datetime64formatter_yearmonth(self):
x = Series([datetime(2016, 1, 1), datetime(2016, 2, 2)])
def format_func(x):
return x.strftime("%Y-%m")
formatter = fmt.Datetime64Formatter(x, formatter=format_func)
result = formatter.get_result()
assert result == ["2016-01", "2016-02"]
def test_datetime64formatter_hoursecond(self):
x = Series(
pd.to_datetime(["10:10:10.100", "12:12:12.120"], format="%H:%M:%S.%f")
)
def format_func(x):
return x.strftime("%H:%M")
formatter = fmt.Datetime64Formatter(x, formatter=format_func)
result = formatter.get_result()
assert result == ["10:10", "12:12"]
class TestNaTFormatting:
def test_repr(self):
assert repr(NaT) == "NaT"
def test_str(self):
assert str(NaT) == "NaT"
class TestDatetimeIndexFormat:
def test_datetime(self):
formatted = pd.to_datetime([datetime(2003, 1, 1, 12), NaT]).format()
assert formatted[0] == "2003-01-01 12:00:00"
assert formatted[1] == "NaT"
def test_date(self):
formatted = pd.to_datetime([datetime(2003, 1, 1), NaT]).format()
assert formatted[0] == "2003-01-01"
assert formatted[1] == "NaT"
def test_date_tz(self):
formatted = pd.to_datetime([datetime(2013, 1, 1)], utc=True).format()
assert formatted[0] == "2013-01-01 00:00:00+00:00"
formatted = pd.to_datetime([datetime(2013, 1, 1), NaT], utc=True).format()
assert formatted[0] == "2013-01-01 00:00:00+00:00"
def test_date_explicit_date_format(self):
formatted = pd.to_datetime([datetime(2003, 2, 1), NaT]).format(
date_format="%m-%d-%Y", na_rep="UT"
)
assert formatted[0] == "02-01-2003"
assert formatted[1] == "UT"
class TestDatetimeIndexUnicode:
def test_dates(self):
text = str(pd.to_datetime([datetime(2013, 1, 1), datetime(2014, 1, 1)]))
assert "['2013-01-01'," in text
assert ", '2014-01-01']" in text
def test_mixed(self):
text = str(
pd.to_datetime(
[datetime(2013, 1, 1), datetime(2014, 1, 1, 12), datetime(2014, 1, 1)]
)
)
assert "'2013-01-01 00:00:00'," in text
assert "'2014-01-01 00:00:00']" in text
class TestStringRepTimestamp:
def test_no_tz(self):
dt_date = datetime(2013, 1, 2)
assert str(dt_date) == str(Timestamp(dt_date))
dt_datetime = datetime(2013, 1, 2, 12, 1, 3)
assert str(dt_datetime) == str(Timestamp(dt_datetime))
dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45)
assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us))
ts_nanos_only = Timestamp(200)
assert str(ts_nanos_only) == "1970-01-01 00:00:00.000000200"
ts_nanos_micros = Timestamp(1200)
assert str(ts_nanos_micros) == "1970-01-01 00:00:00.000001200"
def test_tz_pytz(self):
dt_date = datetime(2013, 1, 2, tzinfo=pytz.utc)
assert str(dt_date) == str(Timestamp(dt_date))
dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=pytz.utc)
assert str(dt_datetime) == str(Timestamp(dt_datetime))
"""
Script goal:
Test out the Google Earth Engine to see what I can do
- find a Landsat collection for a single point
"""
#==============================================================================
__title__ = "GEE Movie Maker"
__author__ = "<NAME>"
__version__ = "v1.0(04.04.2019)"
__email__ = "<EMAIL>"
#==============================================================================
# +++++ Check the paths and set ex path to fireflies folder +++++
import os
import sys
if not os.getcwd().endswith("fireflies"):
if "fireflies" in os.getcwd():
p1, p2, _ = os.getcwd().partition("fireflies")
os.chdir(p1+p2)
else:
raise OSError(
"This script was called from an unknown path. CWD can not be set"
)
sys.path.append(os.getcwd())
#==============================================================================
# Import packages
import numpy as np
import pandas as pd
import geopandas as gpd
import argparse
import datetime as dt
import warnings as warn
import xarray as xr
import bottleneck as bn
import scipy as sp
import glob
import time
from collections import OrderedDict
from scipy import stats
from numba import jit
# Import the Earth Engine Python Package
import ee
import ee.mapclient
from ee import batch
from geetools import batch as gee_batch
# from netCDF4 import Dataset, num2date, date2num
# from scipy import stats
# import statsmodels.stats.multitest as smsM
# Import plotting and colorpackages
import matplotlib.pyplot as plt
import matplotlib.colors as mpc
import matplotlib as mpl
import palettable
import fiona
fiona.drvsupport.supported_drivers['kml'] = 'rw' # enable KML support which is disabled by default
fiona.drvsupport.supported_drivers['KML'] = 'rw' # enable KML support which is disabled by default
# import seaborn as sns
# import cartopy.crs as ccrs
# import cartopy.feature as cpf
# from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import geopy.distance as geodis
import myfunctions.corefunctions as cf
# # Import debugging packages
# import socket
# print(socket.gethostname())
import ipdb
print("numpy version : ", np.__version__)
print("pandas version : ", pd.__version__)
print("xarray version : ", xr.__version__)
#==============================================================================
def main(args):
# ========== Initialize the Earth Engine object ==========
ee.Initialize()
# ========== Set an overwrite =========
force = False
cordf = True # force the creation of a new master coord list
tsite = args.site
cordg = True
# ========== Create the system specific paths ==========
sysname = os.uname()[1]
if sysname == 'DESKTOP-CSHARFM':
# LAPTOP
spath = "/mnt/c/Users/arden/Google Drive/UoL/FIREFLIES/VideoExports/"
elif sysname == "owner":
spath = "/mnt/c/Users/user/Google Drive/UoL/FIREFLIES/VideoExports/"
elif sysname == "ubuntu":
# Work PC
spath = "/media/ubuntu/Seagate Backup Plus Drive/Data51/VideoExports/"
else:
warn.warn("Paths not created for this computer")
# spath = "/media/ubuntu/Seagate Backup Plus Drive"
ipdb.set_trace()
cf.pymkdir(spath)
# ========== create the geometry ==========
cordname = "./data/other/GEE_sitelist.csv"
if not os.path.isfile(cordname) or cordf:
print("Generating and saving a new master coord table")
site_coords = geom_builder()
for col in site_coords.columns[1:]:
site_coords = site_coords.astype({col:float})
site_coords.to_csv(cordname)
else:
print("Loading master coord table")
site_coords = pd.read_csv(cordname, index_col=0)#, parse_dates=True
# warn.warn("THere is some form of bug here, going interactive. Look at the dataframe")
# ipdb.set_trace()
program = "LANDSAT"
cordf = True
# ========== Loop over each site ==========
for index, coords in site_coords.iterrows():
# ========== Check if the path and file exist ==========
checkfile = "%s%s/%s_%s_gridinfo.csv" % (spath, coords["name"], program, coords["name"])
if args.site is not None:
# check if the site is the one requested on the command line
if tsite == coords["name"]:
# ========== Get the start time ==========
t0 = | pd.Timestamp.now() | pandas.Timestamp.now |
import streamlit as st
import numpy as np
import pandas as pd
import sqlite3
conn=sqlite3.connect('data.db')
c=conn.cursor()
import os
import warnings
warnings.filterwarnings('ignore')
import tensorflow.keras as tf
import joblib
import base64
from io import BytesIO
ratings_1=pd.read_csv("ratings_1.csv")
ratings_2=pd.read_csv("ratings_2.csv")
ratings_3=pd.read_csv("ratings_3.csv")
ratings_4=pd.read_csv("ratings_4.csv")
ratings_5=pd.read_csv("ratings_5.csv")
ratings_df_list=[ratings_1,ratings_2,ratings_3,ratings_4,ratings_5]
ratings_df=pd.concat(ratings_df_list)
del ratings_1,ratings_2,ratings_3,ratings_4,ratings_5,ratings_df_list
new_model=tf.models.load_model("modelrecsys.h5")
co=joblib.load("contentsfile.joblib")
titlefile=joblib.load('title.joblib')
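# Pre-trained artifacts loaded above (contents inferred from how they are used below):
#   modelrecsys.h5      - Keras hybrid recommender taking [user_id, book_id, content features]
#   contentsfile.joblib - per-book content-feature table whose first column is book_id
#   title.joblib        - lookup table mapping book_id to title and cover-image URL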
#### To download dataframe recommendations
def to_excel(df):
output = BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
df.to_excel(writer, sheet_name='Sheet1')
writer.save()
processed_data = output.getvalue()
return processed_data
def get_table_download_link(df):
#Generates a link allowing the data in a given pandas dataframe to be downloaded
#in: dataframe
#out: href string
val = to_excel(df)
b64 = base64.b64encode(val) # val looks like b'...'
return f'<a href="data:application/octet-stream;base64,{b64.decode()}" download="extract.xlsx">Download Excel file</a>' # decode b'abc' => abc
##df = ... # your dataframe
##st.markdown(get_table_download_link(df), unsafe_allow_html=True)
def create_usertable():
c.execute('CREATE TABLE IF NOT EXISTS userstable(username TEXT, password TEXT)')
def add_userdata(username,password):
c.execute('INSERT INTO userstable(username, password) VALUES(?,?)',(username,password))
conn.commit()
def login_user(username,password):
c.execute('SELECT * FROM userstable WHERE username=? AND password=?',(username,password))
data=c.fetchall()
return data
def view_all_users():
c.execute('SELECT * FROM userstable')
data=c.fetchall()
return data
st.title("...WELCOME...")
st.title("HYBRID BOOK RECOMMENDATION SYSTEM")
menu=["Home","Login", "Sign up","Book"]
choice=st.sidebar.selectbox("Menu",menu)
if choice=="Home":
st.subheader("HOME")
elif choice=="Login":
st.subheader("Login Section")
username=st.sidebar.text_input("username")
password=st.sidebar.text_input("password",type='password')
if st.sidebar.checkbox("Login"):
# if password=="<PASSWORD>":
create_usertable()
result=login_user(username,password)
if result:
st.success("LOGGED IN SUCCESSFULLY AS {} ".format(username))
task=st.selectbox("Task",["Help","Start-Analytics","Profile"])
if task=="Help":
st.subheader("use Start-Analytics for Reccomondations")
elif task=="Start-Analytics":
st.subheader("Top N number of Book Recommondations predicted realtime")
#user_id = st.number_input('user_id', min_value=1, max_value=53424, value=1)
user_id=st.text_input("Enter user_id {1-53424} default 1")
if user_id!="":
user_id=int(user_id)
if user_id<1 or user_id>53424:
user_id=1
else:
user_id=1
# Predict a rating for every book for this user: the model takes the user ids,
# the book ids and the per-book content features as three parallel inputs
us_id_temp=[user_id for i in range(len(co['book_id']))]
reccom = new_model.predict([pd.Series(us_id_temp),co['book_id'],co.iloc[:,1:]])
recc_df=pd.DataFrame(reccom,columns=["rating"])
recc_df["book_id"]=co['book_id'].values
# Collect the books this user has already rated so they are not recommended again
df_new=ratings_df.where(ratings_df["user_id"]==user_id)
df_new.dropna(inplace=True)
list_books_seen=df_new['book_id'].tolist()
del df_new
recc_df=recc_df[~recc_df.book_id.isin(list_books_seen)]  # drop books the user has already rated
recc_df.sort_values(by="rating",ascending=False,inplace=True)
recc_df=recc_df.iloc[6:36].reset_index(drop=True)
#num= st.number_input('required_recommendation_count', min_value=2, max_value=30, value=5)
num=st.text_input("Enter required_recommendation_count (2-30) default 2")
if num!="":
num=int(num)
if num<2 or num>30:
num=2
else:
num=2
recc_df_table =recc_df.iloc[:num]
recc_df_table=pd.merge(recc_df_table,titlefile,left_on="book_id",right_on="book_id")
recc_df_table_new = recc_df_table.iloc[:,:6].reset_index(drop=True)
st.write(recc_df_table_new)
st.markdown(get_table_download_link(recc_df_table_new), unsafe_allow_html=True)
for i in range(len(recc_df_table_new.index)):
st.image( recc_df_table.iloc[i,7],
width=200, # Manually Adjust the width of the image as per requirement
caption=recc_df_table.iloc[i,4]
)
elif task=="Profile":
st.subheader("User Profiles")
user_result=view_all_users()
clean_db= | pd.DataFrame(user_result,columns=["Username","Password"]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import scrapy # needed to scrape
import xlrd # used to easily import xlsx file
import json
import re
import pandas as pd
import numpy as np
from openpyxl import load_workbook
import datetime
from datetime import timedelta
##### NOTE
# PART 1: This script writes all monthly Ercot data to its own tab without affecting the "Master Data" tab
# PART 2: This script will perform analysis on the monthly data
### PART 1 - Clean Data
##########################################################################################
##########################################################################################
##########################################################################################
file_path = r"/Users/YoungFreeesh/Visual Studio Code/_Python/Web Scraping/Ercot/MASTER-Ercot.xlsx"
df = pd.read_excel(file_path, sheet_name = 'Master Data') # read all data from "Master Data" tab in the "MASTER-Ercot" workbook
headers = list(df.columns.values) # get the headers of "Master Data"
df = pd.DataFrame(df) # ensure df is a DataFrame (read_excel already returns one)
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
### Get all Unique Months in the data frame
dateArray = np.array(df.iloc[:, 0]) # convert dates column in df to a numpy array
monthsArray = [] # initialize array
for x in range(dateArray.shape[0]): # change format of dates: 05/10/2018 --> 05-2018
temp = dateArray[x]
#print(str(temp[:2]) + '-' + str(temp[6:]))
#print(str(temp))
#monthsArrayUnique = np.unique(monthsArray) # Unique Months, hence get all unique strings of the form: 'mm-yyyy'
#print("Unique Months: ", monthsArrayUnique)
# if (str(temp[:2]) + '-' + str(temp[6:]) == "08-2018"):
# monthsArray.append("08-2018")
monthsArray.append(str(temp[:2]) + '-' + str(temp[6:]))
#print(monthsArray)
monthsArrayUnique = np.unique(monthsArray) # Unique Months, hence get all unique strings of the form: 'mm-yyyy'
print("Unique Months: ", monthsArrayUnique)
##### Use PANDAS to write to an Excel file and create tabs
##########################################################################################
#file_path_HardDrive = r"/Users/YoungFreeesh/Visual Studio Code/_Python/Web Scraping/Ercot/Test-Ercot-Scrape.xlsx"
#file_path_Dropbox = r"/Users/YoungFreeesh/Dropbox/Ercot Data/Test-Ercot-Scrape.xlsx"
file_path_HardDrive = r"/Users/YoungFreeesh/Visual Studio Code/_Python/Web Scraping/Ercot/MASTER-Ercot.xlsx"
file_path_Dropbox = r"/Users/YoungFreeesh/Dropbox/Ercot Data/MASTER-Ercot.xlsx"
### For Ercot Summary Page - Calculations (LZ_SOUTH)
# read all data from "Master Data" tab from "MASTER-Ercot"
dfMASTER = pd.read_excel(file_path_HardDrive, sheet_name = 'Master Data')
writer_HardDrive = pd.ExcelWriter(file_path_HardDrive, engine='openpyxl')
writer_Dropbox = pd.ExcelWriter(file_path_Dropbox , engine='openpyxl')
book_HardDrive = load_workbook(file_path_HardDrive)
book_Dropbox = load_workbook(file_path_Dropbox)
writer_HardDrive.book = book_HardDrive
writer_Dropbox.book = book_Dropbox
writer_HardDrive.sheets = dict((ws.title, ws) for ws in book_HardDrive.worksheets)
writer_Dropbox.sheets = dict((ws.title, ws) for ws in book_Dropbox.worksheets)
### Create a unique Excel Worksheet/tab for each month of data in the Master Date tab
# This tab will contain all the price data for that particular month
# This loop will create a tab for the month if it doesn't already exist
# This loop will overwrite any data already in the month's tab in columns A-P
# This loop will not affect any data, formulas, or graphs beyond column Q
# This loop will not affect any other tabs
monthsArray = np.array(monthsArray) # convert monthsArray to a numpy array
for month in monthsArrayUnique:
print("Tab Created: ", month)
indices = [i for i, x in enumerate(monthsArray) if x == month] # get indices for month
monthDF = pd.DataFrame(data = np.array(df.iloc[indices, :]), columns = headers) # create data frame
monthDF.to_excel(writer_HardDrive, startrow= 0 , index=False, sheet_name=str(month)) # write to "MASTER-Ercot.xlsx" spreadsheet
monthDF.to_excel(writer_Dropbox , startrow= 0 , index=False, sheet_name=str(month)) # write to "MASTER-Ercot.xlsx" spreadsheet
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
# End of PART 1
##########################################################################################
### PART 2 - Analyze Data
##########################################################################################
##########################################################################################
##########################################################################################
###Ercot Summary Page - Calculations (LZ_SOUTH)
### Refine the DataFrame
#Only Take LZ_SOUTH
dfMASTER_LZ_SOUTH = dfMASTER[['Oper Day', 'Interval Ending', 'LZ_SOUTH']].copy(deep=True)
dfMASTER_LZ_SOUTH['Oper Day'] = pd.to_datetime(dfMASTER_LZ_SOUTH['Oper Day'])
# -*- coding: utf-8 -*-
"""Module for analyzing sentiment regarding financial markets.
Includes one class:
1. SentimentAnalyzer
"""
import os
import pickle
import re
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import pandas as pd
from socfin.parsers import TextParser
class SentimentAnalyzer(TextParser):
"""Class for analyzing sentiment of text.
Inherits from `TextParser`.
Args:
**kwargs: Arbitrary keyword arguments.
"""
def __init__(self, classifier='classifier.pickle', **kwargs):
super().__init__(**kwargs)
with open(os.path.join('socfin', 'data', classifier), 'rb') as file:
self._sia = pickle.load(file)
def sentiment(self, text):
"""Calucates sentiment scores for text.
Args:
text (str): Text to calcuate sentiment for.
Returns:
`Pandas`_ dataframe of sentiment scores for the `text`
parameter. Columns include neg, neu, pos, and compound.
Compound is the overall sentiment score.
.. _Pandas:
https://pandas.pydata.org/
"""
text = self.replace_emojis(text)
text = self.alpha(text)
sentiment = self._sia.polarity_scores(text)
return pd.DataFrame(sentiment, index=[0])
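# Example usage (sketch; assumes socfin/data/classifier.pickle is present):
#
#   sa = SentimentAnalyzer()
#   sa.sentiment("Stocks rallied after the earnings beat")
#   # -> one-row DataFrame with neg, neu, pos and compound columns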
def ticker_sentiment(self, text, ticker):
"""Calucates sentiment scores for text regarding a stock ticker.
Args:
text (str): Text to calcuate sentiment for.
ticker (str): Ticker to calculate sentiment for.
Returns:
`Pandas`_ dataframe of sentiment scores for the `text`
parameter, specifically regarding the `ticker` parameter.
Columns include ticker, neg, neu, pos, and compound.
Compound is the overall sentiment score.
.. _Pandas:
https://pandas.pydata.org/
"""
words = self.words(text)
if ticker not in words:
sentiment = {'ticker': ticker, 'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}
return pd.DataFrame(sentiment, index=[0])
import pandas as pd
if __name__ == '__main__':
output = []
for f in snakemake.input:
output.append(pd.read_csv(f, sep="\t", index_col=0))
pd.concat(output)
import pytz
from pandas import to_datetime
from smrf.framework.model_framework import SMRF
from smrf.tests.smrf_test_case import SMRFTestCase
class TestModelFramework(SMRFTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.smrf = SMRF(cls.config_file)
def test_start_date(self):
self.assertEqual(
self.smrf.start_date,
to_datetime(self.smrf.config['time']['start_date'], utc=True)
)
def test_end_date(self):
self.assertEqual(
self.smrf.end_date,
to_datetime(self.smrf.config['time']['end_date'], utc=True)
)
def test_time_zone(self):
self.assertEqual(self.smrf.time_zone, pytz.UTC)
def test_date_time(self):
self.assertEqual(
self.smrf.date_time[0],
to_datetime('1998-01-14 15:00:00', utc=True)
)
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from mars.dataframe.datasource.dataframe import from_pandas
from mars.dataframe.datasource.series import from_pandas as series_from_pandas
from mars.dataframe.merge import concat
from mars.dataframe.utils import sort_dataframe_inplace
def test_merge(setup):
df1 = pd.DataFrame(np.arange(20).reshape((4, 5)) + 1, columns=['a', 'b', 'c', 'd', 'e'])
df2 = pd.DataFrame(np.arange(20).reshape((5, 4)) + 1, columns=['a', 'b', 'x', 'y'])
df3 = df1.copy()
df3.index = pd.RangeIndex(2, 6, name='index')
df4 = df1.copy()
df4.index = pd.MultiIndex.from_tuples([(i, i + 1) for i in range(4)], names=['i1', 'i2'])
mdf1 = from_pandas(df1, chunk_size=2)
mdf2 = from_pandas(df2, chunk_size=2)
mdf3 = from_pandas(df3, chunk_size=3)
mdf4 = from_pandas(df4, chunk_size=2)
# Note [Index of Merge]
#
# When `left_index` and `right_index` of `merge` are both false, pandas will generate a RangeIndex for
# the final result dataframe.
#
# We chunked the `left` and `right` dataframes, thus every result chunk will have its own RangeIndex.
# When they are concatenated we don't generate a new RangeIndex for the result, thus we cannot obtain the
# same index values as pandas. But we guarantee that the content of the dataframe is correct.
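#
# For illustration (hypothetical values, following the convention of the other notes below):
#
# >>> df1.merge(df2).index                        # pandas: one fresh RangeIndex over the result
# RangeIndex(start=0, stop=4, step=1)
# >>> mdf1.merge(mdf2).execute().fetch().index    # mars: per-chunk RangeIndexes concatenated,
# Int64Index([0, 1, 0, 1], dtype='int64')         # so values may repeat or differ from pandas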
# merge on index
expected0 = df1.merge(df2)
jdf0 = mdf1.merge(mdf2)
result0 = jdf0.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected0, 0), sort_dataframe_inplace(result0, 0))
# merge on left index and `right_on`
expected1 = df1.merge(df2, how='left', right_on='x', left_index=True)
jdf1 = mdf1.merge(mdf2, how='left', right_on='x', left_index=True)
result1 = jdf1.execute().fetch()
expected1.set_index('a_x', inplace=True)
result1.set_index('a_x', inplace=True)
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected1, 0), sort_dataframe_inplace(result1, 0))
# merge on `left_on` and right index
expected2 = df1.merge(df2, how='right', left_on='a', right_index=True)
jdf2 = mdf1.merge(mdf2, how='right', left_on='a', right_index=True)
result2 = jdf2.execute().fetch()
expected2.set_index('a', inplace=True)
result2.set_index('a', inplace=True)
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected2, 0), sort_dataframe_inplace(result2, 0))
# merge on `left_on` and `right_on`
expected3 = df1.merge(df2, how='left', left_on='a', right_on='x')
jdf3 = mdf1.merge(mdf2, how='left', left_on='a', right_on='x')
result3 = jdf3.execute().fetch()
expected3.set_index('a_x', inplace=True)
result3.set_index('a_x', inplace=True)
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected3, 0), sort_dataframe_inplace(result3, 0))
# merge on `on`
expected4 = df1.merge(df2, how='right', on='a')
jdf4 = mdf1.merge(mdf2, how='right', on='a')
result4 = jdf4.execute().fetch()
expected4.set_index('a', inplace=True)
result4.set_index('a', inplace=True)
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected4, 0), sort_dataframe_inplace(result4, 0))
# merge on multiple columns
expected5 = df1.merge(df2, how='inner', on=['a', 'b'])
jdf5 = mdf1.merge(mdf2, how='inner', on=['a', 'b'])
result5 = jdf5.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected5, 0), sort_dataframe_inplace(result5, 0))
# merge when some on is index
expected6 = df3.merge(df2, how='inner', left_on='index', right_on='a')
jdf6 = mdf3.merge(mdf2, how='inner', left_on='index', right_on='a')
result6 = jdf6.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected6, 0), sort_dataframe_inplace(result6, 0))
# merge when on is in MultiIndex
expected7 = df4.merge(df2, how='inner', left_on='i1', right_on='a')
jdf7 = mdf4.merge(mdf2, how='inner', left_on='i1', right_on='a')
result7 = jdf7.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected7, 0), sort_dataframe_inplace(result7, 0))
# merge when on is in MultiIndex, and on not in index
expected8 = df4.merge(df2, how='inner', on=['a', 'b'])
jdf8 = mdf4.merge(mdf2, how='inner', on=['a', 'b'])
result8 = jdf8.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected8, 0), sort_dataframe_inplace(result8, 0))
def test_join(setup):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]], index=['a1', 'a2', 'a3'])
df2 = pd.DataFrame([[1, 2, 3], [1, 5, 6], [7, 8, 9]], index=['a1', 'b2', 'b3']) + 1
df2 = pd.concat([df2, df2 + 1])
mdf1 = from_pandas(df1, chunk_size=2)
mdf2 = from_pandas(df2, chunk_size=2)
# default `how`
expected0 = df1.join(df2, lsuffix='l_', rsuffix='r_')
jdf0 = mdf1.join(mdf2, lsuffix='l_', rsuffix='r_')
result0 = jdf0.execute().fetch()
pd.testing.assert_frame_equal(expected0.sort_index(), result0.sort_index())
# how = 'left'
expected1 = df1.join(df2, how='left', lsuffix='l_', rsuffix='r_')
jdf1 = mdf1.join(mdf2, how='left', lsuffix='l_', rsuffix='r_')
result1 = jdf1.execute().fetch()
pd.testing.assert_frame_equal(expected1.sort_index(), result1.sort_index())
# how = 'right'
expected2 = df1.join(df2, how='right', lsuffix='l_', rsuffix='r_')
jdf2 = mdf1.join(mdf2, how='right', lsuffix='l_', rsuffix='r_')
result2 = jdf2.execute().fetch()
pd.testing.assert_frame_equal(expected2.sort_index(), result2.sort_index())
# how = 'inner'
expected3 = df1.join(df2, how='inner', lsuffix='l_', rsuffix='r_')
jdf3 = mdf1.join(mdf2, how='inner', lsuffix='l_', rsuffix='r_')
result3 = jdf3.execute().fetch()
pd.testing.assert_frame_equal(expected3.sort_index(), result3.sort_index())
# how = 'outer'
expected4 = df1.join(df2, how='outer', lsuffix='l_', rsuffix='r_')
jdf4 = mdf1.join(mdf2, how='outer', lsuffix='l_', rsuffix='r_')
result4 = jdf4.execute().fetch()
pd.testing.assert_frame_equal(expected4.sort_index(), result4.sort_index())
def test_join_on(setup):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]], columns=['a1', 'a2', 'a3'])
df2 = pd.DataFrame([[1, 2, 3], [1, 5, 6], [7, 8, 9]], columns=['a1', 'b2', 'b3']) + 1
df2 = pd.concat([df2, df2 + 1])
mdf1 = from_pandas(df1, chunk_size=2)
mdf2 = from_pandas(df2, chunk_size=2)
expected0 = df1.join(df2, on=None, lsuffix='_l', rsuffix='_r')
jdf0 = mdf1.join(mdf2, on=None, lsuffix='_l', rsuffix='_r')
result0 = jdf0.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected0, 0), sort_dataframe_inplace(result0, 0))
expected1 = df1.join(df2, how='left', on='a1', lsuffix='_l', rsuffix='_r')
jdf1 = mdf1.join(mdf2, how='left', on='a1', lsuffix='_l', rsuffix='_r')
result1 = jdf1.execute().fetch()
# Note [Columns of Left Join]
#
# I believe we have no chance to obtain exactly the same result as pandas here:
#
# Look at the following example:
#
# >>> df1
# a1 a2 a3
# 0 1 3 3
# >>> df2
# a1 b2 b3
# 1 2 6 7
# >>> df3
# a1 b2 b3
# 1 2 6 7
# 1 2 6 7
#
# >>> df1.merge(df2, how='left', left_on='a1', left_index=False, right_index=True)
# a1_x a2 a3 a1_y b2 b3
# 0 1 3 3 2 6 7
# >>> df1.merge(df3, how='left', left_on='a1', left_index=False, right_index=True)
# a1 a1_x a2 a3 a1_y b2 b3
# 0 1 1 3 3 2 6 7
# 0 1 1 3 3 2 6 7
#
# Note that the result of `df1.merge(df3)` has an extra column `a` compared to `df1.merge(df2)`.
# The value of column `a` is the same as `a1_x`, just because `1` occurs twice in the index of `df3`.
# I haven't investigated why pandas has such behaviour...
#
# We cannot yield the same result as pandas, because `df3` is chunked: some of the
# result chunks have 6 columns while others have 7, and when they are concatenated into one DataFrame
# some cells of column `a` will have value `NaN`, which is different from the result of pandas.
#
# But we can guarantee that other effective columns have absolutely same value with pandas.
columns_to_compare = jdf1.columns_value.to_pandas()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected1[columns_to_compare], 0, 1),
sort_dataframe_inplace(result1[columns_to_compare], 0, 1))
# Note [Index of Join on EmptyDataFrame]
#
# It is non-trivial to get the same `index` result as pandas here.
#
# Look at the following example:
#
# >>> df1
# a1 a2 a3
# 1 4 2 6
# >>> df2
# a1 b2 b3
# 1 2 6 7
# 2 8 9 10
# >>> df3
# Empty DataFrame
# Columns: [a1, a2, a3]
# Index: []
# >>> df1.join(df2, how='right', on='a2', lsuffix='_l', rsuffix='_r')
# a1_l a2 a3 a1_r b2 b3
# 1.0 4.0 2 6.0 8 9 10
# NaN NaN 1 NaN 2 6 7
# >>> df3.join(df2, how='right', on='a2', lsuffix='_l', rsuffix='_r')
# a1_l a2 a3 a1_r b2 b3
# 1 NaN 1 NaN 2 6 7
# 2 NaN 2 NaN 8 9 10
#
# When the `left` dataframe is not empty, the mismatched rows in `right` will have index value `NaN`,
# and the matched rows have index value from `right`. When the `left` dataframe is empty, the mismatched
# rows have index value from `right`.
#
# Since we chunked the `left` dataframe, it is uneasy to obtain the same index value with pandas in the
# final result dataframe, but we guaranteed that the dataframe content is correctly.
expected2 = df1.join(df2, how='right', on='a2', lsuffix='_l', rsuffix='_r')
jdf2 = mdf1.join(mdf2, how='right', on='a2', lsuffix='_l', rsuffix='_r')
result2 = jdf2.execute().fetch()
expected2.set_index('a2', inplace=True)
result2.set_index('a2', inplace=True)
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected2, 0), sort_dataframe_inplace(result2, 0))
expected3 = df1.join(df2, how='inner', on='a2', lsuffix='_l', rsuffix='_r')
jdf3 = mdf1.join(mdf2, how='inner', on='a2', lsuffix='_l', rsuffix='_r')
result3 = jdf3.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected3, 0), sort_dataframe_inplace(result3, 0))
expected4 = df1.join(df2, how='outer', on='a2', lsuffix='_l', rsuffix='_r')
jdf4 = mdf1.join(mdf2, how='outer', on='a2', lsuffix='_l', rsuffix='_r')
result4 = jdf4.execute().fetch()
expected4.set_index('a2', inplace=True)
result4.set_index('a2', inplace=True)
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected4, 0), sort_dataframe_inplace(result4, 0))
def test_merge_one_chunk(setup):
df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
'value': [1, 2, 3, 5]}, index=['a1', 'a2', 'a3', 'a4'])
df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
'value': [5, 6, 7, 8]}, index=['a1', 'a2', 'a3', 'a4'])
# all have one chunk
mdf1 = from_pandas(df1)
mdf2 = from_pandas(df2)
expected = df1.merge(df2, left_on='lkey', right_on='rkey')
jdf = mdf1.merge(mdf2, left_on='lkey', right_on='rkey')
result = jdf.execute().fetch()
pd.testing.assert_frame_equal(expected.sort_values(by=expected.columns[1]).reset_index(drop=True),
result.sort_values(by=result.columns[1]).reset_index(drop=True))
# left have one chunk
mdf1 = from_pandas(df1)
mdf2 = from_pandas(df2, chunk_size=2)
expected = df1.merge(df2, left_on='lkey', right_on='rkey')
jdf = mdf1.merge(mdf2, left_on='lkey', right_on='rkey')
result = jdf.execute().fetch()
pd.testing.assert_frame_equal(expected.sort_values(by=expected.columns[1]).reset_index(drop=True),
result.sort_values(by=result.columns[1]).reset_index(drop=True))
# right have one chunk
mdf1 = from_pandas(df1, chunk_size=3)
mdf2 = from_pandas(df2)
expected = df1.merge(df2, left_on='lkey', right_on='rkey')
jdf = mdf1.merge(mdf2, left_on='lkey', right_on='rkey')
result = jdf.execute().fetch()
pd.testing.assert_frame_equal(expected.sort_values(by=expected.columns[1]).reset_index(drop=True),
result.sort_values(by=result.columns[1]).reset_index(drop=True))
def test_merge_on_duplicate_columns(setup):
raw1 = pd.DataFrame([['foo', 1, 'bar'],
['bar', 2, 'foo'],
['baz', 3, 'foo']],
columns=['lkey', 'value', 'value'],
index=['a1', 'a2', 'a3'])
raw2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
'value': [5, 6, 7, 8]}, index=['a1', 'a2', 'a3', 'a4'])
df1 = from_pandas(raw1, chunk_size=2)
df2 = from_pandas(raw2, chunk_size=3)
r = df1.merge(df2, left_on='lkey', right_on='rkey')
result = r.execute().fetch()
expected = raw1.merge(raw2, left_on='lkey', right_on='rkey')
pd.testing.assert_frame_equal(expected, result)
def test_append_execution(setup):
df1 = pd.DataFrame(np.random.rand(10, 4), columns=list('ABCD'))
df2 = pd.DataFrame(np.random.rand(10, 4), columns=list('ABCD'))
mdf1 = from_pandas(df1, chunk_size=3)
mdf2 = from_pandas(df2, chunk_size=3)
adf = mdf1.append(mdf2)
expected = df1.append(df2)
result = adf.execute().fetch()
pd.testing.assert_frame_equal(expected, result)
adf = mdf1.append(mdf2, ignore_index=True)
expected = df1.append(df2, ignore_index=True)
result = adf.execute(extra_config={'check_index_value': False}).fetch()
pd.testing.assert_frame_equal(expected, result)
mdf1 = from_pandas(df1, chunk_size=3)
mdf2 = from_pandas(df2, chunk_size=2)
adf = mdf1.append(mdf2)
expected = df1.append(df2)
result = adf.execute().fetch()
pd.testing.assert_frame_equal(expected, result)
adf = mdf1.append(mdf2, ignore_index=True)
expected = df1.append(df2, ignore_index=True)
result = adf.execute(extra_config={'check_index_value': False}).fetch()
pd.testing.assert_frame_equal(expected, result)
df3 = pd.DataFrame(np.random.rand(8, 4), columns=list('ABCD'))
mdf3 = from_pandas(df3, chunk_size=3)
expected = df1.append([df2, df3])
adf = mdf1.append([mdf2, mdf3])
result = adf.execute().fetch()
pd.testing.assert_frame_equal(expected, result)
adf = mdf1.append(dict(A=1, B=2, C=3, D=4), ignore_index=True)
expected = df1.append(dict(A=1, B=2, C=3, D=4), ignore_index=True)
result = adf.execute(extra_config={'check_index_value': False}).fetch()
pd.testing.assert_frame_equal(expected, result)
# test for series
series1 = pd.Series(np.random.rand(10,))
series2 = pd.Series(np.random.rand(10,))
mseries1 = series_from_pandas(series1, chunk_size=3)
mseries2 = series_from_pandas(series2, chunk_size=3)
aseries = mseries1.append(mseries2)
expected = series1.append(series2)
result = aseries.execute().fetch()
pd.testing.assert_series_equal(expected, result)
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
"""
Test related to MultiIndex
"""
import re
import cupy as cp
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.core.column import as_column
from cudf.core.index import as_index
from cudf.tests.utils import assert_eq, assert_neq
def test_multiindex_levels_codes_validation():
levels = [["a", "b"], ["c", "d"]]
# Codes not a sequence of sequences
with pytest.raises(TypeError):
pd.MultiIndex(levels, [0, 1])
with pytest.raises(TypeError):
cudf.MultiIndex(levels, [0, 1])
# Codes don't match levels
with pytest.raises(ValueError):
pd.MultiIndex(levels, [[0], [1], [1]])
with pytest.raises(ValueError):
cudf.MultiIndex(levels, [[0], [1], [1]])
# Largest code greater than number of levels
with pytest.raises(ValueError):
pd.MultiIndex(levels, [[0, 1], [0, 2]])
with pytest.raises(ValueError):
cudf.MultiIndex(levels, [[0, 1], [0, 2]])
# Unequal code lengths
with pytest.raises(ValueError):
pd.MultiIndex(levels, [[0, 1], [0]])
with pytest.raises(ValueError):
cudf.MultiIndex(levels, [[0, 1], [0]])
# Didn't pass levels and codes
with pytest.raises(TypeError):
pd.MultiIndex()
with pytest.raises(TypeError):
cudf.MultiIndex()
# Didn't pass non zero levels and codes
with pytest.raises(ValueError):
pd.MultiIndex([], [])
with pytest.raises(ValueError):
cudf.MultiIndex([], [])
def test_multiindex_construction():
levels = [["a", "b"], ["c", "d"]]
codes = [[0, 1], [1, 0]]
pmi = pd.MultiIndex(levels, codes)
mi = cudf.MultiIndex(levels, codes)
assert_eq(pmi, mi)
pmi = pd.MultiIndex(levels, codes)
mi = cudf.MultiIndex(levels=levels, codes=codes)
assert_eq(pmi, mi)
def test_multiindex_types():
codes = [[0, 1], [1, 0]]
levels = [[0, 1], [2, 3]]
pmi = pd.MultiIndex(levels, codes)
mi = cudf.MultiIndex(levels, codes)
assert_eq(pmi, mi)
levels = [[1.2, 2.1], [1.3, 3.1]]
pmi = pd.MultiIndex(levels, codes)
mi = cudf.MultiIndex(levels, codes)
assert_eq(pmi, mi)
levels = [["a", "b"], ["c", "d"]]
pmi = pd.MultiIndex(levels, codes)
mi = cudf.MultiIndex(levels, codes)
assert_eq(pmi, mi)
def test_multiindex_df_assignment():
pdf = pd.DataFrame({"x": [1, 2, 3]})
gdf = cudf.from_pandas(pdf)
pdf.index = pd.MultiIndex([["a", "b"], ["c", "d"]], [[0, 1, 0], [1, 0, 1]])
gdf.index = cudf.MultiIndex(
levels=[["a", "b"], ["c", "d"]], codes=[[0, 1, 0], [1, 0, 1]]
)
assert_eq(pdf, gdf)
def test_multiindex_series_assignment():
ps = pd.Series([1, 2, 3])
gs = cudf.from_pandas(ps)
ps.index = pd.MultiIndex([["a", "b"], ["c", "d"]], [[0, 1, 0], [1, 0, 1]])
gs.index = cudf.MultiIndex(
levels=[["a", "b"], ["c", "d"]], codes=[[0, 1, 0], [1, 0, 1]]
)
assert_eq(ps, gs)
def test_string_index():
from cudf.core.index import StringIndex
pdf = pd.DataFrame(np.random.rand(5, 5))
gdf = cudf.from_pandas(pdf)
stringIndex = ["a", "b", "c", "d", "e"]
pdf.index = stringIndex
gdf.index = stringIndex
assert_eq(pdf, gdf)
stringIndex = np.array(["a", "b", "c", "d", "e"])
pdf.index = stringIndex
gdf.index = stringIndex
assert_eq(pdf, gdf)
stringIndex = StringIndex(["a", "b", "c", "d", "e"], name="name")
pdf.index = stringIndex.to_pandas()
gdf.index = stringIndex
assert_eq(pdf, gdf)
stringIndex = as_index(as_column(["a", "b", "c", "d", "e"]), name="name")
pdf.index = stringIndex.to_pandas()
gdf.index = stringIndex
assert_eq(pdf, gdf)
def test_multiindex_row_shape():
pdf = pd.DataFrame(np.random.rand(0, 5))
gdf = cudf.from_pandas(pdf)
pdfIndex = pd.MultiIndex([["a", "b", "c"]], [[0]])
pdfIndex.names = ["alpha"]
gdfIndex = cudf.from_pandas(pdfIndex)
assert_eq(pdfIndex, gdfIndex)
with pytest.raises(ValueError):
pdf.index = pdfIndex
with pytest.raises(ValueError):
gdf.index = gdfIndex
@pytest.fixture
def pdf():
return pd.DataFrame(np.random.rand(7, 5))
@pytest.fixture
def gdf(pdf):
return cudf.from_pandas(pdf)
@pytest.fixture
def pdfIndex():
pdfIndex = pd.MultiIndex(
[
["a", "b", "c"],
["house", "store", "forest"],
["clouds", "clear", "storm"],
["fire", "smoke", "clear"],
[
np.datetime64("2001-01-01", "ns"),
np.datetime64("2002-01-01", "ns"),
np.datetime64("2003-01-01", "ns"),
],
],
[
[0, 0, 0, 0, 1, 1, 2],
[1, 1, 1, 1, 0, 0, 2],
[0, 0, 2, 2, 2, 0, 1],
[0, 0, 0, 1, 2, 0, 1],
[1, 0, 1, 2, 0, 0, 1],
],
)
pdfIndex.names = ["alpha", "location", "weather", "sign", "timestamp"]
return pdfIndex
@pytest.fixture
def pdfIndexNulls():
pdfIndex = pd.MultiIndex(
[
["a", "b", "c"],
["house", "store", "forest"],
["clouds", "clear", "storm"],
],
[
[0, 0, 0, -1, 1, 1, 2],
[1, -1, 1, 1, 0, 0, -1],
[-1, 0, 2, 2, 2, 0, 1],
],
)
pdfIndex.names = ["alpha", "location", "weather"]
return pdfIndex
def test_from_pandas(pdf, pdfIndex):
pdf.index = pdfIndex
gdf = cudf.from_pandas(pdf)
assert_eq(pdf, gdf)
def test_multiindex_transpose(pdf, pdfIndex):
pdf.index = pdfIndex
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.transpose(), gdf.transpose())
def test_from_pandas_series():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
).set_index(["a", "b"])
result = cudf.from_pandas(pdf)
assert_eq(pdf, result)
test_pdf = pdf["c"]
result = cudf.from_pandas(test_pdf)
assert_eq(test_pdf, result)
def test_series_multiindex(pdfIndex):
ps = pd.Series(np.random.rand(7))
gs = cudf.from_pandas(ps)
ps.index = pdfIndex
gs.index = cudf.from_pandas(pdfIndex)
assert_eq(ps, gs)
def test_multiindex_take(pdf, gdf, pdfIndex):
gdfIndex = cudf.from_pandas(pdfIndex)
pdf.index = pdfIndex
gdf.index = gdfIndex
assert_eq(pdf.index.take([0]), gdf.index.take([0]))
assert_eq(pdf.index.take(np.array([0])), gdf.index.take(np.array([0])))
from cudf import Series
assert_eq(pdf.index.take(pd.Series([0])), gdf.index.take(Series([0])))
assert_eq(pdf.index.take([0, 1]), gdf.index.take([0, 1]))
assert_eq(
pdf.index.take(np.array([0, 1])), gdf.index.take(np.array([0, 1]))
)
assert_eq(
pdf.index.take(pd.Series([0, 1])), gdf.index.take(Series([0, 1]))
)
def test_multiindex_getitem(pdf, gdf, pdfIndex):
gdfIndex = cudf.from_pandas(pdfIndex)
pdf.index = pdfIndex
gdf.index = gdfIndex
assert_eq(pdf.index[0], gdf.index[0])
@pytest.mark.parametrize(
"key_tuple",
[
# return 2 rows, 0 remaining keys = dataframe with entire index
("a", "store", "clouds", "fire"),
(("a", "store", "clouds", "fire"), slice(None)),
# return 2 rows, 1 remaining key = dataframe with n-k index columns
("a", "store", "storm"),
(("a", "store", "storm"), slice(None)),
# return 2 rows, 2 remaining keys = dataframe with n-k index columns
("a", "store"),
(("a", "store"), slice(None)),
# return 2 rows, n-1 remaining keys = dataframe with n-k index columns
("a",),
(("a",), slice(None)),
# return 1 row, 0 remaining keys = dataframe with entire index
("a", "store", "storm", "smoke"),
(("a", "store", "storm", "smoke"), slice(None)),
# return 1 row and 1 remaining key = series
("c", "forest", "clear"),
(("c", "forest", "clear"), slice(None)),
],
)
def test_multiindex_loc(pdf, gdf, pdfIndex, key_tuple):
gdfIndex = cudf.from_pandas(pdfIndex)
assert_eq(pdfIndex, gdfIndex)
pdf.index = pdfIndex
gdf.index = gdfIndex
assert_eq(pdf.loc[key_tuple], gdf.loc[key_tuple])
def test_multiindex_loc_slice(pdf, gdf, pdfIndex):
gdf = cudf.from_pandas(pdf)
gdfIndex = cudf.from_pandas(pdfIndex)
pdf.index = pdfIndex
gdf.index = gdfIndex
assert_eq(
pdf.loc[("a", "store"):("b", "house")],
gdf.loc[("a", "store"):("b", "house")],
)
def test_multiindex_loc_then_column(pdf, gdf, pdfIndex):
gdfIndex = cudf.from_pandas(pdfIndex)
assert_eq(pdfIndex, gdfIndex)
pdf.index = pdfIndex
gdf.index = gdfIndex
assert_eq(
pdf.loc[("a", "store", "clouds", "fire"), :][0],
gdf.loc[("a", "store", "clouds", "fire"), :][0],
)
def test_multiindex_loc_rows_0(pdf, gdf, pdfIndex):
gdfIndex = cudf.from_pandas(pdfIndex)
pdf.index = pdfIndex
gdf.index = gdfIndex
with pytest.raises(KeyError):
print(pdf.loc[("d",), :].to_pandas())
with pytest.raises(KeyError):
print(gdf.loc[("d",), :].to_pandas())
assert_eq(pdf, gdf)
def test_multiindex_loc_rows_1_2_key(pdf, gdf, pdfIndex):
gdfIndex = cudf.from_pandas(pdfIndex)
pdf.index = pdfIndex
gdf.index = gdfIndex
print(pdf.loc[("c", "forest"), :])
print(gdf.loc[("c", "forest"), :].to_pandas())
assert_eq(pdf.loc[("c", "forest"), :], gdf.loc[("c", "forest"), :])
def test_multiindex_loc_rows_1_1_key(pdf, gdf, pdfIndex):
gdfIndex = cudf.from_pandas(pdfIndex)
pdf.index = pdfIndex
gdf.index = gdfIndex
print(pdf.loc[("c",), :])
print(gdf.loc[("c",), :].to_pandas())
assert_eq(pdf.loc[("c",), :], gdf.loc[("c",), :])
def test_multiindex_column_shape():
pdf = pd.DataFrame(np.random.rand(5, 0))
gdf = cudf.from_pandas(pdf)
pdfIndex = pd.MultiIndex([["a", "b", "c"]], [[0]])
pdfIndex.names = ["alpha"]
gdfIndex = cudf.from_pandas(pdfIndex)
assert_eq(pdfIndex, gdfIndex)
with pytest.raises(ValueError):
pdf.columns = pdfIndex
with pytest.raises(ValueError):
gdf.columns = gdfIndex
@pytest.mark.parametrize(
"query",
[
("a", "store", "clouds", "fire"),
("a", "store", "storm", "smoke"),
("a", "store"),
("b", "house"),
("a", "store", "storm"),
("a",),
("c", "forest", "clear"),
],
)
def test_multiindex_columns(pdf, gdf, pdfIndex, query):
pdf = pdf.T
gdf = cudf.from_pandas(pdf)
gdfIndex = cudf.from_pandas(pdfIndex)
assert_eq(pdfIndex, gdfIndex)
pdf.columns = pdfIndex
gdf.columns = gdfIndex
assert_eq(pdf[query], gdf[query])
def test_multiindex_from_tuples():
arrays = [["a", "a", "b", "b"], ["house", "store", "house", "store"]]
tuples = list(zip(*arrays))
pmi = pd.MultiIndex.from_tuples(tuples)
import numpy as np
import pandas as pd
import pytest
from hypothesis import given, settings
from pandas.testing import assert_frame_equal
from janitor.testing_utils.strategies import (
conditional_df,
conditional_right,
conditional_series,
)
@pytest.mark.xfail(reason="empty object will pass thru")
@given(s=conditional_series())
def test_df_empty(s):
"""Raise ValueError if `df` is empty."""
df = pd.DataFrame([], dtype="int", columns=["A"])
with pytest.raises(ValueError):
df.conditional_join(s, ("A", "non", "=="))
@pytest.mark.xfail(reason="empty object will pass thru")
@given(df=conditional_df())
def test_right_empty(df):
"""Raise ValueError if `right` is empty."""
s = pd.Series([], dtype="int", name="A")
with pytest.raises(ValueError):
df.conditional_join(s, ("A", "non", "=="))
@given(df=conditional_df())
def test_right_df(df):
"""Raise TypeError if `right` is not a Series/DataFrame."""
with pytest.raises(TypeError):
df.conditional_join({"non": [2, 3, 4]}, ("A", "non", "=="))
@given(df=conditional_df(), s=conditional_series())
def test_right_series(df, s):
"""Raise ValueError if `right` is not a named Series."""
with pytest.raises(ValueError):
df.conditional_join(s, ("A", "non", "=="))
@given(df=conditional_df())
def test_df_MultiIndex(df):
"""Raise ValueError if `df` columns is a MultiIndex."""
with pytest.raises(ValueError):
df.columns = [list("ABCDE"), list("FGHIJ")]
df.conditional_join(
pd.Series([2, 3, 4], name="A"), (("A", "F"), "non", "==")
)
@given(df=conditional_df())
def test_right_MultiIndex(df):
"""Raise ValueError if `right` columns is a MultiIndex."""
with pytest.raises(ValueError):
right = df.copy()
right.columns = [list("ABCDE"), list("FGHIJ")]
df.conditional_join(right, (("A", "F"), "non", ">="))
@given(df=conditional_df(), s=conditional_series())
def test_check_conditions_exist(df, s):
"""Raise ValueError if no condition is provided."""
with pytest.raises(ValueError):
s.name = "B"
df.conditional_join(s)
@given(df=conditional_df(), s=conditional_series())
def test_check_condition_type(df, s):
"""Raise TypeError if any condition in conditions is not a tuple."""
with pytest.raises(TypeError):
s.name = "B"
df.conditional_join(s, ("A", "B", ""), ["A", "B"])
@given(df=conditional_df(), s=conditional_series())
def test_check_condition_length(df, s):
"""Raise ValueError if any condition is not length 3."""
with pytest.raises(ValueError):
s.name = "B"
df.conditional_join(s, ("A", "B", "C", "<"))
df.conditional_join(s, ("A", "B", ""), ("A", "B"))
@given(df=conditional_df(), s=conditional_series())
def test_check_left_on_type(df, s):
"""Raise TypeError if left_on is not a string."""
with pytest.raises(TypeError):
s.name = "B"
df.conditional_join(s, (1, "B", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_check_right_on_type(df, s):
"""Raise TypeError if right_on is not a string."""
with pytest.raises(TypeError):
s.name = "B"
df.conditional_join(s, ("B", 1, "<"))
@given(df=conditional_df(), s=conditional_series())
def test_check_op_type(df, s):
"""Raise TypeError if the operator is not a string."""
with pytest.raises(TypeError):
s.name = "B"
df.conditional_join(s, ("B", "B", 1))
@given(df=conditional_df(), s=conditional_series())
def test_check_column_exists_df(df, s):
"""
Raise ValueError if `left_on`
can not be found in `df`.
"""
with pytest.raises(ValueError):
s.name = "B"
df.conditional_join(s, ("C", "B", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_check_column_exists_right(df, s):
"""
Raise ValueError if `right_on`
can not be found in `right`.
"""
with pytest.raises(ValueError):
s.name = "B"
df.conditional_join(s, ("B", "A", ">="))
@given(df=conditional_df(), s=conditional_series())
def test_check_op_correct(df, s):
"""
Raise ValueError if `op` is not any of
`!=`, `<`, `>`, `>=`, `<=`.
"""
with pytest.raises(ValueError):
s.name = "B"
df.conditional_join(s, ("B", "B", "=!"))
@given(df=conditional_df(), s=conditional_series())
def test_check_how_type(df, s):
"""
Raise TypeError if `how` is not a string.
"""
with pytest.raises(TypeError):
s.name = "B"
df.conditional_join(s, ("B", "B", "<"), how=1)
@given(df=conditional_df(), s=conditional_series())
def test_check_how_value(df, s):
"""
Raise ValueError if `how` is not one of
`inner`, `left`, or `right`.
"""
with pytest.raises(ValueError):
s.name = "B"
df.conditional_join(s, ("B", "B", "<"), how="INNER")
@given(df=conditional_df(), right=conditional_right())
def test_dtype_strings_non_equi(df, right):
"""
Raise ValueError if the dtypes are both strings
on a non-equi operator.
"""
with pytest.raises(ValueError):
df.conditional_join(right, ("C", "Strings", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_dtype_not_permitted(df, s):
"""
Raise ValueError if dtype of column in `df`
is not an acceptable type.
"""
df["F"] = pd.Timedelta("1 days")
with pytest.raises(ValueError):
s.name = "A"
df.conditional_join(s, ("F", "A", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_dtype_str(df, s):
"""
Raise ValueError if dtype of column in `df`
does not match the dtype of column from `right`.
"""
with pytest.raises(ValueError):
s.name = "A"
df.conditional_join(s, ("C", "A", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_dtype_category_non_equi(df, s):
"""
Raise ValueError if dtype is category,
and op is non-equi.
"""
with pytest.raises(ValueError):
s.name = "A"
s = s.astype("category")
df["C"] = df["C"].astype("category")
df.conditional_join(s, ("C", "A", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_check_sort_by_appearance_type(df, s):
"""
Raise TypeError if `sort_by_appearance` is not a boolean.
"""
with pytest.raises(TypeError):
s.name = "B"
df.conditional_join(s, ("B", "B", "<"), sort_by_appearance="True")
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_less_than_floats(df, right):
"""Test output for a single condition. "<"."""
left_on, right_on = ["B", "Numeric"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} < {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "<"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_less_than_ints(df, right):
"""Test output for a single condition. "<"."""
left_on, right_on = ["A", "Integers"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1, C="2"), on="t")
.query(f"{left_on} < {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "<"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_less_than_ints_extension_array(df, right):
"""Test output for a single condition. "<"."""
df = df.assign(A=df["A"].astype("Int64"))
right = right.assign(Integers=right["Integers"].astype(pd.Int64Dtype()))
left_on, right_on = ["A", "Integers"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} < {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "<"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_less_than_equal(df, right):
"""Test output for a single condition. "<=". DateTimes"""
left_on, right_on = ["E", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} <= {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "<="), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_less_than_date(df, right):
"""Test output for a single condition. "<". Dates"""
left_on, right_on = ["E", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} < {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "<"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_greater_than_datetime(df, right):
"""Test output for a single condition. ">". Datetimes"""
left_on, right_on = ["E", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} > {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, ">"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_greater_than_ints(df, right):
"""Test output for a single condition. ">="."""
left_on, right_on = ["A", "Integers"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} >= {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, ">="), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_greater_than_floats_floats(df, right):
"""Test output for a single condition. ">"."""
left_on, right_on = ["B", "Numeric"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} > {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, ">"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_greater_than_ints_extension_array(df, right):
"""Test output for a single condition. ">="."""
left_on, right_on = ["A", "Integers"]
df = df.assign(A=df["A"].astype("Int64"))
right = right.assign(Integers=right["Integers"].astype(pd.Int64Dtype()))
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} > {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, ">"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_not_equal_numeric(df, right):
"""Test output for a single condition. "!="."""
left_on, right_on = ["A", "Integers"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.dropna(subset=["A", "Integers"])
.query(f"{left_on} != {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "!="), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_not_equal_ints_only(df, right):
"""Test output for a single condition. "!="."""
left_on, right_on = ["A", "Integers"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.dropna(subset=["A", "Integers"])
.query(f"{left_on} != {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "!="), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_not_equal_floats_only(df, right):
"""Test output for a single condition. "!="."""
left_on, right_on = ["B", "Numeric"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.dropna(subset=["B", "Numeric"])
.query(f"{left_on} != {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "!="), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_not_equal_datetime(df, right):
"""Test output for a single condition. "!="."""
left_on, right_on = ["E", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.dropna(subset=["E", "Dates"])
.query(f"{left_on} != {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "!="), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_equality_string(df, right):
"""Test output for a single condition. "=="."""
left_on, right_on = ["C", "Strings"]
expected = df.dropna(subset=[left_on]).merge(
right.dropna(subset=[right_on]), left_on=left_on, right_on=right_on
)
expected = expected.reset_index(drop=True)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "=="), how="inner", sort_by_appearance=False
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@pytest.mark.xfail(
reason="""sometimes, categories are coerced to objects;
might be a pandas version issue.
"""
)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_equality_category(df, right):
"""Test output for a single condition. "=="."""
left_on, right_on = ["C", "Strings"]
df = df.assign(C=df["C"].astype("category"))
right = right.assign(Strings=right["Strings"].astype("category"))
expected = df.dropna(subset=[left_on]).merge(
right.dropna(subset=[right_on]), left_on=left_on, right_on=right_on
)
expected = expected.reset_index(drop=True)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "=="), how="inner", sort_by_appearance=False
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_equality_numeric(df, right):
"""Test output for a single condition. "=="."""
left_on, right_on = ["A", "Integers"]
df = df.assign(A=df["A"].astype("Int64"))
right = right.assign(Integers=right["Integers"].astype(pd.Int64Dtype()))
df.loc[0, "A"] = pd.NA
right.loc[0, "Integers"] = pd.NA
expected = df.dropna(subset=[left_on]).merge(
right.dropna(subset=[right_on]), left_on=left_on, right_on=right_on
)
expected = expected.reset_index(drop=True)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "=="), how="inner", sort_by_appearance=False
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_equality_datetime(df, right):
"""Test output for a single condition. "=="."""
left_on, right_on = ["E", "Dates"]
expected = df.dropna(subset=[left_on]).merge(
right.dropna(subset=[right_on]), left_on=left_on, right_on=right_on
)
expected = expected.reset_index(drop=True)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "=="), how="inner", sort_by_appearance=False
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_how_left(df, right):
"""Test output when `how==left`. "<="."""
left_on, right_on = ["A", "Integers"]
expected = (
df.assign(t=1, index=np.arange(len(df)))
.merge(right.assign(t=1), on="t")
.query(f"{left_on} <= {right_on}")
)
expected = expected.set_index("index")
expected.index.name = None
expected = df.join(
expected.filter(right.columns), how="left", sort=False
).reset_index(drop=True)
actual = df.conditional_join(
right, (left_on, right_on, "<="), how="left", sort_by_appearance=True
)
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_how_right(df, right):
"""Test output when `how==right`. ">"."""
left_on, right_on = ["E", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1, index=np.arange(len(right))), on="t")
.query(f"{left_on} > {right_on}")
)
expected = expected.set_index("index")
expected.index.name = None
expected = (
expected.filter(df.columns)
.join(right, how="right", sort=False)
.reset_index(drop=True)
)
actual = df.conditional_join(
right, (left_on, right_on, ">"), how="right", sort_by_appearance=True
)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_gt_and_lt_dates(df, right):
"""Test output for interval conditions."""
middle, left_on, right_on = ("E", "Dates", "Dates_Right")
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} < {middle} < {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, middle, right_on])
actual = df.conditional_join(
right,
(middle, left_on, ">"),
(middle, right_on, "<"),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter([left_on, middle, right_on])
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_ge_and_le_dates(df, right):
"""Test output for interval conditions."""
middle, left_on, right_on = ("E", "Dates", "Dates_Right")
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} <= {middle} <= {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, middle, right_on])
actual = df.conditional_join(
right,
(middle, left_on, ">="),
(middle, right_on, "<="),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter([left_on, middle, right_on])
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_le_and_ge_dates(df, right):
"""Test output for interval conditions."""
middle, left_on, right_on = ("E", "Dates", "Dates_Right")
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} <= {middle} <= {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, middle, right_on])
actual = df.conditional_join(
right,
(middle, right_on, "<="),
(middle, left_on, ">="),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter([left_on, middle, right_on])
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_ge_and_le_numbers(df, right):
"""Test output for interval conditions."""
middle, left_on, right_on = ("B", "Numeric", "Floats")
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} <= {middle} <= {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, middle, right_on])
actual = df.conditional_join(
right,
(middle, left_on, ">="),
(middle, right_on, "<="),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter([left_on, middle, right_on])
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_le_and_ge_numbers(df, right):
"""Test output for interval conditions."""
middle, left_on, right_on = ("B", "Numeric", "Floats")
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} <= {middle} <= {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, middle, right_on])
actual = df.conditional_join(
right,
(middle, right_on, "<="),
(middle, left_on, ">="),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter([left_on, middle, right_on])
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_gt_and_lt_numbers(df, right):
"""Test output for interval conditions."""
middle, left_on, right_on = ("B", "Numeric", "Floats")
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} < {middle} < {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, middle, right_on])
actual = df.conditional_join(
right,
(middle, left_on, ">"),
(middle, right_on, "<"),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter([left_on, middle, right_on])
assert_frame_equal(expected, actual)
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import TYPE_CHECKING, Callable, Dict, Iterable, Iterator
import numpy as np
import pandas as pd
from autogluon import TabularPrediction as task
from pandas.tseries.holiday import USFederalHolidayCalendar as calendar
from gluonts.dataset.common import DataEntry, Dataset
from gluonts.dataset.field_names import FieldName
from gluonts.dataset.util import to_pandas
from gluonts.model.estimator import Estimator
from gluonts.model.forecast import SampleForecast
from gluonts.model.predictor import Localizer, Predictor
if TYPE_CHECKING: # avoid circular import
from gluonts.model.estimator import Estimator
OutputTransform = Callable[[DataEntry, np.ndarray], np.ndarray]
def get_prediction_dataframe(series):
hour_of_day = series.index.hour
month_of_year = series.index.month
day_of_week = series.index.dayofweek
year_idx = series.index.year
target = series.values
cal = calendar()
holidays = cal.holidays(start=series.index.min(), end=series.index.max())
df = pd.DataFrame(
zip(
year_idx,
month_of_year,
day_of_week,
hour_of_day,
series.index.isin(holidays),
target,
),
columns=[
"year_idx",
"month_of_year",
"day_of_week",
"hour_of_day",
"holiday",
"target",
],
)
convert_type = {x: "category" for x in df.columns.values[:4]}
df = df.astype(convert_type)
return df
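# Example (sketch, hypothetical data): build calendar/holiday features for an hourly series
#
#   idx = pd.date_range("2020-01-01", periods=48, freq="H")
#   features = get_prediction_dataframe(pd.Series(np.arange(48.0), index=idx))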
class TabularPredictor(Predictor):
def __init__(
self,
ag_model,
freq: str,
prediction_length: int,
) -> None:
self.ag_model = ag_model # task?
self.freq = freq
self.prediction_length = prediction_length
def predict(self, dataset: Iterable[Dict]) -> Iterator[SampleForecast]:
for entry in dataset:
ts = to_pandas(entry)
start = ts.index[-1] + pd.tseries.frequencies.to_offset(self.freq)
import numpy as np
np.warnings.filterwarnings('ignore') #to not display numpy warnings... be careful
import pandas as pd
from mpi4py import MPI
import matplotlib.pyplot as plt
import json  # used below to load indicator and action definitions
import pickle  # used below to load policy-tree snapshots
from subprocess import call
from orca import *
from orca.data import *
from datetime import datetime
import warnings
from ptreeopt.tree import PTree
warnings.filterwarnings('ignore')
# this whole script will run on all processors requested by the job script
with open('orca/data/scenario_names_all.txt') as f:
scenarios = f.read().splitlines()
with open('orca/data/demand_scenario_names_all.txt') as f:
demand_scenarios = f.read().splitlines()
calc_indices = False
climate_forecasts = False
simulation = True
tree_input_files = False
indicator_data_file = False
window_type = 'rolling'
window_length = 40
index_exceedence_sac = 8
shift = 0
SHA_shift = shift
ORO_shift = shift
FOL_shift = shift
SHA_baseline = pd.read_csv('orca/data/baseline_storage/SHA_storage.csv',parse_dates = True, index_col = 0)
SHA_baseline = SHA_baseline[(SHA_baseline.index >= '2006-09-30') & (SHA_baseline.index <= '2099-10-01')]
ORO_baseline = pd.read_csv('orca/data/baseline_storage/ORO_storage.csv',parse_dates = True, index_col = 0)
ORO_baseline = ORO_baseline[(ORO_baseline.index >= '2006-09-30') & (ORO_baseline.index <= '2099-10-01')]
FOL_baseline = pd.read_csv('orca/data/baseline_storage/FOL_storage.csv',parse_dates = True, index_col = 0)
FOL_baseline = FOL_baseline[(FOL_baseline.index >= '2006-09-30') & (FOL_baseline.index <= '2099-10-01')]
features = json.load(open('orca/data/json_files/indicators_whole_bounds.json'))
feature_names = []
feature_bounds = []
indicator_codes = []
min_depth = 4
for k,v in features.items():
indicator_codes.append(k)
feature_names.append(v['name'])
feature_bounds.append(v['bounds'])
action_dict = json.load(open('orca/data/json_files/action_list.json'))
actions = action_dict['actions']
snapshots = pickle.load(open('snapshots/training_scenarios_seed_2.pkl', 'rb'))
P = snapshots['best_P'][-1][0]
demand_indicators = {}
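# For each demand scenario, pre-compute annual demand indicators (rolling mean/std/max of the
# demand multiplier over water years starting in October, optionally as a percent change),
# keyed by the indicator codes loaded from indicators_whole_bounds.json.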
for D in demand_scenarios:
dfdemand = pd.read_csv('orca/data/demand_files/%s.csv'%D, index_col = 0, parse_dates = True)
dfdemand['demand_multiplier'] = dfdemand['combined_demand']
dfd_ind = pd.DataFrame(index = dfdemand.index)
for i in features: #indicators
ind = features[i]
if ind['type'] == 'demand':
if ind['delta'] == 'no':
if ind['stat'] == 'mu':
dfd_ind[i] = dfdemand.demand_multiplier.resample('AS-OCT').first().rolling(ind['window']).mean()*100
elif ind['stat'] == 'sig':
dfd_ind[i] = dfdemand.demand_multiplier.resample('AS-OCT').first().rolling(ind['window']).std()*100
elif ind['stat'] == 'max':
dfd_ind[i] = dfdemand.demand_multiplier.resample('AS-OCT').first().rolling(ind['window']).max()*100
else:
if ind['stat'] == 'mu':
dfd_ind[i] = dfdemand.demand_multiplier.resample('AS-OCT').first().rolling(ind['window']).mean().pct_change(periods=ind['delta'])*100
elif ind['stat'] == 'sig':
dfd_ind[i] = dfdemand.demand_multiplier.resample('AS-OCT').first().rolling(ind['window']).std().pct_change(periods=ind['delta'])*100
elif ind['stat'] == 'max':
dfd_ind[i] = dfdemand.demand_multiplier.resample('AS-OCT').first().rolling(ind['window']).max().pct_change(periods=ind['delta'])*100
elif ind['type'] == "discount":
discount_indicator = i
demand_indicators[D] = dfd_ind
indicator_columns = []
comm = MPI.COMM_WORLD # communication object
rank = comm.rank # what number processor am I?
sc = scenarios[rank]
call(['mkdir', 'orca/data/scenario_runs/%s'%sc])
if calc_indices:
gains_loop_df = pd.read_csv('orca/data/historical_runs_data/gains_loops.csv', index_col = 0, parse_dates = True)
OMR_loop_df = pd.read_csv('orca/data/historical_runs_data/OMR_loops.csv', index_col = 0, parse_dates = True)
input_df = pd.read_csv('orca/data/input_climate_files/%s_input_data.csv'%sc, index_col = 0, parse_dates = True)
proj_ind_df, ind_df = process_projection(input_df,gains_loop_df,OMR_loop_df,'orca/data/json_files/gains_regression.json','orca/data/json_files/inf_regression.json',window = window_type)
proj_ind_df.to_csv('orca/data/scenario_runs/%s/orca-data-processed-%s.csv'%(sc,sc))
ind_df.to_csv('orca/data/scenario_runs/%s/hydrologic-indicators-%s.csv'%(sc,sc))
# proj_ind_df = pd.read_csv('orca/data/scenario_runs/%s/orca-data-processed-%s.csv'%(sc,sc),index_col = 0, parse_dates = True)
 WYI_stats_file = pd.read_csv('orca/data/forecast_regressions/WYI_forcasting_regression_stats.csv', index_col = 0, parse_dates = True)
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 26 10:52:43 2020
@author: Celina
"""
import pandas as pd
import outdoor.excel_wrapper.Wrapping_Functions as WF
def wrapp_GeneralData(obj, df1):
"""
Description
-----------
Get general Process Data: Lifetime and Group
Context
----------
Function is called in Wrapp_ProcessUnits
Parameters
----------
df1 : Dataframe which holds information of LT and Group
"""
Name = df1.iloc[0,0]
LifeTime = df1.iloc[4,0]
ProcessGroup = df1.iloc[3,0]
if not pd.isnull(df1.iloc[12,0]):
emissions = df1.iloc[12,0]
else:
emissions = 0
if not pd.isnull(df1.iloc[13,0]):
maintenance_factor = df1.iloc[13,0]
else:
maintenance_factor = 0.044875
cost_percentage = None
time_span = None
time_mode = 'No Mode'
if not pd.isnull(df1.iloc[14,0]):
cost_percentage = df1.iloc[14,0]
time_span = df1.iloc[15,0]
if df1.iloc[16,0] == 'Yearly':
time_mode = 'Yearly'
else:
time_mode = 'Hourly'
if not pd.isnull(df1.iloc[17,0]):
full_load_hours = df1.iloc[17,0]
else:
full_load_hours = None
obj.set_generalData(ProcessGroup,
LifeTime,
emissions,
full_load_hours,
maintenance_factor,
cost_percentage,
time_span,
time_mode)
def wrapp_ReacionData(obj, df1, df2 = None):
"""
Description
-----------
Get Reaction Data (Stoichiometric or Yield Function) from Excel sheet
Context
----------
Function is called in Wrapp_ProcessUnits
Parameters
----------
    df1 : Dataframe which either holds Stoichiometric or Yield Coefficients
df2: Dataframe which is either empty or holds conversion factors
"""
if obj.Type == "Yield-Reactor":
dict1 = WF.read_type1(df1,0,1)
obj.set_xiFactors(dict1)
list1 = WF.read_list_new(df1, 2, 0)
obj.set_inertComponents(list1)
else:
dict1 = WF.read_type2(df1,0,1,2)
obj.set_gammaFactors(dict1)
dict2 = WF.read_type2(df2,0,1,2)
obj.set_thetaFactors(dict2)
def wrapp_EnergyData(obj, df, df2, df3):
"""
Description
-----------
    Define specific columns from the spreadsheet to set the energy data.
    Sets Demands, ReferenceFlow Types and Components for El, Heat1 and Heat2,
    but only if there are values in the Excel file; otherwise these values are left
    as None.
Also: Calls wrapp_Temperatures, which sets Temperature and Tau for Heat
Context
----------
Function is called in Wrapp.ProcessUnits
Parameters
----------
    df : Dataframe which holds information of energy demand and reference flow type
df2 : Dataframe which holds information of reference flow components
df3 : Dataframe which holds information on heat temperatures
"""
# Set Reference Flow Type:
if not pd.isnull(df.iloc[0,1]):
ProcessElectricityDemand = df.iloc[0,1]
ProcessElectricityReferenceFlow = df.iloc[1,1]
ProcessElectricityReferenceComponentList = WF.read_list_new(df2, 1, 2)
else:
ProcessElectricityDemand = 0
ProcessElectricityReferenceFlow = None
ProcessElectricityReferenceComponentList = []
if not pd.isnull(df.iloc[0,2]):
ProcessHeatDemand = df.iloc[0,2]
ProcessHeatReferenceFlow = df.iloc[1,2]
ProcessHeatReferenceComponentList = WF.read_list_new(df2, 2, 2)
else:
ProcessHeatDemand = None
ProcessHeatReferenceFlow = None
ProcessHeatReferenceComponentList = []
if not pd.isnull(df.iloc[0,3]):
ProcessHeat2Demand = df.iloc[0,3]
ProcessHeat2ReferenceFlow = df.iloc[1,3]
ProcessHeat2ReferenceComponentList = WF.read_list_new(df2, 3, 2)
else:
ProcessHeat2Demand = None
ProcessHeat2ReferenceFlow = None
ProcessHeat2ReferenceComponentList = []
wrapp_Temperatures(obj, df3, df)
obj.set_energyData(None,
None,
ProcessElectricityDemand,
ProcessHeatDemand,
ProcessHeat2Demand,
ProcessElectricityReferenceFlow,
ProcessElectricityReferenceComponentList,
ProcessHeatReferenceFlow,
ProcessHeatReferenceComponentList,
ProcessHeat2ReferenceFlow,
ProcessHeat2ReferenceComponentList
)
def wrapp_Temperatures(obj, df1, df2):
"""
Description
-----------
Set Process Temperatures and specific energy demand (tau) from Excel file
If no Temperatures and tau are defined everything is set to None
Sets Tau1 and Tau2 only if the values are really available, otherwise
Temperatures and Tau values are set to None
Parameters
----------
obj : Process unit object
df1 : Dataframe holding the information about the Temperatures needed
    df2 : Dataframe holding the information about specific energy demand
"""
obj.set_Temperatures()
if not pd.isnull(df2.iloc[0,2]):
TIN1 = df1.iloc[7,0]
TOUT1 = df1.iloc[8,0]
tau1 = df2.iloc[0,2]
obj.set_Temperatures(TIN1, TOUT1, tau1)
if not pd.isnull(df2.iloc[0,3]):
tau2 = df2.iloc[0,3]
TIN2 = df1.iloc[9,0]
TOUT2 = df1.iloc[10,0]
obj.set_Temperatures(TIN1, TOUT1, tau1, TIN2, TOUT2, tau2)
def wrapp_AdditivesData(obj,df1, df2, df3):
"""
Description
-----------
Define specific columns from the spreadsheet to set the added Input-flows
    Define specific columns from the spreadsheet to set the concentration data
Context
----------
function is called in Wrapp.ProcessUnits
Parameters
----------
df1 : Dataframe
df2 : Dataframe
"""
req_concentration = None
lhs_comp_list = WF.read_list (df2,1)
rhs_comp_list = WF.read_list (df2,3)
lhs_ref_flow = df2.iloc[0,0]
rhs_ref_flow = df2.iloc[0,2]
if not pd.isnull(df2.iloc[0,4]):
req_concentration = df2.iloc[0,4]
myu_dict = WF.read_type2 (df3,0,1,2)
obj.set_flowData(req_concentration,
rhs_ref_flow,
lhs_ref_flow,
rhs_comp_list,
lhs_comp_list,
myu_dict,
)
sourceslist = WF.read_list(df1,0)
obj.set_possibleSources(sourceslist)
def wrapp_EconomicData(obj, df, df2):
"""
Description
-----------
Get Economic information from Excel Sheet Colomns defined in df and df2
Context
-----------
Function is called in Wrapp.ProcessUnits
Parameters
----------
df : Dataframe with economic CAPEX Factors and Components List
df2 : Dataframe with General Factors for Direct and Indirect Costs
"""
ReferenceCosts = df.iloc[0,1]
ReferenceFlow = df.iloc[1,1]
CostExponent = df.iloc[2,1]
ReferenceYear= df.iloc[3,1]
DirectCostFactor = df2.iloc[5,0]
IndirectCostFactor = df2.iloc[6,0]
ReferenceFlowType = df.iloc[4,1]
ReferenceFlowComponentList = WF.read_list_new(df, 1, 5)
# Set Economic Data in Process Unit Object
obj.set_economicData(DirectCostFactor,
IndirectCostFactor,
ReferenceCosts,
ReferenceFlow,
CostExponent,
ReferenceYear,
ReferenceFlowType,
ReferenceFlowComponentList
)
def wrapp_ProductpoolData(obj, series):
"""
Description
-----------
Define specific columns from the
spreadsheet Productpool to set Productname, Productprice and Producttype
Context
----------
function is called in Wrapp_ProcessUnits
Parameters
----------
df : Dataframe
"""
obj.ProductName= series[4]
obj.set_productPrice(series[8])
obj.ProductType = series[9]
obj.set_group(series[7])
EmissionCredits = 0
if not pd.isnull(series[10]):
EmissionCredits = series[10]
minp = 0
maxp = 10000000
    if not pd.isnull(series[11]):
        minp = series[11]
# -*- coding: utf-8 -*-
import re
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_bool_dtype, is_categorical, is_categorical_dtype,
is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64_ns_dtype,
is_datetime64tz_dtype, is_datetimetz, is_dtype_equal, is_interval_dtype,
is_period, is_period_dtype, is_string_dtype)
from pandas.core.dtypes.dtypes import (
CategoricalDtype, DatetimeTZDtype, IntervalDtype, PeriodDtype, registry)
import pandas as pd
from pandas import (
Categorical, CategoricalIndex, IntervalIndex, Series, date_range)
from pandas.core.sparse.api import SparseDtype
import pandas.util.testing as tm
@pytest.fixture(params=[True, False, None])
def ordered(request):
return request.param
class Base(object):
def setup_method(self, method):
self.dtype = self.create()
def test_hash(self):
hash(self.dtype)
def test_equality_invalid(self):
assert not self.dtype == 'foo'
assert not is_dtype_equal(self.dtype, np.int64)
def test_numpy_informed(self):
pytest.raises(TypeError, np.dtype, self.dtype)
assert not self.dtype == np.str_
assert not np.str_ == self.dtype
def test_pickle(self):
# make sure our cache is NOT pickled
# clear the cache
type(self.dtype).reset_cache()
assert not len(self.dtype._cache)
# force back to the cache
result = tm.round_trip_pickle(self.dtype)
assert not len(self.dtype._cache)
assert result == self.dtype
class TestCategoricalDtype(Base):
def create(self):
return CategoricalDtype()
def test_pickle(self):
# make sure our cache is NOT pickled
# clear the cache
type(self.dtype).reset_cache()
assert not len(self.dtype._cache)
# force back to the cache
result = tm.round_trip_pickle(self.dtype)
assert result == self.dtype
def test_hash_vs_equality(self):
dtype = self.dtype
dtype2 = CategoricalDtype()
assert dtype == dtype2
assert dtype2 == dtype
assert hash(dtype) == hash(dtype2)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'category')
assert is_dtype_equal(self.dtype, CategoricalDtype())
assert not is_dtype_equal(self.dtype, 'foo')
def test_construction_from_string(self):
result = CategoricalDtype.construct_from_string('category')
assert is_dtype_equal(self.dtype, result)
pytest.raises(
TypeError, lambda: CategoricalDtype.construct_from_string('foo'))
def test_constructor_invalid(self):
msg = "Parameter 'categories' must be list-like"
with pytest.raises(TypeError, match=msg):
CategoricalDtype("category")
dtype1 = CategoricalDtype(['a', 'b'], ordered=True)
dtype2 = CategoricalDtype(['x', 'y'], ordered=False)
c = Categorical([0, 1], dtype=dtype1, fastpath=True)
@pytest.mark.parametrize('values, categories, ordered, dtype, expected',
[
[None, None, None, None,
CategoricalDtype()],
[None, ['a', 'b'], True, None, dtype1],
[c, None, None, dtype2, dtype2],
[c, ['x', 'y'], False, None, dtype2],
])
def test_from_values_or_dtype(
self, values, categories, ordered, dtype, expected):
result = CategoricalDtype._from_values_or_dtype(values, categories,
ordered, dtype)
assert result == expected
@pytest.mark.parametrize('values, categories, ordered, dtype', [
[None, ['a', 'b'], True, dtype2],
[None, ['a', 'b'], None, dtype2],
[None, None, True, dtype2],
])
def test_from_values_or_dtype_raises(self, values, categories,
ordered, dtype):
msg = "Cannot specify `categories` or `ordered` together with `dtype`."
with pytest.raises(ValueError, match=msg):
CategoricalDtype._from_values_or_dtype(values, categories,
ordered, dtype)
def test_is_dtype(self):
assert CategoricalDtype.is_dtype(self.dtype)
assert CategoricalDtype.is_dtype('category')
assert CategoricalDtype.is_dtype(CategoricalDtype())
assert not CategoricalDtype.is_dtype('foo')
assert not CategoricalDtype.is_dtype(np.float64)
def test_basic(self):
assert is_categorical_dtype(self.dtype)
factor = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
s = Series(factor, name='A')
# dtypes
assert is_categorical_dtype(s.dtype)
assert is_categorical_dtype(s)
assert not is_categorical_dtype(np.dtype('float64'))
assert is_categorical(s.dtype)
assert is_categorical(s)
assert not is_categorical(np.dtype('float64'))
assert not is_categorical(1.0)
def test_tuple_categories(self):
categories = [(1, 'a'), (2, 'b'), (3, 'c')]
result = CategoricalDtype(categories)
assert all(result.categories == categories)
@pytest.mark.parametrize("categories, expected", [
([True, False], True),
([True, False, None], True),
([True, False, "a", "b'"], False),
([0, 1], False),
])
def test_is_boolean(self, categories, expected):
cat = Categorical(categories)
assert cat.dtype._is_boolean is expected
assert is_bool_dtype(cat) is expected
assert is_bool_dtype(cat.dtype) is expected
class TestDatetimeTZDtype(Base):
def create(self):
return DatetimeTZDtype('ns', 'US/Eastern')
def test_alias_to_unit_raises(self):
# 23990
with tm.assert_produces_warning(FutureWarning):
DatetimeTZDtype('datetime64[ns, US/Central]')
def test_alias_to_unit_bad_alias_raises(self):
# 23990
with pytest.raises(TypeError, match=''):
DatetimeTZDtype('this is a bad string')
with pytest.raises(TypeError, match=''):
DatetimeTZDtype('datetime64[ns, US/NotATZ]')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = DatetimeTZDtype('ns', 'US/Eastern')
dtype3 = DatetimeTZDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
dtype4 = DatetimeTZDtype("ns", "US/Central")
assert dtype2 != dtype4
assert hash(dtype2) != hash(dtype4)
def test_construction(self):
pytest.raises(ValueError,
lambda: DatetimeTZDtype('ms', 'US/Eastern'))
def test_subclass(self):
a = DatetimeTZDtype.construct_from_string('datetime64[ns, US/Eastern]')
        b = DatetimeTZDtype.construct_from_string('datetime64[ns, CET]')
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 5 12:27:07 2018
@author: djk
"""
import os
import pandas as pd
import numpy as np
r2d = np.rad2deg
d2r = np.deg2rad
def IAGA2002_Header_Reader(IAGA2002_file):
"""
This function counts the header and comment rows in an IAGA 2002 format
file. It is designed to cope with the number of header lines being either
12 or 13, and an arbitrary number of comment lines (including none).
(The IAGA2002 format was last revised in June 2015 to allow an optional
thirteenth header line 'Publication date'.
Ref: https://www.ngdc.noaa.gov/IAGA/vdat/IAGA2002/iaga2002format.html)
The rows of data are preceded by a row of column headers starting with
"DATE" in columns 0:3. This string cannot occur earlier in the file, so
    detecting the first occurrence of this string may be used to count the total
number of header and comment lines.
This function may be useful to define the number of rows to skip
(n_header + n_comment) in another function designed to read in the data.
While it is rather cumbersome, when reading in a long sequence of IAGA2002
files, the 'safety first' approach would be to call this function for each
file in case the number of header lines changes within the sequence of
files.
Input parameter
---------------
IAGA2002_file: string
the full path and file name for the IAGA2002 data file
Output
------
A tuple:
with integer number of header rows (n_header), integer number of comment
rows (n_comment), and headers, a dictionary containing the information in
the headers.
Dependencies
------------
pandas
BGS Dependencies
----------------
None
Revision date
-------------
5 Feb 2018
"""
COMMENT_STR = '#'
DATE_STR = 'DATE'
head = ' '
n_header = 0
n_lines = 0
headers = {}
with open(IAGA2002_file) as ofile:
while head[0:4] != DATE_STR:
head = next(ofile)
if head[1] != COMMENT_STR:
key = head[0:24].strip()
val = head[24:69].strip()
headers[key] = val
n_header += 1
n_lines += 1
headers.pop(key) # Remove the data column header line from the dictionary
n_comment = n_lines-n_header # The number of comment lines
n_header -= 1 # The number of header lines
return (n_header, n_comment, headers)
def IAGA2002_Data_Reader(IAGA2002_file):
"""
This function reads the data in an IAGA 2002 format file into a pandas
dataframe.
Input parameter
---------------
IAGA2002_file: string
the full path and file name for the IAGA2002 data file
Output
------
A pandas dataframe:
vals - has the data with a datetime index and the column labels from the
IAGA2002 file
Dependencies
------------
pandas
BGS Dependencies
----------------
IAGA2002_Header_Reader
Revision date
-------------
5 Feb 2018
"""
# Read the header and comment lines at the top of the file to get the number
# of rows to skip before reading the data
header = IAGA2002_Header_Reader(IAGA2002_file)
nskip = header[0]+header[1]
# Read the data into a pandas dataframe (an IAGA2002 file has 'DATE' and 'TIME'
# as the first two column labels.) There's a trailing '|' on the column header
# line which is interpreted as the header for a column of nans and this
# property is used to delete it.
DT_INDEX = 'DATE_TIME'
vals = pd.read_csv(IAGA2002_file,
delim_whitespace=True,
skiprows=nskip,
parse_dates=[DT_INDEX.split('_')],
index_col=DT_INDEX)
vals.dropna(inplace=True, axis=1)
return(vals)
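# A minimal usage sketch (hypothetical file path) combining the two readers above: the
# header reader reports how many header/comment rows precede the data and returns the
# header metadata, while the data reader returns the measurements with a DatetimeIndex.
#   n_header, n_comment, meta = IAGA2002_Header_Reader('esk20180101dmin.min')
#   one_day = IAGA2002_Data_Reader('esk20180101dmin.min')
#   print(sorted(meta.keys()), one_day.columns.tolist())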
def load_year(observatory=None, year=None, path=None):
"""Read in the daily 1-min files from a whole year.
Parameters
----------
observatory: string
Observatory code e.g. ESK
year: int/string
Desired year to load
path: string
Directory containing the files for that year
Returns
-------
DataFrame
"""
dates_in_year = pd.date_range(
start=f'{year}-01-01', end=f'{year}-12-31', freq='D'
)
df = pd.DataFrame()
for date in dates_in_year:
ymd = date.strftime('%Y%m%d')
file_name = f'{observatory}{ymd}dmin.min'
file_path = os.path.join(path, file_name)
df = df.append(IAGA2002_Data_Reader(file_path))
return df
def read_obs_hmv(obscode, year_st, year_fn, folder):
"""Read in observatory annual mean files in IAGA2002 format.
This function reads the hourly mean value data in yearly IAGA2002 format
files into a pandas dataframe. (Note: The data may be reported in different
ways in different years (e.g. DFHZ, FXYZ).)
Input parameters
---------------
obscode: the IAGA observatory code: string (3 or 4 characters)
year_st: the start year for the data request
year_fn: the final year for the data request
folder : the location of the yearly hmv files
Output
------
A pandas dataframe: datareq
This has columns of X, Y and Z data (only) and keeps the datetime index
from the IAGA2002 files
Dependencies
------------
pandas
Local Dependencies
----------------
none
Revision date
-------------
30 Jan 2019
"""
OBSY = obscode.upper()
obsy = obscode.lower()
# Read in the observatory data one year file at a time and construct filenames
datareq = pd.DataFrame()
for year in range(year_st, year_fn+1):
ystr = str(year)
file = obsy + ystr + 'dhor.hor'
fpf = folder + file
tmp = IAGA2002_Data_Reader(fpf)
tmp.columns = [col.strip(OBSY) for col in tmp.columns]
if('D' in tmp.columns):
xvals, yvals = dh2xy(tmp['D'], tmp['H'])
tmp['X'] = xvals.round(decimals=1)
tmp['Y'] = yvals.round(decimals=1)
datareq = datareq.append(tmp[['X','Y', 'Z']])
return(datareq)
def read_obs_hmv_declination(obscode, year_st, year_fn, folder):
"""Read (or calculate) the declination from hourly mean files in IAGA2002 format.
This function reads the hourly mean value data in yearly IAGA2002 format
files into a pandas dataframe for the specified observatory between year_st
and year_fn. Note that D is reported in angular units of minutes of arc (and not
degrees) in this file format.
Input parameters
---------------
obscode: the IAGA observatory code: string (3 or 4 characters)
year_st: the start year for the data request
year_fn: the final year for the data request
folder : the location of the yearly hmv files
Output
------
A pandas dataframe: datareq
This has columns for datetime and declination
Dependencies
------------
pandas
Local Dependencies
----------------
none
Revision date
-------------
24/06/19 (<NAME>)
"""
OBSY = obscode.upper()
obsy = obscode.lower()
# Read in the observatory data one year file at a time and construct filenames
    datareq = pd.DataFrame()
from pathlib import Path
import numpy as np
import pandas as pd
from dstools.pipeline.clients import SQLAlchemyClient
from dstools import testing
def test_can_check_nulls(tmp_directory):
client = SQLAlchemyClient('sqlite:///' + str(Path(tmp_directory, 'db.db')))
    df = pd.DataFrame({'no_nas': [1, 2, 1], 'nas': [1, np.nan, 1]})
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from geoedfframework.utils.GeoEDFError import GeoEDFError
from geoedfframework.GeoEDFPlugin import GeoEDFPlugin
import pandas as pd
import requests
from cdo_api_py import Client
""" Module for implementing the GHCND input connector plugin. This plugin will retrieve data for
five specific meteorological parameters for a given station ID and date range. The plugin returns
a CSV file for each parameter with data records for each intervening date. The CSV file is named
based on the station and parameter. The new NOAA API is used that does not require tokens.
"""
class GHCNDInput(GeoEDFPlugin):
    # no authentication is needed by GHCNDInput; the new NOAA API does not require tokens
__optional_params = []
__required_params = ['start_date','end_date','station_id']
# we use just kwargs since we need to be able to process the list of attributes
# and their values to create the dependency graph in the GeoEDFInput super class
def __init__(self, **kwargs):
# list to hold all the parameter names; will be accessed in super to
# construct dependency graph
self.provided_params = self.__required_params + self.__optional_params
# check that all required params have been provided
for param in self.__required_params:
if param not in kwargs:
raise GeoEDFError('Required parameter %s for GHCNDInput not provided' % param)
# set all required parameters
for key in self.__required_params:
setattr(self,key,kwargs.get(key))
# set optional parameters
for key in self.__optional_params:
# if key not provided in optional arguments, defaults value to None
setattr(self,key,kwargs.get(key,None))
        # set the hardcoded set of meteorological params
# can possibly generalize to fetch any list of params in the future
self.met_params = ['SNOW','SNWD','TMAX','TMIN','PRCP']
# class super class init
super().__init__()
# each Input plugin needs to implement this method
# if error, raise exception; if not, return True
def get(self):
# semantic checking of parameters
# process dates
try:
startdate = pd.to_datetime(self.start_date,format='%m/%d/%Y')
enddate = pd.to_datetime(self.end_date,format='%m/%d/%Y')
except:
raise GeoEDFError("Error parsing dates provided to GHCNDInput, please ensure format is mm/dd/YYYY")
# param checks complete
try:
# parse out station_id
station_id = self.station_id.split(':')[1]
# use new API
            # construct URL using the requested date range
            station_data_url = "https://www.ncei.noaa.gov/access/services/data/v1?dataset=daily-summaries&dataTypes=SNOW,PRCP,SNWD,TMIN,TMAX&stations=%s&startDate=%s&endDate=%s&format=json" % (station_id, startdate.strftime('%Y-%m-%d'), enddate.strftime('%Y-%m-%d'))
res = requests.get(station_data_url)
res.raise_for_status()
            station_data = pd.read_json(res.text)
import pandas as pd
def load_data():
df = pd.read_csv('./assets/BankChurners.csv')
df.drop(['Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_1',
'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_2',
'CLIENTNUM'],
axis=1,
inplace=True)
return df
def balance_labels(df, random_state=42):
attrited = df.loc[df['Attrition_Flag'] == 'Attrited Customer']
existing = df.loc[df['Attrition_Flag'] == 'Existing Customer']
balanced_df = pd.concat([attrited.reset_index(drop=True),
existing.sample(n=len(attrited), replace=False, random_state=random_state).reset_index(drop=True)])
return balanced_df
def get_data_target(df):
target = df['Attrition_Flag']
data = df.drop(['Attrition_Flag'], axis=1)
    data = pd.get_dummies(data, drop_first=False)
    return data, target
# -*- coding: utf-8 -*-
# %%
import pandas as pd
import numpy as np
import tkinter as tk
class package:
def __init__(self):
# elements defined
C = 12
H = 1.007825
N = 14.003074
O = 15.994915
P = 30.973763
S = 31.972072
Na = 22.98977
Cl = 34.968853
self.elements = [C,H,N,O,P,S,Na,Cl]
self.elementsymbol = ['C','H','N','O','P','S','Na','Cl']
ionname = ['M','M+H','M+2H','M+H-H2O','M+2H-H2O','M+Na','M+2Na','M+2Na-H','M+NH4',
'M-H','M-2H','M-3H','M-4H','M-5H','M-H-H2O','M-2H-H2O','M-CH3','M+Cl','M+HCOO','M+OAc']
ionfunc = []
ionfunc.append(lambda ms: ms)
ionfunc.append(lambda ms: ms+package().elements[1])
ionfunc.append(lambda ms: (ms+2*package().elements[1])/2)
ionfunc.append(lambda ms: ms-package().elements[1]-package().elements[3])
ionfunc.append(lambda ms: (ms-package().elements[3])/2)
ionfunc.append(lambda ms: ms+package().elements[6])
ionfunc.append(lambda ms: (ms+2*package().elements[6])/2)
ionfunc.append(lambda ms: ms-package().elements[1]+2*package().elements[6])
ionfunc.append(lambda ms: ms+4*package().elements[1]+package().elements[2])
ionfunc.append(lambda ms: ms-package().elements[1])
ionfunc.append(lambda ms: (ms-2*package().elements[1])/2)
ionfunc.append(lambda ms: (ms-3*package().elements[1])/3)
ionfunc.append(lambda ms: (ms-4*package().elements[1])/4)
ionfunc.append(lambda ms: (ms-5*package().elements[1])/5)
ionfunc.append(lambda ms: ms-3*package().elements[1]-package().elements[3])
ionfunc.append(lambda ms: (ms-4*package().elements[1]-package().elements[3])/2)
ionfunc.append(lambda ms: ms-package().elements[0]-3*package().elements[1])
ionfunc.append(lambda ms: ms+package().elements[7])
ionfunc.append(lambda ms: ms+package().elements[0]+package().elements[1]+2*package().elements[3])
ionfunc.append(lambda ms: ms+2*package().elements[0]+3*package().elements[1]+2*package().elements[3])
self.ion = {}
for i,j in enumerate(ionname):
self.ion[j] = ionfunc[i]
# %% [markdown]
# Package for Sphingolipids
# %%
class package_sl(package):
def __init__(self):
# base structure defined
self.base = {'Cer': np.array([0,3,1,0]+[0]*(len(package().elements)-4)),
'Sphingosine': np.array([0,3,1,0]+[0]*(len(package().elements)-4)),
'Sphinganine': np.array([0,3,1,0]+[0]*(len(package().elements)-4))}
# headgroups defined
headgroup = ['Pi','Choline','Ethanolamine','Inositol','Glc','Gal','GalNAc','NeuAc','Fuc','NeuGc']
formula = []
formula.append(np.array([0,3,0,4,1]+[0]*(len(package().elements)-5)))
formula.append(np.array([5,13,1,1]+[0]*(len(package().elements)-4)))
formula.append(np.array([2,7,1,1]+[0]*(len(package().elements)-4)))
formula.append(np.array([6,12,0,6]+[0]*(len(package().elements)-4)))
formula.append(np.array([6,12,0,6]+[0]*(len(package().elements)-4)))
formula.append(np.array([6,12,0,6]+[0]*(len(package().elements)-4)))
formula.append(np.array([8,15,1,6]+[0]*(len(package().elements)-4)))
formula.append(np.array([11,19,1,9]+[0]*(len(package().elements)-4)))
formula.append(np.array([6,12,0,5]+[0]*(len(package().elements)-4)))
formula.append(np.array([11,19,1,10]+[0]*(len(package().elements)-4)))
self.components = self.base.copy()
for i,j in enumerate(headgroup):
self.components[j] = formula[i]
# sn type defined
sntype = ['none','d','t']
snformula = []
snformula.append(lambda carbon,db: np.array([carbon,2*carbon-2*db,0,2]+[0]*(len(package().elements)-4)))
snformula.append(lambda carbon,db: np.array([carbon,2*carbon+2-2*db,0,3]+[0]*(len(package().elements)-4)))
snformula.append(lambda carbon,db: np.array([carbon,2*carbon+2-2*db,0,4]+[0]*(len(package().elements)-4)))
self.sn = {}
for i,j in enumerate(sntype):
self.sn[j] = snformula[i]
# extended structure
nana = ['M','D','T','Q','P']
iso = ['1a','1b','1c']
        namedf = pd.DataFrame({'0-series': ['LacCer'],'a-series': ['GM3'],'b-series': ['GD3'],'c-series': ['GT3']})
from functools import lru_cache
from operator import itemgetter
from typing import Union
import pandas as pd
import pycep_correios
from geopy import distance
from geopy.geocoders import Photon
from pymongo import MongoClient
def a() -> float:
collection = db["estabelecimentos"]
    total_empresas = collection.count_documents({}) # Total of 31,564,677 companies in the database
    """
    Build a pipeline that groups all companies by registration status (situacao_cadastral)
    and then computes each group's percentage of the total number of companies in the database.
    """
results = collection.aggregate([{
"$group": {
"_id": {"situacao_cadastral": "$situacao_cadastral"},
"count": {"$sum": 1}
        } # Group by situacao_cadastral
},
    {
        "$project": {
            "count": 1,
            "percentage": {
                "$multiply": [{"$divide": [100, total_empresas]}, "$count"] # Compute the percentage
}
}
}])
    result_list = [(x["_id"]["situacao_cadastral"], x["percentage"]) for x in results] # Build a list of tuples with (status, percentage)
    for situacao in result_list:
        if situacao[0] == "02": # Active companies
            print(f"Porcentagem de empresas ativas: {situacao[1]:.2f}%") # Result: 42.04%
            return f"{situacao[1]:.2f}"
def b() -> list[tuple[float, float]]:
collection = db["estabelecimentos"]
results = collection.aggregate([
{'$match': {
'cnae_fiscal_principal': {'$regex': '^561'}
}
},
{'$group': {
            '_id': {'$substrBytes': ['$data_inicio_atividades', 0, 4]}, # Take the first 4 digits (the year)
'total': {'$sum': 1}
}
    }]) # Group records whose main CNAE starts with 561 by the year their activities began
    result_list = [(x["_id"], x["total"]) for x in results] # Build a list of tuples with (year, total)
    result_list.sort(key=itemgetter(0), reverse=True) # Sort the tuples by year, descending
return result_list
def c():
collection = db["estabelecimentos"]
    # Since many records share the same CEP and address, an LRU cache is used
    # to avoid unnecessary reprocessing and API calls.
@lru_cache(maxsize=None)
    def get_address_from_cep(cep: str) -> Union[str, None]:
        """Use the VIACEP API to try to obtain the address for a CEP."""
try:
endereco = pycep_correios.get_address_from_cep(cep, webservice=pycep_correios.WebService.VIACEP)
return endereco['logradouro'] + ", " + endereco['bairro'] + ", " + endereco['cidade'] + " - " + endereco['uf']
except pycep_correios.exceptions.InvalidCEP:
print(f"Cep Inválido: {cep}")
except pycep_correios.exceptions.CEPNotFound:
print(f"Cep Não Encontrado: {cep}")
return None
@lru_cache(maxsize=None)
    def address_to_coordinates(address: str) -> tuple[float, float]:
        """Use the Photon API to try to get the coordinates of an address."""
geolocator = Photon(user_agent="parmenas_dataops_desafio")
result = geolocator.geocode(address)
if result:
return (result.latitude, result.longitude)
else:
print(f"Não foi possível encontrar o endereço {address}")
return None, None
@lru_cache(maxsize=None)
    def calculate_distance(coordinates_start: tuple, coordinates_end: tuple) -> float:
        """Return the distance in kilometers between two coordinates."""
result = distance.distance(coordinates_start, coordinates_end)
        return result.kilometers
    """ Querying more than 30 million companies and testing CEP by CEP would be madness;
    since only companies from São Paulo-SP (the city of CEP 01422000) were needed,
    only CEPs starting with 0 were pulled and worked with here.
    """
result_db = collection.find({
'$and': [
{'cep': {'$regex': '^0'}},
            {'cep': {'$not': {'$regex': '^0$'}}}, # Skip records where cep = "0"
            {'cep': {'$not': {'$regex': '00000000'}}} # Skip records where cep = "00000000"
        ]
    }, projection={"_id": 1, "cep": 1}) # Return only the _id and the cep
    result_raw = [x for x in result_db]
    """ Since that would still be more than 4 million CEPs to test, the precision of the
    last 3 digits of each CEP is dropped, which allows better grouping."""
    lista_cep_tratado = []
    for doc in result_raw:
        doc["cep"] = f"{doc['cep'][:-3]}000"
lista_cep_tratado.append(doc)
del result_raw
lista_com_endereco = []
for doc in lista_cep_tratado:
address = get_address_from_cep(doc["cep"])
if address:
doc["endereco"] = address
lista_com_endereco.append(doc)
del lista_cep_tratado
    get_address_from_cep.cache_clear() # Clear the cache since it is no longer needed
    lista_com_coordenadas = []
    for doc in lista_com_endereco:
        if doc["endereco"]:
            doc["latitude"], doc["longitude"] = address_to_coordinates(doc["endereco"])
            if doc["latitude"]:
                lista_com_coordenadas.append(doc)
    del lista_com_endereco
    address_to_coordinates.cache_clear()
    # Compute the distance of each CEP from the coordinates of CEP 01422000: (-23.5648196, -46.6600444)
    list_com_distancias = []
    for cep in lista_com_coordenadas:
        start = (-23.5648196, -46.6600444)
        end = (float(cep["latitude"]), float(cep["longitude"]))
        distancia = calculate_distance(start, end)
        if distancia <= 5:
            list_com_distancias.append(cep["_id"])
    calculate_distance.cache_clear()
    return len(list_com_distancias)
def exportar_respostas():
resultado_a = a()
resultado_b = b()
resultado_c = c()
df = pd.DataFrame([("a", resultado_a), ("c", resultado_c)], columns=['Letra', 'Resultado'])
df.to_excel('resultados/resultados_a_c.xlsx', index=False)
    df_b = pd.DataFrame(resultado_b, columns=['Ano', 'Total'])
import pandas as pd
import numpy as np
'''
DataFrame
A DataFrame represents a rectangular table of data and contains an ordered collec‐
tion of columns, each of which can be a different value type (numeric, string,
boolean, etc.). The DataFrame has both a row and column index; it can be thought of
as a dict of Series all sharing the same index. Under the hood, the data is stored as one
or more two-dimensional blocks rather than a list, dict, or some other collection of
one-dimensional arrays.
While a DataFrame is physically two-dimensional, you can use it to
represent higher dimensional data in a tabular format using hierarchical indexing.
'''
'''
There are many ways to construct a DataFrame, though one of the most common is
from a dict of equal-length lists or NumPy arrays
'''
data = {'state': ['Oshiwara', 'Oshiwara', 'Oshiwara', 'Navapura', 'Navapura', 'Navapura'],
'year': [2000, 2001, 2002, 2001, 2002, 2003],
'pop': [1.5, 1.7, 3.6, 2.4, 2.9, 3.2]}
frame = pd.DataFrame(data)
print(frame)
'''
For large DataFrames, the head method selects only the first five rows
'''
print(frame.head())
'''
If you specify a sequence of columns, the DataFrame’s columns will be arranged in
that order
'''
print(pd.DataFrame(data, columns=['year', 'state', 'pop']))
'''
If you pass a column that isn’t contained in the dict, it will appear with missing values
in the result
'''
print(pd.DataFrame(data, columns=['year', 'state', 'pop','clean']))
'''
A column in a DataFrame can be retrieved as a Series either by dict-like notation or
by attribute.
frame2[column] works for any column name, but frame2.column
only works when the column name is a valid Python variable
name
'''
frame2 = pd.DataFrame(data, columns=['year', 'state', 'pop', 'debt'],index=['one', 'two', 'three', 'four','five', 'six'])
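'''
For example, retrieving a column dict-style and attribute-style returns the same Series:
'''
print(frame2['state'])
print(frame2.year)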
from typing import Type
import numpy as np
import pandas as pd
import pytest
from vivid.featureset.encodings import CountEncodingAtom, OneHotEncodingAtom, InnerMergeAtom
class BaseTestCase:
def setup_method(self):
data = [
[1, 2.1, 'hoge'],
[1, 1.01, 'spam'],
[2, 10.001, 'ham'],
[1, 1.1, 'spam'],
[3, 2.5, 'spam'],
[1, None, None]
]
self.train_df = pd.DataFrame(data, columns=['int1', 'float1', 'str1'])
self.y = [1] * len(self.train_df)
test_data = [
data[0], data[2]
]
self.test_df = pd.DataFrame(test_data, columns=self.train_df.columns)
    def is_generate_idempotency(self, atom):
        """Check that the atom's generate() output is idempotent."""
        feat_1 = atom.generate(self.train_df, self.y)
        feat_2 = atom.generate(self.train_df)
        return feat_1.equals(feat_2)
class TestCountEncodingAtom(BaseTestCase):
def setup_method(self):
super(TestCountEncodingAtom, self).setup_method()
class IrisCountEncodingAtom(CountEncodingAtom):
use_columns = ['int1', 'str1']
self.atom = IrisCountEncodingAtom()
def test_generate_data(self):
feat_train = self.atom.generate(self.train_df, self.y)
assert len(self.train_df) == len(feat_train)
    def test_output_values(self):
        """Verify that the generated output values are correct."""
        # fit on the training data first
        self.atom.generate(self.train_df, self.y)
test_data = [
            [1, 'spam'], # values with a known mapping
            [2, 'ham'],
            [None, None] # not present in the training records, or None
]
ground_truth = [
[4, 3],
[1, 1],
[np.nan, np.nan]
]
test_df = pd.DataFrame(test_data, columns=self.atom.use_columns)
feat_test = self.atom.generate(test_df)
assert len(test_df) == len(feat_test)
        assert pd.DataFrame(ground_truth).equals(pd.DataFrame(feat_test.values))
"""
Module for integration testing.
"""
import os
from pathlib import Path
from dask.distributed import Client, LocalCluster
import numpy as np
import pandas as pd
import pytest
from ..unlikely.engine import abc_smc
from ..unlikely.models import Models, Model
from ..unlikely.misc import create_images_from_data
from ..unlikely.priors import Beta, Uniform
from .conftest import assert_similar_enough_distribution
def test_beta_binomial_1():
# A 1 is a "success", and a 0 is a "failure"
obs = np.array([1, 0, 1, 1, 1, 0, 1, 0, 1])
# Number of particles to sample per epoch
num_particles = 2000
# The cutoff(s) that decide whether or not to accept a particle.
epsilon_sets = [[0], [1, 0], [3, 2, 1, 0]]
column = [
{
'title': 'Posterior after 1 success out of 1',
'data': [
pd.DataFrame(
{
'reference_posterior': np.random.beta(
2, 1, num_particles)
}
),
pd.DataFrame(
{
'prior': np.random.beta(
1, 1, num_particles)
}
)
]
},
{
'title': 'Full update with 6 successes out of 9',
'data': [
pd.DataFrame(
{
'reference_posterior': np.random.beta(
obs.sum() + 1,
len(obs) - obs.sum() + 1,
num_particles
)
}
),
pd.DataFrame(
{
'prior': np.random.beta(
1, 1, num_particles)
}
)
]
}
]
def distance(x, y):
"""
For binomially distributed data, this essentially counts the number of
"successes". We do that for both the observed and the simulated data
sets and find the absolute distance between the two of them.
This is for illustrative purposes only. You could write a more complex
one that suits your own problem.
Parameters:
x: np.array
y: np.array
Returns: numeric
"""
return abs(x.sum() - y.sum())
def simulate(priors, actual_data):
"""
Used by a model to simulate data.
This is for illustrative purposes only. You could write a more complex
one that suits your own problem.
Parameters:
priors: unlikely.priors.Priors
Acts like a dict. Keys should be names of priors of a model.
actual_data: Some data
Returns: integer
A number between 0 and 1.
"""
return np.random.binomial(
n=1,
p=priors['beta'],
size=len(actual_data)
)
models_list = []
for i, epsilons in enumerate(epsilon_sets):
# Create a model. A model is a set of priors, plus a simulator
models = Models(
[
Model(
name='flat prior',
priors=[
Beta(alpha=1, beta=1, name="beta"),
],
simulate=simulate,
prior_model_proba=1
),
],
perturbation_param=0.9
)
models_list.append(models)
# Compute the posterior distribution.
abc_smc(
num_particles,
epsilons,
models,
np.array([obs[0]]),
distance,
)
column[0]['data'].append(
models[0].prev_accepted_proposals.rename(
columns={'beta': f'eps: {epsilons}'}
)
)
# Create a model that uses the full data set
models_more_data = Models(
[
Model(
name='flat prior',
priors=[
Beta(alpha=1, beta=1, name="beta"),
],
simulate=simulate,
prior_model_proba=1
),
],
)
models_list.append(models_more_data)
# Compute the posterior distribution for the models object with all the
# data.
abc_smc(
num_particles,
epsilons=epsilons,
models=models_more_data,
obs=obs,
distance=distance,
)
column[1]['data'].append(
models_more_data[0].prev_accepted_proposals.rename(
columns={'beta': f'eps: {epsilons}'}
)
)
# The posterior distribution (i.e. accepted particles that are compatible
# "enough" with the data and model) are stored in
# models[0].prev_accepted_proposals
# Assuming you have an "images" folder in your current working directory:
create_images_from_data(
save_path=Path(
os.getenv("PWD")) / "images" / "beta_binomial_example.png",
data={
'title': "Comparison of Prior & Posterior of a Beta-Binomial",
'data': [
column
]
},
xlim=(0, 1),
figsize_mult=(5, 5)
)
for i in range(len(models_list)):
if i % 2 == 0:
assert_similar_enough_distribution(
models_list[i][0].prev_accepted_proposals,
pd.DataFrame({'beta': np.random.beta(2, 1, num_particles)})
)
else:
assert_similar_enough_distribution(
models_list[i][0].prev_accepted_proposals,
pd.DataFrame(
{
'beta': np.random.beta(
obs.sum() + 1, len(obs) - obs.sum() + 1,
num_particles
)
}
)
)
def test_uniform_binomial_1():
# A 1 is a "success", and a 0 is a "failure"
obs = np.array([1, 0, 1, 1, 1, 0, 1, 0, 1])
# Number of particles to sample per epoch
num_particles = 2000
# The cutoff(s) that decide whether or not to accept a particle.
epsilon_sets = [[0], [1, 0], [3, 2, 1, 0]]
column = [
{
'title': 'Posterior after 1 success out of 1',
'data': [
pd.DataFrame(
{
'reference_posterior': np.random.beta(
2, 1, num_particles)
}
),
pd.DataFrame(
{
'prior': np.random.uniform(
0, 1, num_particles)
}
)
]
},
{
'title': 'Full update with 6 successes out of 9',
'data': [
pd.DataFrame(
{
'reference_posterior': np.random.beta(
obs.sum() + 1,
len(obs) - obs.sum() + 1,
num_particles
)
}
),
pd.DataFrame(
{
'prior': np.random.uniform(
0, 1, num_particles)
}
)
]
}
]
def distance(x, y):
"""
For binomially distributed data, this essentially counts the number of
"successes". We do that for both the observed and the simulated data
sets and find the absolute distance between the two of them.
This is for illustrative purposes only. You could write a more complex
one that suits your own problem.
Parameters:
x: np.array
y: np.array
Returns: numeric
"""
return abs(x.sum() - y.sum())
def simulate(priors, actual_data):
"""
Used by a model to simulate data.
This is for illustrative purposes only. You could write a more complex
one that suits your own problem.
Parameters:
priors: unlikely.priors.Priors
Acts like a dict. Keys should be names of priors of a model.
actual_data: Some data
Returns: integer
A number between 0 and 1.
"""
return np.random.binomial(
n=1,
p=priors['uniform'],
size=len(actual_data)
)
models_list = []
for i, epsilons in enumerate(epsilon_sets):
# Create a model. A model is a set of priors, plus a simulator
models = Models(
[
Model(
name='flat prior',
priors=[
Uniform(alpha=0, beta=1, name="uniform"),
],
simulate=simulate,
prior_model_proba=1
),
],
perturbation_param=0.9
)
models_list.append(models)
# Compute the posterior distribution.
abc_smc(
num_particles,
epsilons,
models,
np.array([obs[0]]),
distance,
)
column[0]['data'].append(
models[0].prev_accepted_proposals.rename(
columns={'uniform': f'eps: {epsilons}'}
)
)
# Create a model that uses the full data set
models_more_data = Models(
[
Model(
name='flat prior',
priors=[
Uniform(alpha=0, beta=1, name="uniform"),
],
simulate=simulate,
prior_model_proba=1
),
],
)
models_list.append(models_more_data)
# Compute the posterior distribution for the models object with all the
# data.
abc_smc(
num_particles,
epsilons=epsilons,
models=models_more_data,
obs=obs,
distance=distance,
)
column[1]['data'].append(
models_more_data[0].prev_accepted_proposals.rename(
columns={'uniform': f'eps: {epsilons}'}
)
)
# The posterior distribution (i.e. accepted particles that are compatible
# "enough" with the data and model) are stored in
# models[0].prev_accepted_proposals
# Assuming you have an "images" folder in your current working directory:
create_images_from_data(
save_path=Path(
os.getenv("PWD")) / "images" / "uniform_binomial_example.png",
data={
'title': "Comparison of Prior & Posterior of a Uniform-Binomial",
'data': [
column
]
},
xlim=(0, 1),
figsize_mult=(5, 5)
)
for i in range(len(models_list)):
if i % 2 == 0:
assert_similar_enough_distribution(
models_list[i][0].prev_accepted_proposals,
pd.DataFrame({'uniform': np.random.beta(2, 1, num_particles)})
)
else:
assert_similar_enough_distribution(
models_list[i][0].prev_accepted_proposals,
pd.DataFrame(
{
'uniform': np.random.beta(
obs.sum() + 1, len(obs) - obs.sum() + 1,
num_particles
)
}
)
)
def test_uniform_binomial_2():
num_particles = 2000
obs = np.array([
1, 1, 1,
1, 1, 1, 0,
0, 1, 1, 0, 1, 1, 1
])
epsilons_list = [[0], [3, 2, 1, 0]]
local_cluster = LocalCluster(threads_per_worker=1)
client = Client(local_cluster)
def distance(x, y):
"""
Compare the number of ones in one vs. the other.
"""
return abs(x.sum() - y.sum())
def simulate(priors, obs):
"""
Data is binomially distributed.
"""
return np.random.binomial(n=1, p=priors['uniform'], size=len(obs))
data_to_display = [
[
{
'title': f"obs: {obs[:3]}",
'data': []
},
{
'title': f"after {obs[3:7]}",
'data': []
},
{
'title': f"after {obs[7:]}",
'data': []
},
{
'title': "Full batch",
'data': []
}
]
]
for row, epsilons in enumerate(epsilons_list):
models = Models(
[
Model(
name='Uniform over (0.5, 1)',
priors=[
Uniform(alpha=0.5, beta=1, name="uniform"),
],
simulate=simulate,
prior_model_proba=1,
),
]
)
# Update with 1st batch
abc_smc(
num_particles=num_particles,
epsilons=epsilons,
models=models,
obs=obs[:3],
distance=distance,
client=client
)
data_to_display[0][0]['data'].append(
pd.DataFrame(models[0].prev_accepted_proposals).rename(
columns={'uniform': f'eps: {epsilons}'}
)
)
# The posterior distribution becomes the prior
models.use_distribution_from_samples()
# Update with 2nd batch
abc_smc(
num_particles=num_particles,
epsilons=epsilons,
models=models,
obs=obs[3:7],
distance=distance,
client=client
)
# The posterior distribution becomes the prior
models.use_distribution_from_samples()
data_to_display[0][1]['data'].append(
pd.DataFrame(
models[0].prev_accepted_proposals).rename(
columns={'uniform': f'eps: {epsilons}'}
)
)
# Update with 3rd batch
abc_smc(
num_particles=num_particles,
epsilons=epsilons,
models=models,
obs=obs[7:],
distance=distance,
client=client
)
data_to_display[0][2]['data'].append(
pd.DataFrame(
models[0].prev_accepted_proposals).rename(
columns={'uniform': f'eps: {epsilons}'}
)
)
models_full_batch = Models(
[
Model(
name='flat prior',
priors=[
Uniform(alpha=0.5, beta=1, name="uniform"),
],
simulate=simulate,
prior_model_proba=1,
),
]
)
# Update full batch
abc_smc(
num_particles=num_particles,
epsilons=epsilons,
models=models_full_batch,
obs=obs,
distance=distance,
client=client
)
data_to_display[0][3]['data'].append(
pd.DataFrame(
models_full_batch[0].prev_accepted_proposals
).rename(columns={'uniform': f'eps: {epsilons}'})
)
create_images_from_data(
data={
'title': '3 batch updates',
'data': data_to_display
},
xlim=(0, 1),
figsize_mult=(2, 8),
save_path=Path(
os.getenv("PWD")
) / "images" / "uniform_half_to_1_binomial_mini_batch.png",
)
(models[0].prev_accepted_proposals < 0.5).sum()['uniform'] == 0
(models_full_batch[0].prev_accepted_proposals < 0.5).sum()['uniform'] == 0
assert (models[0].prev_accepted_proposals < 0.5).sum()['uniform'] == 0
assert (models_full_batch[0].prev_accepted_proposals < 0.5)\
.sum()['uniform'] == 0
assert (models[0].prev_accepted_proposals > 1)\
.sum()['uniform'] == 0
assert (models_full_batch[0].prev_accepted_proposals > 1)\
.sum()['uniform'] == 0
def test_beta_binomial_non_abc_rejection_sampling():
"""
To see how settings affect the dispersion of the posterior distribution,
here we vary a bunch of settings. We vary the number of epsilons, whether
or not to use a constant standard deviation for the perturbation process
within one abc_smc run, and if not using a constant standard deviation,
varying how thin the adaptive standard deviation.
"""
num_particles = 2000
obs = np.array([
1, 1, 1,
1, 1, 1, 0,
0, 1, 1, 0, 1, 1, 1
])
def distance(x, y):
"""
Compare the number of ones in one vs. the other.
"""
return abs(x.sum() - y.sum())
def simulate(priors, obs):
"""
Data is binomially distributed.
"""
return np.random.binomial(n=1, p=priors['beta'], size=len(obs))
data_to_display = []
constant_devs = [True, True, True]
beta_std_divs = [1.0, 2.0, 3.0]
cols = list(range(len(beta_std_divs)))
for col, beta_std_div, use_constant_dev in zip(
cols,
beta_std_divs,
constant_devs
):
data_to_display.append([
{
'title': f"batch 1: {obs[:3]}"
+ f", std_div: {beta_std_div},"
+ f" constant_dev: {use_constant_dev}",
'data': []
},
{
'title': f"batch 2: {obs[3:7]},"
+ f" std_div: {beta_std_div},"
+ f" constant_dev: {use_constant_dev}",
'data': []
},
{
'title': f"batch 3: {obs[7:]},"
+ f" std_div: {beta_std_div},"
+ f" constant_dev: {use_constant_dev}",
'data': []
},
{
'title': "full batch,"
+ f" std_div: {beta_std_div},"
+ f" constant_dev: {use_constant_dev}",
'data': []
}
])
obs_indices = [(0, 3), (3, 7), (7, len(obs))]
epsilon_sets = [[0], [0], [1, 0], [3, 2, 1, 0]]
perturbations_config = [False, True, True, True]
for i, (epsilons, use_perturbation) in enumerate(
zip(epsilon_sets, perturbations_config)
):
models = Models(
[
Model(
name='flat prior',
priors=[
Beta(
alpha=1,
beta=1,
name="beta",
)
],
simulate=simulate,
prior_model_proba=1,
perturb=use_perturbation
),
],
use_constant_dev=use_constant_dev
)
# Loop through the rows
for j, (start_index, end_index) in enumerate(obs_indices):
obs_batch = obs[start_index:end_index]
if i == 0:
data_to_display[col][j]['data'].append(
pd.DataFrame(
{
'target': np.random.beta(
obs[:end_index].sum() + 1,
len(obs[:end_index])
- obs[:end_index].sum() + 1,
num_particles
)
}
)
)
# Update with 1st batch
abc_smc(
num_particles=num_particles,
epsilons=epsilons,
models=models,
obs=obs_batch,
distance=distance,
)
data_to_display[col][j]['data'].append(
                pd.DataFrame(models[0].prev_accepted_proposals)
            )
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Development'
import numpy as np
import pandas as pd
import random
import math
import os
from scipy.spatial.distance import cdist
from sklearn import metrics
from sklearn.cluster import KMeans
OUTPUTPATH = os.path.join(os.path.dirname(__file__), '../data/')
# apply K-means
def mat_kmeans(df_Feat_Norm, k, outputname, init='k-means++'):
mydata = pd.DataFrame()
mydata = df_Feat_Norm.copy()
kmeans = KMeans(init=init, n_clusters=k, n_init=10, max_iter=300, random_state=0).fit(mydata)
pred = kmeans.labels_
represent = pd.DataFrame()
for j in np.unique(pred):
df_dist = pd.DataFrame(metrics.pairwise.euclidean_distances(mydata[pred == j], mydata[pred == j]), index=mydata[pred == j].index.values, columns=mydata[pred == j].index.values)
reptiv = df_dist.sum().idxmin()
represent = represent.append({'representative':reptiv, 'label':j}, ignore_index=True)
# save hillslope groups as csv file
mydata['pred'] = pred
for r in np.unique(pred):
mydata.loc[mydata['pred'] == r,'rep'] = represent.loc[represent['label'] == r, 'representative'].values[0]
# mydata[['pred', 'rep']].to_csv(OUTPUTPATH + outputname + str(k) + '.csv', header=['label', 'representative'])
# represent.to_csv(OUTPUTPATH + outputname + str(k) + 'rep.csv', index=False, header=True)
mydata_dict = mydata[['pred', 'rep']].to_dict()
mydata_list = [mydata.index.values.tolist()] + [mydata['rep'].tolist()] + [mydata['pred'].tolist()]
return [mydata_list, represent['representative'].tolist(), mydata_dict, kmeans.cluster_centers_]
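# A small usage sketch (synthetic hillslope features; names and k are arbitrary, and
# 'outputname' is only used by the commented-out CSV exports above):
#   df_feats = pd.DataFrame(np.random.rand(20, 3), index=['hs%d' % i for i in range(20)])
#   groups, reps, group_dict, centers = mat_kmeans(df_feats, k=4, outputname='mat_kmeans')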
# calculate RMSE
def mat_rmse(o_path, c_path, df_HS_runtime, out_count):
df_rmse = pd.DataFrame()
arr_ctime = np.empty([0])
output_all = pd.read_csv(c_path + 'output_all_.csv')
output_all = output_all.fillna(0)
# map representatives outputs into all hillslopes
for k in range(1, out_count):
ctime = 0.0
rmse_ = pd.DataFrame()
clust_data = pd.read_csv(c_path + 'mat_kmeans' + str(k) + '.csv')
clust_data.columns = ['hsname', 'label', 'rep']
# output_data = pd.read_csv(o_path + 'output_' + str(k) + '/output_all_.csv')
# output_data = output_data.fillna(0)
        output_mapped = pd.read_csv(c_path + 'output_names.csv')
import numpy as np
import scipy as sp
import scipy.linalg  # needed so that sp.linalg.sqrtm below resolves
import pandas as pd
from sklearn.base import BaseEstimator, RegressorMixin
from modules.KernelRegWrapper import KernelRegWrapper
class DependentKernelReg(BaseEstimator, RegressorMixin):
"""
A sklearn-style NW kernel regression with dependent bandwidth matrix
and multi-variate normal kernel.
"""
def __init__(self, kernel="exp", bw_init="scott"):
self.outp = None
self.regressors = None
self.kernel = kernel
self.bw_init = bw_init
self.kernel_params = None
self.params = []
def fit(self, X, y):
self.regressors = X
self.outp = y
cov_mat = np.cov(X, rowvar=False)
n = X.shape[0]
d = X.shape[1]
H_init = 1
if self.bw_init == "scott":
H_init = 1.06 * n ** (-1. / (d + 4))
elif self.bw_init == "silverman":
H_init = (n * (d+2) / 4.) ** (-1. / (d + 4))
if self.regressors.shape[1] == 1:
H = H_init * np.sqrt(cov_mat)
else:
H = H_init * sp.linalg.sqrtm(cov_mat)
self.params = {"sample_size": n, "no_of_regressors": d, "bw_matrix": H}
if self.kernel == "exp":
if self.regressors.shape[1] == 1:
H_inv = H**(-1)
H_det = H
else:
H_inv = np.linalg.inv(H)
H_det = np.linalg.det(H)
H_const = 1. / ((np.sqrt(2 * np.pi) ** d) * H_det) #0.5))
self.kernel_params = {"H": H, "invH": H_inv, "detH": H_det,
"dim": d, "const": H_const}
return self
def exp_kernel(self, X):
if isinstance(X, pd.DataFrame):
X = X.to_numpy()
if self.regressors.shape[1] == 1:
powa = -0.5 * (self.kernel_params["invH"] ** 2) * (X * X).sum(-1)
else:
xtimesH = np.matmul(X, self.kernel_params["invH"])
powa = -0.5 * (xtimesH * xtimesH).sum(-1) # equiv. to (invH * X)^T (invH *X)
# powa = np.expand_dims(powa, axis=0)
weight = self.kernel_params["const"] * np.exp(powa)
return weight
def predict(self, X):
# preds = np.empty(X.shape[0])
if isinstance(X, pd.DataFrame):
X = X.to_numpy()
def predict_step(i):
x_shifted = np.subtract(self.regressors, i)
k_weights = self.exp_kernel(x_shifted)
pred = np.matmul(k_weights, np.squeeze(self.outp))
pred /= np.sum(k_weights)
return pred
preds = np.apply_along_axis(predict_step, axis=1, arr=X)
if isinstance(self.outp, pd.Series):
preds = pd.Series(preds, name=self.outp.name)
elif isinstance(self.outp, pd.DataFrame):
            preds = pd.DataFrame(preds, columns=self.outp.columns)
        return preds
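# A minimal usage sketch on synthetic data (assumes the module-level imports above resolve;
# the KernelRegWrapper import is not needed for this example):
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X_demo = pd.DataFrame({"x1": rng.normal(size=200), "x2": rng.normal(size=200)})
    y_demo = pd.Series(np.sin(X_demo["x1"]) + 0.1 * rng.normal(size=200), name="y")
    reg = DependentKernelReg(kernel="exp", bw_init="scott").fit(X_demo, y_demo)
    print(reg.predict(X_demo.head()))  # NW predictions at the first five training points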
# -*- coding: utf-8 -*-
#
# License: This module is released under the terms of the LICENSE file
# contained within this applications INSTALL directory
"""
Utility functions for model generation
"""
# -- Coding Conventions
# http://www.python.org/dev/peps/pep-0008/ - Use the Python style guide
# http://sphinx.pocoo.org/rest.html - Use Restructured Text for
# docstrings
# -- Public Imports
import logging
import math
import numpy as np
import pandas as pd
from datetime import datetime
# -- Private Imports
# -- Globals
logger = logging.getLogger(__name__)
dict_wday_name = {
0: 'W-MON',
1: 'W-TUE',
2: 'W-WED',
3: 'W-THU',
4: 'W-FRI',
5: 'W-SAT',
6: 'W-SUN',
}
# -- Exception classes
# -- Functions
def logger_info(msg, data):
# Convenience function for easier log typing
logger.info(msg + '\n%s', data)
def array_transpose(a):
"""
Transpose a 1-D numpy array
:param a: An array with shape (n,)
:type a: numpy.Array
:return: The original array, with shape (n,1)
:rtype: numpy.Array
"""
return a[np.newaxis, :].T
# TODO: rework to support model composition
def model_requires_scaling(model):
"""
Given a :py:class:`anticipy.forecast_models.ForecastModel`
return True if the function requires scaling a_x
:param model: A get_model_<modeltype> function from
:py:mod:`anticipy.model.periodic_models` or
:py:mod:`anticipy.model.aperiodic_models`
:type model: function
:return: True if function is logistic or sigmoidal
:rtype: bool
"""
requires_scaling = model is not None and model.name in [
'logistic',
'sigmoid'
]
return requires_scaling
def apply_a_x_scaling(a_x, model=None, scaling_factor=100.0):
"""
Modify a_x for forecast_models that require it
:param a_x: x axis of time series
:type a_x: numpy array
:param model: a :py:class:`anticipy.forecast_models.ForecastModel`
:type model: function or None
:param scaling_factor: Value used for scaling t_values for logistic models
:type scaling_factor: float
:return: a_x with scaling applied, if required
:rtype: numpy array
"""
if model_requires_scaling(model): # todo: check that this is still useful
a_x = a_x / scaling_factor
return a_x
dict_freq_units_per_year = dict(
A=1.0,
Y=1.0,
D=365.0,
W=52.0,
M=12,
Q=4,
H=24 * 365.0
)
dict_dateoffset_input = dict(
Y='years',
A='years',
M='months',
W='weeks',
D='days',
H='hours'
)
def get_normalized_x_from_date(s_date):
"""Get column of days since Monday of first date"""
date_start = s_date.iloc[0]
# Convert to Monday
date_start = date_start - pd.to_timedelta(date_start.weekday(), unit='D')
s_x = (s_date - date_start).dt.days
return s_x
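# A quick sketch of the normalisation above (hypothetical dates): the first sample,
# Wednesday 2018-01-03, lies 2 days after the Monday of its week, so the weekly series
# maps to [2, 9, 16]:
#   >>> s = pd.Series(pd.to_datetime(['2018-01-03', '2018-01-10', '2018-01-17']))
#   >>> get_normalized_x_from_date(s).tolist()
#   [2, 9, 16]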
def get_s_x_extrapolate(
date_start_actuals,
date_end_actuals,
model=None,
freq=None,
extrapolate_years=2.5,
scaling_factor=100.0,
x_start_actuals=0.):
"""
Return a_x series with DateTimeIndex, covering the date range for the
actuals, plus a forecast period.
:param date_start_actuals: date or numeric index for first actuals sample
:type date_start_actuals: str, datetime, int or float
:param date_end_actuals: date or numeric index for last actuals sample
:type date_end_actuals: str, datetime, int or float
:param extrapolate_years:
:type extrapolate_years: float
:param model:
:type model: function
:param freq: Time unit between samples. Supported units are 'W' for weekly
samples, or 'D' for daily samples. (untested) Any date unit or time
unit accepted by numpy should also work, see
https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.datetime.html#arrays-dtypes-dateunits # noqa
:type freq: basestring
:param shifted_origin: Offset to apply to a_x
:type shifted_origin: int
:param scaling_factor: Value used for scaling a_x for certain model
functions
:type scaling_factor: float
:param x_start_actuals: numeric index for the first actuals sample
:type x_start_actuals: int
:return: Series of floats with DateTimeIndex. To be used as (a_date, a_x)
input for a model function.
:rtype: pandas.Series
The returned series covers the actuals time domain plus a forecast period
lasting extrapolate_years, in years.
    The number of additional samples in the forecast period is the number of
    samples per year for the given frequency (see dict_freq_units_per_year)
    multiplied by extrapolate_years, rounded down.
"""
if isinstance(date_start_actuals, str) or \
isinstance(date_start_actuals, datetime): # Use dates if available
        date_start_actuals = pd.to_datetime(date_start_actuals)
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import abc
from modin.data_management.functions.default_methods import (
DataFrameDefault,
SeriesDefault,
DateTimeDefault,
StrDefault,
BinaryDefault,
ResampleDefault,
RollingDefault,
CatDefault,
GroupByDefault,
)
from pandas.core.dtypes.common import is_scalar
import pandas.core.resample
import pandas
import numpy as np
def _get_axis(axis):
def axis_getter(self):
return self.to_pandas().axes[axis]
return axis_getter
def _set_axis(axis):
def axis_setter(self, labels):
new_qc = DataFrameDefault.register(pandas.DataFrame.set_axis)(
self, axis=axis, labels=labels
)
self.__dict__.update(new_qc.__dict__)
return axis_setter
class BaseQueryCompiler(abc.ABC):
"""Abstract Class that handles the queries to Modin dataframes.
Note: See the Abstract Methods and Fields section immediately below this
for a list of requirements for subclassing this object.
"""
@abc.abstractmethod
def default_to_pandas(self, pandas_op, *args, **kwargs):
"""Default to pandas behavior.
Parameters
----------
pandas_op : callable
            The operation to apply; must be a call compatible with pandas.DataFrame
args
The arguments for the `pandas_op`
kwargs
The keyword arguments for the `pandas_op`
Returns
-------
BaseQueryCompiler
The result of the `pandas_op`, converted back to BaseQueryCompiler
"""
pass
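    # Illustrative sketch only (not part of the original Modin source): a
    # minimal concrete subclass backed by a single pandas.DataFrame could
    # implement this roughly as
    #
    #     def default_to_pandas(self, pandas_op, *args, **kwargs):
    #         result = pandas_op(self.to_pandas(), *args, **kwargs)
    #         # wrapping the result back into a query compiler is
    #         # engine-specific; real implementations dispatch on the result
    #         # type (DataFrame, Series or scalar)
    #         return result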
# Abstract Methods and Fields: Must implement in children classes
    # In some cases, you may be able to use the same implementation for
# some of these abstract methods, but for the sake of generality they are
# treated differently.
lazy_execution = False
# Metadata modification abstract methods
def add_prefix(self, prefix, axis=1):
if axis:
return DataFrameDefault.register(pandas.DataFrame.add_prefix)(
self, prefix=prefix
)
else:
return SeriesDefault.register(pandas.Series.add_prefix)(self, prefix=prefix)
def add_suffix(self, suffix, axis=1):
if axis:
return DataFrameDefault.register(pandas.DataFrame.add_suffix)(
self, suffix=suffix
)
else:
return SeriesDefault.register(pandas.Series.add_suffix)(self, suffix=suffix)
# END Metadata modification abstract methods
# Abstract copy
# For copy, we don't want a situation where we modify the metadata of the
# copies if we end up modifying something here. We copy all of the metadata
# to prevent that.
def copy(self):
return DataFrameDefault.register(pandas.DataFrame.copy)(self)
# END Abstract copy
# Abstract join and append helper functions
def concat(self, axis, other, **kwargs):
"""Concatenates two objects together.
Args:
            axis: The axis to concatenate along (0 for index/rows, 1 for columns).
            other: The other QueryCompiler(s) to concatenate with.
Returns:
Concatenated objects.
"""
concat_join = ["inner", "outer"]
def concat(df, axis, other, **kwargs):
kwargs.pop("join_axes", None)
ignore_index = kwargs.get("ignore_index", False)
if kwargs.get("join", "outer") in concat_join:
if not isinstance(other, list):
other = [other]
other = [df] + other
result = pandas.concat(other, axis=axis, **kwargs)
else:
if isinstance(other, (list, np.ndarray)) and len(other) == 1:
other = other[0]
how = kwargs.pop("join", None)
ignore_index = kwargs.pop("ignore_index", None)
kwargs["how"] = how
result = df.join(other, **kwargs)
if ignore_index:
if axis == 0:
result = result.reset_index(drop=True)
else:
result.columns = pandas.RangeIndex(len(result.columns))
return result
return DataFrameDefault.register(concat)(self, axis=axis, other=other, **kwargs)
# END Abstract join and append helper functions
# Data Management Methods
@abc.abstractmethod
def free(self):
"""In the future, this will hopefully trigger a cleanup of this object."""
# TODO create a way to clean up this object.
pass
# END Data Management Methods
# To/From Pandas
@abc.abstractmethod
def to_pandas(self):
"""Converts Modin DataFrame to Pandas DataFrame.
Returns:
Pandas DataFrame of the QueryCompiler.
"""
pass
@classmethod
@abc.abstractmethod
def from_pandas(cls, df, data_cls):
"""Improve simple Pandas DataFrame to an advanced and superior Modin DataFrame.
Parameters
----------
df: pandas.DataFrame
The pandas DataFrame to convert from.
data_cls :
Modin DataFrame object to convert to.
Returns
-------
BaseQueryCompiler
QueryCompiler containing data from the Pandas DataFrame.
"""
pass
# END To/From Pandas
# From Arrow
@classmethod
@abc.abstractmethod
def from_arrow(cls, at, data_cls):
"""Improve simple Arrow Table to an advanced and superior Modin DataFrame.
Parameters
----------
at : Arrow Table
The Arrow Table to convert from.
data_cls :
Modin DataFrame object to convert to.
Returns
-------
BaseQueryCompiler
            QueryCompiler containing data from the Arrow Table.
"""
pass
# END From Arrow
# To NumPy
def to_numpy(self, **kwargs):
"""
Converts Modin DataFrame to NumPy array.
Returns
-------
NumPy array of the QueryCompiler.
"""
return DataFrameDefault.register(pandas.DataFrame.to_numpy)(self, **kwargs)
# END To NumPy
# Abstract inter-data operations (e.g. add, sub)
# These operations require two DataFrames and will change the shape of the
# data if the index objects don't match. An outer join + op is performed,
# such that columns/rows that don't have an index on the other DataFrame
# result in NaN values.
def add(self, other, **kwargs):
return BinaryDefault.register(pandas.DataFrame.add)(self, other=other, **kwargs)
def combine(self, other, **kwargs):
return BinaryDefault.register(pandas.DataFrame.combine)(
self, other=other, **kwargs
)
def combine_first(self, other, **kwargs):
return BinaryDefault.register(pandas.DataFrame.combine_first)(
self, other=other, **kwargs
)
def eq(self, other, **kwargs):
return BinaryDefault.register(pandas.DataFrame.eq)(self, other=other, **kwargs)
def floordiv(self, other, **kwargs):
return BinaryDefault.register(pandas.DataFrame.floordiv)(
self, other=other, **kwargs
)
def ge(self, other, **kwargs):
return BinaryDefault.register(pandas.DataFrame.ge)(self, other=other, **kwargs)
def gt(self, other, **kwargs):
return BinaryDefault.register(pandas.DataFrame.gt)(self, other=other, **kwargs)
def le(self, other, **kwargs):
return BinaryDefault.register(pandas.DataFrame.le)(self, other=other, **kwargs)
def lt(self, other, **kwargs):
return BinaryDefault.register(pandas.DataFrame.lt)(self, other=other, **kwargs)
def mod(self, other, **kwargs):
return BinaryDefault.register(pandas.DataFrame.mod)(self, other=other, **kwargs)
def mul(self, other, **kwargs):
return BinaryDefault.register(pandas.DataFrame.mul)(self, other=other, **kwargs)
def corr(self, **kwargs):
return DataFrameDefault.register(pandas.DataFrame.corr)(self, **kwargs)
def cov(self, **kwargs):
return DataFrameDefault.register(pandas.DataFrame.cov)(self, **kwargs)
def dot(self, other, **kwargs):
if kwargs.get("squeeze_self", False):
applyier = pandas.Series.dot
else:
applyier = pandas.DataFrame.dot
return BinaryDefault.register(applyier)(self, other=other, **kwargs)
def ne(self, other, **kwargs):
return BinaryDefault.register(pandas.DataFrame.ne)(self, other=other, **kwargs)
def pow(self, other, **kwargs):
return BinaryDefault.register(pandas.DataFrame.pow)(self, other=other, **kwargs)
def rfloordiv(self, other, **kwargs):
return BinaryDefault.register(pandas.DataFrame.rfloordiv)(
self, other=other, **kwargs
)
def rmod(self, other, **kwargs):
return BinaryDefault.register(pandas.DataFrame.rmod)(
self, other=other, **kwargs
)
def rpow(self, other, **kwargs):
return BinaryDefault.register(pandas.DataFrame.rpow)(
self, other=other, **kwargs
)
def rsub(self, other, **kwargs):
return BinaryDefault.register(pandas.DataFrame.rsub)(
self, other=other, **kwargs
)
def rtruediv(self, other, **kwargs):
return BinaryDefault.register(pandas.DataFrame.rtruediv)(
self, other=other, **kwargs
)
def sub(self, other, **kwargs):
return BinaryDefault.register(pandas.DataFrame.sub)(self, other=other, **kwargs)
def truediv(self, other, **kwargs):
return BinaryDefault.register(pandas.DataFrame.truediv)(
self, other=other, **kwargs
)
def __and__(self, other, **kwargs):
return BinaryDefault.register(pandas.DataFrame.__and__)(
self, other=other, **kwargs
)
def __or__(self, other, **kwargs):
return BinaryDefault.register(pandas.DataFrame.__or__)(
self, other=other, **kwargs
)
def __rand__(self, other, **kwargs):
return BinaryDefault.register(pandas.DataFrame.__rand__)(
self, other=other, **kwargs
)
def __ror__(self, other, **kwargs):
return BinaryDefault.register(pandas.DataFrame.__ror__)(
self, other=other, **kwargs
)
def __rxor__(self, other, **kwargs):
return BinaryDefault.register(pandas.DataFrame.__rxor__)(
self, other=other, **kwargs
)
def __xor__(self, other, **kwargs):
return BinaryDefault.register(pandas.DataFrame.__xor__)(
self, other=other, **kwargs
)
def df_update(self, other, **kwargs):
return BinaryDefault.register(pandas.DataFrame.update, inplace=True)(
self, other=other, **kwargs
)
def series_update(self, other, **kwargs):
return BinaryDefault.register(pandas.Series.update, inplace=True)(
self, other=other, squeeze_self=True, squeeze_other=True, **kwargs
)
def clip(self, lower, upper, **kwargs):
return DataFrameDefault.register(pandas.DataFrame.clip)(
self, lower=lower, upper=upper, **kwargs
)
def where(self, cond, other, **kwargs):
"""Gets values from this manager where cond is true else from other.
Args:
            cond: Condition on which to evaluate values.
            other: Values to use where cond is False.
Returns:
New QueryCompiler with updated data and index.
"""
return DataFrameDefault.register(pandas.DataFrame.where)(
self, cond=cond, other=other, **kwargs
)
def merge(self, right, **kwargs):
"""
Merge DataFrame or named Series objects with a database-style join.
Parameters
----------
right : BaseQueryCompiler
The query compiler of the right DataFrame to merge with.
Returns
-------
BaseQueryCompiler
A new query compiler that contains result of the merge.
Notes
-----
See pd.merge or pd.DataFrame.merge for more info on kwargs.
"""
return DataFrameDefault.register(pandas.DataFrame.merge)(
self, right=right, **kwargs
)
def join(self, right, **kwargs):
"""
Join columns of another DataFrame.
Parameters
----------
right : BaseQueryCompiler
The query compiler of the right DataFrame to join with.
Returns
-------
BaseQueryCompiler
A new query compiler that contains result of the join.
Notes
-----
See pd.DataFrame.join for more info on kwargs.
"""
return DataFrameDefault.register(pandas.DataFrame.join)(self, right, **kwargs)
# END Abstract inter-data operations
# Abstract Transpose
def transpose(self, *args, **kwargs):
"""Transposes this QueryCompiler.
Returns:
Transposed new QueryCompiler.
"""
return DataFrameDefault.register(pandas.DataFrame.transpose)(
self, *args, **kwargs
)
def columnarize(self):
"""
Transposes this QueryCompiler if it has a single row but multiple columns.
This method should be called for QueryCompilers representing a Series object,
i.e. self.is_series_like() should be True.
Returns
-------
BaseQueryCompiler
Transposed new QueryCompiler or self.
"""
if len(self.columns) != 1 or (
len(self.index) == 1 and self.index[0] == "__reduced__"
):
return self.transpose()
return self
def is_series_like(self):
"""Return True if QueryCompiler has a single column or row"""
return len(self.columns) == 1 or len(self.index) == 1
# END Abstract Transpose
# Abstract reindex/reset_index (may shuffle data)
def reindex(self, axis, labels, **kwargs):
"""Fits a new index for this Manger.
Args:
axis: The axis index object to target the reindex on.
labels: New labels to conform 'axis' on to.
Returns:
New QueryCompiler with updated data and new index.
"""
return DataFrameDefault.register(pandas.DataFrame.reindex)(
self, axis=axis, labels=labels, **kwargs
)
def reset_index(self, **kwargs):
"""Removes all levels from index and sets a default level_0 index.
Returns:
New QueryCompiler with updated data and reset index.
"""
return DataFrameDefault.register(pandas.DataFrame.reset_index)(self, **kwargs)
# END Abstract reindex/reset_index
# Full Reduce operations
#
# These operations result in a reduced dimensionality of data.
# Currently, this means a Pandas Series will be returned, but in the future
# we will implement a Distributed Series, and this will be returned
# instead.
def is_monotonic(self):
"""Return boolean if values in the object are monotonic_increasing.
Returns
-------
bool
"""
return SeriesDefault.register(pandas.Series.is_monotonic)(self)
def is_monotonic_decreasing(self):
"""Return boolean if values in the object are monotonic_decreasing.
Returns
-------
bool
"""
return SeriesDefault.register(pandas.Series.is_monotonic_decreasing)(self)
def count(self, **kwargs):
"""Counts the number of non-NaN objects for each column or row.
Return:
Pandas series containing counts of non-NaN objects from each column or row.
"""
return DataFrameDefault.register(pandas.DataFrame.count)(self, **kwargs)
def max(self, **kwargs):
"""Returns the maximum value for each column or row.
Return:
Pandas series with the maximum values from each column or row.
"""
return DataFrameDefault.register(pandas.DataFrame.max)(self, **kwargs)
def mean(self, **kwargs):
"""Returns the mean for each numerical column or row.
Return:
Pandas series containing the mean from each numerical column or row.
"""
return DataFrameDefault.register(pandas.DataFrame.mean)(self, **kwargs)
def min(self, **kwargs):
"""Returns the minimum from each column or row.
Return:
Pandas series with the minimum value from each column or row.
"""
return DataFrameDefault.register(pandas.DataFrame.min)(self, **kwargs)
def prod(self, **kwargs):
"""Returns the product of each numerical column or row.
Return:
Pandas series with the product of each numerical column or row.
"""
return DataFrameDefault.register(pandas.DataFrame.prod)(self, **kwargs)
def sum(self, **kwargs):
"""Returns the sum of each numerical column or row.
Return:
Pandas series with the sum of each numerical column or row.
"""
return DataFrameDefault.register(pandas.DataFrame.sum)(self, **kwargs)
def to_datetime(self, *args, **kwargs):
return SeriesDefault.register(pandas.to_datetime)(self, *args, **kwargs)
# END Abstract full Reduce operations
# Abstract map partitions operations
# These operations are operations that apply a function to every partition.
def abs(self):
return DataFrameDefault.register(pandas.DataFrame.abs)(self)
def applymap(self, func):
return DataFrameDefault.register(pandas.DataFrame.applymap)(self, func=func)
def conj(self, **kwargs):
"""
Return the complex conjugate, element-wise.
The complex conjugate of a complex number is obtained
by changing the sign of its imaginary part.
"""
def conj(df, *args, **kwargs):
return pandas.DataFrame(np.conj(df))
return DataFrameDefault.register(conj)(self, **kwargs)
def isin(self, **kwargs):
return DataFrameDefault.register(pandas.DataFrame.isin)(self, **kwargs)
def isna(self):
return DataFrameDefault.register(pandas.DataFrame.isna)(self)
def negative(self, **kwargs):
return DataFrameDefault.register(pandas.DataFrame.__neg__)(self, **kwargs)
def notna(self):
return DataFrameDefault.register(pandas.DataFrame.notna)(self)
def round(self, **kwargs):
return DataFrameDefault.register(pandas.DataFrame.round)(self, **kwargs)
def replace(self, **kwargs):
return DataFrameDefault.register(pandas.DataFrame.replace)(self, **kwargs)
def series_view(self, **kwargs):
return SeriesDefault.register(pandas.Series.view)(self, **kwargs)
def to_numeric(self, *args, **kwargs):
return SeriesDefault.register(pandas.to_numeric)(self, *args, **kwargs)
def unique(self, **kwargs):
return SeriesDefault.register(pandas.Series.unique)(self, **kwargs)
def searchsorted(self, **kwargs):
return SeriesDefault.register(pandas.Series.searchsorted)(self, **kwargs)
# END Abstract map partitions operations
def value_counts(self, **kwargs):
return SeriesDefault.register(pandas.Series.value_counts)(self, **kwargs)
def stack(self, level, dropna):
return DataFrameDefault.register(pandas.DataFrame.stack)(
self, level=level, dropna=dropna
)
# Abstract map partitions across select indices
def astype(self, col_dtypes, **kwargs):
"""Converts columns dtypes to given dtypes.
Args:
col_dtypes: Dictionary of {col: dtype,...} where col is the column
name and dtype is a numpy dtype.
Returns:
DataFrame with updated dtypes.
"""
return DataFrameDefault.register(pandas.DataFrame.astype)(
self, dtype=col_dtypes, **kwargs
)
@property
def dtypes(self):
return self.to_pandas().dtypes
# END Abstract map partitions across select indices
# Abstract column/row partitions reduce operations
#
# These operations result in a reduced dimensionality of data.
# Currently, this means a Pandas Series will be returned, but in the future
# we will implement a Distributed Series, and this will be returned
# instead.
def all(self, **kwargs):
"""Returns whether all the elements are true, potentially over an axis.
Return:
Pandas Series containing boolean values or boolean.
"""
return DataFrameDefault.register(pandas.DataFrame.all)(self, **kwargs)
def any(self, **kwargs):
"""Returns whether any the elements are true, potentially over an axis.
Return:
Pandas Series containing boolean values or boolean.
"""
return DataFrameDefault.register(pandas.DataFrame.any)(self, **kwargs)
def first_valid_index(self):
"""Returns index of first non-NaN/NULL value.
Return:
Scalar of index name.
"""
return (
DataFrameDefault.register(pandas.DataFrame.first_valid_index)(self)
.to_pandas()
.squeeze()
)
def idxmax(self, **kwargs):
"""Returns the first occurance of the maximum over requested axis.
Returns:
Series containing the maximum of each column or axis.
"""
return DataFrameDefault.register(pandas.DataFrame.idxmax)(self, **kwargs)
def idxmin(self, **kwargs):
"""Returns the first occurance of the minimum over requested axis.
Returns:
Series containing the minimum of each column or axis.
"""
return DataFrameDefault.register(pandas.DataFrame.idxmin)(self, **kwargs)
def last_valid_index(self):
"""Returns index of last non-NaN/NULL value.
Return:
Scalar of index name.
"""
return (
DataFrameDefault.register(pandas.DataFrame.last_valid_index)(self)
.to_pandas()
.squeeze()
)
def median(self, **kwargs):
"""Returns median of each column or row.
Returns:
Series containing the median of each column or row.
"""
return DataFrameDefault.register(pandas.DataFrame.median)(self, **kwargs)
def memory_usage(self, **kwargs):
"""Returns the memory usage of each column.
Returns:
Series containing the memory usage of each column.
"""
return DataFrameDefault.register(pandas.DataFrame.memory_usage)(self, **kwargs)
def nunique(self, **kwargs):
"""Returns the number of unique items over each column or row.
Returns:
Series of ints indexed by column or index names.
"""
return DataFrameDefault.register(pandas.DataFrame.nunique)(self, **kwargs)
def quantile_for_single_value(self, **kwargs):
"""Returns quantile of each column or row.
Returns:
Series containing the quantile of each column or row.
"""
return DataFrameDefault.register(pandas.DataFrame.quantile)(self, **kwargs)
def skew(self, **kwargs):
"""Returns skew of each column or row.
Returns:
Series containing the skew of each column or row.
"""
return DataFrameDefault.register(pandas.DataFrame.skew)(self, **kwargs)
def sem(self, **kwargs):
"""
        Returns the standard error of the mean over the requested axis.
Returns
-------
BaseQueryCompiler
            QueryCompiler containing the standard error of the mean over the requested axis.
"""
return DataFrameDefault.register(pandas.DataFrame.sem)(self, **kwargs)
def std(self, **kwargs):
"""Returns standard deviation of each column or row.
Returns:
Series containing the standard deviation of each column or row.
"""
return DataFrameDefault.register(pandas.DataFrame.std)(self, **kwargs)
def var(self, **kwargs):
"""Returns variance of each column or row.
Returns:
Series containing the variance of each column or row.
"""
return DataFrameDefault.register(pandas.DataFrame.var)(self, **kwargs)
# END Abstract column/row partitions reduce operations
# Abstract column/row partitions reduce operations over select indices
#
# These operations result in a reduced dimensionality of data.
# Currently, this means a Pandas Series will be returned, but in the future
# we will implement a Distributed Series, and this will be returned
# instead.
def describe(self, **kwargs):
"""Generates descriptive statistics.
Returns:
DataFrame object containing the descriptive statistics of the DataFrame.
"""
return DataFrameDefault.register(pandas.DataFrame.describe)(self, **kwargs)
# END Abstract column/row partitions reduce operations over select indices
# Map across rows/columns
# These operations require some global knowledge of the full column/row
# that is being operated on. This means that we have to put all of that
# data in the same place.
def cumsum(self, **kwargs):
return DataFrameDefault.register(pandas.DataFrame.cumsum)(self, **kwargs)
def cummax(self, **kwargs):
return DataFrameDefault.register(pandas.DataFrame.cummax)(self, **kwargs)
def cummin(self, **kwargs):
return DataFrameDefault.register(pandas.DataFrame.cummin)(self, **kwargs)
def cumprod(self, **kwargs):
return DataFrameDefault.register(pandas.DataFrame.cumprod)(self, **kwargs)
def diff(self, **kwargs):
return DataFrameDefault.register(pandas.DataFrame.diff)(self, **kwargs)
def dropna(self, **kwargs):
"""Returns a new QueryCompiler with null values dropped along given axis.
Return:
New QueryCompiler
"""
return DataFrameDefault.register(pandas.DataFrame.dropna)(self, **kwargs)
def nlargest(self, n=5, columns=None, keep="first"):
if columns is None:
return SeriesDefault.register(pandas.Series.nlargest)(self, n=n, keep=keep)
else:
return DataFrameDefault.register(pandas.DataFrame.nlargest)(
self, n=n, columns=columns, keep=keep
)
def nsmallest(self, n=5, columns=None, keep="first"):
if columns is None:
return SeriesDefault.register(pandas.Series.nsmallest)(self, n=n, keep=keep)
else:
return DataFrameDefault.register(pandas.DataFrame.nsmallest)(
self, n=n, columns=columns, keep=keep
)
def eval(self, expr, **kwargs):
"""Returns a new QueryCompiler with expr evaluated on columns.
Args:
expr: The string expression to evaluate.
Returns:
A new QueryCompiler with new columns after applying expr.
"""
return DataFrameDefault.register(pandas.DataFrame.eval)(
self, expr=expr, **kwargs
)
def mode(self, **kwargs):
"""Returns a new QueryCompiler with modes calculated for each label along given axis.
Returns:
A new QueryCompiler with modes calculated.
"""
return DataFrameDefault.register(pandas.DataFrame.mode)(self, **kwargs)
def fillna(self, **kwargs):
"""Replaces NaN values with the method provided.
Returns:
A new QueryCompiler with null values filled.
"""
return DataFrameDefault.register(pandas.DataFrame.fillna)(self, **kwargs)
def query(self, expr, **kwargs):
"""Query columns of the QueryCompiler with a boolean expression.
Args:
expr: Boolean expression to query the columns with.
Returns:
QueryCompiler containing the rows where the boolean expression is satisfied.
"""
return DataFrameDefault.register(pandas.DataFrame.query)(
self, expr=expr, **kwargs
)
def rank(self, **kwargs):
"""Computes numerical rank along axis. Equal values are set to the average.
Returns:
QueryCompiler containing the ranks of the values along an axis.
"""
return DataFrameDefault.register(pandas.DataFrame.rank)(self, **kwargs)
def sort_index(self, **kwargs):
"""Sorts the data with respect to either the columns or the indices.
Returns:
QueryCompiler containing the data sorted by columns or indices.
"""
return DataFrameDefault.register(pandas.DataFrame.sort_index)(self, **kwargs)
def melt(self, *args, **kwargs):
return DataFrameDefault.register(pandas.DataFrame.melt)(self, *args, **kwargs)
def sort_columns_by_row_values(self, rows, ascending=True, **kwargs):
return DataFrameDefault.register(pandas.DataFrame.sort_values)(
self, by=rows, axis=1, ascending=ascending, **kwargs
)
def sort_rows_by_column_values(self, rows, ascending=True, **kwargs):
return DataFrameDefault.register(pandas.DataFrame.sort_values)(
self, by=rows, axis=0, ascending=ascending, **kwargs
)
# END Abstract map across rows/columns
# Map across rows/columns
# These operations require some global knowledge of the full column/row
# that is being operated on. This means that we have to put all of that
# data in the same place.
def quantile_for_list_of_values(self, **kwargs):
"""Returns Manager containing quantiles along an axis for numeric columns.
Returns:
QueryCompiler containing quantiles of original QueryCompiler along an axis.
"""
return DataFrameDefault.register(pandas.DataFrame.quantile)(self, **kwargs)
# END Abstract map across rows/columns
# Abstract __getitem__ methods
def getitem_array(self, key):
"""
Get column or row data specified by key.
Parameters
----------
key : BaseQueryCompiler, numpy.ndarray, pandas.Index or list
Target numeric indices or labels by which to retrieve data.
Returns
-------
BaseQueryCompiler
A new Query Compiler.
"""
def getitem_array(df, key):
return df[key]
return DataFrameDefault.register(getitem_array)(self, key)
def getitem_column_array(self, key, numeric=False):
"""Get column data for target labels.
Args:
key: Target labels by which to retrieve data.
numeric: A boolean representing whether or not the key passed in represents
the numeric index or the named index.
Returns:
A new Query Compiler.
"""
def get_column(df, key):
if numeric:
return df.iloc[:, key]
else:
return df[key]
return DataFrameDefault.register(get_column)(self, key=key)
def getitem_row_array(self, key):
"""Get row data for target labels.
Args:
key: Target numeric indices by which to retrieve data.
Returns:
A new Query Compiler.
"""
def get_row(df, key):
return df.iloc[key]
return DataFrameDefault.register(get_row)(self, key=key)
# END Abstract __getitem__ methods
# Abstract insert
# This method changes the shape of the resulting data. In Pandas, this
# operation is always inplace, but this object is immutable, so we just
# return a new one from here and let the front end handle the inplace
# update.
def insert(self, loc, column, value):
"""Insert new column data.
Args:
loc: Insertion index.
column: Column labels to insert.
value: Dtype object values to insert.
Returns:
A new QueryCompiler with new data inserted.
"""
return DataFrameDefault.register(pandas.DataFrame.insert, inplace=True)(
self, loc=loc, column=column, value=value
)
# END Abstract insert
# Abstract drop
def drop(self, index=None, columns=None):
"""Remove row data for target index and columns.
Args:
index: Target index to drop.
columns: Target columns to drop.
Returns:
A new QueryCompiler.
"""
if index is None and columns is None:
return self
else:
return DataFrameDefault.register(pandas.DataFrame.drop)(
self, index=index, columns=columns
)
# END drop
# UDF (apply and agg) methods
# There is a wide range of behaviors that are supported, so a lot of the
# logic can get a bit convoluted.
def apply(self, func, axis, *args, **kwargs):
"""Apply func across given axis.
Args:
func: The function to apply.
axis: Target axis to apply the function along.
Returns:
A new QueryCompiler.
"""
return DataFrameDefault.register(pandas.DataFrame.apply)(
self, func=func, axis=axis, *args, **kwargs
)
# END UDF
# Manual Partitioning methods (e.g. merge, groupby)
# These methods require some sort of manual partitioning due to their
# nature. They require certain data to exist on the same partition, and
# after the shuffle, there should be only a local map required.
def groupby_count(
self,
by,
axis,
groupby_args,
map_args,
reduce_args=None,
numeric_only=True,
drop=False,
):
"""Perform a groupby count.
Parameters
----------
by : BaseQueryCompiler
The query compiler object to groupby.
axis : 0 or 1
The axis to groupby. Must be 0 currently.
groupby_args : dict
The arguments for the groupby component.
map_args : dict
The arguments for the `map_func`.
reduce_args : dict
The arguments for `reduce_func`.
numeric_only : bool
Whether to drop non-numeric columns.
drop : bool
Whether the data in `by` was dropped.
Returns
-------
BaseQueryCompiler
"""
return GroupByDefault.register(pandas.core.groupby.DataFrameGroupBy.count)(
self,
by=by,
axis=axis,
groupby_args=groupby_args,
map_args=map_args,
reduce_args=reduce_args,
numeric_only=numeric_only,
drop=drop,
)
def groupby_any(
self,
by,
axis,
groupby_args,
map_args,
reduce_args=None,
numeric_only=True,
drop=False,
):
"""Perform a groupby any.
Parameters
----------
by : BaseQueryCompiler
The query compiler object to groupby.
axis : 0 or 1
The axis to groupby. Must be 0 currently.
groupby_args : dict
The arguments for the groupby component.
map_args : dict
The arguments for the `map_func`.
reduce_args : dict
The arguments for `reduce_func`.
numeric_only : bool
Whether to drop non-numeric columns.
drop : bool
Whether the data in `by` was dropped.
Returns
-------
BaseQueryCompiler
"""
return GroupByDefault.register(pandas.core.groupby.DataFrameGroupBy.any)(
self,
by=by,
axis=axis,
groupby_args=groupby_args,
map_args=map_args,
reduce_args=reduce_args,
numeric_only=numeric_only,
drop=drop,
)
def groupby_min(
self,
by,
axis,
groupby_args,
map_args,
reduce_args=None,
numeric_only=True,
drop=False,
):
"""Perform a groupby min.
Parameters
----------
by : BaseQueryCompiler
The query compiler object to groupby.
axis : 0 or 1
The axis to groupby. Must be 0 currently.
groupby_args : dict
The arguments for the groupby component.
map_args : dict
The arguments for the `map_func`.
reduce_args : dict
The arguments for `reduce_func`.
numeric_only : bool
Whether to drop non-numeric columns.
drop : bool
Whether the data in `by` was dropped.
Returns
-------
BaseQueryCompiler
"""
return GroupByDefault.register(pandas.core.groupby.DataFrameGroupBy.min)(
self,
by=by,
axis=axis,
groupby_args=groupby_args,
map_args=map_args,
reduce_args=reduce_args,
numeric_only=numeric_only,
drop=drop,
)
def groupby_prod(
self,
by,
axis,
groupby_args,
map_args,
reduce_args=None,
numeric_only=True,
drop=False,
):
"""Perform a groupby prod.
Parameters
----------
by : BaseQueryCompiler
The query compiler object to groupby.
axis : 0 or 1
The axis to groupby. Must be 0 currently.
groupby_args : dict
The arguments for the groupby component.
map_args : dict
The arguments for the `map_func`.
reduce_args : dict
The arguments for `reduce_func`.
numeric_only : bool
Whether to drop non-numeric columns.
drop : bool
Whether the data in `by` was dropped.
Returns
-------
BaseQueryCompiler
"""
return GroupByDefault.register(pandas.core.groupby.DataFrameGroupBy.prod)(
self,
by=by,
axis=axis,
groupby_args=groupby_args,
map_args=map_args,
reduce_args=reduce_args,
numeric_only=numeric_only,
drop=drop,
)
def groupby_max(
self,
by,
axis,
groupby_args,
map_args,
reduce_args=None,
numeric_only=True,
drop=False,
):
"""Perform a groupby max.
Parameters
----------
by : BaseQueryCompiler
The query compiler object to groupby.
axis : 0 or 1
The axis to groupby. Must be 0 currently.
groupby_args : dict
The arguments for the groupby component.
map_args : dict
The arguments for the `map_func`.
reduce_args : dict
The arguments for `reduce_func`.
numeric_only : bool
Whether to drop non-numeric columns.
drop : bool
Whether the data in `by` was dropped.
Returns
-------
BaseQueryCompiler
"""
return GroupByDefault.register(pandas.core.groupby.DataFrameGroupBy.max)(
self,
by=by,
axis=axis,
groupby_args=groupby_args,
map_args=map_args,
reduce_args=reduce_args,
numeric_only=numeric_only,
drop=drop,
)
def groupby_all(
self,
by,
axis,
groupby_args,
map_args,
reduce_args=None,
numeric_only=True,
drop=False,
):
"""Perform a groupby all.
Parameters
----------
by : BaseQueryCompiler
The query compiler object to groupby.
axis : 0 or 1
The axis to groupby. Must be 0 currently.
groupby_args : dict
The arguments for the groupby component.
map_args : dict
The arguments for the `map_func`.
reduce_args : dict
The arguments for `reduce_func`.
numeric_only : bool
Whether to drop non-numeric columns.
drop : bool
Whether the data in `by` was dropped.
Returns
-------
BaseQueryCompiler
"""
return GroupByDefault.register(pandas.core.groupby.DataFrameGroupBy.all)(
self,
by=by,
axis=axis,
groupby_args=groupby_args,
map_args=map_args,
reduce_args=reduce_args,
numeric_only=numeric_only,
drop=drop,
)
def groupby_sum(
self,
by,
axis,
groupby_args,
map_args,
reduce_args=None,
numeric_only=True,
drop=False,
):
"""Perform a groupby sum.
Parameters
----------
by : BaseQueryCompiler
The query compiler object to groupby.
axis : 0 or 1
The axis to groupby. Must be 0 currently.
groupby_args : dict
The arguments for the groupby component.
map_args : dict
The arguments for the `map_func`.
reduce_args : dict
The arguments for `reduce_func`.
numeric_only : bool
Whether to drop non-numeric columns.
drop : bool
Whether the data in `by` was dropped.
Returns
-------
BaseQueryCompiler
"""
return GroupByDefault.register(pandas.core.groupby.DataFrameGroupBy.sum)(
self,
by=by,
axis=axis,
groupby_args=groupby_args,
map_args=map_args,
reduce_args=reduce_args,
numeric_only=numeric_only,
drop=drop,
)
def groupby_size(
self,
by,
axis,
groupby_args,
map_args,
reduce_args=None,
numeric_only=True,
drop=False,
):
"""Perform a groupby size.
Parameters
----------
by : BaseQueryCompiler
The query compiler object to groupby.
axis : 0 or 1
The axis to groupby. Must be 0 currently.
groupby_args : dict
The arguments for the groupby component.
map_args : dict
The arguments for the `map_func`.
reduce_args : dict
The arguments for `reduce_func`.
numeric_only : bool
Whether to drop non-numeric columns.
drop : bool
Whether the data in `by` was dropped.
Returns
-------
BaseQueryCompiler
"""
return GroupByDefault.register(pandas.core.groupby.DataFrameGroupBy.size)(
self,
by=by,
axis=axis,
groupby_args=groupby_args,
map_args=map_args,
reduce_args=reduce_args,
numeric_only=numeric_only,
drop=drop,
)
def groupby_agg(self, by, axis, agg_func, groupby_args, agg_args, drop=False):
return GroupByDefault.register(pandas.core.groupby.DataFrameGroupBy.aggregate)(
self,
by=by,
axis=axis,
agg_func=agg_func,
groupby_args=groupby_args,
agg_args=agg_args,
drop=drop,
)
def groupby_dict_agg(self, by, func_dict, groupby_args, agg_args, drop=False):
return GroupByDefault.register(pandas.core.groupby.DataFrameGroupBy.aggregate)(
self,
by=by,
func_dict=func_dict,
groupby_args=groupby_args,
agg_args=agg_args,
drop=drop,
)
# END Manual Partitioning methods
def unstack(self, level, fill_value):
return DataFrameDefault.register(pandas.DataFrame.unstack)(
self, level=level, fill_value=fill_value
)
def pivot(self, index, columns, values):
return DataFrameDefault.register(pandas.DataFrame.pivot)(
self, index=index, columns=columns, values=values
)
def pivot_table(
self,
index,
values,
columns,
aggfunc,
fill_value,
margins,
dropna,
margins_name,
observed,
):
return DataFrameDefault.register(pandas.DataFrame.pivot_table)(
self,
index=index,
values=values,
columns=columns,
aggfunc=aggfunc,
fill_value=fill_value,
margins=margins,
dropna=dropna,
margins_name=margins_name,
observed=observed,
)
def get_dummies(self, columns, **kwargs):
"""Convert categorical variables to dummy variables for certain columns.
Args:
columns: The columns to convert.
Returns:
A new QueryCompiler.
"""
def get_dummies(df, columns, **kwargs):
            return pandas.get_dummies(df, columns=columns, **kwargs)
        return DataFrameDefault.register(get_dummies)(self, columns=columns, **kwargs)
import glob, os
import pandas as pd
from Exp_Main.models import OCA, ExpBase, ExpPath, RSD
from Analysis.models import OszAnalysis
from Exp_Sub.models import LSP, MFR
from dbfread import DBF
from Lab_Misc import General
import datetime
from django.apps import apps
import numpy as np
from django.utils import timezone
cwd = os.getcwd()
rel_path = General.get_BasePath()
def Load_from_Model(ModelName, pk):
if ModelName == 'OCA':
return Load_OCA(pk)
if ModelName == 'RSD':
return Load_RSD(pk)
if ModelName == 'LSP':
return Load_LSP(pk)
if ModelName == 'MFL':
return Load_MFL(pk)
if ModelName == 'MFR':
return Load_MFR(pk)
if ModelName == 'HME':
return Load_HME(pk)
if ModelName == 'SEL':
return Load_SEL(pk)
def Load_SEL(pk):
entry = General.get_in_full_model(pk)
file = os.path.join( rel_path, entry.Link_XLSX)
df = pd.read_excel(file, 'Tabelle1')
    new_vals = df[df > 1] / 1000000  # correct for wrong format
Curr_Dash = entry.Dash
df.update(new_vals)
df["Time (min.)"] = Curr_Dash.Start_datetime_elli + pd.TimedeltaIndex(df["Time (min.)"], unit='m')
df["time"] = df["Time (min.)"].dt.tz_convert(timezone.get_current_timezone())
df['time_loc'] = df["time"]
return df
def get_subs_in_dic(pk):
main_entry = General.get_in_full_model(pk)
Sub_Exps = main_entry.Sub_Exp.all()
data = {}
for Sub_Exp in Sub_Exps:
Sub_Exp = General.get_in_full_model_sub(Sub_Exp.pk)
data_sub = Load_from_Model(Sub_Exp.Device.Abbrev, Sub_Exp.id)
try:
Sub_Exp.Gas.first().Name
data[Sub_Exp.Name + '_' + Sub_Exp.Gas.first().Name] = data_sub
except:
data[Sub_Exp.Name] = data_sub
return data
def get_subs_by_model(pk, sub_model):
# sub_model = 'mfr'
main_entry = General.get_in_full_model(pk)
main_model = str.lower(main_entry.Device.Abbrev)
model = apps.get_model('Exp_Sub', sub_model)
data = {}
mfrs = model.objects.filter(**{main_model: ExpBase.objects.get(id = pk)}).all()
for mfr in mfrs:
try:
mfr.Gas.first().Name
data[mfr.Name + '_' + mfr.Gas.first().Name] = Load_from_Model(mfr.Device.Abbrev, mfr.id)
except:
data[mfr.Name] = Load_from_Model(mfr.Device.Abbrev, mfr.id)
return data
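# Illustrative usage only (hypothetical primary key; the 'mfr' sub-model name
# is taken from the commented hint above):
#
#     mfr_data = get_subs_by_model(42, 'mfr')
#     for name, df in mfr_data.items():
#         print(name, len(df))
#
# Each value is the dataframe returned by the matching Load_* function.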
def Load_RSD_subs(pk):
Gases = {}
mfrs = MFR.objects.filter(rsd = ExpBase.objects.get(id = pk)).all()
for mfr in mfrs:
Gases[mfr.Gas.first().Name] = Load_MFR(mfr.id)
Pump = {}
lsps = LSP.objects.filter(rsd = ExpBase.objects.get(id = pk)).all()
for lsp in lsps:
Pump[lsp.Name] = Load_LSP(lsp.id)
if len(Gases)>0:
Gases = pd.concat(Gases)
if len(Pump)>0:
Pump = pd.concat(Pump)
return Gases, Pump
def Load_RSD(pk):
cwd = os.getcwd()
entry = General.get_in_full_model(pk)
os.chdir(os.path.join(General.get_BasePath(),entry.Link_Data))
Drops = {}
Drops_names = []
for file in glob.glob("*.xlsx"):
if file[0:4] == 'Drop':
Drops[file[:-5]] = pd.read_excel(file)
Drops_names.append(file[:-5])
os.chdir(cwd)
dropss = pd.concat(Drops, keys=Drops_names)
dropss['time_loc'] = dropss['abs_time'].dt.tz_localize(timezone.get_current_timezone())
return dropss
def Load_sliced_RSD(Main_id):
data = Load_RSD(Main_id)
entry = General.get_in_full_model(Main_id)
DashTab = entry.Dash
return Slice_data(data, DashTab)
def Load_MFL(pk):
entry = General.get_in_full_model_sub(pk)
MFL_N2_data = Load_csv(entry)
MFL_N2_data['Date_Time'] = pd.to_datetime(MFL_N2_data['Date_Time'], format='%d.%m.%Y %H:%M:%S', errors="coerce")
MFL_N2_data['time'] = MFL_N2_data['Date_Time'].dt.tz_localize(timezone.get_current_timezone())
return MFL_N2_data
def Load_MFR(pk):
entry = General.get_in_full_model_sub(pk)
file = os.path.join( rel_path, entry.Link)
data = pd.read_csv(file, sep=' ', error_bad_lines=False)
    data['date_time'] = pd.to_datetime(data['date'] + '_' + data['time'], format='%Y-%m-%d_%H:%M:%S.%f', errors="coerce")
# Import relevant modules
from agent.agent_DDQNN import RNNAgent, GTNAgent, TTNNAgent, GNNAgent
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import random
# Set random seed
random.seed(0)
np.random.seed(0)
# Initialize Agent variables
trading_currency = 'USDSEK'
window_size = 30
episode_count = 15
batch_size = 64 # batch size for replaying/training the agent
agent_type = 'GNN' # RNN or GTN or TTNN or GNN
# Initialize training variables
total_rewards_df = pd.DataFrame(dtype=float)
# Get returns data
rs_types = ['open', 'high', 'low', 'last']
file_names = [f'g10_minute_{t}_rs_2019-10-01.csv' for t in rs_types]
rs_data = dict(zip(rs_types, [pd.read_csv(f'data/{f}', index_col=0, header=0) for f in file_names]))
rs_y = rs_data['last'][trading_currency]
# Get graphs data
A_t = pd.read_csv('data/A_t_22.csv', index_col=0, header=0)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import pandas as pd
import pandas_should # noqa
class TestEqualAccessorMixin(object):
def test_equal_true(self):
df1 = pd.DataFrame([1, 2, 3], columns=['id'])
df2 = pd.DataFrame([1, 2, 3], columns=['id'])
assert df1.should.equal(df2)
def test_equal_false(self):
df1 = pd.DataFrame([1, 2, 3], columns=['id'])
df2 = pd.DataFrame([1, 2, 3, 4], columns=['id'])
assert not df1.should.equal(df2)
@pytest.mark.parametrize('alias_name', [
'be_equal_to', 'be_equals_to', 'be_eq_to', 'eq',
])
    def test_equal_aliases(self, alias_name):
df = pd.DataFrame([1, 2, 3], columns=['id'])
assert hasattr(df.should, alias_name)
def test_not_equal_true(self):
df1 = pd.DataFrame([1, 2, 3], columns=['id'])
df2 = pd.DataFrame([1, 2, 3, 4], columns=['id'])
assert df1.should.not_equal(df2)
def test_not_equal_false(self):
df1 = pd.DataFrame([1, 2, 3], columns=['id'])
df2 = pd.DataFrame([1, 2, 3], columns=['id'])
assert not df1.should.not_equal(df2)
@pytest.mark.parametrize('alias_name', [
'be_not_equal_to', 'be_not_equals_to', 'be_neq_to', 'neq',
])
    def test_not_equal_aliases(self, alias_name):
df = pd.DataFrame([1, 2, 3], columns=['id'])
assert hasattr(df.should, alias_name)
def test_have_same_length_true(self):
df1 = pd.DataFrame([1, 2, 3], columns=['id'])
df2 = pd.DataFrame([1, 2, 3], columns=['id'])
assert df1.should.have_same_length(df2)
def test_have_same_length_false(self):
df1 = pd.DataFrame([1, 2, 3], columns=['id'])
df2 = pd.DataFrame([1, 2, 3, 4], columns=['id'])
assert not df1.should.have_same_length(df2)
def test_have_same_length_multiple(self):
df1 = pd.DataFrame([1, 2, 3], columns=['id'])
df2 = pd.DataFrame([1, 2], columns=['id'])
df3 = pd.DataFrame([3], columns=['id'])
assert df1.should.have_same_length(df2, df3)
def test_have_same_width_true(self):
data1 = [
(1, 'alice', 20),
(2, 'bob', None),
(3, 'carol', 40),
]
df1 = pd.DataFrame(data1, columns=['id', 'name', 'age'])
data2 = [
('apple', 198, 'red'),
('banana', 128, 'yellow'),
]
df2 = pd.DataFrame(data2, columns=['fruit', 'price', 'color'])
assert df1.should.have_same_width(df2)
def test_have_same_width_false(self):
data1 = [
(1, 'alice', 20),
(2, 'bob', None),
(3, 'carol', 40),
]
df1 = pd.DataFrame(data1, columns=['id', 'name', 'age'])
data2 = [
('apple', 198),
('banana', 128),
]
df2 = pd.DataFrame(data2, columns=['fruit', 'price'])
assert not df1.should.have_same_width(df2)
def test_have_same_width_multiple(self):
data1 = [
(1, 'alice', 20),
(2, 'bob', None),
(3, 'carol', 40),
]
df1 = pd.DataFrame(data1, columns=['id', 'name', 'age'])
data2 = [
('apple', 198),
('banana', 128),
]
df2 = pd.DataFrame(data2, columns=['fruit', 'price'])
df3 = pd.DataFrame(['red', 'blue', 'green'])
assert df1.should.have_same_width(df2, df3)
class TestNullAccessorMixin(object):
def test_have_null_true(self):
data = [
(1, 'alice', 20),
(2, 'bob', None),
(3, 'carol', 40),
]
df = pd.DataFrame(data, columns=['id', 'name', 'age'])
assert df.should.have_null()
def test_have_null_false(self):
data = [
(1, 'alice', 20),
(2, 'bob', 30),
(3, 'carol', 40),
]
df = pd.DataFrame(data, columns=['id', 'name', 'age'])
assert not df.should.have_null()
def test_have_null_count(self):
data = [
(1, 'alice', 20),
(2, 'bob', None),
(3, 'carol', 40),
]
df = pd.DataFrame(data, columns=['id', 'name', 'age'])
assert df.should.have_null(count=True) == (True, {'age': 1, 'id': 0, 'name': 0})
def test_have_not_null_true(self):
data = [
(1, 'alice', 20),
(2, 'bob', 30),
(3, 'carol', 40),
]
df = pd.DataFrame(data, columns=['id', 'name', 'age'])
assert df.should.have_not_null()
def test_have_not_null_false(self):
data = [
(1, 'alice', 20),
(2, 'bob', None),
(3, 'carol', 40),
]
df = pd.DataFrame(data, columns=['id', 'name', 'age'])
assert not df.should.have_not_null()
@pytest.mark.parametrize('alias_name', ['havent_null'])
def test_have_not_null_aliases(self, alias_name):
df = pd.DataFrame([1, 2, 3], columns=['id'])
assert hasattr(df.should, alias_name)
class TestShapeAccessorMixin(object):
@pytest.mark.parametrize('df1, df2', [
(pd.DataFrame([1, 2, 3], columns=['id']),
pd.DataFrame(['a', 'b', 'c'], columns=['name'])),
(pd.DataFrame([(1, 'a'), (2, 'b')], columns=['id', 'name']),
pd.DataFrame([(-2, True), (-1, False)], columns=['a', 'b']))
])
def test_be_shaped_like_df(self, df1, df2):
assert df1.should.be_shaped_like(df2)
@pytest.mark.parametrize('df, shape', [
(pd.DataFrame([1, 2, 3], columns=['id']), (3, 1)),
(pd.DataFrame([(1, 'a'), (2, 'b')], columns=['id', 'name']), (2, 2)),
])
def test_be_shaped_like_tuple(self, df, shape):
assert df.should.be_shaped_like(shape)
@pytest.mark.parametrize('df, rows, columns', [
(pd.DataFrame([1, 2, 3], columns=['id']), 3, 1),
(pd.DataFrame([(1, 'a'), (2, 'b')], columns=['id', 'name']), 2, 2),
])
def test_be_shaped_like(self, df, rows, columns):
assert df.should.be_shaped_like(rows, columns)
@pytest.mark.parametrize('alias_name', ['shape'])
def test_be_shaped_like_aliases(self, alias_name):
df = pd.DataFrame([1, 2, 3], columns=['id'])
assert hasattr(df.should, alias_name)
@pytest.mark.parametrize('df, length', [
(pd.DataFrame([1, 2, 3], columns=['id']), 1),
(pd.DataFrame([(1, 'a'), (2, 'b')], columns=['id', 'name']), 2),
])
def test_have_width(self, df, length):
assert df.should.have_width(length)
@pytest.mark.parametrize('alias_name', ['columns', 'columns_len', 'have_length_of_columns'])
def test_have_length_of_columns_aliases(self, alias_name):
        df = pd.DataFrame([1, 2, 3], columns=['id'])
        assert hasattr(df.should, alias_name)
"""
Download, transform and simulate various datasets.
"""
# Author: <NAME> <<EMAIL>>
# License: MIT
from os.path import join
from urllib.parse import urljoin
from string import ascii_lowercase
from sqlite3 import connect
from rich.progress import track
import numpy as np
import pandas as pd
from .base import Datasets, FETCH_URLS
class ContinuousCategoricalDatasets(Datasets):
"""Class to download, transform and save datasets with both continuous
and categorical features."""
@staticmethod
def _modify_columns(data, categorical_features):
"""Rename and reorder columns of dataframe."""
X, y = data.drop(columns="target"), data.target
X.columns = range(len(X.columns))
return pd.concat([X, y], axis=1), categorical_features
def download(self):
"""Download the datasets."""
if self.names == "all":
func_names = [func_name for func_name in dir(self) if "fetch_" in func_name]
else:
func_names = [
f"fetch_{name}".lower().replace(" ", "_") for name in self.names
]
self.content_ = []
for func_name in track(func_names, description="Datasets"):
name = func_name.replace("fetch_", "").upper().replace("_", " ")
fetch_data = getattr(self, func_name)
data, categorical_features = self._modify_columns(*fetch_data())
self.content_.append((name, data, categorical_features))
return self
def save(self, path, db_name):
"""Save datasets."""
with connect(join(path, f"{db_name}.db")) as connection:
            for name, data, _ in self.content_:
data.to_sql(name, connection, index=False, if_exists="replace")
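    # Illustrative usage only (not part of the original module); assumes the
    # base Datasets class accepts a `names` argument, as suggested by the use
    # of self.names in download():
    #
    #     datasets = ContinuousCategoricalDatasets(names=['adult', 'abalone'])
    #     datasets.download()
    #     datasets.save('/tmp', 'continuous_categorical')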
def fetch_adult(self):
"""Download and transform the Adult Data Set.
https://archive.ics.uci.edu/ml/datasets/Adult
"""
data = pd.read_csv(FETCH_URLS["adult"], header=None, na_values=" ?").dropna()
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [1, 3, 5, 6, 7, 8, 9, 13]
return data, categorical_features
def fetch_abalone(self):
"""Download and transform the Abalone Data Set.
https://archive.ics.uci.edu/ml/datasets/Abalone
"""
data = pd.read_csv(FETCH_URLS["abalone"], header=None)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [0]
return data, categorical_features
def fetch_acute(self):
"""Download and transform the Acute Inflammations Data Set.
https://archive.ics.uci.edu/ml/datasets/Acute+Inflammations
"""
data = pd.read_csv(
FETCH_URLS["acute"], header=None, sep="\t", decimal=",", encoding="UTF-16"
)
data["target"] = data[6].str[0] + data[7].str[0]
data.drop(columns=[6, 7], inplace=True)
categorical_features = list(range(1, 6))
return data, categorical_features
def fetch_annealing(self):
"""Download and transform the Annealing Data Set.
https://archive.ics.uci.edu/ml/datasets/Annealing
"""
data = pd.read_csv(FETCH_URLS["annealing"], header=None, na_values="?")
# some features are dropped; they have too many missing values
missing_feats = (data.isnull().sum(0) / data.shape[0]) < 0.1
data = data.iloc[:, missing_feats.values]
data[2].fillna(data[2].mode().squeeze(), inplace=True)
data = data.T.reset_index(drop=True).T
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [0, 1, 5, 9]
return data, categorical_features
def fetch_census(self):
"""Download and transform the Census-Income (KDD) Data Set.
https://archive.ics.uci.edu/ml/datasets/Census-Income+%28KDD%29
"""
data = pd.read_csv(FETCH_URLS["census"], header=None)
categorical_features = (
list(range(1, 5))
+ list(range(6, 16))
+ list(range(19, 29))
+ list(range(30, 38))
+ [39]
)
# some features are dropped; they have too many missing values
cols_ids = [1, 6, 9, 13, 14, 20, 21, 29, 31, 37]
categorical_features = np.argwhere(
np.delete(
data.rename(columns={k: f"nom_{k}" for k in categorical_features})
.columns.astype("str")
.str.startswith("nom_"),
cols_ids,
)
).squeeze()
data = data.drop(columns=cols_ids).T.reset_index(drop=True).T
        # rows containing " Not in universe" values are dropped
data = data.iloc[
data.applymap(lambda x: x != " Not in universe").all(1).values, :
]
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
return data, categorical_features
def fetch_contraceptive(self):
"""Download and transform the Contraceptive Method Choice Data Set.
https://archive.ics.uci.edu/ml/datasets/Contraceptive+Method+Choice
"""
        data = pd.read_csv(FETCH_URLS["contraceptive"], header=None)
import pandas as pd
import os
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
from sklearn import metrics
from sklearn.metrics import classification_report
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score
# from IPython.display import display
currentDirectory = os.getcwd()
final_train = pd.read_csv(currentDirectory+"/final_train.csv")
train = pd.read_csv('./unbalanced/train.csv')
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
__author__ = ["<NAME>"]
__copyright__ = "Copyright 2021, European Commission Joint Research Centre"
__credits__ = ["GTCAP Team"]
__license__ = "3-Clause BSD"
__version__ = ""
__maintainer__ = [""]
__status__ = "Development"
import pandas as pd
import requests
import datetime
pd.options.mode.chained_assignment = None # default='warn'
def create_list_of_tiles_to_be_downloaded_from_RESTful(MS, year, parcel_id,
search_window_start_date, search_window_end_date,
cloud_categories, api_user, api_pass, tstype, ptype):
was_error = False
ms = MS.lower()
if ms == "be-wa":
ms = "bewa"
url_base = "https://cap.users.creodias.eu"
if ptype == "":
url = url_base + "/query/parcelTimeSeries?aoi=" + ms + "&year=" + str(year) + "&pid=" + str(parcel_id) + "&tstype=" + tstype + "&scl=True&ref=True"
else:
url = url_base + "/query/parcelTimeSeries?aoi=" + ms + "&year=" + str(year) + "&pid=" + str(parcel_id) + "&ptype=" + ptype + "&tstype=" + tstype + "&scl=True&ref=True"
print(url)
try:
response = requests.get(url, auth=(api_user, api_pass))
print(response)
if response.status_code == 404 or response.status_code == 500 or response.status_code == 401:
was_error = True
if response.status_code == 401:
print("Please, provide valid credentials to access the RESTFul server")
tiles_to_download = []
else:
            df = pd.read_json(response.text)
#!/usr/bin/env python
# -*- coding=utf-8 -*-
###########################################################################
# Copyright (C) 2013-2016 by Caspar. All rights reserved.
# File Name: txtclf.py
# Author: <NAME>
# E-mail: <EMAIL>
# Created Time: 2016-07-05 14:39:18
###########################################################################
#
import os, sys, difflib, itertools
from time import time
import numpy as np
import scipy as sp
import scipy.stats as stats
import pandas as pd
from sklearn.base import clone
from sklearn.preprocessing import MinMaxScaler, LabelBinarizer, label_binarize, normalize
from sklearn.multiclass import OneVsRestClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import StratifiedShuffleSplit, StratifiedKFold, KFold, GridSearchCV, RandomizedSearchCV
from sklearn import metrics
from .util import io, func, plot
from .util import math as imath
common_cfg = {}
def init(plot_cfg={}, plot_common={}):
if (len(plot_cfg) > 0 and plot_cfg['MON'] is not None):
plot.MON = plot_cfg['MON']
global common_cfg
if (len(plot_common) > 0):
common_cfg = plot_common
def get_featw(pipeline, feat_num):
feat_w_dict, sub_feat_w = [{} for i in range(2)]
filt_feat_idx = feature_idx = np.arange(feat_num)
for component in ('featfilt', 'clf'):
if (type(pipeline) != Pipeline):
if (component == 'featfilt'):
continue
else:
cmpn = pipeline
elif (component in pipeline.named_steps):
cmpn = pipeline.named_steps[component]
else:
continue
if (hasattr(cmpn, 'estimators_')):
for i, estm in enumerate(cmpn.estimators_):
filt_subfeat_idx = feature_idx[:]
if (hasattr(estm, 'get_support')):
filt_subfeat_idx = feature_idx[estm.get_support()]
for measure in ('feature_importances_', 'coef_', 'scores_'):
if (hasattr(estm, measure)):
filt_subfeat_w = getattr(estm, measure)
subfeat_w = (filt_subfeat_w.min() - 1) * np.ones_like(feature_idx)
# subfeat_w[filt_subfeat_idx] = normalize(estm.feature_importances_, norm='l1')
subfeat_w[filt_subfeat_idx] = filt_subfeat_w
# print 'Sub FI shape: (%s)' % ','.join([str(x) for x in filt_subfeat_w.shape])
# print 'Feature Importance inside %s Ensemble Method: %s' % (component, filt_subfeat_w)
sub_feat_w[(component, i)] = subfeat_w
if (hasattr(component, 'get_support')):
filt_feat_idx = feature_idx[component.get_support()]
for measure in ('feature_importances_', 'coef_', 'scores_'):
if (hasattr(cmpn, measure)):
filt_feat_w = getattr(cmpn, measure)
# print '*' * 80 + '\n%s\n'%filt_feat_w + '*' * 80
feat_w = (filt_feat_w.min() - 1) * np.ones_like(feature_idx)
# feat_w[filt_feat_idx] = normalize(filt_feat_w, norm='l1')
feat_w[filt_feat_idx] = filt_feat_w
# print '*' * 80 + '\n%s\n'%feat_w + '*' * 80
feat_w_dict[(component, measure)] = feat_w
print('FI shape: (%s)' % ','.join([str(x) for x in feat_w_dict[(component, measure)].shape]))
print('Sample 10 Feature from %s.%s: %s' % (component, measure, feat_w[feat_w > 0][:10]))
# print 'Feature Importance from %s.%s: %s' % (component, measure, feat_w)
return feat_w_dict, sub_feat_w
def get_score(pipeline, X_test, mltl=False):
if ((not isinstance(pipeline, Pipeline) and hasattr(pipeline, 'predict_proba')) or(isinstance(pipeline.named_steps['clf'], OneVsRestClassifier) and hasattr(pipeline.named_steps['clf'].estimators_[0], 'predict_proba')) or (not isinstance(pipeline.named_steps['clf'], OneVsRestClassifier) and hasattr(pipeline, 'predict_proba'))):
if (mltl):
return pipeline.predict_proba(X_test)
else:
# return pipeline.predict_proba(X_test)[:, 1]
return pipeline.predict_proba(X_test)
elif (hasattr(pipeline, 'decision_function')):
return pipeline.decision_function(X_test)
else:
print('Neither probability estimate nor decision function is supported in the classification model!')
        return [0] * X_test.shape[0]
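# Illustrative usage only (not part of the original module):
#
#     scores = get_score(fitted_pipeline, X_test, mltl=False)
#
# Returns probability estimates when the estimator supports predict_proba,
# otherwise decision_function values, and a list of zeros as a last resort.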
# Benchmark
def benchmark(pipeline, X_train, Y_train, X_test, Y_test, mltl=False, signed=False, average='micro'):
print('+' * 80)
print('Training Model: ')
print(pipeline)
t0 = time()
pipeline.fit(X_train, Y_train)
train_time = time() - t0
print('train time: %0.3fs' % train_time)
t0 = time()
orig_pred = pred = pipeline.predict(X_test)
orig_prob = prob = pipeline.predict_proba(X_test) if hasattr(pipeline, 'predict_proba') else pipeline.decision_function(X_test)
test_time = time() - t0
print('+' * 80)
print('Testing: ')
print('test time: %0.3fs' % test_time)
is_mltl = mltl
if (signed):
Y_test = np.column_stack([np.abs(Y_test).reshape((Y_test.shape[0],-1))] + [label_binarize(lb, classes=[-1,1,0])[:,1] for lb in (np.sign(Y_test).astype('int8').reshape((Y_test.shape[0],-1))).T]) if (len(Y_test.shape) < 2 or Y_test.shape[1] == 1 or np.where(Y_test<0)[0].shape[0]>0) else Y_test
pred = np.column_stack([np.abs(pred).reshape((pred.shape[0],-1))] + [label_binarize(lb, classes=[-1,1,0])[:,1] for lb in (np.sign(pred).astype('int8').reshape((pred.shape[0],-1))).T]) if (len(pred.shape) < 2 or pred.shape[1] == 1 or np.where(pred<0)[0].shape[0]>0) else pred
is_mltl = True
try:
accuracy = metrics.accuracy_score(Y_test, pred)
except ValueError as e:
print(e)
Y_test, pred = Y_test.ravel(), pred.ravel()
accuracy = metrics.accuracy_score(Y_test, pred)
print('accuracy: %0.3f' % accuracy)
if (is_mltl and average == 'all'):
micro_precision = metrics.precision_score(Y_test, pred, average='micro')
print('micro-precision: %0.3f' % micro_precision)
micro_recall = metrics.recall_score(Y_test, pred, average='micro')
print('micro-recall: %0.3f' % micro_recall)
micro_fscore = metrics.fbeta_score(Y_test, pred, beta=1, average='micro')
print('micro-fscore: %0.3f' % micro_fscore)
macro_precision = metrics.precision_score(Y_test, pred, average='macro')
print('macro-precision: %0.3f' % macro_precision)
macro_recall = metrics.recall_score(Y_test, pred, average='macro')
print('macro-recall: %0.3f' % macro_recall)
macro_fscore = metrics.fbeta_score(Y_test, pred, beta=1, average='macro')
print('macro-fscore: %0.3f' % macro_fscore)
else:
precision = metrics.precision_score(Y_test, pred, average=average if is_mltl else 'binary')
print('precision: %0.3f' % precision)
recall = metrics.recall_score(Y_test, pred, average=average if is_mltl else 'binary')
print('recall: %0.3f' % recall)
fscore = metrics.fbeta_score(Y_test, pred, beta=1, average=average if is_mltl else 'binary')
print('fscore: %0.3f' % fscore)
print('classification report:')
# print metrics.classification_report(Y_test, pred)
metric_df = pd.DataFrame(metrics.classification_report(Y_test, pred, output_dict=True)).T[['precision', 'recall', 'f1-score', 'support']]
print(metric_df)
print('confusion matrix:')
if (is_mltl):
pass
else:
print(metrics.confusion_matrix(Y_test, pred))
print('+' * 80)
clf = pipeline.named_steps['clf'] if (type(pipeline) is Pipeline) else pipeline
if ((isinstance(clf, OneVsRestClassifier) and hasattr(clf.estimators_[0], 'predict_proba')) or (not isinstance(clf, OneVsRestClassifier) and hasattr(pipeline, 'predict_proba'))):
if (mltl):
scores = pipeline.predict_proba(X_test)
if (type(scores) == list):
scores = np.concatenate([score[:, -1].reshape((-1, 1)) for score in scores], axis=1)
else:
scores = pipeline.predict_proba(X_test)[:, -1]
elif (hasattr(pipeline, 'decision_function')):
scores = pipeline.decision_function(X_test)
else:
print('Neither probability estimate nor decision function is supported in the classification model! ROC and PRC figures will be invalid.')
scores = [0] * Y_test.shape[0]
if (signed and (len(scores.shape) < 2 or scores.shape[1] < pred.shape[1])):
scores = np.concatenate([np.abs(scores).reshape((scores.shape[0],-1))] + [label_binarize(lb, classes=[-1,1,0])[:,:2] for lb in (np.sign(scores).astype('int8').reshape((scores.shape[0],-1))).T], axis=1)
if (is_mltl):
if ((len(Y_test.shape) == 1 or Y_test.shape[1] == 1) and len(np.unique(Y_test)) > 2):
lbz = LabelBinarizer()
Y_test = lbz.fit_transform(Y_test)
def micro():
# Micro-average ROC curve
y_true = np.array(Y_test)
s_array = np.array(scores)
if (len(s_array.shape) == 3):
s_array = s_array[:,:,1].reshape((s_array.shape[0],s_array.shape[1],))
if (y_true.shape[0] == s_array.shape[1] and y_true.shape[1] == s_array.shape[0]):
s_array = s_array.T
return metrics.roc_curve(y_true.ravel(), s_array.ravel())
def macro():
# Macro-average ROC curve
n_classes = Y_test.shape[1]
fpr, tpr = [dict() for i in range(2)]
for i in range(n_classes):
fpr[i], tpr[i], _ = metrics.roc_curve(Y_test[:, i], scores[:, i])
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
return all_fpr, mean_tpr, _
if (average == 'micro'):
roc = micro()
elif (average == 'macro'):
roc = macro()
elif (average == 'all'):
micro_roc = micro()
macro_roc = macro()
if (type(scores) == list):
scores = np.array(scores)[:,:,0]
prc = metrics.precision_recall_curve(Y_test.ravel(), scores.ravel()) # Only micro-prc is supported
else:
roc = metrics.roc_curve(Y_test, scores)
prc = metrics.precision_recall_curve(Y_test, scores)
# print 'ROC:\n%s\n%s' % (roc[0], roc[1])
# print 'PRC:\n%s\n%s' % (prc[0], prc[1])
print('Training and Testing X shape: %s; %s' % (', '.join(['(%s)' % ','.join([str(x) for x in X.shape]) for X in X_train]) if type(X_train) is list else '(%s)' % ','.join([str(x) for x in X_train.shape]), ', '.join(['(%s)' % ','.join([str(x) for x in X.shape]) for X in X_test]) if type(X_test) is list else '(%s)' % ','.join([str(x) for x in X_test.shape])))
feat_w_dict, sub_feat_w = [{} for i in range(2)]
filt_feat_idx = feature_idx = np.arange(X_train[0].shape[1] if type(X_train) is list else X_train.shape[1])
for component in ('featfilt', 'clf'):
if (type(pipeline) != Pipeline):
if (component == 'featfilt'):
continue
else:
cmpn = pipeline
elif (component in pipeline.named_steps):
cmpn = pipeline.named_steps[component]
else:
continue
if (hasattr(cmpn, 'estimators_')):
for i, estm in enumerate(cmpn.estimators_):
filt_subfeat_idx = filt_feat_idx[:]
if (hasattr(estm, 'get_support')):
filt_subfeat_idx = filt_feat_idx[estm.get_support()]
for measure in ('feature_importances_', 'coef_', 'scores_'):
if (hasattr(estm, measure)):
filt_subfeat_w = getattr(estm, measure)
subfeat_w = (filt_subfeat_w.min() - 1) * np.ones_like(feature_idx)
# subfeat_w[filt_subfeat_idx][:len(estm.feature_importances_)] = normalize(estm.feature_importances_, norm='l1')
						subfeat_w[filt_subfeat_idx[:len(filt_subfeat_w)]] = filt_subfeat_w
# print 'Sub FI shape: (%s)' % ','.join([str(x) for x in filt_subfeat_w.shape])
# print 'Feature Importance inside %s Ensemble Method: %s' % (component, filt_subfeat_w)
sub_feat_w[(component, i)] = subfeat_w
for measure in ('feature_importances_', 'coef_', 'scores_'):
if (hasattr(cmpn, measure)):
filt_feat_w = getattr(cmpn, measure)
# print '*' * 80 + '\n%s\n'%filt_feat_w + '*' * 80
feat_w = (filt_feat_w.min() - 1) * np.ones_like(feature_idx)
# feat_w[filt_feat_idx][:filt_feat_w.shape[1] if len(filt_feat_w.shape) > 1 else len(filt_feat_w)] = normalize(filt_feat_w[1,:] if len(filt_feat_w.shape) > 1 else filt_feat_w, norm='l1')
				feat_w[filt_feat_idx[:filt_feat_w.shape[1] if len(filt_feat_w.shape) > 1 else len(filt_feat_w)]] = filt_feat_w[1,:] if len(filt_feat_w.shape) > 1 else filt_feat_w
# print '*' * 80 + '\n%s\n'%feat_w + '*' * 80
feat_w_dict[(component, measure)] = feat_w
print('FI shape: (%s)' % ','.join([str(x) for x in feat_w_dict[(component, measure)].shape]))
print('Sample 10 Feature from %s.%s: %s' % (component, measure, feat_w[feat_w > 0][:10]))
# print 'Feature Importance from %s.%s: %s' % (component, measure, feat_w)
if (hasattr(cmpn, 'get_support')):
filt_feat_idx = filt_feat_idx[cmpn.get_support()]
print('\n')
if (is_mltl and average == 'all'):
return {'accuracy':accuracy, 'micro-precision':micro_precision, 'micro-recall':micro_recall, 'micro-fscore':micro_fscore, 'macro-precision':macro_precision, 'macro-recall':macro_recall, 'macro-fscore':macro_fscore, 'train_time':train_time, 'test_time':test_time, 'micro-roc':micro_roc, 'macro-roc':macro_roc, 'prc':prc, 'feat_w':feat_w_dict, 'sub_feat_w':sub_feat_w, 'pred_lb':orig_pred, 'metrics':metric_df}
else:
return {'accuracy':accuracy, 'precision':precision, 'recall':recall, 'fscore':fscore, 'train_time':train_time, 'test_time':test_time, 'roc':roc, 'prc':prc, 'feat_w':feat_w_dict, 'sub_feat_w':sub_feat_w, 'pred_lb':orig_pred, 'pred_prob':orig_prob, 'metrics':metric_df}
# Calculate the Venn diagram overlaps
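# pred_ovl: for a matrix of predictions (rows = instances, columns = models) count, for
# every subset of models, the instances on which exactly that subset agrees; when the true
# labels are given it also counts how many of those agreed predictions are correct.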
def pred_ovl(preds, pred_true=None, axis=1):
if (axis == 0):
preds = preds.T
if (pred_true is not None):
pred_true = pred_true.reshape((-1,))
# Row represents feature, column represents instance
var_num, dim = preds.shape[0], preds.shape[1]
orig_idx = np.arange(var_num)
if (len(preds.shape) < 2 or preds.shape[1] == 1):
if (pred_true is None):
return np.ones(shape=(1,), dtype='int')
else:
overlap_mt = np.ones(shape=(1,2), dtype='int')
overlap_mt[0,1] = orig_idx[preds.reshape((-1,)) == pred_true].shape[0]
return overlap_mt
# Calculate possible subsets of all the instance indices
subset_idx = list(imath.subset(list(range(dim)), min_crdnl=1))
# Initialize result matrix
if (pred_true is None):
overlap_mt = np.zeros(shape=(len(subset_idx),), dtype='int')
else:
overlap_mt = np.zeros(shape=(len(subset_idx), 2), dtype='int')
# Calculate overlap for each subset
for i, idx in enumerate(subset_idx):
rmn_idx = set(range(dim)) - set(idx)
		# Select the positions of the target instance that have no overlap with other instances
pred_sum, chsn_sum, rmn_sum = preds.sum(axis=1), preds[:,idx].sum(axis=1), preds[:,list(rmn_idx)].sum(axis=1)
condition = np.all([np.logical_or(chsn_sum == 0, chsn_sum == len(idx)), np.logical_or(rmn_sum == 0, rmn_sum == len(rmn_idx)), np.logical_or(pred_sum == len(idx), pred_sum == len(rmn_idx))], axis=0)
if (pred_true is None):
overlap_mt[i] = orig_idx[condition].shape[0]
else:
# And the selected positions should be true
true_cond = np.logical_and(condition, preds[:,idx[0]] == pred_true)
overlap_mt[i,0] = orig_idx[condition].shape[0]
overlap_mt[i,1] = orig_idx[true_cond].shape[0]
return overlap_mt
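# save_featw: min-max scale the per-fold feature weights, average them across folds, and
# optionally dump the ranked features to Excel/npz and plot the top-10 importances.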
def save_featw(features, crsval_featw, crsval_subfeatw, cfg_param={}, lbid=''):
lbidstr = ('_' + (str(lbid) if lbid != -1 else 'all')) if lbid is not None and lbid != '' else ''
for k, v in crsval_featw.items():
measure_str = k.replace(' ', '_').strip('_').lower()
feat_w_mt = np.column_stack(v)
mms = MinMaxScaler()
feat_w_mt = mms.fit_transform(feat_w_mt)
feat_w_avg = feat_w_mt.mean(axis=1)
feat_w_std = feat_w_mt.std(axis=1)
sorted_idx = np.argsort(feat_w_avg, axis=-1)[::-1]
# sorted_idx = sorted(range(feat_w_avg.shape[0]), key=lambda k: feat_w_avg[k])[::-1]
sorted_feat_w = np.column_stack((features[sorted_idx], feat_w_avg[sorted_idx], feat_w_std[sorted_idx]))
feat_w_df = pd.DataFrame(sorted_feat_w, index=sorted_idx, columns=['Feature Name', 'Importance Mean', 'Importance Std'])
if (cfg_param.setdefault('save_featw', False)):
feat_w_df.to_excel('featw%s_%s.xlsx' % (lbidstr, measure_str))
if (cfg_param.setdefault('save_featw_npz', False)):
io.write_df(feat_w_df, 'featw%s_%s' % (lbidstr, measure_str), with_idx=True)
if (cfg_param.setdefault('plot_featw', False)):
plot.plot_bar(feat_w_avg[sorted_idx[:10]].reshape((1,-1)), feat_w_std[sorted_idx[:10]].reshape((1,-1)), features[sorted_idx[:10]], labels=None, title='Feature importances', fname='fig_featw%s_%s' % (lbidstr, measure_str), plot_cfg=common_cfg)
for k, v in crsval_subfeatw.items():
measure_str = k.replace(' ', '_').strip('_').lower()
subfeat_w_mt = np.column_stack(v)
mms = MinMaxScaler()
subfeat_w_mt = mms.fit_transform(subfeat_w_mt)
subfeat_w_avg = subfeat_w_mt.mean(axis=1)
subfeat_w_std = subfeat_w_mt.std(axis=1)
sorted_idx = np.argsort(subfeat_w_avg, axis=-1)[::-1]
sorted_subfeat_w = np.column_stack((features[sorted_idx], subfeat_w_avg[sorted_idx], subfeat_w_std[sorted_idx]))
subfeat_w_df = pd.DataFrame(sorted_subfeat_w, index=sorted_idx, columns=['Feature Name', 'Importance Mean', 'Importance Std'])
if (cfg_param.setdefault('save_subfeatw', False)):
subfeat_w_df.to_excel('subfeatw%s_%s.xlsx' % (lbidstr, measure_str))
if (cfg_param.setdefault('save_subfeatw_npz', False)):
io.write_df(subfeat_w_df, 'subfeatw%s_%s' % (lbidstr, measure_str), with_idx=True)
if (cfg_param.setdefault('plot_subfeatw', False)):
plot.plot_bar(subfeat_w_avg[sorted_idx[:10]].reshape((1,-1)), subfeat_w_std[sorted_idx[:10]].reshape((1,-1)), features[sorted_idx[:10]], labels=None, title='Feature importances', fname='fig_subfeatw_%s' % measure_str, plot_cfg=common_cfg)
# Classification
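# classification: train every model produced by model_iter on the full training set,
# predict the test set, persist predictions/models/feature weights as configured, and
# report prediction overlap and rank correlation between the models.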
def classification(X_train, Y_train, X_test, model_iter, model_param={}, cfg_param={}, global_param={}, lbid=''):
print('Classifing...')
global common_cfg
FILT_NAMES, CLF_NAMES, PL_NAMES, PL_SET = model_param['glb_filtnames'], model_param['glb_clfnames'], global_param['pl_names'], global_param['pl_set']
lbidstr = ('_' + (str(lbid) if lbid != -1 else 'all')) if lbid is not None and lbid != '' else ''
to_hdf, hdf5_fpath = cfg_param.setdefault('to_hdf', False), '%s' % 'crsval_dataset.h5' if cfg_param.setdefault('hdf5_fpath', 'crsval_dataset.h5') is None else cfg_param['hdf5_fpath']
# Format the data
if (type(X_train) == list):
assert all([len(x) == len(X_train[0]) for x in X_train[1:]])
X_train = [pd.DataFrame(x) if (type(x) != pd.io.parsers.TextFileReader and type(x) != pd.DataFrame) else x for x in X_train]
X_train = [pd.concat(x) if (type(x) == pd.io.parsers.TextFileReader and not to_hdf) else x for x in X_train]
else:
if (type(X_train) != pd.io.parsers.TextFileReader and type(X_train) != pd.DataFrame):
X_train = pd.DataFrame(X_train)
X_train = pd.concat(X_train) if (type(X_train) == pd.io.parsers.TextFileReader and not to_hdf) else X_train
if (type(X_test) == list):
assert all([len(x) == len(X_test[0]) for x in X_test[1:]])
X_test = [pd.DataFrame(x) if (type(x) != pd.io.parsers.TextFileReader and type(x) != pd.DataFrame) else x for x in X_test]
X_test = [pd.concat(x) if (type(x) == pd.io.parsers.TextFileReader and not to_hdf) else x for x in X_test]
else:
if (type(X_test) != pd.io.parsers.TextFileReader and type(X_test) != pd.DataFrame):
X_test = pd.DataFrame(X_test)
X_test = pd.concat(X_test) if (type(X_test) == pd.io.parsers.TextFileReader and not to_hdf) else X_test
if (type(Y_train) != pd.io.parsers.TextFileReader and type(Y_train) != pd.DataFrame):
Y_train = pd.DataFrame(Y_train)
Y_train_mt = Y_train.values.reshape((Y_train.shape[0],)) if (len(Y_train.shape) == 1 or Y_train.shape[1] == 1) else Y_train.values
mltl=True if len(Y_train_mt.shape) > 1 and Y_train_mt.shape[1] > 1 or 2 in Y_train_mt else False
print('Classification is starting...')
preds, probs, scores = [[] for i in range(3)]
crsval_featw, crsval_subfeatw = [{} for i in range(2)]
for vars in model_iter(**model_param):
if (global_param['comb']):
mdl_name, mdl = [vars[x] for x in range(2)]
else:
filt_name, filter, clf_name, clf= [vars[x] for x in range(4)]
print('#' * 80)
# Assemble a pipeline
if ('filter' in locals() and filter != None):
model_name = '%s [Ft Filt] & %s [CLF]' % (filt_name, clf_name)
pipeline = Pipeline([('featfilt', clone(filter)), ('clf', clf)])
elif ('clf' in locals() and clf != None):
model_name = '%s [CLF]' % clf_name
pipeline = Pipeline([('clf', clf)])
else:
model_name = mdl_name
pipeline = mdl if (type(mdl) is Pipeline) else Pipeline([('clf', mdl)])
if (model_name in PL_SET): continue
PL_NAMES.append(model_name)
PL_SET.add(model_name)
print(model_name)
# Build the model
print('+' * 80)
print('Training Model: ')
print(pipeline)
t0 = time()
pipeline.fit(X_train, Y_train_mt)
train_time = time() - t0
print('train time: %0.3fs' % train_time)
t0 = time()
pred = pipeline.predict(X_test)
		prob = pipeline.predict_proba(X_test) if hasattr(pipeline, 'predict_proba') else pipeline.decision_function(X_test)
test_time = time() - t0
print('+' * 80)
print('Testing: ')
print('test time: %0.3fs' % test_time)
preds.append(pred)
probs.append(prob)
scores.append(get_score(pipeline, X_test, mltl))
# Save predictions and model
if (cfg_param.setdefault('save_pred', True)):
io.write_npz(dict(pred_lb=pred, pred_prob=prob), 'clf_pred_%s%s' % (model_name.replace(' ', '_').lower(), lbidstr))
if (cfg_param.setdefault('save_model', True)):
mdl_name = '%s' % model_name.replace(' ', '_').lower()
if (all([hasattr(pipeline.steps[i][1], 'save') for i in range(len(pipeline.steps))])):
for sub_mdl_name, mdl in pipeline.steps:
mdl.save('%s_%s%s' % (mdl_name, sub_mdl_name.replace(' ', '_').lower(), lbidstr), **global_param.setdefault('mdl_save_kwargs', {}))
else:
io.write_obj(pipeline, '%s%s' % (mdl_name, lbidstr))
# Feature importances
feat_w, sub_feat_w = get_featw(pipeline, X_train[0].shape[1] if (type(X_train) is list) else X_train.shape[1])
for k, v in feat_w.items():
key = '%s_%s_%s' % (model_name, k[0], k[1])
crsval_featw.setdefault(key, []).append(v)
for k, v in sub_feat_w.items():
key = '%s_%s_%s' % (model_name, k[0], k[1])
crsval_subfeatw.setdefault(key, []).append(v)
print('\n')
if (len(preds) > 1):
# Prediction overlap
preds_mt = np.column_stack([x.ravel() for x in preds])
povl = np.array(pred_ovl(preds_mt))
# Spearman's rank correlation
spmnr, spmnr_pval = stats.spearmanr(preds_mt)
# Kendall rank correlation
# kendalltau = stats.kendalltau(preds_mt)[0]
# Pearson correlation
		# pearson = stats.pearsonr(preds_mt)[0]
## Save performance data
povl_idx = [' & '.join(x) for x in imath.subset(PL_NAMES, min_crdnl=1)]
povl_df = pd.DataFrame(povl, index=povl_idx, columns=['pred_ovl'])
spmnr_df = pd.DataFrame(spmnr, index=PL_NAMES, columns=PL_NAMES)
spmnr_pval_df = pd.DataFrame(spmnr_pval, index=PL_NAMES, columns=PL_NAMES)
if (cfg_param.setdefault('save_povl', False)):
povl_df.to_excel('cpovl_clf%s.xlsx' % lbidstr)
if (cfg_param.setdefault('save_povl_npz', False)):
io.write_df(povl_df, 'povl_clf%s.npz' % lbidstr, with_idx=True)
if (cfg_param.setdefault('save_spmnr', False)):
spmnr_df.to_excel('spmnr_clf%s.xlsx' % lbidstr)
if (cfg_param.setdefault('save_spmnr_npz', False)):
io.write_df(spmnr_df, 'spmnr_clf%s.npz' % lbidstr, with_idx=True)
if (cfg_param.setdefault('save_spmnr_pval', False)):
spmnr_pval_df.to_excel('spmnr_pval_clf%s.xlsx' % lbidstr)
if (cfg_param.setdefault('save_spmnr_pval_npz', False)):
io.write_df(spmnr_pval_df, 'spmnr_pval_clf%s.npz' % lbidstr, with_idx=True)
save_featw(X_train[0].columns.values if (type(X_train) is list) else X_train.columns.values, crsval_featw, crsval_subfeatw, cfg_param=cfg_param, lbid=lbid)
return preds, scores
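# kf2data: turn a list of (train_idx, test_idx) splits into per-fold train/test arrays,
# optionally staging each fold in an HDF5 file and yielding HDF5Matrix views instead of
# in-memory arrays.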
def kf2data(kf, X, Y, to_hdf=False, hdf5_fpath='crsval_dataset.h5'):
if (to_hdf):
import h5py
from keras.utils.io_utils import HDF5Matrix
hdf5_fpath = hdf5_fpath if hdf5_fpath else os.path.abspath('crsval_dataset.h5')
for i, (train_idx, test_idx) in enumerate(kf):
if (type(X)==list):
if (type(X[0]) == pd.io.parsers.TextFileReader):
pass
assert all([len(x) == len(X[0]) for x in X[1:]])
X_train, X_test = [x[train_idx,:] for x in X] if to_hdf and type(X[0]) == HDF5Matrix or type(X[0]) != pd.DataFrame else [x.iloc[train_idx,:] for x in X], [x[test_idx,:] for x in X] if to_hdf and type(X[0]) == HDF5Matrix or type(X[0]) != pd.DataFrame else [x.iloc[test_idx,:] for x in X]
train_idx_df, test_idx_df = pd.DataFrame(np.arange(X_train[0].shape[0]), index=X[0].index[train_idx]), pd.DataFrame(np.arange(X_test[0].shape[0]), index=X[0].index[test_idx])
else:
if (type(X) == pd.io.parsers.TextFileReader):
pass
X_train, X_test = X[train_idx] if to_hdf and type(X) == HDF5Matrix or type(X) != pd.DataFrame else X.iloc[train_idx,:], X[test_idx] if to_hdf and type(X) == HDF5Matrix or type(X) != pd.DataFrame else X.iloc[test_idx,:]
train_idx_df, test_idx_df = pd.DataFrame(np.arange(X_train.shape[0]), index=None if to_hdf and type(X) == HDF5Matrix or type(X) != pd.DataFrame else X.index[train_idx]), pd.DataFrame(np.arange(X_test.shape[0]), index=None if to_hdf and type(X) == HDF5Matrix or type(X) != pd.DataFrame else X.index[test_idx])
Y_train, Y_test = Y[train_idx], Y[test_idx]
# Y_train = Y_train.reshape((Y_train.shape[0],)) if (len(Y_train.shape) > 1 and Y_train.shape[1] == 1) else Y_train
# Y_test = Y_test.reshape((Y_test.shape[0],)) if (len(Y_test.shape) > 1 and Y_test.shape[1] == 1) else Y_test
if (to_hdf):
with h5py.File(hdf5_fpath, 'w') as hf:
if (type(X_train) == list):
for idx, x_train in enumerate(X_train):
hf.create_dataset('X_train%i' % idx, data=x_train.values if type(X) != HDF5Matrix else x_train[:])
else:
hf.create_dataset('X_train', data=X_train.values if type(X) != HDF5Matrix else X_train[:])
if (type(X_test) == list):
for idx, x_test in enumerate(X_test):
hf.create_dataset('X_test%i' % idx, data=x_test.values if type(X) != HDF5Matrix else x_test[:])
else:
hf.create_dataset('X_test', data=X_test.values if type(X) != HDF5Matrix else X_test[:])
hf.create_dataset('Y_train', data=Y_train if type(Y) != HDF5Matrix else Y_train[:])
hf.create_dataset('Y_test', data=Y_test if type(Y) != HDF5Matrix else Y_test[:])
yield i, [HDF5Matrix(hdf5_fpath, 'X_train%i' % idx) for idx in range(len(X_train))] if (type(X_train) == list) else HDF5Matrix(hdf5_fpath, 'X_train'), [HDF5Matrix(hdf5_fpath, 'X_test%i' % idx) for idx in range(len(X_test))] if (type(X_test) == list) else HDF5Matrix(hdf5_fpath, 'X_test'), HDF5Matrix(hdf5_fpath, 'Y_train'), HDF5Matrix(hdf5_fpath, 'Y_test'), train_idx_df, test_idx_df
			# The HDF5Matrix implementation keeps every hdf5 file it opens cached, so the handles need to be closed manually.
remove_hfps = []
for hfpath, hf in HDF5Matrix.refs.items():
if (hfpath.startswith(hdf5_fpath)):
hf.close()
remove_hfps.append(hfpath)
for hfpath in remove_hfps:
HDF5Matrix.refs.pop(hfpath, None)
else:
yield i, [x.values for x in X_train] if (type(X_train) == list) else X_train.values, [x.values for x in X_test] if (type(X_test) == list) else X_test.values, Y_train, Y_test, train_idx_df, test_idx_df
# Evaluation
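# evaluate: benchmark every model on a fixed train/test split (no cross validation),
# optionally re-balancing the training labels, then save performance tables, ROC/PRC
# curves and feature importances.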
def evaluate(X_train, Y_train, X_test, Y_test, model_iter, model_param={}, avg='micro', kfold=5, cfg_param={}, global_param={}, lbid=''):
print('Evaluating...')
from keras.utils.io_utils import HDF5Matrix
global common_cfg
FILT_NAMES, CLF_NAMES, PL_NAMES, PL_SET = model_param['glb_filtnames'], model_param['glb_clfnames'], global_param['pl_names'], global_param['pl_set']
	lbidstr = ('_' + (str(lbid) if lbid != -1 else 'all')) if lbid is not None and lbid != '' else ''
	to_hdf = cfg_param.setdefault('to_hdf', False)	# whether the data has been staged in HDF5; consulted by the formatting branches below
	# Format the data
if (type(X_train) == list):
assert all([len(x) == len(X_train[0]) for x in X_train[1:]])
X_train = [pd.DataFrame(x) if (type(x) != pd.io.parsers.TextFileReader and type(x) != pd.DataFrame) else x for x in X_train]
X_train = [pd.concat(x) if (type(x) == pd.io.parsers.TextFileReader and not to_hdf) else x for x in X_train]
else:
if (type(X_train) != pd.io.parsers.TextFileReader and type(X_train) != pd.DataFrame):
X_train = pd.DataFrame(X_train) if type(X_train) != HDF5Matrix else X_train
X_train = pd.concat(X_train) if (type(X_train) == pd.io.parsers.TextFileReader and not to_hdf) else X_train
if (type(Y_train) != pd.io.parsers.TextFileReader and type(Y_train) != pd.DataFrame):
Y_train = pd.DataFrame(Y_train) if (type(Y_train) == pd.io.parsers.TextFileReader and not to_hdf) else Y_train
if (type(Y_train) != HDF5Matrix):
Y_train = Y_train.values.reshape((Y_train.shape[0],)) if (len(Y_train.shape) == 1 or Y_train.shape[1] == 1) else Y_train.values
else:
Y_train = Y_train
if (type(X_test) == list):
assert all([len(x) == len(X_test[0]) for x in X_test[1:]])
X_test = [pd.DataFrame(x) if (type(x) != pd.io.parsers.TextFileReader and type(x) != pd.DataFrame) else x for x in X_test]
X_test = [pd.concat(x) if (type(x) == pd.io.parsers.TextFileReader and not to_hdf) else x for x in X_test]
else:
if (type(X_test) != pd.io.parsers.TextFileReader and type(X_test) != pd.DataFrame):
X_test = pd.DataFrame(X_test) if type(X_test) != HDF5Matrix else X_test
X_test = pd.concat(X_test) if (type(X_test) == pd.io.parsers.TextFileReader and not to_hdf) else X_test
if (type(Y_test) != pd.io.parsers.TextFileReader and type(Y_test) != pd.DataFrame):
Y_test = pd.DataFrame(Y_test) if (type(Y_test) == pd.io.parsers.TextFileReader and not to_hdf) else Y_test
if (type(Y_test) != HDF5Matrix):
Y_test = Y_test.values.reshape((Y_test.shape[0],)) if (len(Y_test.shape) == 1 or Y_test.shape[1] == 1) else Y_test.values
else:
Y_test = Y_test
is_mltl = True if len(Y_train.shape) > 1 and Y_train.shape[1] > 1 or 2 in Y_train else False
print('Benchmark is starting...')
mean_fpr = np.linspace(0, 1, 100)
mean_recall = np.linspace(0, 1, 100)
xdf = X_train[0] if type(X_train)==list else X_train
roc_dict, prc_dict, featw_data, subfeatw_data = [{} for i in range(4)]
## Copy from cross_validate function Start ##
del PL_NAMES[:]
PL_SET.clear()
if (cfg_param.setdefault('npg_ratio', None) is not None):
npg_ratio = cfg_param['npg_ratio']
Y_train = np.array(Y_train) # HDF5Matrix is not working in matrix slicing and boolean operation
y = Y_train[:,0] if (len(Y_train.shape) > 1) else Y_train
if (1.0 * np.abs(y).sum() / Y_train.shape[0] < 1.0 / (npg_ratio + 1)):
all_true = np.arange(Y_train.shape[0])[y > 0].tolist()
all_false = np.arange(Y_train.shape[0])[y <= 0].tolist()
true_id = np.random.choice(len(all_true), size=int(1.0 / npg_ratio * len(all_false)), replace=True)
true_idx = [all_true[i] for i in true_id]
all_train_idx = sorted(set(true_idx + all_false))
			X_train = [x.iloc[all_train_idx] if type(x) != HDF5Matrix else x[all_train_idx] for x in X_train] if (type(X_train) is list) else (X_train.iloc[all_train_idx] if type(X_train) != HDF5Matrix else X_train[all_train_idx])
Y_train = Y_train[all_train_idx,:] if (len(Y_train.shape) > 1) else Y_train[all_train_idx]
results, preds = [[] for x in range(2)]
# Y_test = np.column_stack([np.abs(Y_test).reshape((Y_test.shape[0],-1))] + [label_binarize(lb, classes=[-1,1,0])[:,1] for lb in (np.sign(Y_test).astype('int8').reshape((Y_test.shape[0],-1))).T]) if (len(Y_test.shape) < 2 or Y_test.shape[1] == 1 or np.where(Y_test<0)[0].shape[0]>0) else Y_test
for vars in model_iter(**model_param):
if (global_param['comb']):
mdl_name, mdl = [vars[x] for x in range(2)]
else:
filt_name, filter, clf_name, clf= [vars[x] for x in range(4)]
print('#' * 80)
# Assemble a pipeline
if ('filter' in locals() and filter != None):
model_name = '%s [Ft Filt] & %s [CLF]' % (filt_name, clf_name)
pipeline = Pipeline([('featfilt', clone(filter)), ('clf', clf)])
elif ('clf' in locals() and clf != None):
model_name = '%s [CLF]' % clf_name
pipeline = Pipeline([('clf', clf)])
else:
model_name = mdl_name
pipeline = mdl
if (model_name in PL_SET): continue
PL_NAMES.append(model_name)
PL_SET.add(model_name)
print(model_name)
# Benchmark results
bm_results = benchmark(pipeline, X_train, Y_train, X_test, Y_test, mltl=is_mltl, signed=global_param.setdefault('signed', True if np.where(Y_train<0)[0].shape[0]>0 else False), average=avg)
# Clear the model environment (e.g. GPU resources)
del pipeline
# if (type(pipeline) is Pipeline):
# for cmpn in pipeline.named_steps.values():
# if (getattr(cmpn, "clear", None)): cmpn.clear()
# else:
# if (getattr(pipeline, "clear", None)):
# pipeline.clear()
# Obtain the results
if (is_mltl and avg == 'all'):
results.append([bm_results[x] for x in ['accuracy', 'micro-precision', 'micro-recall', 'micro-fscore', 'macro-precision', 'macro-recall', 'macro-fscore', 'train_time', 'test_time']])
else:
results.append([bm_results[x] for x in ['accuracy', 'precision', 'recall', 'fscore', 'train_time', 'test_time']])
preds.append(bm_results['pred_lb'])
if (cfg_param.setdefault('save_pred', False)):
io.write_npz(dict(pred_lb=bm_results['pred_lb'], pred_prob=bm_results['pred_prob'], true_lb=Y_test), 'pred_%s%s' % (model_name.replace(' ', '_').lower(), lbidstr))
if (is_mltl and avg == 'all'):
micro_id, macro_id = '-'.join([model_name,'micro']), '-'.join([model_name,'macro'])
roc_dict[micro_id] = roc_dict.setdefault(micro_id, 0) + np.interp(mean_fpr, bm_results['micro-roc'][0], bm_results['micro-roc'][1])
roc_dict[macro_id] = roc_dict.setdefault(macro_id, 0) + np.interp(mean_fpr, bm_results['macro-roc'][0], bm_results['macro-roc'][1])
else:
roc_dict[model_name] = roc_dict.setdefault(model_name, 0) + np.interp(mean_fpr, bm_results['roc'][0], bm_results['roc'][1])
prc_dict[model_name] = prc_dict.setdefault(model_name, 0) + np.interp(mean_recall, bm_results['prc'][0], bm_results['prc'][1])
for k, v in bm_results['feat_w'].items():
key = '%s_%s_%s' % (model_name, k[0], k[1])
featw_data[key] = v
for k, v in bm_results['sub_feat_w'].items():
key = '%s_%s_%s' % (model_name, k[0], k[1])
subfeatw_data[key] = v
print('\n')
# Prediction overlap
if (True if len(Y_train.shape) > 1 and Y_train.shape[1] > 1 else False):
preds_mt = np.column_stack([x.ravel() for x in preds])
else:
preds_mt = np.column_stack(preds)
preds.append(Y_test)
tpreds_mt = np.column_stack([x.ravel() for x in preds])
## Copy from cross_validate function End ##
povl = pred_ovl(preds_mt, Y_test)
# Spearman's rank correlation
spearman = stats.spearmanr(tpreds_mt)
# Kendall rank correlation
# kendalltau = stats.kendalltau(preds_mt)
# Pearson correlation
# pearson = stats.pearsonr(preds_mt)
## Save performance data
if (is_mltl and avg == 'all'):
metric_idx = ['Accuracy', 'Micro Precision', 'Micro Recall', 'Micro F score', 'Macro Precision', 'Macro Recall', 'Macro F score', 'Train time', 'Test time']
else:
metric_idx = ['Accuracy', 'Precision', 'Recall', 'F score', 'Train time', 'Test time']
perf_df = pd.DataFrame(np.array(results).T, index=metric_idx, columns=PL_NAMES)
povl_idx = [' & '.join(x) for x in imath.subset(PL_NAMES, min_crdnl=1)]
povl_df = pd.DataFrame(np.array(povl), index=povl_idx, columns=['pred_ovl', 'tpred_ovl'])
spmnr_val_df = pd.DataFrame(spearman[0], index=PL_NAMES+['Annotations'], columns=PL_NAMES+['Annotations'])
spmnr_pval_df = pd.DataFrame(spearman[1], index=PL_NAMES+['Annotations'], columns=PL_NAMES+['Annotations'])
if (cfg_param.setdefault('save_tpred', True)):
io.write_npz(tpreds_mt, 'tpred_clf%s' % lbidstr)
if (cfg_param.setdefault('save_perf', True)):
perf_df.to_excel('perf_clf%s.xlsx' % lbidstr)
if (cfg_param.setdefault('save_perf_npz', False)):
io.write_df(perf_df, 'perf_clf%s.npz' % lbidstr, with_idx=True)
if (cfg_param.setdefault('save_povl', False)):
povl_df.to_excel('povl_clf%s.xlsx' % lbidstr)
if (cfg_param.setdefault('save_povl_npz', False)):
io.write_df(povl_df, 'povl_clf%s.npz' % lbidstr, with_idx=True)
if (cfg_param.setdefault('save_spmnr', False)):
spmnr_val_df.to_excel('spmnr_clf%s.xlsx' % lbidstr)
if (cfg_param.setdefault('save_spmnr_npz', False)):
io.write_df(spmnr_val_df, 'spmnr_clf%s.npz' % lbidstr, with_idx=True)
if (cfg_param.setdefault('save_spmnr_pval', False)):
spmnr_pval_df.to_excel('spmnr_pval_clf%s.xlsx' % lbidstr)
if (cfg_param.setdefault('save_spmnr_pval_npz', False)):
io.write_df(spmnr_pval_df, 'spmnr_pval_clf%s.npz' % lbidstr, with_idx=True)
# Feature importances
try:
save_featw(xdf.columns.values if type(xdf) != HDF5Matrix else np.arange(xdf.shape[1]), featw_data, subfeatw_data, cfg_param=cfg_param, lbid=lbid)
except Exception as e:
print(e)
## Plot figures
if (is_mltl and avg == 'all'):
micro_roc_data, micro_roc_labels, micro_roc_aucs, macro_roc_data, macro_roc_labels, macro_roc_aucs = [[] for i in range(6)]
else:
roc_data, roc_labels, roc_aucs = [[] for i in range(3)]
prc_data, prc_labels, prc_aucs = [[] for i in range(3)]
for pl in PL_NAMES:
if (is_mltl and avg == 'all'):
micro_id, macro_id = '-'.join([pl,'micro']), '-'.join([pl,'macro'])
micro_mean_tpr, macro_mean_tpr = roc_dict[micro_id], roc_dict[macro_id]
micro_roc_auc = metrics.auc(mean_fpr, micro_mean_tpr)
macro_roc_auc = metrics.auc(mean_fpr, macro_mean_tpr)
micro_roc_data.append([mean_fpr, micro_mean_tpr])
micro_roc_aucs.append(micro_roc_auc)
micro_roc_labels.append('%s (AUC=%0.2f)' % (pl, micro_roc_auc))
macro_roc_data.append([mean_fpr, macro_mean_tpr])
macro_roc_aucs.append(macro_roc_auc)
macro_roc_labels.append('%s (AUC=%0.2f)' % (pl, macro_roc_auc))
else:
mean_tpr = roc_dict[pl]
mean_roc_auc = metrics.auc(mean_fpr, mean_tpr)
roc_data.append([mean_fpr, mean_tpr])
roc_aucs.append(mean_roc_auc)
roc_labels.append('%s (AUC=%0.2f)' % (pl, mean_roc_auc))
mean_prcn = prc_dict[pl]
mean_prc_auc = metrics.auc(mean_recall, mean_prcn)
prc_data.append([mean_recall, mean_prcn])
prc_aucs.append(mean_prc_auc)
prc_labels.append('%s (AUC=%0.2f)' % (pl, mean_prc_auc))
group_dict = {}
for i, pl in enumerate(PL_NAMES):
group_dict.setdefault(tuple(set(difflib.get_close_matches(pl, PL_NAMES))), []).append(i)
if (not cfg_param.setdefault('group_by_name', False) or len(group_dict) == len(PL_NAMES)):
groups = None
else:
		group_array = np.array(list(group_dict.values()), dtype=object)
group_array.sort()
groups = group_array.tolist()
if (is_mltl and avg == 'all'):
aucs_df = pd.DataFrame([micro_roc_aucs, macro_roc_aucs, prc_aucs], index=['Micro ROC AUC', 'Macro ROC AUC', 'PRC AUC'], columns=PL_NAMES)
if (cfg_param.setdefault('plot_roc', True)):
plot.plot_roc(micro_roc_data, micro_roc_labels, groups=groups, fname='micro_roc%s'%lbidstr, plot_cfg=common_cfg)
plot.plot_roc(macro_roc_data, macro_roc_labels, groups=groups, fname='macro_roc%s'%lbidstr, plot_cfg=common_cfg)
else:
aucs_df = pd.DataFrame([roc_aucs, prc_aucs], index=['ROC AUC', 'PRC AUC'], columns=PL_NAMES)
if (cfg_param.setdefault('plot_roc', True)):
plot.plot_roc(roc_data, roc_labels, groups=groups, fname='roc%s'%lbidstr, plot_cfg=common_cfg)
if (cfg_param.setdefault('plot_prc', True)):
plot.plot_prc(prc_data, prc_labels, groups=groups, fname='prc%s'%lbidstr, plot_cfg=common_cfg)
if (cfg_param.setdefault('save_auc', False)):
aucs_df.to_excel('auc%s.xlsx' % lbidstr)
filt_num, clf_num = len(FILT_NAMES), len(CLF_NAMES)
if (cfg_param.setdefault('plot_metric', False)):
for mtrc in metric_idx:
mtrc_avg_list, mtrc_std_list = [[] for i in range(2)]
			if (global_param['comb']):
				# Only a single benchmark run is available here, so the error bars are zero
				mtrc_avg = perf_df.loc[mtrc,:].values.reshape((1,-1))
				mtrc_std = np.zeros_like(mtrc_avg, dtype='float')
				plot.plot_bar(mtrc_avg, mtrc_std, xlabels=PL_NAMES, labels=None, title='%s by Classifier and Feature Selection' % mtrc, fname='%s_clf_ft%s' % (mtrc.replace(' ', '_').lower(), lbidstr), plot_cfg=common_cfg)
			else:
				for i in range(filt_num):
					offset = i * clf_num
					mtrc_avg_list.append(perf_df.loc[mtrc].iloc[offset:offset+clf_num].values.reshape((1,-1)))
					mtrc_std_list.append(np.zeros((1, clf_num)))
				mtrc_avg = np.concatenate(mtrc_avg_list)
				mtrc_std = np.concatenate(mtrc_std_list)
				plot.plot_bar(mtrc_avg, mtrc_std, xlabels=CLF_NAMES, labels=FILT_NAMES, title='%s by Classifier and Feature Selection' % mtrc, fname='%s_clf_ft%s' % (mtrc.replace(' ', '_').lower(), lbidstr), plot_cfg=common_cfg)
# Cross validation
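# cross_validate: run k-fold (or stratified/shuffle-split) cross validation over every
# model from model_iter, averaging the metrics, ROC/PRC curves and feature weights across
# folds before saving and plotting them.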
def cross_validate(X, Y, model_iter, model_param={}, avg='micro', kfold=5, cfg_param={}, split_param={}, global_param={}, lbid=''):
print('Cross validating...')
from keras.utils.io_utils import HDF5Matrix
global common_cfg
FILT_NAMES, CLF_NAMES, PL_NAMES, PL_SET = model_param['glb_filtnames'], model_param['glb_clfnames'], global_param['pl_names'], global_param['pl_set']
lbidstr = ('_' + (str(lbid) if lbid != -1 else 'all')) if lbid is not None and lbid != '' else ''
to_hdf, hdf5_fpath = cfg_param.setdefault('to_hdf', False), 'crsval_dataset%s.h5' % lbidstr if cfg_param.setdefault('hdf5_fpath', 'crsval_dataset%s.h5' % lbidstr) is None else cfg_param['hdf5_fpath']
# Format the data
if (type(X) == list):
assert all([len(x) == len(X[0]) for x in X[1:]])
X = [pd.DataFrame(x) if (type(x) != pd.io.parsers.TextFileReader and type(x) != pd.DataFrame) else x for x in X]
X = [pd.concat(x) if (type(x) == pd.io.parsers.TextFileReader and not to_hdf) else x for x in X]
else:
if (type(X) != pd.io.parsers.TextFileReader and type(X) != pd.DataFrame):
X = pd.DataFrame(X) if type(X) != HDF5Matrix else X
X = pd.concat(X) if (type(X) == pd.io.parsers.TextFileReader and not to_hdf) else X
if (type(Y) != pd.io.parsers.TextFileReader and type(Y) != pd.DataFrame):
Y = pd.DataFrame(Y) if (type(Y) == pd.io.parsers.TextFileReader and not to_hdf) else Y
if (type(Y) != HDF5Matrix):
Y_mt = Y.values.reshape((Y.shape[0],)) if (len(Y.shape) == 1 or Y.shape[1] == 1) else Y.values
else:
Y_mt = Y
is_mltl = True if len(Y_mt.shape) > 1 and Y_mt.shape[1] > 1 or 2 in Y_mt else False
print('Benchmark is starting...')
mean_fpr = np.linspace(0, 1, 100)
mean_recall = np.linspace(0, 1, 100)
xdf = X[0] if type(X)==list else X
if (len(split_param) == 0):
if (type(xdf) != HDF5Matrix):
kf = list(KFold(n_splits=kfold, shuffle=True, random_state=0).split(xdf, Y_mt)) if (len(Y_mt.shape) == 1) else list(KFold(n_splits=kfold, shuffle=True, random_state=0).split(xdf, Y_mt[:,0].reshape((Y_mt.shape[0],))))
else:
			kf = list(KFold(n_splits=kfold, shuffle=False).split(xdf[:], Y_mt[:])) if (len(Y_mt.shape) == 1) else list(KFold(n_splits=kfold, shuffle=False).split(xdf[:], Y_mt[:].reshape((-1,)))) # HDF5Matrix does not support shuffled indexing; random_state has no effect without shuffling
else:
split_param['shuffle'] = True if type(xdf) != HDF5Matrix else False
# To-do: implement the split method for multi-label data
if ('train_size' in split_param and 'test_size' in split_param):
kf = list(StratifiedShuffleSplit(n_splits=kfold, train_size=split_param['train_size'], test_size=split_param['test_size'], random_state=0).split(xdf, Y_mt)) if (len(Y_mt.shape) == 1) else list(StratifiedShuffleSplit(n_splits=kfold, train_size=split_param['train_size'], test_size=split_param['test_size'], random_state=0).split(xdf, Y_mt[:,0].reshape((Y_mt.shape[0],))))
else:
kf = list(StratifiedKFold(n_splits=kfold, shuffle=split_param.setdefault('shuffle', True), random_state=0).split(xdf, Y_mt)) if (len(Y_mt.shape) == 1) else list(StratifiedKFold(n_splits=kfold, shuffle=split_param.setdefault('shuffle', True), random_state=0).split(xdf, Y_mt[:,0].reshape((Y_mt.shape[0],))))
crsval_results, crsval_tpreds, crsval_povl, crsval_spearman, crsval_kendalltau, crsval_pearson = [[] for i in range(6)]
crsval_roc, crsval_prc, crsval_featw, crsval_subfeatw = [{} for i in range(4)]
# for i, (train_idx, test_idx) in enumerate(kf):
for i, X_train, X_test, Y_train, Y_test, train_idx_df, test_idx_df in kf2data(kf, X, Y_mt, to_hdf=to_hdf, hdf5_fpath=hdf5_fpath):
del PL_NAMES[:]
PL_SET.clear()
print('\n' + '-' * 80 + '\n' + '%s time validation' % imath.ordinal(i+1) + '\n' + '-' * 80 + '\n')
if (cfg_param.setdefault('save_crsval_idx', False)):
io.write_df(train_idx_df, 'train_idx_crsval_%s%s.npz' % (i, lbidstr), with_idx=True)
io.write_df(test_idx_df, 'test_idx_crsval_%s%s.npz' % (i, lbidstr), with_idx=True)
if (cfg_param.setdefault('npg_ratio', None) is not None):
npg_ratio = cfg_param['npg_ratio']
Y_train = np.array(Y_train) # HDF5Matrix is not working in matrix slicing and boolean operation
y = Y_train[:,0] if (len(Y_train.shape) > 1) else Y_train
if (1.0 * np.abs(y).sum() / Y_train.shape[0] < 1.0 / (npg_ratio + 1)):
all_true = np.arange(Y_train.shape[0])[y > 0].tolist()
all_false = np.arange(Y_train.shape[0])[y <= 0].tolist()
true_id = np.random.choice(len(all_true), size=int(1.0 / npg_ratio * len(all_false)), replace=True)
true_idx = [all_true[i] for i in true_id]
all_train_idx = sorted(set(true_idx + all_false))
				X_train = [x.iloc[all_train_idx] if type(x) != HDF5Matrix else x[all_train_idx] for x in X_train] if (type(X_train) is list) else (X_train.iloc[all_train_idx] if type(X_train) != HDF5Matrix else X_train[all_train_idx])
Y_train = Y_train[all_train_idx,:] if (len(Y_train.shape) > 1) else Y_train[all_train_idx]
results, preds = [[] for x in range(2)]
Y_test = np.array(Y_test)
for vars in model_iter(**model_param):
if (global_param['comb']):
mdl_name, mdl = [vars[x] for x in range(2)]
else:
filt_name, filter, clf_name, clf= [vars[x] for x in range(4)]
print('#' * 80)
# Assemble a pipeline
if ('filter' in locals() and filter != None):
model_name = '%s [Ft Filt] & %s [CLF]' % (filt_name, clf_name)
pipeline = Pipeline([('featfilt', clone(filter)), ('clf', clf)])
elif ('clf' in locals() and clf != None):
model_name = '%s [CLF]' % clf_name
pipeline = Pipeline([('clf', clf)])
else:
model_name = mdl_name
pipeline = mdl
if (model_name in PL_SET): continue
PL_NAMES.append(model_name)
PL_SET.add(model_name)
print(model_name)
# Benchmark results
bm_results = benchmark(pipeline, X_train, Y_train, X_test, Y_test, mltl=is_mltl, signed=global_param.setdefault('signed', True if np.where(Y_mt<0)[0].shape[0]>0 else False), average=avg)
# Clear the model environment (e.g. GPU resources)
del pipeline
# if (type(pipeline) is Pipeline):
# for cmpn in pipeline.named_steps.values():
# if (getattr(cmpn, "clear", None)): cmpn.clear()
# else:
# if (getattr(pipeline, "clear", None)):
# pipeline.clear()
# Obtain the results
if (is_mltl and avg == 'all'):
results.append([bm_results[x] for x in ['accuracy', 'micro-precision', 'micro-recall', 'micro-fscore', 'macro-precision', 'macro-recall', 'macro-fscore', 'train_time', 'test_time']])
else:
# for k, v in zip(['precision', 'recall', 'fscore'], bm_results['metrics'].loc['weighted avg',['precision', 'recall', 'f1-score']]):
# bm_results[k] = v
results.append([bm_results[x] for x in ['accuracy', 'precision', 'recall', 'fscore', 'train_time', 'test_time']])
preds.append(bm_results['pred_lb'])
if (cfg_param.setdefault('save_crsval_pred', False)):
io.write_npz(dict(pred_lb=bm_results['pred_lb'], true_lb=Y_test), 'pred_crsval_%s_%s%s' % (i, model_name.replace(' ', '_').lower(), lbidstr))
if (is_mltl and avg == 'all'):
micro_id, macro_id = '-'.join([model_name,'micro']), '-'.join([model_name,'macro'])
crsval_roc[micro_id] = crsval_roc.setdefault(micro_id, 0) + np.interp(mean_fpr, bm_results['micro-roc'][0], bm_results['micro-roc'][1])
crsval_roc[macro_id] = crsval_roc.setdefault(macro_id, 0) + np.interp(mean_fpr, bm_results['macro-roc'][0], bm_results['macro-roc'][1])
else:
crsval_roc[model_name] = crsval_roc.setdefault(model_name, 0) + np.interp(mean_fpr, bm_results['roc'][0], bm_results['roc'][1])
crsval_prc[model_name] = crsval_prc.setdefault(model_name, 0) + np.interp(mean_recall, bm_results['prc'][0], bm_results['prc'][1])
for k, v in bm_results['feat_w'].items():
key = '%s_%s_%s' % (model_name, k[0], k[1])
crsval_featw.setdefault(key, []).append(v)
for k, v in bm_results['sub_feat_w'].items():
key = '%s_%s_%s' % (model_name, k[0], k[1])
crsval_subfeatw.setdefault(key, []).append(v)
print('\n')
# Cross validation results
crsval_results.append(results)
# Prediction overlap
if (True if len(Y_mt.shape) > 1 and Y_mt.shape[1] > 1 else False):
preds_mt = np.column_stack([x.ravel() for x in preds])
else:
preds_mt = np.column_stack(preds)
preds.append(Y_test)
tpreds_mt = np.column_stack([x.ravel() for x in preds])
crsval_tpreds.append(tpreds_mt)
crsval_povl.append(pred_ovl(preds_mt, Y_test))
# Spearman's rank correlation
crsval_spearman.append(stats.spearmanr(tpreds_mt))
# Kendall rank correlation
# crsval_kendalltau.append(stats.kendalltau(preds_mt))
# Pearson correlation
# crsval_pearson.append(stats.pearsonr(preds_mt))
del X_train, X_test, Y_train, Y_test
print('\n')
perf_avg = np.array(crsval_results).mean(axis=0)
perf_std = np.array(crsval_results).std(axis=0)
povl_avg = np.array(crsval_povl).mean(axis=0).round()
spmnr_avg = np.array([crsp[0] for crsp in crsval_spearman]).mean(axis=0)
spmnr_pval = np.array([crsp[1] for crsp in crsval_spearman]).mean(axis=0)
# kndtr_avg = np.array([crkdt[0] for crkdt in crsval_kendalltau).mean(axis=0)
# kndtr_pval = np.array([crkdt[1] for crkdt in crsval_kendalltau]).mean(axis=0)
# prsnr_avg = np.array([crprs[0] for crprs in crsval_pearson).mean(axis=0)
# prsnr_pval = np.array([crprs[1] for crprs in crsval_pearson]).mean(axis=0)
## Save performance data
if (is_mltl and avg == 'all'):
metric_idx = ['Accuracy', 'Micro Precision', 'Micro Recall', 'Micro F score', 'Macro Precision', 'Macro Recall', 'Macro F score', 'Train time', 'Test time']
else:
metric_idx = ['Accuracy', 'Precision', 'Recall', 'F score', 'Train time', 'Test time']
perf_avg_df = pd.DataFrame(perf_avg.T, index=metric_idx, columns=PL_NAMES)
perf_std_df = pd.DataFrame(perf_std.T, index=metric_idx, columns=PL_NAMES)
povl_idx = [' & '.join(x) for x in imath.subset(PL_NAMES, min_crdnl=1)]
povl_avg_df = pd.DataFrame(povl_avg, index=povl_idx, columns=['pred_ovl', 'tpred_ovl'])
spmnr_avg_df = pd.DataFrame(spmnr_avg, index=PL_NAMES+['Annotations'], columns=PL_NAMES+['Annotations'])
spmnr_pval_df = pd.DataFrame(spmnr_pval, index=PL_NAMES+['Annotations'], columns=PL_NAMES+['Annotations'])
if (cfg_param.setdefault('save_tpred', True)):
io.write_npz(crsval_tpreds, 'tpred_clf%s' % lbidstr)
if (cfg_param.setdefault('save_perf_avg', True)):
perf_avg_df.to_excel('perf_avg_clf%s.xlsx' % lbidstr)
if (cfg_param.setdefault('save_perf_avg_npz', False)):
io.write_df(perf_avg_df, 'perf_avg_clf%s.npz' % lbidstr, with_idx=True)
if (cfg_param.setdefault('save_perf_std', True)):
perf_std_df.to_excel('perf_std_clf%s.xlsx' % lbidstr)
if (cfg_param.setdefault('save_perf_std_npz', False)):
io.write_df(perf_std_df, 'perf_std_clf%s.npz' % lbidstr, with_idx=True)
if (cfg_param.setdefault('save_povl', False)):
povl_avg_df.to_excel('cpovl_avg_clf%s.xlsx' % lbidstr)
if (cfg_param.setdefault('save_povl_npz', False)):
io.write_df(povl_avg_df, 'povl_avg_clf%s.npz' % lbidstr, with_idx=True)
if (cfg_param.setdefault('save_spmnr_avg', False)):
spmnr_avg_df.to_excel('spmnr_avg_clf%s.xlsx' % lbidstr)
if (cfg_param.setdefault('save_spmnr_avg_npz', False)):
io.write_df(spmnr_avg_df, 'spmnr_avg_clf%s.npz' % lbidstr, with_idx=True)
if (cfg_param.setdefault('save_spmnr_pval', False)):
spmnr_pval_df.to_excel('spmnr_pval_clf%s.xlsx' % lbidstr)
if (cfg_param.setdefault('save_spmnr_pval_npz', False)):
io.write_df(spmnr_pval_df, 'spmnr_pval_clf%s.npz' % lbidstr, with_idx=True)
# Feature importances
try:
save_featw(xdf.columns.values if type(xdf) != HDF5Matrix else np.arange(xdf.shape[1]), crsval_featw, crsval_subfeatw, cfg_param=cfg_param, lbid=lbid)
except Exception as e:
print(e)
## Plot figures
if (is_mltl and avg == 'all'):
micro_roc_data, micro_roc_labels, micro_roc_aucs, macro_roc_data, macro_roc_labels, macro_roc_aucs = [[] for i in range(6)]
else:
roc_data, roc_labels, roc_aucs = [[] for i in range(3)]
prc_data, prc_labels, prc_aucs = [[] for i in range(3)]
for pl in PL_NAMES:
if (is_mltl and avg == 'all'):
micro_id, macro_id = '-'.join([pl,'micro']), '-'.join([pl,'macro'])
micro_mean_tpr, macro_mean_tpr = crsval_roc[micro_id], crsval_roc[macro_id]
micro_mean_tpr, macro_mean_tpr = micro_mean_tpr / len(kf), macro_mean_tpr / len(kf)
micro_roc_auc = metrics.auc(mean_fpr, micro_mean_tpr)
macro_roc_auc = metrics.auc(mean_fpr, macro_mean_tpr)
micro_roc_data.append([mean_fpr, micro_mean_tpr])
micro_roc_aucs.append(micro_roc_auc)
micro_roc_labels.append('%s (AUC=%0.2f)' % (pl, micro_roc_auc))
macro_roc_data.append([mean_fpr, macro_mean_tpr])
macro_roc_aucs.append(macro_roc_auc)
macro_roc_labels.append('%s (AUC=%0.2f)' % (pl, macro_roc_auc))
else:
mean_tpr = crsval_roc[pl]
mean_tpr /= len(kf)
mean_roc_auc = metrics.auc(mean_fpr, mean_tpr)
roc_data.append([mean_fpr, mean_tpr])
roc_aucs.append(mean_roc_auc)
roc_labels.append('%s (AUC=%0.2f)' % (pl, mean_roc_auc))
mean_prcn = crsval_prc[pl]
mean_prcn /= len(kf)
mean_prc_auc = metrics.auc(mean_recall, mean_prcn)
prc_data.append([mean_recall, mean_prcn])
prc_aucs.append(mean_prc_auc)
prc_labels.append('%s (AUC=%0.2f)' % (pl, mean_prc_auc))
group_dict = {}
for i, pl in enumerate(PL_NAMES):
group_dict.setdefault(tuple(set(difflib.get_close_matches(pl, PL_NAMES))), []).append(i)
if (not cfg_param.setdefault('group_by_name', False) or len(group_dict) == len(PL_NAMES)):
groups = None
else:
		group_array = np.array(list(group_dict.values()), dtype=object)
group_array.sort()
groups = group_array.tolist()
if (is_mltl and avg == 'all'):
aucs_df = pd.DataFrame([micro_roc_aucs, macro_roc_aucs, prc_aucs], index=['Micro ROC AUC', 'Macro ROC AUC', 'PRC AUC'], columns=PL_NAMES)
if (cfg_param.setdefault('plot_roc', True)):
plot.plot_roc(micro_roc_data, micro_roc_labels, groups=groups, fname='micro_roc%s'%lbidstr, plot_cfg=common_cfg)
plot.plot_roc(macro_roc_data, macro_roc_labels, groups=groups, fname='macro_roc%s'%lbidstr, plot_cfg=common_cfg)
else:
aucs_df = pd.DataFrame([roc_aucs, prc_aucs], index=['ROC AUC', 'PRC AUC'], columns=PL_NAMES)
if (cfg_param.setdefault('plot_roc', True)):
plot.plot_roc(roc_data, roc_labels, groups=groups, fname='roc%s'%lbidstr, plot_cfg=common_cfg)
if (cfg_param.setdefault('plot_prc', True)):
plot.plot_prc(prc_data, prc_labels, groups=groups, fname='prc%s'%lbidstr, plot_cfg=common_cfg)
if (cfg_param.setdefault('save_auc', False)):
aucs_df.to_excel('auc%s.xlsx' % lbidstr)
filt_num, clf_num = len(FILT_NAMES), len(CLF_NAMES)
if (cfg_param.setdefault('plot_metric', False)):
for mtrc in metric_idx:
mtrc_avg_list, mtrc_std_list = [[] for i in range(2)]
if (global_param['comb']):
				mtrc_avg = perf_avg_df.loc[mtrc,:].values.reshape((1,-1))
				mtrc_std = perf_std_df.loc[mtrc,:].values.reshape((1,-1))
plot.plot_bar(mtrc_avg, mtrc_std, xlabels=PL_NAMES, labels=None, title='%s by Classifier and Feature Selection' % mtrc, fname='%s_clf_ft%s' % (mtrc.replace(' ', '_').lower(), lbidstr), plot_cfg=common_cfg)
else:
for i in range(filt_num):
offset = i * clf_num
					mtrc_avg_list.append(perf_avg_df.loc[mtrc].iloc[offset:offset+clf_num].values.reshape((1,-1)))
					mtrc_std_list.append(perf_std_df.loc[mtrc].iloc[offset:offset+clf_num].values.reshape((1,-1)))
mtrc_avg = np.concatenate(mtrc_avg_list)
mtrc_std = np.concatenate(mtrc_std_list)
plot.plot_bar(mtrc_avg, mtrc_std, xlabels=CLF_NAMES, labels=FILT_NAMES, title='%s by Classifier and Feature Selection' % mtrc, fname='%s_clf_ft%s' % (mtrc.replace(' ', '_').lower(), lbidstr), plot_cfg=common_cfg)
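# tune_param: hyper-parameter search with scikit-learn's GridSearchCV or
# RandomizedSearchCV; returns the best setting plus mean/std score cubes indexed by the
# parameter grid so the search surface can be inspected or plotted.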
def tune_param(mdl_name, mdl, X, Y, rdtune, params, mltl=False, avg='micro', n_jobs=-1):
if (rdtune):
param_dist, n_iter = [params[k] for k in ['param_dist', 'n_iter']]
		grid = RandomizedSearchCV(estimator=mdl, param_distributions=param_dist, n_iter=n_iter, scoring='f1_%s' % avg if mltl else 'f1', n_jobs=n_jobs, error_score=0, return_train_score=True)
else:
param_grid, cv = [params[k] for k in ['param_grid', 'cv']]
		grid = GridSearchCV(estimator=mdl, param_grid=param_grid, scoring='f1_%s' % avg if mltl else 'f1', cv=cv, n_jobs=n_jobs, error_score=0, return_train_score=True)
grid.fit(X, Y)
print("The best parameters of [%s] are %s, with a score of %0.3f" % (mdl_name, grid.best_params_, grid.best_score_))
# Store all the parameter candidates into a dictionary of list
if (rdtune):
param_grid = {}
for p_option in grid.cv_results_['params']:
for p_name, p_val in p_option.items():
param_grid.setdefault(p_name, []).append(p_val)
else:
param_grid = grid.param_grid
	# Index the parameter names and values
dim_names = dict([(k, i) for i, k in enumerate(param_grid.keys())])
dim_vals = {}
for pn in dim_names.keys():
dim_vals[pn] = dict([(k, i) for i, k in enumerate(param_grid[pn])])
# Create data cube
score_avg_cube = np.ndarray(shape=[len(param_grid[k]) for k in param_grid.keys()], dtype='float')
score_std_cube = np.ndarray(shape=[len(param_grid[k]) for k in param_grid.keys()], dtype='float')
# Calculate the score list
score_avg_list = (np.array(grid.cv_results_['mean_train_score']) + np.array(grid.cv_results_['mean_test_score'])) / 2
score_std_list = (np.array(grid.cv_results_['std_train_score']) + np.array(grid.cv_results_['std_test_score'])) / 2
# Fill in the data cube
for i, p_option in enumerate(grid.cv_results_['params']):
idx = np.zeros((len(dim_names),), dtype='int')
for k, v in p_option.items():
idx[dim_names[k]] = dim_vals[k][v]
score_avg_cube[tuple(idx)] = score_avg_list[i]
score_std_cube[tuple(idx)] = score_std_list[i]
return grid.best_params_, grid.best_score_, score_avg_cube, score_std_cube, dim_names, dim_vals
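# tune_param_optunity: the same parameter search driven by the optunity solvers (e.g.
# particle swarm), using a cross-validated objective built around the supplied scorer.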
def tune_param_optunity(mdl_name, mdl, X, Y, perf_func=None, scoring='f1', optfunc='max', solver='particle swarm', params={}, mltl=False, avg='micro', n_jobs=-1):
import optunity
struct, param_space, folds, n_iter = [params.setdefault(k, None) for k in ['struct', 'param_space', 'folds', 'n_iter']]
ext_params = dict.fromkeys(param_space.keys()) if (not struct) else dict.fromkeys(params.setdefault('param_names', []))
kwargs = dict([('num_iter', n_iter), ('num_folds', folds)]) if (type(folds) is int) else dict([('num_iter', n_iter), ('num_folds', folds.get_n_splits()), ('folds', [list(folds.split(X))] * n_iter)])
@optunity.cross_validated(x=X, y=Y, **kwargs)
def default_perf(x_train, y_train, x_test, y_test, **ext_params):
mdl.fit(x_train, y_train)
if (scoring == 'roc'):
preds = get_score(mdl, x_test, mltl)
if (mltl):
from . import metric as imetric
return imetric.mltl_roc(y_test, preds, average=avg)
else:
preds = mdl.predict(x_test)
score_func = getattr(optunity, scoring) if (hasattr(optunity, scoring)) else None
score_func = getattr(metrics, scoring+'_score') if (score_func is None and hasattr(metrics, scoring+'_score')) else score_func
if (score_func is None):
print('Score function %s is not supported!' % scoring)
sys.exit(1)
return score_func(y_test, preds, average=avg)
perf = perf_func if callable(perf_func) else default_perf
if (optfunc == 'max'):
config, info, _ = optunity.maximize(perf, num_evals=n_iter, solver_name=solver, pmap=optunity.parallel.create_pmap(n_jobs), **param_space) if (not struct) else optunity.maximize_structured(perf, search_space=param_space, num_evals=n_iter, pmap=optunity.parallel.create_pmap(n_jobs))
elif (optfunc == 'min'):
config, info, _ = optunity.minimize(perf, num_evals=n_iter, solver_name=solver, pmap=optunity.parallel.create_pmap(n_jobs), **param_space) if (not struct) else optunity.minimize_structured(perf, search_space=param_space, num_evals=n_iter, pmap=optunity.parallel.create_pmap(n_jobs))
print("The best parameters of [%s] are %s, with a score of %0.3f" % (mdl_name, config, info.optimum))
cl_df = optunity.call_log2dataframe(info.call_log)
cl_df.to_csv('call_log.csv')
# Store all the parameter candidates into a dictionary of list
param_grid = dict([(x, sorted(set(cl_df[x]))) for x in cl_df.columns if x != 'value'])
	param_names = list(param_grid.keys())
	# Index the parameter names and values
dim_names = dict([(k, i) for i, k in enumerate(param_names)])
dim_vals = {}
for pn in dim_names.keys():
dim_vals[pn] = dict([(k, i) for i, k in enumerate(param_grid[pn])])
# Create data cube
score_avg_cube = np.ndarray(shape=[len(param_grid[k]) for k in param_names], dtype='float') * np.nan
score_std_cube = np.ndarray(shape=[len(param_grid[k]) for k in param_names], dtype='float') * np.nan
# Calculate the score list
score_avg_list = cl_df['value']
score_std_list = np.zeros_like(cl_df['value'])
# Fill in the data cube
for i, p_option in cl_df[param_names].iterrows():
idx = np.zeros((len(dim_names),), dtype='int')
for k, v in p_option.items():
idx[dim_names[k]] = dim_vals[k][v]
score_avg_cube[tuple(idx)] = score_avg_list[i]
score_std_cube[tuple(idx)] = score_std_list[i]
return config, info.optimum, score_avg_cube, score_std_cube, dim_names, dim_vals
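# tune_param_hyperopt: Bayesian-style parameter search with hyperopt (TPE by default);
# the objective cross-validates the model for each sampled configuration and the trial
# log is summarised in a DataFrame.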
def tune_param_hyperopt(mdl_name, mdl, X, Y, obj_func=None, scoring='f1', solver=None, params={}, mltl=False, avg='micro', n_jobs=-1):
import hyperopt
param_space, trials, folds, max_evals = [params.setdefault(k, v) for k, v in zip(['param_space', 'trials', 'folds', 'n_iter'], [{}, hyperopt.Trials(), 5, 500])]
ext_params = dict.fromkeys(param_space.keys())
num_folds = folds if (type(folds) is int) else folds.get_n_splits()
	def default_obj(parameters):
		from sklearn.model_selection import cross_validate as cv
		# Apply the sampled hyper-parameters before scoring; otherwise every trial would evaluate the same model
		mdl.set_params(**parameters)
		cv_results = cv(mdl, X, Y, scoring=scoring, cv=num_folds, return_train_score=False)
		return {'loss': 1-cv_results['test_score'].mean(), 'params': parameters, 'status': hyperopt.STATUS_OK}
objective = obj_func if callable(obj_func) else default_obj
best_config = hyperopt.fmin(fn=objective, space=param_space, algo=solver if solver else hyperopt.tpe.suggest, max_evals=max_evals, trials=trials)
best_trials = sorted(trials.results, key=lambda x: x['loss'], reverse=False)
best_score = 1 - best_trials[0]['loss']
print("The best parameters of [%s] are %s, with a score of %0.3f" % (mdl_name, best_config, best_score))
params, losses = zip(*[(x['params'], x['loss']) for x in best_trials])
	tune_df = pd.concat([pd.DataFrame(params), pd.DataFrame({'loss': losses})], axis=1)
"""
Tests the usecols functionality during parsing
for all of the parsers defined in parsers.py
"""
from io import StringIO
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas import DataFrame, Index
import pandas._testing as tm
_msg_validate_usecols_arg = (
"'usecols' must either be list-like "
"of all strings, all unicode, all "
"integers or a callable."
)
_msg_validate_usecols_names = (
"Usecols do not match columns, columns expected but not found: {0}"
)
def test_raise_on_mixed_dtype_usecols(all_parsers):
# See gh-12678
data = """a,b,c
1000,2000,3000
4000,5000,6000
"""
usecols = [0, "b", 2]
parser = all_parsers
with pytest.raises(ValueError, match=_msg_validate_usecols_arg):
parser.read_csv(StringIO(data), usecols=usecols)
@pytest.mark.parametrize("usecols", [(1, 2), ("b", "c")])
def test_usecols(all_parsers, usecols):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
result = parser.read_csv(StringIO(data), usecols=usecols)
expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=["b", "c"])
tm.assert_frame_equal(result, expected)
def test_usecols_with_names(all_parsers):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
names = ["foo", "bar"]
result = parser.read_csv(StringIO(data), names=names, usecols=[1, 2], header=0)
expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=names)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"names,usecols", [(["b", "c"], [1, 2]), (["a", "b", "c"], ["b", "c"])]
)
def test_usecols_relative_to_names(all_parsers, names, usecols):
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
result = parser.read_csv(StringIO(data), names=names, header=None, usecols=usecols)
expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=["b", "c"])
tm.assert_frame_equal(result, expected)
def test_usecols_relative_to_names2(all_parsers):
# see gh-5766
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
result = parser.read_csv(
StringIO(data), names=["a", "b"], header=None, usecols=[0, 1]
)
expected = DataFrame([[1, 2], [4, 5], [7, 8], [10, 11]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_usecols_name_length_conflict(all_parsers):
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
msg = "Number of passed names did not match number of header fields in the file"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), names=["a", "b"], header=None, usecols=[1])
def test_usecols_single_string(all_parsers):
# see gh-20558
parser = all_parsers
data = """foo, bar, baz
1000, 2000, 3000
4000, 5000, 6000"""
with pytest.raises(ValueError, match=_msg_validate_usecols_arg):
parser.read_csv(StringIO(data), usecols="foo")
@pytest.mark.parametrize(
"data", ["a,b,c,d\n1,2,3,4\n5,6,7,8", "a,b,c,d\n1,2,3,4,\n5,6,7,8,"]
)
def test_usecols_index_col_false(all_parsers, data):
# see gh-9082
parser = all_parsers
usecols = ["a", "c", "d"]
expected = DataFrame({"a": [1, 5], "c": [3, 7], "d": [4, 8]})
result = parser.read_csv(StringIO(data), usecols=usecols, index_col=False)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", ["b", 0])
@pytest.mark.parametrize("usecols", [["b", "c"], [1, 2]])
def test_usecols_index_col_conflict(all_parsers, usecols, index_col):
# see gh-4201: test that index_col as integer reflects usecols
parser = all_parsers
data = "a,b,c,d\nA,a,1,one\nB,b,2,two"
expected = DataFrame({"c": [1, 2]}, index=Index(["a", "b"], name="b"))
result = parser.read_csv(StringIO(data), usecols=usecols, index_col=index_col)
tm.assert_frame_equal(result, expected)
def test_usecols_index_col_conflict2(all_parsers):
# see gh-4201: test that index_col as integer reflects usecols
parser = all_parsers
data = "a,b,c,d\nA,a,1,one\nB,b,2,two"
expected = DataFrame({"b": ["a", "b"], "c": [1, 2], "d": ("one", "two")})
expected = expected.set_index(["b", "c"])
result = parser.read_csv(
StringIO(data), usecols=["b", "c", "d"], index_col=["b", "c"]
)
tm.assert_frame_equal(result, expected)
def test_usecols_implicit_index_col(all_parsers):
# see gh-2654
parser = all_parsers
data = "a,b,c\n4,apple,bat,5.7\n8,orange,cow,10"
result = parser.read_csv(StringIO(data), usecols=["a", "b"])
expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_regex_sep(all_parsers):
# see gh-2733
parser = all_parsers
data = "a b c\n4 apple bat 5.7\n8 orange cow 10"
result = parser.read_csv(StringIO(data), sep=r"\s+", usecols=("a", "b"))
expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_with_whitespace(all_parsers):
parser = all_parsers
data = "a b c\n4 apple bat 5.7\n8 orange cow 10"
result = parser.read_csv(StringIO(data), delim_whitespace=True, usecols=("a", "b"))
expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"usecols,expected",
[
# Column selection by index.
([0, 1], DataFrame(data=[[1000, 2000], [4000, 5000]], columns=["2", "0"])),
# Column selection by name.
(["0", "1"], DataFrame(data=[[2000, 3000], [5000, 6000]], columns=["0", "1"])),
],
)
def test_usecols_with_integer_like_header(all_parsers, usecols, expected):
parser = all_parsers
data = """2,0,1
1000,2000,3000
4000,5000,6000"""
result = parser.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("usecols", [[0, 2, 3], [3, 0, 2]])
def test_usecols_with_parse_dates(all_parsers, usecols):
# see gh-9755
data = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parser = all_parsers
parse_dates = [[1, 2]]
cols = {
"a": [0, 0],
"c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
}
expected = DataFrame(cols, columns=["c_d", "a"])
result = parser.read_csv(StringIO(data), usecols=usecols, parse_dates=parse_dates)
tm.assert_frame_equal(result, expected)
def test_usecols_with_parse_dates2(all_parsers):
# see gh-13604
parser = all_parsers
data = """2008-02-07 09:40,1032.43
2008-02-07 09:50,1042.54
2008-02-07 10:00,1051.65"""
names = ["date", "values"]
usecols = names[:]
parse_dates = [0]
index = Index(
[
Timestamp("2008-02-07 09:40"),
Timestamp("2008-02-07 09:50"),
Timestamp("2008-02-07 10:00"),
],
name="date",
)
cols = {"values": [1032.43, 1042.54, 1051.65]}
expected = DataFrame(cols, index=index)
result = parser.read_csv(
StringIO(data),
parse_dates=parse_dates,
index_col=0,
usecols=usecols,
header=None,
names=names,
)
tm.assert_frame_equal(result, expected)
def test_usecols_with_parse_dates3(all_parsers):
# see gh-14792
parser = all_parsers
data = """a,b,c,d,e,f,g,h,i,j
2016/09/21,1,1,2,3,4,5,6,7,8"""
usecols = list("abcdefghij")
parse_dates = [0]
cols = {
"a": Timestamp("2016-09-21"),
"b": [1],
"c": [1],
"d": [2],
"e": [3],
"f": [4],
"g": [5],
"h": [6],
"i": [7],
"j": [8],
}
expected = DataFrame(cols, columns=usecols)
result = parser.read_csv(StringIO(data), usecols=usecols, parse_dates=parse_dates)
tm.assert_frame_equal(result, expected)
def test_usecols_with_parse_dates4(all_parsers):
data = "a,b,c,d,e,f,g,h,i,j\n2016/09/21,1,1,2,3,4,5,6,7,8"
usecols = list("abcdefghij")
parse_dates = [[0, 1]]
parser = all_parsers
cols = {
"a_b": "2016/09/21 1",
"c": [1],
"d": [2],
"e": [3],
"f": [4],
"g": [5],
"h": [6],
"i": [7],
"j": [8],
}
expected = DataFrame(cols, columns=["a_b"] + list("cdefghij"))
result = parser.read_csv(StringIO(data), usecols=usecols, parse_dates=parse_dates)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("usecols", [[0, 2, 3], [3, 0, 2]])
@pytest.mark.parametrize(
"names",
[
list("abcde"), # Names span all columns in original data.
list("acd"), # Names span only the selected columns.
],
)
def test_usecols_with_parse_dates_and_names(all_parsers, usecols, names):
# see gh-9755
s = """0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
parser = all_parsers
cols = {
"a": [0, 0],
"c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
}
expected = DataFrame(cols, columns=["c_d", "a"])
result = parser.read_csv(
StringIO(s), names=names, parse_dates=parse_dates, usecols=usecols
)
tm.assert_frame_equal(result, expected)
def test_usecols_with_unicode_strings(all_parsers):
# see gh-13219
data = """AAA,BBB,CCC,DDD
0.056674973,8,True,a
2.613230982,2,False,b
3.568935038,7,False,a"""
parser = all_parsers
exp_data = {
"AAA": {0: 0.056674972999999997, 1: 2.6132309819999997, 2: 3.5689350380000002},
"BBB": {0: 8, 1: 2, 2: 7},
}
    expected = DataFrame(exp_data)
    result = parser.read_csv(StringIO(data), usecols=["AAA", "BBB"])
    tm.assert_frame_equal(result, expected)
import pandas as pd
import numpy as np
from torch.utils.data import Dataset
from .electricity_utils import *
from .utils import *
class ElectricityDataset(Dataset):
@staticmethod
def get_split(data_folder, num_encoder_steps=7 * 24):
data_path = os.path.join(data_folder, 'LD2011_2014.txt')
if not os.path.isfile(data_path):
ElectricityDataset.download(data_path)
formatter = ElectricityFormatter()
train, val, test = formatter.split_data(
ElectricityDataset.aggregating_to_hourly_data(
pd.read_csv(data_path, index_col=0, sep=';', decimal=',')
)
)
return [ElectricityDataset(data, formatter, num_encoder_steps) for data in [train, val, test]]
def __init__(self, data, formatter, num_encoder_steps):
super().__init__()
self.formatter = formatter
self.num_encoder_steps = num_encoder_steps
self.data = data.reset_index(drop=True)
self.data_index, self.col_mappings = self.build_data_index(self.data)
def __len__(self):
return self.data_index.shape[0]
def __getitem__(self, idx):
_data_index = self.data.iloc[self.data_index.init_abs.iloc[idx]:self.data_index.end_abs.iloc[idx]]
data_map = {}
for k in self.col_mappings:
cols = self.col_mappings[k]
if k not in data_map:
data_map[k] = [_data_index[cols].values]
else:
data_map[k].append(_data_index[cols].values)
for k in data_map:
data_map[k] = np.concatenate(data_map[k], axis=0)
scaler = self.formatter._target_scaler[data_map["identifier"][0][0]]
outputs = data_map['outputs'][self.num_encoder_steps:, 0]
return data_map['inputs'], outputs, scaler.mean_, scaler.scale_
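    # Hypothetical usage sketch (the data folder path is an assumption, not from the source):
    #   train_ds, valid_ds, test_ds = ElectricityDataset.get_split('./data')
    #   inputs, outputs, target_mean, target_scale = train_ds[0]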
def build_data_index(self, data):
column_definition = self.formatter._column_definition
col_mappings = {
'identifier': [get_single_col_by_input_type(InputTypes.ID, column_definition)],
'time': [get_single_col_by_input_type(InputTypes.TIME, column_definition)],
'outputs': [get_single_col_by_input_type(InputTypes.TARGET, column_definition)],
'inputs': [tup[0] for tup in column_definition if tup[2] not in {InputTypes.ID, InputTypes.TIME}]
}
lookback = self.formatter.get_time_steps()
data_index = self.get_index_filtering(data, col_mappings["identifier"], col_mappings["outputs"], lookback)
group_size = data.groupby(col_mappings["identifier"]).apply(lambda x: x.shape[0]).mean()
data_index = data_index[data_index.end_rel < group_size].reset_index()
return data_index, col_mappings
def get_index_filtering(self, data, id_col, target_col, lookback):
g = data.groupby(id_col)
df_index_abs = g[target_col].transform(lambda x: x.index+lookback) \
.reset_index() \
.rename(columns={'index': 'init_abs', target_col[0]: 'end_abs'})
df_index_rel_init = g[target_col].transform(lambda x: x.reset_index(drop=True).index) \
.rename(columns={target_col[0]: 'init_rel'})
df_index_rel_end = g[target_col].transform(lambda x: x.reset_index(drop=True).index+lookback) \
.rename(columns={target_col[0]: 'end_rel'})
df_total_count = g[target_col].transform(lambda x: x.shape[0] - lookback + 1) \
.rename(columns = {target_col[0]: 'group_count'})
return pd.concat([df_index_abs,
df_index_rel_init,
df_index_rel_end,
data[id_col],
df_total_count], axis = 1).reset_index(drop = True)
@staticmethod
def aggregating_to_hourly_data(df):
df.index = pd.to_datetime(df.index)
df.sort_index(inplace=True)
# Used to determine the start and end dates of a series
output = df.resample('1h').mean().replace(0., np.nan)
earliest_time = output.index.min()
df_list = []
for label in output:
print('Processing {}'.format(label))
srs = output[label]
start_date = min(srs.fillna(method='ffill').dropna().index)
end_date = max(srs.fillna(method='bfill').dropna().index)
active_range = (srs.index >= start_date) & (srs.index <= end_date)
srs = srs[active_range].fillna(0.)
            tmp = pd.DataFrame({'power_usage': srs})
from kernels import *
from classifiers import *
import time, os
import pandas as pd
import numpy as np
def make_dataset_uniform_start(save_dir="../datasets"):
Xtr0 = pd.read_csv(os.path.join(save_dir, "Xtr0.csv"))
Xtr1 = pd.read_csv(os.path.join(save_dir, "Xtr1.csv"))
Xtr2 = pd.read_csv(os.path.join(save_dir, "Xtr2.csv"))
Ytr0 = pd.read_csv(os.path.join(save_dir, "Ytr0.csv"))
Ytr1 = pd.read_csv(os.path.join(save_dir, "Ytr1.csv"))
Ytr2 = pd.read_csv(os.path.join(save_dir, "Ytr2.csv"))
Xte0 = pd.read_csv(os.path.join(save_dir, "Xte0.csv"))
Xte1 = pd.read_csv(os.path.join(save_dir, "Xte1.csv"))
Xte2 = pd.read_csv(os.path.join(save_dir, "Xte2.csv"))
    Xtr = pd.concat([Xtr0, Xtr1, Xtr2])
import os
root_path = os.path.dirname(os.path.abspath('__file__'))
import sys
import glob
import pandas as pd
import numpy as np
from sklearn import decomposition
import deprecated
import logging
sys.path.append(root_path)
from config.globalLog import logger
def generate_monoscale_samples(source_file, save_path, lags_dict, column, test_len, lead_time=1,regen=False):
"""Generate learning samples for autoregression problem using original time series.
Args:
'source_file' -- ['String'] The source data file path.
'save_path' --['String'] The path to restore the training, development and testing samples.
'lags_dict' -- ['int dict'] The lagged time for original time series.
        'column' -- ['String'] The column's name used to read the source data by pandas.
'test_len' --['int'] The length of development and testing set.
'lead_time' --['int'] The lead time.
"""
    logger.info('Generating multi-step decomposition-ensemble hindcasting samples')
save_path = save_path+'/'+str(lead_time)+'_ahead_pacf/'
logger.info('Source file:{}'.format(source_file))
logger.info('Save path:{}'.format(save_path))
if not os.path.exists(save_path):
os.makedirs(save_path)
if len(os.listdir(save_path))>0 and not regen:
logger.info('Learning samples have been generated!')
else:
# Load data from local dick
if '.xlsx' in source_file:
dataframe = pd.read_excel(source_file)[column]
elif '.csv' in source_file:
dataframe = pd.read_csv(source_file)[column]
# convert pandas dataframe to numpy array
nparr = np.array(dataframe)
# Create an empty pandas Dataframe
full_samples = pd.DataFrame()
# Generate input series based on lag and add these series to full dataset
lag = lags_dict['ORIG']
for i in range(lag):
x = pd.DataFrame(nparr[i:dataframe.shape[0] -
(lag - i)], columns=['X' + str(i + 1)])
x = x.reset_index(drop=True)
full_samples = pd.concat([full_samples, x], axis=1, sort=False)
# Generate label data
label = pd.DataFrame(nparr[lag+lead_time-1:], columns=['Y'])
label = label.reset_index(drop=True)
full_samples = full_samples[:full_samples.shape[0]-(lead_time-1)]
full_samples = full_samples.reset_index(drop=True)
# Add labled data to full_data_set
full_samples = pd.concat([full_samples, label], axis=1, sort=False)
# Get the length of this series
series_len = full_samples.shape[0]
# Get the training and developing set
train_dev_samples = full_samples[0:(series_len - test_len)]
# Get the testing set.
test_samples = full_samples[(series_len - test_len):series_len]
# train_dev_len = train_dev_samples.shape[0]
train_samples = full_samples[0:(series_len - test_len - test_len)]
dev_samples = full_samples[(
series_len - test_len - test_len):(series_len - test_len)]
assert (train_samples.shape[0] + dev_samples.shape[0] +
test_samples.shape[0]) == series_len
# Get the max and min value of each series
series_max = train_samples.max(axis=0)
series_min = train_samples.min(axis=0)
# Normalize each series to the range between -1 and 1
train_samples = 2 * (train_samples - series_min) / \
(series_max - series_min) - 1
dev_samples = 2 * (dev_samples - series_min) / \
(series_max - series_min) - 1
test_samples = 2 * (test_samples - series_min) / \
(series_max - series_min) - 1
        logger.info('Series length:{}'.format(series_len))
logger.info(
'Training-development sample size:{}'.format(train_dev_samples.shape[0]))
logger.info('Training sample size:{}'.format(train_samples.shape[0]))
logger.info('Development sample size:{}'.format(dev_samples.shape[0]))
logger.info('Testing sample size:{}'.format(test_samples.shape[0]))
series_max = pd.DataFrame(series_max, columns=['series_max'])
series_min = pd.DataFrame(series_min, columns=['series_min'])
normalize_indicators = pd.concat([series_max, series_min], axis=1)
normalize_indicators.to_csv(save_path+'norm_unsample_id.csv')
train_samples.to_csv(save_path+'minmax_unsample_train.csv', index=None)
dev_samples.to_csv(save_path+'minmax_unsample_dev.csv', index=None)
test_samples.to_csv(save_path+'minmax_unsample_test.csv', index=None)
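# Hypothetical call sketch (file paths, column name, lag and test length are assumptions,
# not from the source):
# generate_monoscale_samples(
#     source_file=root_path + '/data/monthly_runoff.csv',
#     save_path=root_path + '/data/orig',
#     lags_dict={'ORIG': 12}, column='Runoff', test_len=120, lead_time=1)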
def gen_one_step_hindcast_samples(station, decomposer, lags_dict, input_columns, output_column, test_len,
wavelet_level="db10-2", lead_time=1,regen=False):
"""
Generate one step hindcast decomposition-ensemble learning samples.
Args:
'station'-- ['string'] The station where the original time series come from.
        'decomposer'-- ['string'] The decomposition algorithm used for decomposing the original time series.
'lags_dict'-- ['int dict'] The lagged time for each subsignal.
'input_columns'-- ['string list'] The input columns' name used for generating the learning samples.
'output_columns'-- ['string'] The output column's name used for generating the learning samples.
'test_len'-- ['int'] The size of development and testing samples ().
"""
logger.info('Generating one-step decomposition ensemble hindcasting samples')
logger.info('Station:{}'.format(station))
logger.info('Decomposer:{}'.format(decomposer))
logger.info('Lags_dict:{}'.format(lags_dict))
logger.info('Input columns:{}'.format(input_columns))
logger.info('Output column:{}'.format(output_column))
logger.info('Testing sample length:{}'.format(test_len))
logger.info(
'Mother wavelet and decomposition level:{}'.format(wavelet_level))
logger.info('Lead time:{}'.format(lead_time))
# Load data from local dick
if decomposer == "dwt" or decomposer == 'modwt':
data_path = root_path+"/"+station+"_"+decomposer+"/data/"+wavelet_level+"/"
else:
data_path = root_path+"/"+station+"_"+decomposer+"/data/"
save_path = data_path+"one_step_"+str(lead_time)+"_ahead_hindcast_pacf/"
if not os.path.exists(save_path):
os.makedirs(save_path)
if len(os.listdir(save_path))>0 and not regen:
logger.info('Learning samples have been generated!')
else:
decompose_file = data_path+decomposer.upper()+"_FULL.csv"
decompositions = pd.read_csv(decompose_file)
# Drop NaN
decompositions.dropna()
# Get the input data (the decompositions)
input_data = decompositions[input_columns]
# Get the output data (the original time series)
output_data = decompositions[output_column]
# Get the number of input features
subsignals_num = input_data.shape[1]
# Get the data size
data_size = input_data.shape[0]
# Compute the samples size
max_lag = max(lags_dict.values())
samples_size = data_size-max_lag
# Generate feature columns
samples_cols = []
for i in range(sum(lags_dict.values())):
samples_cols.append('X'+str(i+1))
samples_cols.append('Y')
# Generate input colmuns for each subsignal
full_samples = pd.DataFrame()
for i in range(subsignals_num):
# Get one subsignal
one_in = (input_data[input_columns[i]]).values
oness = pd.DataFrame()
lag = lags_dict[input_columns[i]]
for j in range(lag):
x = pd.DataFrame(one_in[j:data_size-(lag-j)],
columns=['X' + str(j + 1)])
x = x.reset_index(drop=True)
oness = pd.concat([oness, x], axis=1, sort=False)
# make all sample size of each subsignal identical
oness = oness.iloc[oness.shape[0]-samples_size:]
oness = oness.reset_index(drop=True)
full_samples = pd.concat([full_samples, oness], axis=1, sort=False)
# Get the target
target = (output_data.values)[max_lag+lead_time-1:]
target = pd.DataFrame(target, columns=['Y'])
full_samples = full_samples[:full_samples.shape[0]-(lead_time-1)]
full_samples = full_samples.reset_index(drop=True)
# Concat the features and target
full_samples = pd.concat([full_samples, target], axis=1, sort=False)
full_samples = pd.DataFrame(full_samples.values, columns=samples_cols)
full_samples.to_csv(save_path+'full_samples.csv')
assert samples_size == full_samples.shape[0]
# Get the training and developing set
train_dev_samples = full_samples[0:(samples_size - test_len)]
# Get the testing set.
test_samples = full_samples[(samples_size - test_len):samples_size]
# train_dev_len = train_dev_samples.shape[0]
train_samples = full_samples[0:(samples_size - test_len - test_len)]
dev_samples = full_samples[(
samples_size - test_len - test_len):(samples_size - test_len)]
assert (train_samples['X1'].size + dev_samples['X1'].size +
test_samples['X1'].size) == samples_size
# Get the max and min value of training set
series_max = train_samples.max(axis=0)
series_min = train_samples.min(axis=0)
# Normalize each series to the range between -1 and 1
train_samples = 2 * (train_samples - series_min) / \
(series_max - series_min) - 1
dev_samples = 2 * (dev_samples - series_min) / \
(series_max - series_min) - 1
test_samples = 2 * (test_samples - series_min) / \
(series_max - series_min) - 1
logger.info('Save path:{}'.format(save_path))
logger.info('Series length:{}'.format(samples_size))
logger.info('Training and development sample size:{}'.format(
train_dev_samples.shape[0]))
logger.info('Training sample size:{}'.format(train_samples.shape[0]))
logger.info('Development sample size:{}'.format(dev_samples.shape[0]))
logger.info('Testing sample size:{}'.format(test_samples.shape[0]))
series_max = pd.DataFrame(series_max, columns=['series_max'])
series_min = pd.DataFrame(series_min, columns=['series_min'])
normalize_indicators = pd.concat([series_max, series_min], axis=1)
normalize_indicators.to_csv(save_path+'norm_unsample_id.csv')
train_samples.to_csv(save_path + 'minmax_unsample_train.csv', index=None)
dev_samples.to_csv(save_path + 'minmax_unsample_dev.csv', index=None)
test_samples.to_csv(save_path+'minmax_unsample_test.csv', index=None)
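# Hypothetical call sketch (station name, decomposer, subsignal lags and column names are
# assumptions, not from the source):
# gen_one_step_hindcast_samples(
#     station='Huaxian', decomposer='vmd',
#     lags_dict={'IMF1': 5, 'IMF2': 7, 'IMF3': 6},
#     input_columns=['IMF1', 'IMF2', 'IMF3'], output_column='ORIG',
#     test_len=120, lead_time=1)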
def gen_one_step_forecast_samples_triandev_test(station, decomposer, lags_dict, input_columns, output_column, start, stop, test_len,
wavelet_level="db10-2", lead_time=1,regen=False):
"""
Generate one step forecast decomposition-ensemble samples.
Args:
'station'-- ['string'] The station where the original time series come from.
'decomposer'-- ['string'] The decompositin algorithm used for decomposing the original time series.
'lags_dict'-- ['int dict'] The lagged time for subsignals.
'input_columns'-- ['string lsit'] the input columns' name for read the source data by pandas.
'output_columns'-- ['string'] the output column's name for read the source data by pandas.
'start'-- ['int'] The start index of appended decomposition file.
'stop'-- ['int'] The stop index of appended decomposotion file.
'test_len'-- ['int'] The size of development and testing samples.
"""
logger.info(
        'Generating one-step decomposition ensemble forecasting samples (traindev-test pattern)')
logger.info('Station:{}'.format(station))
logger.info('Decomposer:{}'.format(decomposer))
logger.info('Lags_dict:{}'.format(lags_dict))
logger.info('Input columns:{}'.format(input_columns))
logger.info('Output column:{}'.format(output_column))
logger.info('Validation start index:{}'.format(start))
logger.info('Validation stop index:{}'.format(stop))
logger.info('Testing sample length:{}'.format(test_len))
logger.info(
'Mother wavelet and decomposition level:{}'.format(wavelet_level))
logger.info('Lead time:{}'.format(lead_time))
# Load data from local dick
if decomposer == "dwt" or decomposer == 'modwt':
data_path = root_path+"/"+station+"_"+decomposer+"/data/"+wavelet_level+"/"
else:
data_path = root_path+"/"+station+"_"+decomposer+"/data/"
save_path = data_path+"one_step_" + \
str(lead_time)+"_ahead_forecast_pacf_traindev_test/"
if not os.path.exists(save_path):
os.makedirs(save_path)
if len(os.listdir(save_path))>0 and not regen:
logger.info('Learning samples have been generated!')
else:
# !!!!!!Generate training samples
traindev_decompose_file = data_path+decomposer.upper()+"_TRAINDEV.csv"
traindev_decompositions = pd.read_csv(traindev_decompose_file)
# Drop NaN
traindev_decompositions.dropna()
# Get the input data (the decompositions)
traindev_input_data = traindev_decompositions[input_columns]
# Get the output data (the original time series)
traindev_output_data = traindev_decompositions[output_column]
# Get the number of input features
subsignals_num = traindev_input_data.shape[1]
# Get the data size
traindev_data_size = traindev_input_data.shape[0]
# Compute the samples size
max_lag = max(lags_dict.values())
traindev_samples_size = traindev_data_size-max_lag
# Generate feature columns
samples_cols = []
for i in range(sum(lags_dict.values())):
samples_cols.append('X'+str(i+1))
samples_cols.append('Y')
# Generate input colmuns for each input feature
train_dev_samples = pd.DataFrame()
for i in range(subsignals_num):
# Get one input feature
one_in = (traindev_input_data[input_columns[i]]).values # subsignal
lag = lags_dict[input_columns[i]]
            oness = pd.DataFrame()  # restore input features
for j in range(lag):
x = pd.DataFrame(one_in[j:traindev_data_size-(lag-j)],
columns=['X' + str(j + 1)])['X' + str(j + 1)]
x = x.reset_index(drop=True)
oness = pd.DataFrame(pd.concat([oness, x], axis=1))
oness = oness.iloc[oness.shape[0]-traindev_samples_size:]
oness = oness.reset_index(drop=True)
train_dev_samples = pd.DataFrame(
pd.concat([train_dev_samples, oness], axis=1))
# Get the target
target = (traindev_output_data.values)[max_lag+lead_time-1:]
        target = pd.DataFrame(target, columns=['Y'])
import plotly.graph_objects as go
import plotly.io as pio
import numpy as np
import pandas as pd
import math
from static_data import K_value_ranges,condition_number_ranges,generalized_condition_number_ranges,ARR_ranges, on_plot_shown_label,fig_size,color_schemes,themes
from preprocess_util import *
def define_theme():
# naming a layout theme for future reference
pio.templates["encode"] = go.layout.Template(
layout_colorway=color_schemes,
data_scatter=[dict(line=dict(width=5))]
)
pio.templates["large"] = go.layout.Template(
layout_font=dict(family="Helvetica", size=16),
layout_title_font = dict(family="Helvetica", size=19),
)
pio.templates["ultralarge"] = go.layout.Template(
layout_font=dict(family="Helvetica", size=16),
layout_title_font = dict(family="Helvetica", size=19),
)
pio.templates["medium"] = go.layout.Template(
layout_font=dict(family="Helvetica", size=16),
layout_title_font = dict(family="Helvetica", size=19),
)
# pio.templates.default = "encode"
pio.templates.default = "presentation+encode"
def define_write_to_file_theme():
# naming a layout theme for future reference
pio.templates["encode"] = go.layout.Template(
layout_colorway=color_schemes,
layout_font=dict(family="Arial Black", size=22),
layout_title_font = dict(family="Arial Black", size=27),
data_scatter=[dict(line=dict(width=5))]
)
pio.templates["large"] = go.layout.Template(
layout_font=dict(family="Arial Black", size=35),
layout_title_font = dict(family="Arial Black", size=40),
)
pio.templates["ultralarge"] = go.layout.Template(
layout_font=dict(family="Arial Black", size=35),
layout_title_font = dict(family="Arial Black", size=40),
)
pio.templates["medium"] = go.layout.Template(
layout_font=dict(family="Arial Black", size=25),
layout_title_font = dict(family="Arial Black", size=30),
)
# pio.templates.default = "encode"
pio.templates.default = "presentation+encode"
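# Hypothetical usage note: register exactly one of the themes above before building figures,
# e.g. for interactive work:
# define_theme()
# or, before exporting static images:
# define_write_to_file_theme()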
def filter_by_scale(scale, plot_df):
if scale == 'k>=1':
plot_df = plot_df[plot_df['K_value'] >= 1]
elif scale == 'k<1':
plot_df = plot_df[plot_df['K_value'] < 1]
elif scale == 'All':
plot_df = plot_df
return plot_df
def get_k_val_dist(plot_df,groupby):
if (plot_df[groupby].median()>np.median(condition_number_ranges)):
ranges = condition_number_ranges
elif (plot_df[groupby].median()>np.median(generalized_condition_number_ranges)):
ranges = generalized_condition_number_ranges
else:
ranges = K_value_ranges
plot_df.loc[:, 'group_range'] = pd.cut(
plot_df[groupby], ranges).astype(str)
plot_df.loc[plot_df[groupby] > ranges[-1],
'group_range'] = '>{}'.format(ranges[-1])
plot_df.loc[plot_df[groupby] == ranges[0],
'group_range'] = ' {}'.format(ranges[0])
plot_df.loc[plot_df[groupby] < ranges[0],
'group_range'] = '<{}'.format(ranges[0])
def custom_sort(col):
vals = []
for val in col.tolist():
if ',' in val:
vals.append(float(val.split(',')[1][1:-1]))
else:
vals.append(float(val[1:]))
return pd.Series(vals)
return plot_df, custom_sort
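# Hypothetical usage sketch (the 'K_value' column name comes from this module's constants):
# plot_df, sorter = get_k_val_dist(plot_df, 'K_value')
# bucket_counts = plot_df.groupby('group_range').size()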
def get_group_range(plot_df, groupby):
if groupby == 'K_value':
# ranges = K_value_ranges
# plot_df.loc[:, 'group_range'] = pd.cut(
# plot_df[groupby], ranges).astype(str)
# plot_df.loc[plot_df[groupby] > ranges[-1],
# 'group_range'] = '>{}'.format(ranges[-1])
# plot_df.loc[plot_df[groupby] == ranges[0],
# 'group_range'] = ' {}'.format(ranges[0])
# plot_df.loc[plot_df[groupby] < ranges[0],
# 'group_range'] = '<{}'.format(ranges[0])
# qcutted = pd.qcut(plot_df[plot_df[groupby]<1][groupby], 9,duplicates='drop')
# categories = qcutted.cat.categories
# qcutted_str = qcutted.astype(str)
# qcutted_str[qcutted_str == str(categories[0])] = '(0, {}]'.format(categories[0].right)
# qcutted_str[qcutted_str == str(categories[-1])] = '({}, 1)'.format(categories[-1].left)
# plot_df.loc[plot_df[groupby]<1, 'group_range'] = qcutted_str
# plot_df.loc[plot_df[groupby]>=1, 'group_range'] = '>= 1'
qcutted = pd.qcut(plot_df[groupby], 9,duplicates='drop')
categories = qcutted.cat.categories
qcutted_str = qcutted.astype(str)
# qcutted_str[qcutted_str == str(categories[0])] = '(1, {}]'.format(categories[0].right)
plot_df['group_range'] = qcutted_str
plot_df = plot_df[plot_df['group_range']!='nan']
# plot_df.loc[plot_df[groupby]>=1, 'group_range'] = '>= 1'
def custom_sort(col):
vals = []
for val in col.tolist():
if ',' in val:
vals.append(float(val.split(',')[1][1:-1]))
else:
# vals.append(float(val[2:]))
vals.append(float('inf'))
return pd.Series(vals)
return plot_df,custom_sort
elif groupby == 'arr':
def custom_sort(col, ranges):
vals = []
for val in col.tolist():
if val == '<={:.0%}'.format(ranges[1]):
vals.append('0')
else:
vals.append(val)
return pd.Series(vals)
ranges = ARR_ranges
plot_df.loc[:, 'group_range'] = pd.cut(plot_df[groupby], ranges).apply(
lambda x: '{:.0%}-{:.0%}'.format(x.left, x.right) if x is not None else 'nan').astype(str)
plot_df.loc[plot_df[groupby] > ranges[-1],
'group_range'] = '>{:.0%}'.format(ranges[-1])
plot_df.loc[plot_df[groupby] <= ranges[1],
'group_range'] = '<={:.0%}'.format(ranges[1])
plot_df = plot_df.dropna()
return plot_df,custom_sort
elif groupby in ['ave_true_abund','log2_true_abund']:
def custom_sort(col):
vals = []
for val in col.tolist():
if ',' in val:
vals.append(float(val.split(',')[1][1:-1]))
else:
# vals.append(float(val[1:]))
vals.append(float('inf'))
            return pd.Series(vals)
import pytest
from datetime import datetime, timedelta
import pytz
import numpy as np
from pandas import (NaT, Index, Timestamp, Timedelta, Period,
DatetimeIndex, PeriodIndex,
TimedeltaIndex, Series, isna)
from pandas.util import testing as tm
from pandas._libs.tslib import iNaT
@pytest.mark.parametrize('nat, idx', [(Timestamp('NaT'), DatetimeIndex),
(Timedelta('NaT'), TimedeltaIndex),
(Period('NaT', freq='M'), PeriodIndex)])
def test_nat_fields(nat, idx):
for field in idx._field_ops:
# weekday is a property of DTI, but a method
# on NaT/Timestamp for compat with datetime
if field == 'weekday':
continue
result = getattr(NaT, field)
assert np.isnan(result)
result = getattr(nat, field)
assert np.isnan(result)
for field in idx._bool_ops:
result = getattr(NaT, field)
assert result is False
result = getattr(nat, field)
assert result is False
def test_nat_vector_field_access():
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
for field in DatetimeIndex._field_ops:
# weekday is a property of DTI, but a method
# on NaT/Timestamp for compat with datetime
if field == 'weekday':
continue
result = getattr(idx, field)
expected = Index([getattr(x, field) for x in idx])
tm.assert_index_equal(result, expected)
s = Series(idx)
for field in DatetimeIndex._field_ops:
# weekday is a property of DTI, but a method
# on NaT/Timestamp for compat with datetime
if field == 'weekday':
continue
result = getattr(s.dt, field)
expected = [getattr(x, field) for x in idx]
tm.assert_series_equal(result, Series(expected))
for field in DatetimeIndex._bool_ops:
result = getattr(s.dt, field)
expected = [getattr(x, field) for x in idx]
tm.assert_series_equal(result, Series(expected))
@pytest.mark.parametrize('klass', [Timestamp, Timedelta, Period])
def test_identity(klass):
assert klass(None) is NaT
result = klass(np.nan)
assert result is NaT
result = klass(None)
assert result is NaT
result = klass(iNaT)
assert result is NaT
result = klass(np.nan)
assert result is NaT
result = klass(float('nan'))
assert result is NaT
result = klass(NaT)
assert result is NaT
result = klass('NaT')
assert result is NaT
assert isna(klass('nat'))
@pytest.mark.parametrize('klass', [Timestamp, Timedelta, Period])
def test_equality(klass):
# nat
if klass is not Period:
klass('').value == iNaT
klass('nat').value == iNaT
klass('NAT').value == iNaT
klass(None).value == iNaT
klass(np.nan).value == iNaT
assert isna(klass('nat'))
@pytest.mark.parametrize('klass', [Timestamp, Timedelta])
def test_round_nat(klass):
# GH14940
ts = klass('nat')
for method in ["round", "floor", "ceil"]:
round_method = getattr(ts, method)
for freq in ["s", "5s", "min", "5min", "h", "5h"]:
assert round_method(freq) is ts
def test_NaT_methods():
# GH 9513
raise_methods = ['astimezone', 'combine', 'ctime', 'dst',
'fromordinal', 'fromtimestamp', 'isocalendar',
'strftime', 'strptime', 'time', 'timestamp',
'timetuple', 'timetz', 'toordinal', 'tzname',
'utcfromtimestamp', 'utcnow', 'utcoffset',
'utctimetuple']
nat_methods = ['date', 'now', 'replace', 'to_datetime', 'today',
'tz_convert', 'tz_localize']
nan_methods = ['weekday', 'isoweekday']
for method in raise_methods:
if hasattr(NaT, method):
with pytest.raises(ValueError):
getattr(NaT, method)()
for method in nan_methods:
if hasattr(NaT, method):
assert np.isnan(getattr(NaT, method)())
for method in nat_methods:
if hasattr(NaT, method):
# see gh-8254
exp_warning = None
if method == 'to_datetime':
exp_warning = FutureWarning
with tm.assert_produces_warning(
exp_warning, check_stacklevel=False):
assert getattr(NaT, method)() is NaT
# GH 12300
    assert NaT.isoformat() == 'NaT'
#!/usr/bin/env python
# coding: utf-8
# # GenCode Explore
#
# Explore the human RNA sequences from GenCode.
#
# Assume user downloaded files from GenCode 38 [FTP](http://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_human/release_38/)
# to a subdirectory called data.
#
# Exclude mitochondrial genes because many have non-standard start and stop codons.
# In[1]:
import time
def show_time():
t = time.time()
s = time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(t))
print(s)
show_time()
# In[2]:
import numpy as np
import pandas as pd
import gzip
import sys
try:
from google.colab import drive
IN_COLAB = True
print("On Google CoLab, mount cloud-local file, get our code from GitHub.")
PATH='/content/drive/'
#drive.mount(PATH,force_remount=True) # hardly ever need this
drive.mount(PATH) # Google will require login credentials
DATAPATH=PATH+'My Drive/data/' # must end in "/"
import requests
s = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/RNA_describe.py')
with open('RNA_describe.py', 'w') as f:
f.write(s.text) # writes to cloud local, delete the file later?
s = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/RNA_special.py')
with open('RNA_special.py', 'w') as f:
f.write(s.text) # writes to cloud local, delete the file later?
from RNA_describe import *
from RNA_special import *
except:
print("CoLab not working. On my PC, use relative paths.")
IN_COLAB = False
DATAPATH='../data/' # must end in "/"
sys.path.append("..") # append parent dir in order to use sibling dirs
from SimTools.RNA_describe import *
from SimTools.RNA_special import *
MODELPATH="BestModel" # saved on cloud instance and lost after logout
#MODELPATH=DATAPATH+MODELPATH # saved on Google Drive but requires login
if not assert_imported_RNA_describe():
print("ERROR: Cannot use RNA_describe.")
# In[3]:
PC_FILENAME='gencode.v38.pc_transcripts.fa.gz'
NC_FILENAME='gencode.v38.lncRNA_transcripts.fa.gz'
TEST_FILENAME='test.fa.gz'
# In[4]:
def load_gencode(filename,label):
DEFLINE='>' # start of line with ids in a FASTA FILE
DELIM='|' # character between ids
VERSION='.' # character between id and version
EMPTY='' # use this to avoid saving "previous" sequence in first iteration
labels=[] # usually 1 for protein-coding or 0 for non-coding
seqs=[] # usually strings of ACGT
lens=[] # sequence length
ids=[] # GenCode transcript ID, always starts ENST, excludes version
one_seq = EMPTY
one_id = None
special = RNA_Special_Cases()
special.mitochondria()
# Use gzip 'r' mode to open file in read-only mode.
# Use gzip 't' mode to read each line of text as type string.
with gzip.open (filename,'rt') as infile:
for line in infile:
if line[0]==DEFLINE:
# Save the previous sequence (if previous exists).
if not one_seq == EMPTY and not special.is_special(one_id):
labels.append(label)
seqs.append(one_seq)
lens.append(len(one_seq))
ids.append(one_id)
# Get ready to read the next sequence.
# Parse a GenCode defline that is formatted like this:
# >ENST0001234.5|gene_ID|other_fields other_info|other_info
# Use the following if ever GenCode includes an ID with no version
# one_id = line[1:].split(DELIM)[0].split(VERSION)[0]
one_id = line[1:].split(VERSION)[0]
one_seq = EMPTY
else:
# Continue loading sequence lines till next defline.
additional = line.rstrip()
one_seq = one_seq + additional
# Don't forget to save the last sequence after end-of-file.
if not one_seq == EMPTY and not special.is_special(one_id):
labels.append(label)
seqs.append(one_seq)
lens.append(len(one_seq))
ids.append(one_id)
df1=pd.DataFrame(ids,columns=['tid'])
    df2=pd.DataFrame(labels,columns=['class'])
import pandas as pd
import numpy as np
from statsmodels.formula.api import ols
from swstats import *
from scipy.stats import ttest_ind
import xlsxwriter
from statsmodels.stats.multitest import multipletests
from statsmodels.stats.proportion import proportions_ztest
debugging = False
def pToSign(pval):
if pval < .001:
return "***"
elif pval < .01:
return "**"
elif pval < .05:
return "*"
elif pval < .1:
return "+"
else:
return ""
def analyzeExperiment_ContinuousVar(dta, varName):
order_value_control_group = dta.loc[dta.surveyArm == "arm1_control", varName]
order_value_arm2_group = dta.loc[dta.surveyArm == "arm2_written_techniques", varName]
order_value_arm3_group = dta.loc[dta.surveyArm == "arm3_existingssa", varName]
order_value_arm4_group = dta.loc[dta.surveyArm == "arm4_interactive_training", varName]
# Arm 1
arm1mean = np.mean(order_value_control_group)
arm1sd = np.std(order_value_control_group)
arm1text = "" + "{:.2f}".format(arm1mean) + " (" + "{:.2f}".format(arm1sd) + ")"
# Effect of Arm 2
arm2mean = np.mean(order_value_arm2_group)
arm2sd = np.std(order_value_arm2_group)
tscore, pval2 = ttest_ind(order_value_control_group, order_value_arm2_group)
arm2sign = pToSign(pval2)
arm2text = "" + "{:.2f}".format(arm2mean) + " (" + "{:.2f}".format(arm2sd) + ")" + arm2sign + " p:" + "{:.3f}".format(pval2)
# Effect of Arm 3
arm3mean = np.mean(order_value_arm3_group)
arm3sd = np.std(order_value_arm3_group)
tscore, pval3 = ttest_ind(order_value_control_group, order_value_arm3_group)
arm3sign = pToSign(pval3)
arm3text = "" + "{:.2f}".format(arm3mean) + " (" + "{:.2f}".format(arm3sd) + ")" + arm3sign + " p:" + "{:.3f}".format(pval3)
# Effect of Arm 4
arm4mean = np.mean(order_value_arm4_group)
arm4sd = np.std(order_value_arm4_group)
tscore, pval4 = ttest_ind(order_value_control_group, order_value_arm4_group)
arm4sign = pToSign(pval4)
arm4text = "" + "{:.2f}".format(arm4mean) + " (" + "{:.2f}".format(arm4sd) + ")" + arm4sign + " p:" + "{:.3f}".format(pval4)
# Correct P-values
y = multipletests(pvals=[pval2, pval3, pval4], alpha=0.05, method="holm")
# print(len(y[1][np.where(y[1] < 0.05)])) # y[1] returns corrected P-vals (array)
sigWithCorrection = y[1] < 0.05
if sigWithCorrection[0]:
arm2text = arm2text + ",#"
if sigWithCorrection[1]:
arm3text = arm3text + ",#"
if sigWithCorrection[2]:
arm4text = arm4text + ",#"
# Additional checks
tscore, pval2to4 = ttest_ind(order_value_arm2_group, order_value_arm4_group)
arm2to4sign = pToSign(pval2to4)
arm2to4text = "" + "{:.2f}".format(arm4mean - arm2mean) + " " + arm2to4sign + " p:" + "{:.3f}".format(pval2to4)
tscore, pval3to4 = ttest_ind(order_value_arm3_group, order_value_arm4_group)
arm3to4sign = pToSign(pval3to4)
arm3to4text = "" + "{:.2f}".format(arm4mean - arm3mean) + " " + arm3to4sign + " p:" + "{:.3f}".format(pval3to4)
results = {"Outcome": varName,
"Arm1": arm1text,
"Arm2": arm2text,
"Arm3": arm3text,
"Arm4": arm4text,
"Arm2To4": arm2to4text,
"Arm3To4": arm3to4text,
}
return results
def analyzeExperiment_BinaryVar(dta, varName):
order_value_control_group = dta.loc[dta.surveyArm == "arm1_control", varName]
order_value_arm2_group = dta.loc[dta.surveyArm == "arm2_written_techniques", varName]
order_value_arm3_group = dta.loc[dta.surveyArm == "arm3_existingssa", varName]
order_value_arm4_group = dta.loc[dta.surveyArm == "arm4_interactive_training", varName]
# Arm 1
arm1Successes = sum(order_value_control_group.isin([True, 1]))
arm1Count = sum(order_value_control_group.isin([True, False, 1, 0]))
arm1PercentSuccess = arm1Successes/arm1Count
arm1text = "" + "{:.2f}".format(arm1PercentSuccess) + " (" + "{:.0f}".format(arm1Successes) + ")"
# Effect of Arm 2
arm2Successes = sum(order_value_arm2_group.isin([True, 1]))
arm2Count = sum(order_value_arm2_group.isin([True, False, 1, 0]))
arm2PercentSuccess = arm2Successes/arm2Count
zstat, pval2 = proportions_ztest(count=[arm1Successes,arm2Successes], nobs=[arm1Count,arm2Count], alternative='two-sided')
arm2sign = pToSign(pval2)
arm2text = "" + "{:.2f}".format(arm2PercentSuccess) + " (" + "{:.0f}".format(arm2Successes) + ")" + arm2sign + " p:" + "{:.3f}".format(pval2)
# Effect of Arm 3
arm3Successes = sum(order_value_arm3_group.isin([True, 1]))
arm3Count = sum(order_value_arm3_group.isin([True, False, 1, 0]))
arm3PercentSuccess = arm3Successes/arm3Count
zstat, pval3 = proportions_ztest(count=[arm1Successes,arm3Successes], nobs=[arm1Count,arm3Count], alternative='two-sided')
arm3sign = pToSign(pval3)
arm3text = "" + "{:.2f}".format(arm3PercentSuccess) + " (" + "{:.0f}".format(arm3Successes) + ")" + arm3sign + " p:" + "{:.3f}".format(pval3)
# Effect of Arm 4
arm4Successes = sum(order_value_arm4_group.isin([True, 1]))
arm4Count = sum(order_value_arm4_group.isin([True, False, 1, 0]))
arm4PercentSuccess = arm4Successes/arm4Count
zstat, pval4 = proportions_ztest(count=[arm1Successes,arm4Successes], nobs=[arm1Count,arm4Count], alternative='two-sided')
arm4sign = pToSign(pval4)
arm4text = "" + "{:.2f}".format(arm4PercentSuccess) + " (" + "{:.0f}".format(arm4Successes) + ")" + arm4sign + " p:" + "{:.3f}".format(pval4)
# Correct P-values
y = multipletests(pvals=[pval2, pval3, pval4], alpha=0.05, method="holm")
# print(len(y[1][np.where(y[1] < 0.05)])) # y[1] returns corrected P-vals (array)
sigWithCorrection = y[1] < 0.05
if sigWithCorrection[0]:
arm2text = arm2text + ",#"
if sigWithCorrection[1]:
arm3text = arm3text + ",#"
if sigWithCorrection[2]:
arm4text = arm4text + ",#"
# Additional checks
zstat, pval2to4 = proportions_ztest(count=[arm2Successes,arm4Successes], nobs=[arm2Count,arm4Count], alternative='two-sided')
arm2to4sign = pToSign(pval2to4)
arm2to4text = "" + "{:.2f}".format(arm4PercentSuccess - arm2PercentSuccess) + " " + arm2to4sign + " p:" + "{:.3f}".format(pval2to4)
zstat, pval3to4 = proportions_ztest(count=[arm3Successes,arm4Successes], nobs=[arm3Count,arm4Count], alternative='two-sided')
arm3to4sign = pToSign(pval3to4)
arm3to4text = "" + "{:.2f}".format(arm4PercentSuccess - arm3PercentSuccess) + " " + arm3to4sign + " p:" + "{:.3f}".format(pval3to4)
results = {"Outcome": varName,
"Arm1": arm1text,
"Arm2": arm2text,
"Arm3": arm3text,
"Arm4": arm4text,
"Arm2To4": arm2to4text,
"Arm3To4": arm3to4text,
}
return results
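# Hypothetical usage sketch (toy data only; the real analyses below pass the full survey
# frame): build a small arm-comparison table with the two helpers defined above.
def _example_arm_comparison():
    rng = np.random.default_rng(0)
    toy = pd.DataFrame({
        'surveyArm': rng.choice(['arm1_control', 'arm2_written_techniques',
                                 'arm3_existingssa', 'arm4_interactive_training'], size=400),
        'numCorrect': rng.integers(0, 10, size=400),
        'isTricked': rng.integers(0, 2, size=400).astype(bool),
    })
    rows = [analyzeExperiment_ContinuousVar(toy, 'numCorrect'),
            analyzeExperiment_BinaryVar(toy, 'isTricked')]
    return pd.DataFrame(rows)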
def analyzeResults(dta, outputFileName, scoringVars, surveyVersion, primaryOnly=True):
if primaryOnly:
dta = dta[dta.IsPrimaryWave].copy()
dataDir = "C:/Dev/src/ssascams/data/"
''' Analyze the answers'''
writer = pd.ExcelWriter(dataDir + 'RESULTS_' + outputFileName + '.xlsx', engine='xlsxwriter')
# ###############
# Export summary stats
# ###############
demographicVars = ['trustScore', 'TotalIncome', 'incomeAmount', 'Race', 'race5', 'employment3', 'educYears', 'Married', 'marriedI', 'Age', 'ageYears', 'Gender', 'genderI']
allSummaryVars = ["percentCorrect", "surveyArm", "Wave", "daysFromTrainingToTest"] + scoringVars + demographicVars
summaryStats = dta[allSummaryVars].describe()
summaryStats.to_excel(writer, sheet_name="summary_FullPop", startrow=0, header=True, index=True)
grouped = dta[allSummaryVars].groupby(["surveyArm"])
summaryStats = grouped.describe().unstack().transpose().reset_index()
summaryStats.rename(columns={'level_0' :'VarName', 'level_1' :'Metric'}, inplace=True)
summaryStats.sort_values(['VarName', 'Metric'], inplace=True)
summaryStats.to_excel(writer, sheet_name="summary_ByArm", startrow=0, header=True, index=False)
if ~primaryOnly:
grouped = dta[allSummaryVars].groupby(["surveyArm", "Wave"])
summaryStats = grouped.describe().unstack().transpose().reset_index()
summaryStats.rename(columns={'level_0' :'VarName', 'level_1' :'Metric'}, inplace=True)
summaryStats.sort_values(['Wave','VarName', 'Metric'], inplace=True)
# grouped.describe().reset_index().pivot(index='name', values='score', columns='level_1')
summaryStats.to_excel(writer, sheet_name="summary_ByArmAndWave", startrow=0, header=True, index=False)
# summaryStats.to_csv(dataDir + "RESULTS_" + outputFileName + '.csv')
# ###############
# RQ1: What is the effect?
# ###############
row1 = analyzeExperiment_ContinuousVar(dta, "numCorrect")
row2 = analyzeExperiment_ContinuousVar(dta, "numFakeLabeledReal")
row3 = analyzeExperiment_ContinuousVar(dta, "numRealLabeledFake")
row4 = analyzeExperiment_ContinuousVar(dta, "percentCorrect")
pd.DataFrame([row1, row2, row3, row4]).to_excel(writer, sheet_name="r1", startrow=1, header=True, index=True)
##############
# RQ1* Robustness check on result: is the experiment randomized correctly?
##############
# NumCorrect Regression
resultTables = ols('numCorrect ~ C(surveyArm) + daysFromTrainingToTest + trustScore + lIncomeAmount + '
'C(employment3) + educYears + marriedI + ageYears + ageYearsSq + genderI + lose_moneyYN + duration_p2_Quantile ', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="r1_reg", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="r1_reg", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
# ###############
# RQ2: Communication Type
# ###############
row1 = analyzeExperiment_ContinuousVar(dta, "numEmailsCorrect")
row2 = analyzeExperiment_ContinuousVar(dta, "numSMSesCorrect")
row3 = analyzeExperiment_ContinuousVar(dta, "numLettersCorrect")
pd.DataFrame([row1, row2, row3]).to_excel(writer, sheet_name="r2", startrow=1, header=True, index=True)
##############
# RQ2* Robustness check on Emails result: is the experiment randomized correctly?
##############
# NumEmailsCorrect Regression
resultTables = ols('numEmailsCorrect ~ C(surveyArm) + daysFromTrainingToTest + trustScore + lIncomeAmount + '
'C(employment3) + educYears + marriedI + ageYears + ageYearsSq + genderI + lose_moneyYN + duration_p2_Quantile ', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="r2_reg", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="r2_reg", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
# ###############
# RQ3: Time Delay
# ###############
resultTables = ols('numCorrect ~ C(surveyArm)*Wave + daysFromTrainingToTest', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="r3a_CorrectWaveAndDay_Simple", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="r3a_CorrectWaveAndDay_Simple", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
resultTables = ols('numEmailsCorrect ~ C(surveyArm)*Wave + daysFromTrainingToTest', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="r3b_EmailWaveAndDay_Simple", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="r3b_EmailWaveAndDay_Simple", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
# ###############
# RQ4: Rainloop
# ###############
if surveyVersion == '6':
resultTables = ols('NumHeadersOpened ~ C(surveyArm)', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="r4_HeadersOpened", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="r4_HeadersOpened",startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
########################
# R5a: What determines fraud susceptibility (whether people get tricked or not)?
# Ie, false negatives
########################
# First Try on Regression
# resultTables = ols('numFakeLabeledReal ~ C(surveyArm) + daysFromTrainingToTest + trustScore + lIncomeAmount + '
# 'C(race5) + C(employment3) + educYears + marriedI + ageYears + ageYearsSq + genderI + lose_moneyYN + duration_p2_Quantile ', data=dta).fit().summary().tables
# pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="reg_numFakeLabeledReal_WRace", startrow=1, header=False, index=False)
# pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="reg_numFakeLabeledReal_WRace", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
# Remove race - many variables, small counts - likely over specifying
resultTables = ols('numFakeLabeledReal ~ C(surveyArm) + daysFromTrainingToTest + trustScore + lIncomeAmount + '
'C(employment3) + educYears + marriedI + ageYears + ageYearsSq + genderI + lose_moneyYN + duration_p2_Quantile ', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="r5a_numFakeLabeledReal", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="r5a_numFakeLabeledReal", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
resultTables = ols('numLabeledReal ~ C(surveyArm) + trustScore + lIncomeAmount + C(employment3) + educYears + marriedI + ageYears + ageYearsSq + genderI + lose_moneyYN + duration_p2_Quantile ', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="reg_numLabeledReal", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="reg_numLabeledReal", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
########################
# R5b: What determines lack of trust?
########################
# Ie, false positive
resultTables = ols('numRealLabeledFake ~ C(surveyArm) + daysFromTrainingToTest + trustScore + lIncomeAmount + '
'C(employment3) + educYears + marriedI + ageYears + ageYearsSq + genderI + lose_moneyYN + duration_p2_Quantile ', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="r5b_numRealLabeledFake", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="r5b_numRealLabeledFake", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
resultTables = ols('numLabeledFake ~ C(surveyArm) + daysFromTrainingToTest + trustScore + lIncomeAmount + '
'C(employment3) + educYears + marriedI + ageYears + ageYearsSq + genderI + lose_moneyYN + duration_p2_Quantile ', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="reg_numLabeledFake", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="reg_numLabeledFake", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
# ###############
# RQ6: Impostor Type
# ###############
row1 = analyzeExperiment_ContinuousVar(dta, "numCorrect_SSA")
row2 = analyzeExperiment_ContinuousVar(dta, "numCorrect_Other")
row3 = analyzeExperiment_ContinuousVar(dta, "numEmailsCorrect_SSA")
row4 = analyzeExperiment_ContinuousVar(dta, "numEmailsCorrect_Other")
pd.DataFrame([row1, row2, row3, row4]).to_excel(writer, sheet_name="r6", startrow=1, header=True, index=True)
# ###############
# RQ7: Likelihood of being tricked
# ###############
dta['isTrickedByFraud'] = dta.numFakeLabeledReal > 0
dta['isTrickedByAnySSAEmail'] = dta.numEmailsCorrect_SSA < max(dta.numEmailsCorrect_SSA)
dta['isTrickedByAnyNonSSAEmail'] = dta.numEmailsCorrect_Other < max(dta.numEmailsCorrect_Other)
row1 = analyzeExperiment_BinaryVar(dta, "isTrickedByFraud")
row2 = analyzeExperiment_BinaryVar(dta, "isTrickedByAnySSAEmail")
row3 = analyzeExperiment_BinaryVar(dta, "isTrickedByAnyNonSSAEmail")
pd.DataFrame([row1, row2, row3]).to_excel(writer, sheet_name="r7", startrow=1, header=True, index=True)
# ###############
# RQ8: Every Email
# ###############
filter_cols = [col for col in dta.columns if col.startswith('Correct_')]
theRows = []
for filter_col in filter_cols:
arow = analyzeExperiment_BinaryVar(dta, filter_col)
theRows = theRows + [arow]
pd.DataFrame(theRows).to_excel(writer, sheet_name="r8", startrow=1, header=True, index=True)
# ##############
# Correlations
################
indepVars = ['surveyArm', 'daysFromTrainingToTest', 'Wave', 'trustScore', 'incomeAmount', 'race5', 'employment3', 'educYears', 'marriedI', 'ageYears','Gender',
'previousFraudYN', 'lose_moneyYN', 'duration_p1', 'duration_p1_Quantile', 'duration_p2', 'duration_p2_Quantile', 'Employment']
depVars = ['numCorrect', 'numFakeLabeledReal', 'numRealLabeledFake']
dta.Wave = dta.Wave.astype('float64')
# Look at Correlations among variables
allVarsToCorr = depVars + indepVars
corrMatrix = dta[allVarsToCorr].corr()
pd.DataFrame(corrMatrix).to_excel(writer, sheet_name="corrMatrix", startrow=1, header=True, index=True)
# duration_p1 is a proxy for arm, so strange results there.
# we'd need a fine-tuned var. Let's use p2 instead. Also, the Quantile shows a much stronger relationship than the raw values (likely since it is not linear in the depvars)
# Losing money and income and age show a moderate relationship
# ##############
# Scatter Plots
################
import seaborn as sns
sns.set_theme(style="ticks")
toPlot = dta[['numCorrect', 'surveyArm', 'daysFromTrainingToTest', 'Wave', 'trustScore', 'lose_moneyYN', 'duration_p2_Quantile']]
sns.pairplot(toPlot, hue="surveyArm")
# ##############
# Regressions
# ##############
# Sanity Check regression
resultTables = ols('lIncomeAmount ~ageYears + ageYearsSq + educYears + marriedI + genderI', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="reg_Sanity", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="reg_Sanity", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
# Simple Experiment-Only test
resultTables = ols('numCorrect ~ C(surveyArm)', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="numCorrect_ByArm", startrow=1, header=False, index=False)
    pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="numCorrect_ByArm", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
from collections import OrderedDict
from typing import Any, Dict, List, Tuple, Union, cast
import pandas as pd
from the_census._api.models import GeographyItem
from the_census._config import Config
from the_census._data_transformation.interface import ICensusDataTransformer
from the_census._geographies.models import GeoDomain
from the_census._utils.timer import timer
from the_census._variables.models import Group, GroupVariable, VariableCode
class CensusDataTransformer(ICensusDataTransformer[pd.DataFrame]):
_config: Config
def __init__(self, config: Config) -> None:
self._config = config
@timer
def supported_geographies(
self, supported_geos: OrderedDict[str, GeographyItem]
) -> pd.DataFrame:
values_flattened: List[Dict[str, str]] = []
for geo_item in supported_geos.values():
for clause in geo_item.clauses:
values_flattened.append(
{
"name": geo_item.name,
"hierarchy": geo_item.hierarchy,
"for": clause.for_clause,
"in": ",".join(clause.in_clauses),
}
)
        return pd.DataFrame(values_flattened)
import os.path
import glob
import re
from typing import Dict, List, Optional
from bokeh.charts import TimeSeries
from bokeh.models import Range1d
import bokeh.plotting
import json_lines
import pandas as pd
from scrapy.commands import ScrapyCommand
from scrapy.exceptions import UsageError
from dd_crawler.utils import get_domain
class Command(ScrapyCommand):
requires_project = True
def syntax(self):
return '<files>'
def add_options(self, parser):
ScrapyCommand.add_options(self, parser)
arg = parser.add_option
arg('-o', '--output', help='prefix for charts (without ".html")')
arg('--step', type=float, default=30, help='time step, s')
arg('--smooth', type=int, default=50, help='smooth span')
arg('--top', type=int, default=30, help='top domains to show')
arg('--no-show', action='store_true', help='don\'t show charts')
def short_desc(self):
return 'Print short speed summary, save charts to a file'
def run(self, args, opts):
if not args:
raise UsageError()
if len(args) == 1 and '*' in args[0]:
# paths were not expanded (docker)
filenames = glob.glob(args[0])
else:
filenames = args
del args
filtered_filenames = [
f for f in filenames
if re.match(r'[a-z0-9]{12}\.csv$', os.path.basename(f))]
filenames = filtered_filenames or filenames
if not filenames:
raise UsageError()
response_logs = []
for filename in filenames:
with json_lines.open(filename) as f:
response_logs.append(pd.DataFrame(f))
print('Read data from {} files'.format(len(filenames)))
all_rpms = [rpms for rpms in (
get_rpms(name, rlog, step=opts.step, smooth=opts.smooth)
for name, rlog in zip(filenames, response_logs))
if rpms is not None]
if all_rpms:
print_rpms(all_rpms, opts)
print_scores(response_logs, opts)
def get_rpms(filename: str, response_log: pd.DataFrame,
step: float, smooth: int) -> Optional[pd.DataFrame]:
timestamps = response_log['time']
buffer = []
if len(timestamps) == 0:
return
get_t0 = lambda t: int(t / step) * step
t0 = get_t0(timestamps[0])
rpms = []
for ts in timestamps:
if get_t0(ts) != t0:
rpms.append((t0, len(buffer) / (ts - buffer[0]) * 60))
t0 = get_t0(ts)
buffer = []
buffer.append(ts)
if rpms:
name = os.path.basename(filename)
rpms = | pd.DataFrame(rpms, columns=['time', name]) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from cvxopt import matrix, solvers
from datetime import datetime, date
import quandl
assets = ['AAPL', # Apple
'KO', # Coca-Cola
'DIS', # Disney
'XOM', # Exxon Mobil
'JPM', # JPMorgan Chase
'MCD', # McDonald's
'WMT'] # Walmart
# download historical data from quandl
hist_data = {}
for asset in assets:
data = quandl.get('wiki/'+asset, start_date='2015-01-01', end_date='2017-12-31', authtoken='<PASSWORD>')
hist_data[asset] = data['Adj. Close']
hist_data = pd.concat(hist_data, axis=1)
# calculate historical log returns
hist_return = np.log(hist_data / hist_data.shift())
hist_return = hist_return.dropna()
# find historical mean, covriance, and correlation
hist_mean = hist_return.mean(axis=0).to_frame()
hist_mean.columns = ['mu']
hist_cov = hist_return.cov()
hist_corr = hist_return.corr()
print(hist_mean.transpose())
print(hist_cov)
print(hist_corr)
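# Log returns are used above because they add across time: r_t = ln(P_t / P_{t-1}),
# so a k-day return is just the sum of k one-day log returns. The daily mean and
# standard deviation are annualized further down with the usual 250-trading-day
# convention (mean * 250, stdev * sqrt(250)).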
# construct random portfolios
n_portfolios = 5000
#set up array to hold results
port_returns = np.zeros(n_portfolios)
port_stdevs = np.zeros(n_portfolios)
for i in range(n_portfolios):
w = np.random.rand(len(assets)) # random weights
w = w / sum(w) # weights sum to 1
port_return = np.dot(w.T, hist_mean.as_matrix()) * 250 # annualize; 250 business days
port_stdev = np.sqrt(np.dot(w.T, np.dot(hist_cov, w))) * np.sqrt(250) # annualize; 250 business days
port_returns[i] = port_return
port_stdevs[i] = port_stdev
plt.plot(port_stdevs, port_returns, 'o', markersize=6)
plt.xlabel('Expected Volatility')
plt.ylabel('Expected Return')
plt.title('Return and Standard Deviation of Randomly Generated Portfolios')
plt.show()
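# The closed-form GMV weights below come from minimizing w' Sigma w subject to 1'w = 1;
# the first-order conditions give w* = Sigma^{-1} 1 / (1' Sigma^{-1} 1).
# (The code negates the inverse covariance matrix; the sign cancels in the ratio.)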
# Global Minimum Variance (GMV) -- closed form
hist_cov_inv = - np.linalg.inv(hist_cov)
one_vec = np.ones(len(assets))
w_gmv = np.dot(hist_cov_inv, one_vec) / (np.dot(np.transpose(one_vec), np.dot(hist_cov_inv, one_vec)))
w_gmv_df = pd.DataFrame(data = w_gmv).transpose()
w_gmv_df.columns = assets
stdev_gmv = np.sqrt(np.dot(w_gmv.T, np.dot(hist_cov, w_gmv))) * np.sqrt(250)
print(w_gmv_df)
print(stdev_gmv)
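# The numerical version maps the same problem onto cvxopt's quadratic program
#   minimize (1/2) x' P x + q' x   subject to   A x = b,
# with P = Sigma, q = 0, A a row of ones and b = 1.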
# Global Minimum Variance (GMV) -- numerical
P = matrix(hist_cov.as_matrix())
q = matrix(np.zeros((len(assets), 1)))
A = matrix(1.0, (1, len(assets)))
b = matrix(1.0)
w_gmv_v2 = np.array(solvers.qp(P, q, A=A, b=b)['x'])
w_gmv_df_v2 = pd.DataFrame(w_gmv_v2).transpose()
w_gmv_df_v2.columns = assets
stdev_gmv_v2 = np.sqrt(np.dot(w_gmv_v2.T, np.dot(hist_cov, w_gmv_v2))) * np.sqrt(250)
print(w_gmv_df_v2)
print(np.asscalar(stdev_gmv_v2))
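# This is the minimum-variance portfolio with a target return mu_o:
#   minimize w' Sigma w   subject to   mu'w = mu_o  and  1'w = 1.
# With B = [mu 1] and A = B' Sigma^{-1} B the solution is w* = Sigma^{-1} B A^{-1} y,
# where y = [mu_o, 1]'. (The negated inverse covariance cancels here as well.)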
# Maximum return -- closed form
mu_o = np.asscalar(np.max(hist_mean)) # MCD
A = np.matrix([[np.asscalar(np.dot(hist_mean.T,np.dot(hist_cov_inv,hist_mean))),
np.asscalar(np.dot(hist_mean.T,np.dot(hist_cov_inv,one_vec)))],
[np.asscalar(np.dot(hist_mean.T,np.dot(hist_cov_inv,one_vec))),
np.asscalar(np.dot(one_vec.T,np.dot(hist_cov_inv,one_vec)))]])
B = np.hstack([np.array(hist_mean),one_vec.reshape(len(assets),1)])
y = np.matrix([mu_o, 1]).T
w_max_ret = np.dot(np.dot(np.dot(hist_cov_inv, B), np.linalg.inv(A)),y)
w_max_ret_df = | pd.DataFrame(w_max_ret) | pandas.DataFrame |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 16 16:24:07 2017
@author: raimondas
"""
#%% imports
import os, sys, glob
import itertools
from distutils.dir_util import mkpath
from tqdm import tqdm
import numpy as np
#import matplotlib.pyplot as plt
#plt.ion()
sys.path.append('..')
#import seaborn as sns
#sns.set_style("ticks")
###
import random
import copy
from collections import OrderedDict
import pandas as pd
import argparse
#from utils import ETData, training_params
import pickle
from sklearn.model_selection import train_test_split
import scipy.io as scio
#from utils import eval_evt
from etdata import ETData, get_px2deg
#%% functions
def augment_with_saccades(data, seq_len, histogram_eq = True):
#debug
# stop
# data = data['unpaired_clean']
# seq_len=100
#augment by centering on saccades
etdata = ETData()
sacc = []
data_clean = [_d for (_i, _d) in data if len(_d) > seq_len]
for i, _data in enumerate(data_clean):
etdata.load(_data, **{'source':'array'})
etdata.calc_evt()
evt = etdata.evt.loc[etdata.evt['evt']==2, :]
evt = evt.assign(ind=i)
sacc.append(evt)
sacc_df = pd.concat(sacc).reset_index()
seeds = range(len(sacc_df))
train_data_pick = []
for (_, sacc), seed in zip(sacc_df.iterrows(), seeds):
np.random.seed(seed)
        i = np.random.randint(seq_len)
        trial_ind = int(sacc['ind'])
        s = np.maximum(0, sacc['s'] - i).astype(int)
        e = s + seq_len + 2  # because we will differentiate and predict next sample
if e < len(data_clean[trial_ind]):
#plt.plot(_train_data[trial_ind][s:e]['x'])
#plt.plot(_train_data[trial_ind][s:e]['y'])
train_data_pick.append(data_clean[trial_ind][s:e])
if histogram_eq:
#augment with large saccades
sacc=[]
for i, _data in enumerate(train_data_pick):
etdata.load(_data, **{'source':'array'})
etdata.calc_evt()
evt = etdata.evt.loc[etdata.evt['evt']==2, :]
evt = evt.assign(ind=i)
sacc.append(evt)
sacc_pick_df = pd.concat(sacc).reset_index()
h, edges = np.histogram(sacc_pick_df['ampl'], bins='auto')
p = (h.max()-h)
sacc = []
seeds = range(len(h))
for _p, _es, _ee, seed in zip(p, edges[:-1], edges[1:], seeds):
mask = (sacc_df['ampl'] > _es) & (sacc_df['ampl'] < _ee)
if (len(sacc_df.loc[mask, :]) > 0) and (_p > 0):
sacc.append(sacc_df.loc[mask, :].sample(n = _p, replace = True, random_state=seed))
sacc_ampl_df = pd.concat(sacc).reset_index()
train_data_ampl = []
seeds = range(len(sacc_ampl_df))
for (_, sacc), seed in zip(sacc_ampl_df.iterrows(), seeds):
np.random.seed(seed)
            i = np.random.randint(seq_len)
            trial_ind = int(sacc['ind'])
            s = np.maximum(0, sacc['s'] - i).astype(int)
            e = s + seq_len + 2  # because we will differentiate and predict next sample
if e < len(data_clean[trial_ind]):
#plt.plot(_train_data[trial_ind][s:e]['x'])
#plt.plot(_train_data[trial_ind][s:e]['y'])
train_data_ampl.append(data_clean[trial_ind][s:e])
train_data_pick +=train_data_ampl
return train_data_pick
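# Example call (illustrative; the first argument is a list of (info, samples) tuples,
# as built in the data-reading section below):
#   train_segments = augment_with_saccades(data['unpaired'], seq_len=100)
# It returns a list of fixed-length gaze segments centred on saccade onsets.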
#%% set parameters
parser = argparse.ArgumentParser()
parser.add_argument('--root', type=str, default='../../etdata',
help='data root')
parser.add_argument('--dataset', type=str, default='lund2013_npy',
help='dataset')
parser.add_argument('--seq_len', type=int, default=100,
help='number of samples in data iterator')
parser.add_argument('--events', default=[1, 2, 3],
help='events')
args = parser.parse_args()
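# Illustrative invocation (the script name is a placeholder, not taken from the repo):
#   python prepare_data.py --root ../../etdata --dataset lund2013_npy --seq_len 100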
etdata = ETData()
#%%data reader
print ("Reading data")
ddir = '%s/%s'%(args.root, args.dataset)
if not os.path.exists(ddir):
mkpath(ddir)
#try to convert from mat
fdir_mat = 'EyeMovementDetectorEvaluation/annotated_data/originally uploaded data/images'
FILES_MAT = glob.glob('%s/%s/*.mat'% (args.root, fdir_mat))
for fpath in tqdm(FILES_MAT):
fdir, fname = os.path.split(os.path.splitext(fpath)[0])
mat = scio.loadmat(fpath)
fs = mat['ETdata']['sampFreq'][0][0][0][0]
geom = {
'screen_width' :mat['ETdata']['screenDim'][0][0][0][0],
'screen_height': mat['ETdata']['screenDim'][0][0][0][1],
'display_width_pix' : mat['ETdata']['screenRes'][0][0][0][0],
'display_height_pix' :mat['ETdata']['screenRes'][0][0][0][1],
'eye_distance' : mat['ETdata']['viewDist'][0][0][0][0],
}
px2deg = get_px2deg(geom)
data = mat['ETdata']['pos'][0][0]
t = np.arange(0, len(data)).astype(np.float64)/fs
status = (data[:,3] == 0) | (data[:,4] == 0)
data = np.vstack((t, data[:,3], data[:,4], ~status, data[:,5])).T
etdata.load(data, **{'source': 'np_array'})
etdata.data['x'] = (etdata.data['x'] - geom['display_width_pix']/2) / px2deg
etdata.data['y'] = (etdata.data['y'] - geom['display_height_pix']/2) / px2deg
etdata.data['x'][status] = np.nan
etdata.data['y'][status] = np.nan
#fix
if 'UH29_img_Europe_labelled_MN' in fname:
etdata.data['evt'][3197:3272] = 1
#set status one more time
status = np.isnan(etdata.data['x']) | np.isnan(etdata.data['y']) |\
~np.in1d(etdata.data['evt'], args.events) | ~etdata.data['status']
etdata.data['status'] = ~status
etdata.save('%s/%s' % (ddir, fname))
FILES = glob.glob('%s/*.npy' % ddir)
#for replication use following code
with open('datalist', 'r') as f:
FILES = ['%s/%s'%(ddir, _fname.strip()) for _fname in f.readlines()]
#%%split based on trial
print ("Train/test split")
exp = [(fpath,) + tuple(os.path.split(os.path.splitext(fpath)[0])[-1].split('_labelled_')) +\
(os.path.split(os.path.splitext(fpath)[0])[-1].split('_')[0][:2], )+\
(os.path.split(os.path.splitext(fpath)[0])[-1].split('_')[2], ) for fpath in FILES]
exp_df = pd.DataFrame(exp, columns=['fpath', 'flabel', 'coder', 'sub', 'img'])
exp_gr = exp_df.groupby('flabel')
exp_df['pair'] = False
for _e, _d in exp_gr:
if len(_d) >1:
exp_df.loc[_d.index, 'pair'] = True
print ('Number of trials: %d' %len(exp_df['flabel'].unique()))
print ('Number of subjects: %d' %len(exp_df['sub'].unique()))
print ('Number of images: %d' %len(exp_df['img'].unique()))
#split on pairs
exp_gr_pair = exp_df.groupby('pair')
X_unpaired, X_paired = [_d for _, _d in exp_gr_pair]
#%%load data
print ("Cleaning data")
data = OrderedDict()
data_lens = []
#iterates through data and marks status as false for events other than [1, 2, 3]
for df, part in zip([X_unpaired, X_paired, X_paired],
[['unpaired'], ['paired', 'RA'], ['paired', 'MN']]):
if len(part)==1:
part = part[0]
data[part] = []
for n, d in df.iterrows():
_data = np.load(d['fpath'])
data_lens.append(len(_data))
mask = np.in1d(_data['evt'], args.events)
_data['status'][~mask] = False
data[part].append((d, _data))
else:
coder = part[-1]
part = part[-1]
data[part] = []
for n, d in df.loc[df['coder']==coder,:].iterrows():
_data = np.load(d['fpath'])
data_lens.append(len(_data))
mask = np.in1d(_data['evt'], args.events)
_data['status'][~mask] = False
data[part].append((d, _data))
#sort according to flabel
labels_mn = pd.DataFrame([l['flabel'] for l, _ in data['MN']])
labels_ra = | pd.DataFrame([l['flabel'] for l, _ in data['RA']]) | pandas.DataFrame |
from datetime import (
datetime,
timedelta,
)
import re
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.errors import InvalidIndexError
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_integer
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
isna,
notna,
)
import pandas._testing as tm
import pandas.core.common as com
# We pass through a TypeError raised by numpy
_slice_msg = "slice indices must be integers or None or have an __index__ method"
class TestDataFrameIndexing:
def test_getitem(self, float_frame):
# Slicing
sl = float_frame[:20]
assert len(sl.index) == 20
# Column access
for _, series in sl.items():
assert len(series.index) == 20
assert tm.equalContents(series.index, sl.index)
for key, _ in float_frame._series.items():
assert float_frame[key] is not None
assert "random" not in float_frame
with pytest.raises(KeyError, match="random"):
float_frame["random"]
def test_getitem2(self, float_frame):
df = float_frame.copy()
df["$10"] = np.random.randn(len(df))
ad = np.random.randn(len(df))
df["@awesome_domain"] = ad
with pytest.raises(KeyError, match=re.escape("'df[\"$10\"]'")):
df.__getitem__('df["$10"]')
res = df["@awesome_domain"]
tm.assert_numpy_array_equal(ad, res.values)
def test_setitem_list(self, float_frame):
float_frame["E"] = "foo"
data = float_frame[["A", "B"]]
float_frame[["B", "A"]] = data
tm.assert_series_equal(float_frame["B"], data["A"], check_names=False)
tm.assert_series_equal(float_frame["A"], data["B"], check_names=False)
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
data[["A"]] = float_frame[["A", "B"]]
newcolumndata = range(len(data.index) - 1)
msg = (
rf"Length of values \({len(newcolumndata)}\) "
rf"does not match length of index \({len(data)}\)"
)
with pytest.raises(ValueError, match=msg):
data["A"] = newcolumndata
def test_setitem_list2(self):
df = DataFrame(0, index=range(3), columns=["tt1", "tt2"], dtype=np.int_)
df.loc[1, ["tt1", "tt2"]] = [1, 2]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series([1, 2], df.columns, dtype=np.int_, name=1)
tm.assert_series_equal(result, expected)
df["tt1"] = df["tt2"] = "0"
df.loc[df.index[1], ["tt1", "tt2"]] = ["1", "2"]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series(["1", "2"], df.columns, name=1)
tm.assert_series_equal(result, expected)
def test_getitem_boolean(self, mixed_float_frame, mixed_int_frame, datetime_frame):
# boolean indexing
d = datetime_frame.index[10]
indexer = datetime_frame.index > d
indexer_obj = indexer.astype(object)
subindex = datetime_frame.index[indexer]
subframe = datetime_frame[indexer]
tm.assert_index_equal(subindex, subframe.index)
with pytest.raises(ValueError, match="Item wrong length"):
datetime_frame[indexer[:-1]]
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
with pytest.raises(ValueError, match="Boolean array expected"):
datetime_frame[datetime_frame]
# test that Series work
indexer_obj = Series(indexer_obj, datetime_frame.index)
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test that Series indexers reindex
# we are producing a warning that since the passed boolean
# key is not the same as the given index, we will reindex
# not sure this is really necessary
with tm.assert_produces_warning(UserWarning):
indexer_obj = indexer_obj.reindex(datetime_frame.index[::-1])
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test df[df > 0]
for df in [
datetime_frame,
mixed_float_frame,
mixed_int_frame,
]:
data = df._get_numeric_data()
bif = df[df > 0]
bifw = DataFrame(
{c: np.where(data[c] > 0, data[c], np.nan) for c in data.columns},
index=data.index,
columns=data.columns,
)
# add back other columns to compare
for c in df.columns:
if c not in bifw:
bifw[c] = df[c]
bifw = bifw.reindex(columns=df.columns)
tm.assert_frame_equal(bif, bifw, check_dtype=False)
for c in df.columns:
if bif[c].dtype != bifw[c].dtype:
assert bif[c].dtype == df[c].dtype
def test_getitem_boolean_casting(self, datetime_frame):
# don't upcast if we don't need to
df = datetime_frame.copy()
df["E"] = 1
df["E"] = df["E"].astype("int32")
df["E1"] = df["E"].copy()
df["F"] = 1
df["F"] = df["F"].astype("int64")
df["F1"] = df["F"].copy()
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")] * 2
+ [np.dtype("int64")] * 2,
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
# int block splitting
df.loc[df.index[1:3], ["E1", "F1"]] = 0
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")]
+ [np.dtype("float64")]
+ [np.dtype("int64")]
+ [np.dtype("float64")],
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
def test_getitem_boolean_list(self):
df = DataFrame(np.arange(12).reshape(3, 4))
def _checkit(lst):
result = df[lst]
expected = df.loc[df.index[lst]]
tm.assert_frame_equal(result, expected)
_checkit([True, False, True])
_checkit([True, True, True])
_checkit([False, False, False])
def test_getitem_boolean_iadd(self):
arr = np.random.randn(5, 5)
df = DataFrame(arr.copy(), columns=["A", "B", "C", "D", "E"])
df[df < 0] += 1
arr[arr < 0] += 1
tm.assert_almost_equal(df.values, arr)
def test_boolean_index_empty_corner(self):
# #2096
blah = DataFrame(np.empty([0, 1]), columns=["A"], index=DatetimeIndex([]))
# both of these should succeed trivially
k = np.array([], bool)
blah[k]
blah[k] = 0
def test_getitem_ix_mixed_integer(self):
df = DataFrame(
np.random.randn(4, 3), index=[1, 10, "C", "E"], columns=[1, 2, 3]
)
result = df.iloc[:-1]
expected = df.loc[df.index[:-1]]
tm.assert_frame_equal(result, expected)
result = df.loc[[1, 10]]
expected = df.loc[Index([1, 10])]
tm.assert_frame_equal(result, expected)
def test_getitem_ix_mixed_integer2(self):
# 11320
df = DataFrame(
{
"rna": (1.5, 2.2, 3.2, 4.5),
-1000: [11, 21, 36, 40],
0: [10, 22, 43, 34],
1000: [0, 10, 20, 30],
},
columns=["rna", -1000, 0, 1000],
)
result = df[[1000]]
expected = df.iloc[:, [3]]
tm.assert_frame_equal(result, expected)
result = df[[-1000]]
expected = df.iloc[:, [1]]
tm.assert_frame_equal(result, expected)
def test_getattr(self, float_frame):
tm.assert_series_equal(float_frame.A, float_frame["A"])
msg = "'DataFrame' object has no attribute 'NONEXISTENT_NAME'"
with pytest.raises(AttributeError, match=msg):
float_frame.NONEXISTENT_NAME
def test_setattr_column(self):
df = DataFrame({"foobar": 1}, index=range(10))
df.foobar = 5
assert (df.foobar == 5).all()
def test_setitem(self, float_frame):
# not sure what else to do here
series = float_frame["A"][::2]
float_frame["col5"] = series
assert "col5" in float_frame
assert len(series) == 15
assert len(float_frame) == 30
exp = np.ravel(np.column_stack((series.values, [np.nan] * 15)))
exp = Series(exp, index=float_frame.index, name="col5")
tm.assert_series_equal(float_frame["col5"], exp)
series = float_frame["A"]
float_frame["col6"] = series
tm.assert_series_equal(series, float_frame["col6"], check_names=False)
# set ndarray
arr = np.random.randn(len(float_frame))
float_frame["col9"] = arr
assert (float_frame["col9"] == arr).all()
float_frame["col7"] = 5
assert (float_frame["col7"] == 5).all()
float_frame["col0"] = 3.14
assert (float_frame["col0"] == 3.14).all()
float_frame["col8"] = "foo"
assert (float_frame["col8"] == "foo").all()
# this is partially a view (e.g. some blocks are view)
# so raise/warn
smaller = float_frame[:2]
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
smaller["col10"] = ["1", "2"]
assert smaller["col10"].dtype == np.object_
assert (smaller["col10"] == ["1", "2"]).all()
def test_setitem2(self):
# dtype changing GH4204
df = DataFrame([[0, 0]])
df.iloc[0] = np.nan
expected = DataFrame([[np.nan, np.nan]])
tm.assert_frame_equal(df, expected)
df = DataFrame([[0, 0]])
df.loc[0] = np.nan
tm.assert_frame_equal(df, expected)
def test_setitem_boolean(self, float_frame):
df = float_frame.copy()
values = float_frame.values
df[df["A"] > 0] = 4
values[values[:, 0] > 0] = 4
tm.assert_almost_equal(df.values, values)
# test that column reindexing works
series = df["A"] == 4
series = series.reindex(df.index[::-1])
df[series] = 1
values[values[:, 0] == 4] = 1
tm.assert_almost_equal(df.values, values)
df[df > 0] = 5
values[values > 0] = 5
tm.assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
tm.assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
tm.assert_almost_equal(df.values, values)
# indexed with same shape but rows-reversed df
df[df[::-1] == 2] = 3
values[values == 2] = 3
tm.assert_almost_equal(df.values, values)
msg = "Must pass DataFrame or 2-d ndarray with boolean values only"
with pytest.raises(TypeError, match=msg):
df[df * 0] = 2
# index with DataFrame
mask = df > np.abs(df)
expected = df.copy()
df[df > np.abs(df)] = np.nan
expected.values[mask.values] = np.nan
tm.assert_frame_equal(df, expected)
# set from DataFrame
expected = df.copy()
df[df > np.abs(df)] = df * 2
np.putmask(expected.values, mask.values, df.values * 2)
tm.assert_frame_equal(df, expected)
def test_setitem_cast(self, float_frame):
float_frame["D"] = float_frame["D"].astype("i8")
assert float_frame["D"].dtype == np.int64
# #669, should not cast?
# this is now set to int64, which means a replacement of the column to
# the value dtype (and nothing to do with the existing dtype)
float_frame["B"] = 0
assert float_frame["B"].dtype == np.int64
# cast if pass array of course
float_frame["B"] = np.arange(len(float_frame))
assert issubclass(float_frame["B"].dtype.type, np.integer)
float_frame["foo"] = "bar"
float_frame["foo"] = 0
assert float_frame["foo"].dtype == np.int64
float_frame["foo"] = "bar"
float_frame["foo"] = 2.5
assert float_frame["foo"].dtype == np.float64
float_frame["something"] = 0
assert float_frame["something"].dtype == np.int64
float_frame["something"] = 2
assert float_frame["something"].dtype == np.int64
float_frame["something"] = 2.5
assert float_frame["something"].dtype == np.float64
def test_setitem_corner(self, float_frame):
# corner case
df = DataFrame({"B": [1.0, 2.0, 3.0], "C": ["a", "b", "c"]}, index=np.arange(3))
del df["B"]
df["B"] = [1.0, 2.0, 3.0]
assert "B" in df
assert len(df.columns) == 2
df["A"] = "beginning"
df["E"] = "foo"
df["D"] = "bar"
df[datetime.now()] = "date"
df[datetime.now()] = 5.0
# what to do when empty frame with index
dm = DataFrame(index=float_frame.index)
dm["A"] = "foo"
dm["B"] = "bar"
assert len(dm.columns) == 2
assert dm.values.dtype == np.object_
# upcast
dm["C"] = 1
assert dm["C"].dtype == np.int64
dm["E"] = 1.0
assert dm["E"].dtype == np.float64
# set existing column
dm["A"] = "bar"
assert "bar" == dm["A"][0]
dm = DataFrame(index=np.arange(3))
dm["A"] = 1
dm["foo"] = "bar"
del dm["foo"]
dm["foo"] = "bar"
assert dm["foo"].dtype == np.object_
dm["coercible"] = ["1", "2", "3"]
assert dm["coercible"].dtype == np.object_
def test_setitem_corner2(self):
data = {
"title": ["foobar", "bar", "foobar"] + ["foobar"] * 17,
"cruft": np.random.random(20),
}
df = DataFrame(data)
ix = df[df["title"] == "bar"].index
df.loc[ix, ["title"]] = "foobar"
df.loc[ix, ["cruft"]] = 0
assert df.loc[1, "title"] == "foobar"
assert df.loc[1, "cruft"] == 0
def test_setitem_ambig(self):
# Difficulties with mixed-type data
from decimal import Decimal
# Created as float type
dm = DataFrame(index=range(3), columns=range(3))
coercable_series = Series([Decimal(1) for _ in range(3)], index=range(3))
uncoercable_series = Series(["foo", "bzr", "baz"], index=range(3))
dm[0] = np.ones(3)
assert len(dm.columns) == 3
dm[1] = coercable_series
assert len(dm.columns) == 3
dm[2] = uncoercable_series
assert len(dm.columns) == 3
assert dm[2].dtype == np.object_
def test_setitem_None(self, float_frame):
# GH #766
float_frame[None] = float_frame["A"]
tm.assert_series_equal(
float_frame.iloc[:, -1], float_frame["A"], check_names=False
)
tm.assert_series_equal(
float_frame.loc[:, None], float_frame["A"], check_names=False
)
tm.assert_series_equal(float_frame[None], float_frame["A"], check_names=False)
repr(float_frame)
def test_loc_setitem_boolean_mask_allfalse(self):
# GH 9596
df = DataFrame(
{"a": ["1", "2", "3"], "b": ["11", "22", "33"], "c": ["111", "222", "333"]}
)
result = df.copy()
result.loc[result.b.isna(), "a"] = result.a
tm.assert_frame_equal(result, df)
def test_getitem_fancy_slice_integers_step(self):
df = DataFrame(np.random.randn(10, 5))
# this is OK
result = df.iloc[:8:2] # noqa
df.iloc[:8:2] = np.nan
assert isna(df.iloc[:8:2]).values.all()
def test_getitem_setitem_integer_slice_keyerrors(self):
df = DataFrame(np.random.randn(10, 5), index=range(0, 20, 2))
# this is OK
cp = df.copy()
cp.iloc[4:10] = 0
assert (cp.iloc[4:10] == 0).values.all()
# so is this
cp = df.copy()
cp.iloc[3:11] = 0
assert (cp.iloc[3:11] == 0).values.all()
result = df.iloc[2:6]
result2 = df.loc[3:11]
expected = df.reindex([4, 6, 8, 10])
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# non-monotonic, raise KeyError
df2 = df.iloc[list(range(5)) + list(range(5, 10))[::-1]]
with pytest.raises(KeyError, match=r"^3$"):
df2.loc[3:11]
with pytest.raises(KeyError, match=r"^3$"):
df2.loc[3:11] = 0
@td.skip_array_manager_invalid_test # already covered in test_iloc_col_slice_view
def test_fancy_getitem_slice_mixed(self, float_frame, float_string_frame):
sliced = float_string_frame.iloc[:, -3:]
assert sliced["D"].dtype == np.float64
# get view with single block
# setting it triggers setting with copy
sliced = float_frame.iloc[:, -3:]
assert np.shares_memory(sliced["C"]._values, float_frame["C"]._values)
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
sliced.loc[:, "C"] = 4.0
assert (float_frame["C"] == 4).all()
def test_getitem_setitem_non_ix_labels(self):
df = tm.makeTimeDataFrame()
start, end = df.index[[5, 10]]
result = df.loc[start:end]
result2 = df[start:end]
expected = df[5:11]
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
result = df.copy()
result.loc[start:end] = 0
result2 = df.copy()
result2[start:end] = 0
expected = df.copy()
expected[5:11] = 0
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
def test_ix_multi_take(self):
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index == 0, :]
xp = df.reindex([0])
tm.assert_frame_equal(rs, xp)
# GH#1321
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index == 0, df.columns == 1]
xp = df.reindex(index=[0], columns=[1])
tm.assert_frame_equal(rs, xp)
def test_getitem_fancy_scalar(self, float_frame):
f = float_frame
ix = f.loc
# individual value
for col in f.columns:
ts = f[col]
for idx in f.index[::5]:
assert ix[idx, col] == ts[idx]
@td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values
def test_setitem_fancy_scalar(self, float_frame):
f = float_frame
expected = float_frame.copy()
ix = f.loc
# individual value
for j, col in enumerate(f.columns):
ts = f[col] # noqa
for idx in f.index[::5]:
i = f.index.get_loc(idx)
val = np.random.randn()
expected.values[i, j] = val
ix[idx, col] = val
tm.assert_frame_equal(f, expected)
def test_getitem_fancy_boolean(self, float_frame):
f = float_frame
ix = f.loc
expected = f.reindex(columns=["B", "D"])
result = ix[:, [False, True, False, True]]
tm.assert_frame_equal(result, expected)
expected = f.reindex(index=f.index[5:10], columns=["B", "D"])
result = ix[f.index[5:10], [False, True, False, True]]
tm.assert_frame_equal(result, expected)
boolvec = f.index > f.index[7]
expected = f.reindex(index=f.index[boolvec])
result = ix[boolvec]
tm.assert_frame_equal(result, expected)
result = ix[boolvec, :]
tm.assert_frame_equal(result, expected)
result = ix[boolvec, f.columns[2:]]
expected = f.reindex(index=f.index[boolvec], columns=["C", "D"])
tm.assert_frame_equal(result, expected)
@td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values
def test_setitem_fancy_boolean(self, float_frame):
# from 2d, set with booleans
frame = float_frame.copy()
expected = float_frame.copy()
mask = frame["A"] > 0
frame.loc[mask] = 0.0
expected.values[mask.values] = 0.0
tm.assert_frame_equal(frame, expected)
frame = float_frame.copy()
expected = float_frame.copy()
frame.loc[mask, ["A", "B"]] = 0.0
expected.values[mask.values, :2] = 0.0
tm.assert_frame_equal(frame, expected)
def test_getitem_fancy_ints(self, float_frame):
result = float_frame.iloc[[1, 4, 7]]
expected = float_frame.loc[float_frame.index[[1, 4, 7]]]
tm.assert_frame_equal(result, expected)
result = float_frame.iloc[:, [2, 0, 1]]
expected = float_frame.loc[:, float_frame.columns[[2, 0, 1]]]
tm.assert_frame_equal(result, expected)
def test_getitem_setitem_boolean_misaligned(self, float_frame):
# boolean index misaligned labels
mask = float_frame["A"][::-1] > 1
result = float_frame.loc[mask]
expected = float_frame.loc[mask[::-1]]
tm.assert_frame_equal(result, expected)
cp = float_frame.copy()
expected = float_frame.copy()
cp.loc[mask] = 0
expected.loc[mask] = 0
tm.assert_frame_equal(cp, expected)
def test_getitem_setitem_boolean_multi(self):
df = DataFrame(np.random.randn(3, 2))
# get
k1 = np.array([True, False, True])
k2 = np.array([False, True])
result = df.loc[k1, k2]
expected = df.loc[[0, 2], [1]]
tm.assert_frame_equal(result, expected)
expected = df.copy()
df.loc[np.array([True, False, True]), np.array([False, True])] = 5
expected.loc[[0, 2], [1]] = 5
tm.assert_frame_equal(df, expected)
def test_getitem_setitem_float_labels(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.random.randn(5, 5), index=index)
result = df.loc[1.5:4]
expected = df.reindex([1.5, 2, 3, 4])
tm.assert_frame_equal(result, expected)
assert len(result) == 4
result = df.loc[4:5]
expected = df.reindex([4, 5]) # reindex with int
tm.assert_frame_equal(result, expected, check_index_type=False)
assert len(result) == 2
result = df.loc[4:5]
expected = df.reindex([4.0, 5.0]) # reindex with float
tm.assert_frame_equal(result, expected)
assert len(result) == 2
# loc_float changes this to work properly
result = df.loc[1:2]
expected = df.iloc[0:2]
tm.assert_frame_equal(result, expected)
df.loc[1:2] = 0
result = df[1:2]
assert (result == 0).all().all()
# #2727
index = Index([1.0, 2.5, 3.5, 4.5, 5.0])
df = DataFrame(np.random.randn(5, 5), index=index)
# positional slicing only via iloc!
msg = (
"cannot do positional indexing on Float64Index with "
r"these indexers \[1.0\] of type float"
)
with pytest.raises(TypeError, match=msg):
df.iloc[1.0:5]
result = df.iloc[4:5]
expected = df.reindex([5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 1
cp = df.copy()
with pytest.raises(TypeError, match=_slice_msg):
cp.iloc[1.0:5] = 0
with pytest.raises(TypeError, match=msg):
result = cp.iloc[1.0:5] == 0
assert result.values.all()
assert (cp.iloc[0:1] == df.iloc[0:1]).values.all()
cp = df.copy()
cp.iloc[4:5] = 0
assert (cp.iloc[4:5] == 0).values.all()
assert (cp.iloc[0:4] == df.iloc[0:4]).values.all()
# float slicing
result = df.loc[1.0:5]
expected = df
tm.assert_frame_equal(result, expected)
assert len(result) == 5
result = df.loc[1.1:5]
expected = df.reindex([2.5, 3.5, 4.5, 5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 4
result = df.loc[4.51:5]
expected = df.reindex([5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 1
result = df.loc[1.0:5.0]
expected = df.reindex([1.0, 2.5, 3.5, 4.5, 5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 5
cp = df.copy()
cp.loc[1.0:5.0] = 0
result = cp.loc[1.0:5.0]
assert (result == 0).values.all()
def test_setitem_single_column_mixed_datetime(self):
df = DataFrame(
np.random.randn(5, 3),
index=["a", "b", "c", "d", "e"],
columns=["foo", "bar", "baz"],
)
df["timestamp"] = Timestamp("20010102")
# check our dtypes
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 3 + [np.dtype("datetime64[ns]")],
index=["foo", "bar", "baz", "timestamp"],
)
tm.assert_series_equal(result, expected)
# GH#16674 iNaT is treated as an integer when given by the user
df.loc["b", "timestamp"] = iNaT
assert not isna(df.loc["b", "timestamp"])
assert df["timestamp"].dtype == np.object_
assert df.loc["b", "timestamp"] == iNaT
# allow this syntax (as of GH#3216)
df.loc["c", "timestamp"] = np.nan
assert isna(df.loc["c", "timestamp"])
# allow this syntax
df.loc["d", :] = np.nan
assert not isna(df.loc["c", :]).all()
def test_setitem_mixed_datetime(self):
# GH 9336
expected = DataFrame(
{
"a": [0, 0, 0, 0, 13, 14],
"b": [
datetime(2012, 1, 1),
1,
"x",
"y",
datetime(2013, 1, 1),
datetime(2014, 1, 1),
],
}
)
df = DataFrame(0, columns=list("ab"), index=range(6))
df["b"] = pd.NaT
df.loc[0, "b"] = datetime(2012, 1, 1)
df.loc[1, "b"] = 1
df.loc[[2, 3], "b"] = "x", "y"
A = np.array(
[
[13, np.datetime64("2013-01-01T00:00:00")],
[14, np.datetime64("2014-01-01T00:00:00")],
]
)
df.loc[[4, 5], ["a", "b"]] = A
tm.assert_frame_equal(df, expected)
def test_setitem_frame_float(self, float_frame):
piece = float_frame.loc[float_frame.index[:2], ["A", "B"]]
float_frame.loc[float_frame.index[-2] :, ["A", "B"]] = piece.values
result = float_frame.loc[float_frame.index[-2:], ["A", "B"]].values
expected = piece.values
tm.assert_almost_equal(result, expected)
def test_setitem_frame_mixed(self, float_string_frame):
# GH 3216
# already aligned
f = float_string_frame.copy()
piece = DataFrame(
[[1.0, 2.0], [3.0, 4.0]], index=f.index[0:2], columns=["A", "B"]
)
key = (f.index[slice(None, 2)], ["A", "B"])
f.loc[key] = piece
tm.assert_almost_equal(f.loc[f.index[0:2], ["A", "B"]].values, piece.values)
def test_setitem_frame_mixed_rows_unaligned(self, float_string_frame):
# GH#3216 rows unaligned
f = float_string_frame.copy()
piece = DataFrame(
[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]],
index=list(f.index[0:2]) + ["foo", "bar"],
columns=["A", "B"],
)
key = (f.index[slice(None, 2)], ["A", "B"])
f.loc[key] = piece
tm.assert_almost_equal(
f.loc[f.index[0:2:], ["A", "B"]].values, piece.values[0:2]
)
def test_setitem_frame_mixed_key_unaligned(self, float_string_frame):
# GH#3216 key is unaligned with values
f = float_string_frame.copy()
piece = f.loc[f.index[:2], ["A"]]
piece.index = f.index[-2:]
key = (f.index[slice(-2, None)], ["A", "B"])
f.loc[key] = piece
piece["B"] = np.nan
tm.assert_almost_equal(f.loc[f.index[-2:], ["A", "B"]].values, piece.values)
def test_setitem_frame_mixed_ndarray(self, float_string_frame):
# GH#3216 ndarray
f = float_string_frame.copy()
piece = float_string_frame.loc[f.index[:2], ["A", "B"]]
key = (f.index[slice(-2, None)], ["A", "B"])
f.loc[key] = piece.values
tm.assert_almost_equal(f.loc[f.index[-2:], ["A", "B"]].values, piece.values)
def test_setitem_frame_upcast(self):
# needs upcasting
df = DataFrame([[1, 2, "foo"], [3, 4, "bar"]], columns=["A", "B", "C"])
df2 = df.copy()
df2.loc[:, ["A", "B"]] = df.loc[:, ["A", "B"]] + 0.5
expected = df.reindex(columns=["A", "B"])
expected += 0.5
expected["C"] = df["C"]
tm.assert_frame_equal(df2, expected)
def test_setitem_frame_align(self, float_frame):
piece = float_frame.loc[float_frame.index[:2], ["A", "B"]]
piece.index = float_frame.index[-2:]
piece.columns = ["A", "B"]
float_frame.loc[float_frame.index[-2:], ["A", "B"]] = piece
result = float_frame.loc[float_frame.index[-2:], ["A", "B"]].values
expected = piece.values
tm.assert_almost_equal(result, expected)
def test_getitem_setitem_ix_duplicates(self):
# #1201
df = DataFrame(np.random.randn(5, 3), index=["foo", "foo", "bar", "baz", "bar"])
result = df.loc["foo"]
expected = df[:2]
tm.assert_frame_equal(result, expected)
result = df.loc["bar"]
expected = df.iloc[[2, 4]]
tm.assert_frame_equal(result, expected)
result = df.loc["baz"]
expected = df.iloc[3]
tm.assert_series_equal(result, expected)
def test_getitem_ix_boolean_duplicates_multiple(self):
# #1201
df = DataFrame(np.random.randn(5, 3), index=["foo", "foo", "bar", "baz", "bar"])
result = df.loc[["bar"]]
exp = df.iloc[[2, 4]]
tm.assert_frame_equal(result, exp)
result = df.loc[df[1] > 0]
exp = df[df[1] > 0]
tm.assert_frame_equal(result, exp)
result = df.loc[df[0] > 0]
exp = df[df[0] > 0]
tm.assert_frame_equal(result, exp)
@pytest.mark.parametrize("bool_value", [True, False])
def test_getitem_setitem_ix_bool_keyerror(self, bool_value):
# #2199
df = DataFrame({"a": [1, 2, 3]})
message = f"{bool_value}: boolean label can not be used without a boolean index"
with pytest.raises(KeyError, match=message):
df.loc[bool_value]
msg = "cannot use a single bool to index into setitem"
with pytest.raises(KeyError, match=msg):
df.loc[bool_value] = 0
# TODO: rename? remove?
def test_single_element_ix_dont_upcast(self, float_frame):
float_frame["E"] = 1
assert issubclass(float_frame["E"].dtype.type, (int, np.integer))
result = float_frame.loc[float_frame.index[5], "E"]
assert is_integer(result)
# GH 11617
df = DataFrame({"a": [1.23]})
df["b"] = 666
result = df.loc[0, "b"]
assert is_integer(result)
expected = Series([666], [0], name="b")
result = df.loc[[0], "b"]
| tm.assert_series_equal(result, expected) | pandas._testing.assert_series_equal |
# Categorical Variables
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
### Read the data
data = pd.read_csv('./Data/melb_data.csv')
### Separate target from predictors
y = data.Price
X = data.drop(['Price'], axis=1)
### Divide data into training and validation subsets
X_train_full, X_valid_full, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
### Drop columns with missing values (simplest approach)
cols_with_missing = [col for col in X_train_full.columns if X_train_full[col].isnull().any()]
X_train_full.drop(cols_with_missing, axis=1, inplace=True)
X_valid_full.drop(cols_with_missing, axis=1, inplace=True)
### "Cardinality" means the number of unique values in a column
### Select categorical columns with relatively low cardinality (convenient but arbitrary)
low_cardinality_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == "object"]
### Select numerical columns
numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']]
### Keep selected columns only
my_cols = low_cardinality_cols + numerical_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy()
### Get list of categorical variables
s = (X_train.dtypes == 'object')
object_cols = list(s[s].index)
print("Categorical variables:")
print(object_cols)
### Function for comparing different approaches
def score_dataset(X_train, X_valid, y_train, y_valid):
model = RandomForestRegressor(n_estimators=100, random_state=0)
model.fit(X_train, y_train)
preds = model.predict(X_valid)
return mean_absolute_error(y_valid, preds)
drop_X_train = X_train.select_dtypes(exclude=['object'])
drop_X_valid = X_valid.select_dtypes(exclude=['object'])
print("MAE from Approach 1 (Drop categorical variables):")
print(score_dataset(drop_X_train, drop_X_valid, y_train, y_valid))
### Make copy to avoid changing original data
label_X_train = X_train.copy()
label_X_valid = X_valid.copy()
### Apply label encoder to each column with categorical data
label_encoder = LabelEncoder()
for col in object_cols:
label_X_train[col] = label_encoder.fit_transform(X_train[col])
label_X_valid[col] = label_encoder.transform(X_valid[col])
print("MAE from Approach 2 (Label Encoding):")
print(score_dataset(label_X_train, label_X_valid, y_train, y_valid))
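### Caveat: LabelEncoder.transform raises a ValueError if the validation data contains
### a category that never appears in the training data; if that happens, the affected
### columns need to be handled (e.g. dropped or re-split) before this approach works.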
### Apply one-hot encoder to each column with categorical data
OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
OH_cols_train = pd.DataFrame(OH_encoder.fit_transform(X_train[object_cols]))
OH_cols_valid = pd.DataFrame(OH_encoder.transform(X_valid[object_cols]))
### One-hot encoding removed index; put it back
OH_cols_train.index = X_train.index
OH_cols_valid.index = X_valid.index
### Remove categorical columns (will replace with one-hot encoding)
num_X_train = X_train.drop(object_cols, axis=1)
num_X_valid = X_valid.drop(object_cols, axis=1)
### Add one-hot encoded columns to numerical features
OH_X_train = | pd.concat([num_X_train, OH_cols_train], axis=1) | pandas.concat |
import time
import logging
from TwitterAPI import TwitterAPI
from twython import Twython
from twython import TwythonError, TwythonRateLimitError, TwythonAuthError
import pandas as pd
from datetime import datetime, timedelta
from spikexplore.NodeInfo import NodeInfo
from spikexplore.graph import add_node_attributes, add_edges_attributes
logger = logging.getLogger(__name__)
class TwitterCredentials:
def __init__(self, app_key, access_token, consumer_key=None, consumer_secret=None):
self.app_key = app_key
self.access_token = access_token
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
class TweetsGetterV1:
def __init__(self, credentials, config):
# Instantiate an object
self.app_key = credentials.app_key
self.access_token = credentials.access_token
self.config = config
self.twitter_handle = Twython(self.app_key, access_token=self.access_token)
pass
def _filter_old_tweets(self, tweets):
max_day_old = self.config.max_day_old
if not max_day_old:
return tweets
days_limit = datetime.now() - timedelta(days=max_day_old)
tweets_filt = filter(lambda t: datetime.strptime(t['created_at'], '%a %b %d %H:%M:%S +0000 %Y') >= days_limit,
tweets)
return list(tweets_filt)
def get_user_tweets(self, username):
# Collect tweets from a username
count = self.config.max_tweets_per_user
# Test if ok
try:
user_tweets_raw = self.twitter_handle.get_user_timeline(screen_name=username,
count=count, include_rts=True,
tweet_mode='extended', exclude_replies=False)
# remove old tweets
user_tweets_filt = self._filter_old_tweets(user_tweets_raw)
# make a dictionary
user_tweets = {x['id']: x for x in user_tweets_filt}
tweets_metadata = \
map(lambda x: (x[0], {'user': x[1]['user']['screen_name'],
'name': x[1]['user']['name'],
'user_details': x[1]['user']['description'],
'mentions': list(
map(lambda y: y['screen_name'], x[1]['entities']['user_mentions'])),
'hashtags': list(map(lambda y: y['text'], x[1]['entities']['hashtags'])),
'retweet_count': x[1]['retweet_count'],
'favorite_count': x[1]['favorite_count'], 'created_at': x[1]['created_at'],
'account_creation': x[1]['user']['created_at'],
'account_followers': x[1]['user']['followers_count'],
'account_following': x[1]['user']['friends_count'],
'account_statuses': x[1]['user']['statuses_count'],
'account_favourites': x[1]['user']['favourites_count'],
'account_verified': x[1]['user']['verified'],
'account_default_profile': x[1]['user']['default_profile'],
'account_default_profile_image': x[1]['user']['default_profile_image']}),
user_tweets.items())
return user_tweets, dict(tweets_metadata)
except TwythonAuthError as e_auth:
if e_auth.error_code == 401:
logger.warning('Unauthorized access to user {}. Skipping.'.format(username))
return {}, {}
else:
logger.error('Cannot access to twitter API, authentification error. {}'.format(e_auth.error_code))
raise
except TwythonRateLimitError as e_lim:
logger.warning('API rate limit reached')
logger.warning(e_lim)
remainder = float(self.twitter_handle.get_lastfunction_header(header='x-rate-limit-reset')) - time.time()
logger.warning('Retry after {} seconds.'.format(remainder))
time.sleep(remainder + 1)
del self.twitter_handle
self.twitter_handle = Twython(self.app_key, access_token=self.access_token) # seems you need this
return {}, {} # best way to handle it ?
except TwythonError as e:
logger.error('Twitter API returned error {} for user {}.'.format(e.error_code, username))
return {}, {}
def reshape_node_data(self, node_df):
# user name user_details mentions hashtags retweet_count favorite_count
# created_at account_creation account_followers account_following account_statuses account_favourites
# account_verified account_default_profile account_default_profile_image spikyball_hop
node_df = node_df[
['user', 'name', 'user_details', 'spikyball_hop', 'account_creation', 'account_default_profile',
'account_default_profile_image', 'account_favourites', 'account_followers', 'account_following',
'account_statuses', 'account_verified']]
node_df = node_df.reset_index().groupby('user').max().rename(columns={'index': 'max_tweet_id'})
return node_df
class TweetsGetterV2:
def __init__(self, credentials, config):
self.twitter_handle = TwitterAPI(credentials.consumer_key, credentials.consumer_secret,
api_version='2', auth_type='oAuth2')
self.config = config
self.start_time = None
if config.max_day_old:
days_limit = datetime.now() - timedelta(days=config.max_day_old)
# date format: 2010-11-06T00:00:00Z
self.start_time = days_limit.strftime('%Y-%m-%dT%H:%M:%SZ')
self.user_cache = {}
def _safe_twitter_request(self, request_str, params):
res = self.twitter_handle.request(request_str, params)
while res.status_code == 429: # rate limit reached
logger.warning('API rate limit reached')
remainder = float(res.headers['x-rate-limit-reset']) - time.time()
logger.warning('Retry after {} seconds.'.format(remainder))
time.sleep(remainder + 1)
res = self.twitter_handle.request(request_str, params)
if res.status_code != 200:
logger.warning('API returned with code {}'.format(res.status_code))
return res
def _get_user_info(self, username):
if username not in self.user_cache:
params = {'user.fields': 'created_at,verified,description,public_metrics,protected,profile_image_url'}
res = dict(self._safe_twitter_request('users/by/username/:{}'.format(username), params).json())
if 'errors' in res:
self.user_cache[username] = None
for e in res['errors']:
logger.info(e['detail'])
else:
self.user_cache[username] = res['data']
return self.user_cache[username]
def _fill_user_info(self, includes):
if 'users' not in includes:
return
for u in includes['users']:
if u['username'] not in self.user_cache:
self.user_cache[u['username']] = u
def _get_user_tweets(self, username, num_tweets, next_token):
assert(num_tweets <= 100 and num_tweets > 0)
params = {'max_results': num_tweets, 'expansions': 'author_id,entities.mentions.username,referenced_tweets.id',
'tweet.fields': 'entities,created_at,public_metrics,lang,referenced_tweets',
'user.fields': 'verified,description,created_at,public_metrics,protected,profile_image_url'}
if self.start_time:
params['start_time'] = self.start_time
if next_token:
params['pagination_token'] = next_token
user_info = self._get_user_info(username)
if not user_info: # not found
return {}, {}, None
if user_info['protected']:
logger.info('Skipping user {} - protected account'.format(username))
return {}, {}, None
tweets_raw = dict(self._safe_twitter_request('users/:{}/tweets'.format(user_info['id']), params).json())
if 'errors' in tweets_raw:
err_details = set([e['detail'] for e in tweets_raw['errors']])
for e in err_details:
logger.info(e)
if 'data' not in tweets_raw:
logger.info('Empty results for {}'.format(username))
return {}, {}, None
user_tweets = {int(x['id']): x for x in tweets_raw['data']}
referenced_tweets = {x['id']: x for x in tweets_raw['includes'].get('tweets', {})}
# make the tweets dict similar to the one retrieved using APIv1
for k in user_tweets.keys():
user_tweets[k]['id_str'] = user_tweets[k]['id']
user_tweets[k]['id'] = k # preserve 'id' as int (used as index)
user_tweets[k]['full_text'] = user_tweets[k].pop('text')
user_tweets[k]['user'] = {'id': int(user_info['id']), 'id_str': user_info['id'],
'screen_name': user_info['username'], 'name': user_info['name'],
'description': user_info['description'], 'verified': user_info['verified'],
'protected': user_info['protected'],
'created_at': user_info['created_at'],
'followers_count': user_info['public_metrics']['followers_count'],
'friends_count': user_info['public_metrics']['following_count'],
'statuses_count': user_info['public_metrics']['tweet_count']}
# handle retweet info
if 'referenced_tweets' in user_tweets[k]:
ref = list(filter(lambda x: x['type'] == 'quoted' or x['type'] == 'retweeted',
user_tweets[k]['referenced_tweets']))
if ref:
ref_type = ref[0]['type']
ref_txt = ''
if ref_type == 'quoted':
ref_txt = user_tweets[k]['full_text'] + " "
ref_txt += referenced_tweets[ref[0]['id']]['text']
user_tweets[k]['retweeted_status'] = {'full_text': ref_txt}
tweets_metadata = \
dict(map(lambda x: (x[0], {'user': user_info['username'],
'name': user_info['name'],
'user_details': user_info['description'],
'mentions': list(
map(lambda y: y['username'], x[1].get('entities', {}).get('mentions', {}))),
'hashtags': list(
map(lambda y: y['tag'], x[1].get('entities', {}).get('hashtags', {}))),
'retweet_count': x[1]['public_metrics']['retweet_count'],
'favorite_count': x[1]['public_metrics']['like_count'],
'created_at': x[1]['created_at'],
'account_creation': user_info['created_at'],
'account_followers': user_info['public_metrics']['followers_count'],
'account_following': user_info['public_metrics']['following_count'],
'account_statuses': user_info['public_metrics']['tweet_count'],
'account_verified': user_info['verified']}),
user_tweets.items()))
if 'includes' in tweets_raw:
self._fill_user_info(tweets_raw['includes'])
return user_tweets, tweets_metadata, tweets_raw['meta'].get('next_token', None)
def get_user_tweets(self, username):
remaining_number_of_tweets = self.config.max_tweets_per_user
next_token = None
user_tweets_acc = {}
tweets_metadata_acc = {}
while remaining_number_of_tweets > 0:
number_of_tweets = 100 if remaining_number_of_tweets > 100 else remaining_number_of_tweets
remaining_number_of_tweets -= number_of_tweets
user_tweets, tweets_metadata, next_token = self._get_user_tweets(username, number_of_tweets, next_token)
user_tweets_acc.update(user_tweets)
tweets_metadata_acc.update(tweets_metadata)
if not next_token:
break
return user_tweets_acc, tweets_metadata_acc
def reshape_node_data(self, node_df):
node_df = node_df[
['user', 'name', 'user_details', 'spikyball_hop', 'account_creation',
'account_followers', 'account_following',
'account_statuses', 'account_verified']]
node_df = node_df.reset_index().groupby('user').max().rename(columns={'index': 'max_tweet_id'})
return node_df
class TwitterNetwork:
class TwitterNodeInfo(NodeInfo):
def __init__(self, user_hashtags=None, user_tweets=None, tweets_meta=pd.DataFrame()):
self.user_hashtags = user_hashtags if user_hashtags else {}
self.user_tweets = user_tweets if user_tweets else {}
self.tweets_meta = tweets_meta
def update(self, new_info):
self.user_hashtags.update(new_info.user_hashtags)
self.user_tweets.update(new_info.user_tweets)
def get_nodes(self):
return self.tweets_meta
def __init__(self, credentials, config):
if config.api_version == 1:
self.tweets_getter = TweetsGetterV1(credentials, config)
elif config.api_version == 2:
self.tweets_getter = TweetsGetterV2(credentials, config)
else:
raise ValueError("Invalid api version")
self.config = config
def create_node_info(self):
return self.TwitterNodeInfo()
def get_neighbors(self, user):
if not isinstance(user, str):
return self.TwitterNodeInfo(), pd.DataFrame()
tweets_dic, tweets_meta = self.tweets_getter.get_user_tweets(user)
edges_df, node_info = self.edges_nodes_from_user(tweets_meta, tweets_dic)
# replace user and mentions by source and target
if not edges_df.empty:
edges_df.index.names = ['source', 'target']
edges_df.reset_index(level=['source', 'target'], inplace=True)
return node_info, edges_df
def filter(self, node_info, edges_df):
# filter edges according to node properties
# filter according to edges properties
edges_df = self.filter_edges(edges_df)
return node_info, edges_df
def filter_edges(self, edges_df):
# filter edges according to their properties
if edges_df.empty:
return edges_df
return edges_df[edges_df['weight'] >= self.config.min_mentions]
def neighbors_list(self, edges_df):
if edges_df.empty:
return edges_df
users_connected = edges_df['target'].tolist()
return users_connected
def neighbors_with_weights(self, edges_df):
user_list = self.neighbors_list(edges_df)
return dict.fromkeys(user_list, 1)
###############################################################
# Functions for extracting tweet info from the twitter API
###############################################################
def edges_nodes_from_user(self, tweets_meta, tweets_dic):
# Make an edge and node property dataframes
edges_df = self.get_edges(tweets_meta)
user_info = self.get_nodes_properties(tweets_meta, tweets_dic)
return edges_df, user_info
def get_edges(self, tweets_meta):
if not tweets_meta:
return pd.DataFrame()
        # Create the user -> mention table with their properties from the list of tweets of a user
meta_df = pd.DataFrame.from_dict(tweets_meta, orient='index').explode('mentions').dropna()
# Some bots to be removed from the collection
users_to_remove = self.config.users_to_remove
filtered_meta_df = meta_df[~meta_df['mentions'].isin(users_to_remove) &
~meta_df['mentions'].isin(meta_df['user'])]
# group by mentions and keep list of tweets for each mention
tmp = filtered_meta_df.groupby(['user', 'mentions']).apply(lambda x: (x.index.tolist(), len(x.index)))
if tmp.empty:
return tmp
edge_df = pd.DataFrame(tmp.tolist(), index=tmp.index) \
.rename(columns={0: 'tweet_id', 1: 'weight'})
return edge_df
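        # edge_df is indexed by (user, mentions) and carries two columns:
        # 'tweet_id' (the list of tweet ids in which the mention occurs) and
        # 'weight' (how many such tweets); get_neighbors later renames the
        # index levels to (source, target).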
def get_nodes_properties(self, tweets_meta, tweets_dic):
if not tweets_meta:
            return self.TwitterNodeInfo({}, {}, pd.DataFrame())
# MLP
import csv
from itertools import islice
import random
import matplotlib.pyplot as plt
import numpy as np
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import KFold, train_test_split
import pandas as pd
from sklearn.utils import shuffle
import tensorflow as tf
def bit2attr(bitstr) -> list:
attr_vec = list()
for i in range(len(bitstr)):
attr_vec.append(int(bitstr[i]))
return attr_vec
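# e.g. bit2attr('1010') -> [1, 0, 1, 0]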
def mean_relative_error(y_pred, y_test):
assert len(y_pred) == len(y_test)
mre = 0.0
for i in range(len(y_pred)):
mre = mre + abs((y_pred[i] - y_test[i]) / y_test[i])
    mre = mre * 100 / len(y_pred)
return mre
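# Sanity check (illustrative): mean_relative_error([11.0], [10.0]) evaluates to ~10.0,
# i.e. the error is expressed in percent.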
Large_MRE_points = pd.DataFrame()
Large_MRE_X = []
Large_MRE_y_test = []
Large_MRE_y_pred = []
Large_MRE = []
'''
1) Data preprocessing
'''
# filepath = 'data/fp/sjn/R+B+Cmorgan_fp1202.csv'
filepath = 'data/database/22-01-29-descriptor-train.csv'
data = pd.read_csv(filepath, encoding='gb18030')
print(data.shape)
data = data.dropna()
print(data.shape)
data = shuffle(data)
data_x_df = data.drop(['label'], axis=1)
data_y_df = data[['label']]
# 归一化
min_max_scaler_X = MinMaxScaler()
min_max_scaler_X.fit(data_x_df)
x_trans1 = min_max_scaler_X.transform(data_x_df)
min_max_scaler_y = MinMaxScaler()
min_max_scaler_y.fit(data_y_df)
y_trans1 = min_max_scaler_y.transform(data_y_df)
test_filepath = "data/database/22-01-29-descriptor-test-level-1.csv"
test_data = | pd.read_csv(test_filepath, encoding='gb18030') | pandas.read_csv |
import pandas as pd
from pandas.api.types import is_numeric_dtype, is_categorical, infer_dtype
from functools import reduce
import warnings
import weakref
from itertools import combinations
from scipy.stats import chi2_contingency
import numpy as np
from collections import Counter
@pd.api.extensions.register_dataframe_accessor("cats")
class CatsAccessor:
"""A class of useful categorical stuff to add to pandas
"""
def __init__(self, pandas_obj):
self._finalizer = weakref.finalize(self, self._cleanup)
self._validate(pandas_obj)
self._obj = pandas_obj
self._categorical_columns = None
def _cleanup(self):
del self._obj
def remove(self):
self._finalizer()
@staticmethod
def _validate(obj):
# verify this is a DataFrame
if not isinstance(obj, pd.DataFrame):
raise AttributeError("Must be a pandas DataFrame")
def _get_categorical_columns(self):
        # assumption: "categorical-like" means anything pandas infers as categorical,
        # boolean or string-typed data
        result = [col for col in self._obj.columns
                  if infer_dtype(self._obj[col]) in ('categorical', 'boolean', 'string')]
        self._categorical_columns = result
        return result
from MP import MpFunctions
import requests
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import pandas as pd
import plotly.graph_objs as go
import datetime as dt
import numpy as np
import warnings
warnings.filterwarnings('ignore')
app = dash.Dash(__name__)
def get_ticksize(data, freq=30):
# data = dflive30
numlen = int(len(data) / 2)
# sample size for calculating ticksize = 50% of most recent data
tztail = data.tail(numlen).copy()
tztail['tz'] = tztail.Close.rolling(freq).std() # std. dev of 30 period rolling
tztail = tztail.dropna()
ticksize = np.ceil(tztail['tz'].mean() * 0.25) # 1/4 th of mean std. dev is our ticksize
if ticksize < 0.2:
ticksize = 0.2 # minimum ticksize limit
return int(ticksize)
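# In words: take the most recent half of the series, compute a 30-period rolling
# standard deviation of Close, and use a quarter of its mean (rounded up) as the
# tick size. Note the 0.2 floor is truncated to 0 by the final int() cast.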
def get_data(url):
"""
:param url: binance url
:return: ohlcv dataframe
"""
response = requests.get(url)
data = response.json()
df = pd.DataFrame(data)
df = df.apply(pd.to_numeric)
df[0] = pd.to_datetime(df[0], unit='ms')
df = df[[0, 1, 2, 3, 4, 5]]
df.columns = ['datetime', 'Open', 'High', 'Low', 'Close', 'volume']
df = df.set_index('datetime', inplace=False, drop=False)
return df
url_30m = "https://www.binance.com/api/v1/klines?symbol=BTCBUSD&interval=30m" # 10 days history 30 min ohlcv
df = get_data(url_30m)
df.to_csv('btcusd30m.csv', index=False)
# params
context_days = len([group[1] for group in df.groupby(df.index.date)]) # Number of days used for context
freq = 2 # for 1 min bar use 30 min frequency for each TPO, here we fetch default 30 min bars server
avglen = context_days - 2 # num days to calculate average values
mode = 'tpo' # for volume --> 'vol'
trading_hr = 24 # Default for BTC USD or Forex
day_back = 0 # -1 While testing sometimes maybe you don't want current days data then use -1
# ticksz = 28 # If you want to use manual tick size then uncomment this. Really small number means convoluted alphabets (TPO)
ticksz = (get_ticksize(df.copy(), freq=freq))*2 # Algorithm will calculate the optimal tick size based on volatility
textsize = 10
if day_back != 0:
symbol = 'Historical Mode'
else:
symbol = 'BTC-USD Live'
dfnflist = [group[1] for group in df.groupby(df.index.date)] #
dates = []
for d in range(0, len(dfnflist)):
dates.append(dfnflist[d].index[0])
date_time_close = dt.datetime.today().strftime('%Y-%m-%d') + ' ' + '23:59:59'
append_dt = pd.Timestamp(date_time_close)
dates.append(append_dt)
date_mark = {str(h): {'label': str(h), 'style': {'color': 'blue', 'fontsize': '4',
'text-orientation': 'upright'}} for h in range(0, len(dates))}
mp = MpFunctions(data=df.copy(), freq=freq, style=mode, avglen=avglen, ticksize=ticksz, session_hr=trading_hr)
mplist = mp.get_context()
app.layout = html.Div(
html.Div([
dcc.Location(id='url', refresh=False),
dcc.Link('Twitter', href='https://twitter.com/beinghorizontal'),
html.Br(),
dcc.Link('python source code', href='http://www.github.com/beinghorizontal'),
html.H4('@beinghorizontal'),
dcc.Graph(id='beinghorizontal'),
dcc.Interval(
id='interval-component',
interval=5 * 1000, # Reduce the time if you want frequent updates 5000 = 5 sec
n_intervals=0
),
html.P([
html.Label("Time Period"),
dcc.RangeSlider(id='slider',
pushable=1,
marks=date_mark,
min=0,
max=len(dates),
step=None,
value=[len(dates) - 2, len(dates) - 1])
], style={'width': '80%',
'fontSize': '14px',
'padding-left': '100px',
'display': 'inline-block'})
])
)
@app.callback(Output(component_id='beinghorizontal', component_property='figure'),
[Input('interval-component', 'n_intervals'),
Input('slider', 'value')
])
def update_graph(n, value):
listmp_hist = mplist[0]
distribution_hist = mplist[1]
url_1m = "https://www.binance.com/api/v1/klines?symbol=BTCBUSD&interval=1m"
df_live1 = get_data(url_1m) # this line fetches new data for current day
df_live1 = df_live1.dropna()
dflive30 = df_live1.resample('30min').agg({'datetime': 'last', 'Open': 'first', 'High': 'max', 'Low': 'min',
'Close': 'last', 'volume': 'sum'})
df2 = pd.concat([df, dflive30])
df2 = df2.drop_duplicates('datetime')
ticksz_live = (get_ticksize(dflive30.copy(), freq=2))
mplive = MpFunctions(data=dflive30.copy(), freq=freq, style=mode, avglen=avglen, ticksize=ticksz_live,
session_hr=trading_hr)
mplist_live = mplive.get_context()
listmp_live = mplist_live[0] # it will be in list format so take [0] slice for current day MP data frame
df_distribution_live = mplist_live[1]
df_distribution_concat = | pd.concat([distribution_hist, df_distribution_live], axis=0) | pandas.concat |
import numpy as np
import pandas as pd
from itertools import islice
import multiprocessing
from multiprocessing.pool import ThreadPool, Pool
N_CPUS = multiprocessing.cpu_count()
def batch_generator(iterable, n=1):
if hasattr(iterable, '__len__'):
# https://stackoverflow.com/questions/8290397/how-to-split-an-iterable-in-constant-size-chunks
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx:min(ndx + n, l)]
elif hasattr(iterable, '__next__'):
# https://stackoverflow.com/questions/1915170/split-a-generator-iterable-every-n-items-in-python-splitevery
i = iter(iterable)
piece = list(islice(i, n))
while piece:
yield piece
piece = list(islice(i, n))
else:
raise ValueError('Iterable is not iterable?')
def map_batches_multiproc(func, iterable, chunksize, multiproc_mode='threads',
n_threads=None, threads_per_cpu=1.0):
if n_threads is None:
n_threads = int(threads_per_cpu * N_CPUS)
if hasattr(iterable, '__len__') and len(iterable) <= chunksize:
return [func(iterable)]
with pool_type(multiproc_mode)(n_threads) as pool:
batches = batch_generator(iterable, n=chunksize)
return list(pool.imap(func, batches))
def pool_type(parallelism_type):
if 'process' in parallelism_type.lower():
return Pool
elif 'thread' in parallelism_type.lower():
return ThreadPool
else:
raise ValueError('Unsupported value for "parallelism_type"')
def parallelize_dataframe(df, func, n_partitions=N_CPUS, parallelism_type='process'):
# with Pool(n_partitions) as pool:
# return pd.concat(pool.map(func, np.array_split(df, n_partitions)))
df_split = np.array_split(df, n_partitions)
with pool_type(parallelism_type)(n_partitions) as pool:
res = pool.map(func, df_split)
df = | pd.concat(res, sort=False) | pandas.concat |
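# Illustrative usage (added; names are hypothetical): split a frame into one chunk
# per worker, apply the function to each chunk, and concatenate the results. A
# thread pool avoids having to pickle a locally defined function.
#
#   def _add_one(chunk):
#       chunk['x'] = chunk['x'] + 1
#       return chunk
#
#   out = parallelize_dataframe(pd.DataFrame({'x': range(8)}), _add_one,
#                               parallelism_type='thread')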
#!/usr/bin/env python3
import sys
import re
import os
import collections
import pickle
import pandas as pd
from pandas import ExcelWriter
from pandas import ExcelFile
def unpack_layer_tops_product(layer_top):
product = 1
for i in layer_top:
product *= int(i)
return product
def calculate_mlu_ops_byte(layer_name, net_shape_dict, debug=True):
flops = 0
mem_bytes = 0
if debug:
print(net_shape_dict)
if layer_name in net_shape_dict:
v = net_shape_dict[layer_name]
if v['type'] == 'Convolution':
# ops = out_h*out_w*(2*in_c*k_s*k_s)*out_c*out_n
# bytes = (k_s*k_s*in_c*out_c+out_h*out_w*out_c)*out_n*2
in_n, in_c, in_h, in_w = v['bottoms'][0]
out_n, out_c, out_h, out_w = v['tops'][0]
k_s = int(v['kernel_size'])
in_n, in_c, in_h, in_w = int(in_n), int(in_c), int(in_h), int(in_w)
out_n, out_c, out_h, out_w = int(out_n), int(out_c), int(out_h), int(out_w)
flops = out_h * out_w * (2 * in_c * k_s * k_s) * out_c * out_n
mem_bytes = (k_s * k_s * in_c * out_c + out_h * out_w * out_c) * out_n * 4
if debug:
print(flops, mem_bytes)
elif v['type'] == 'Pooling':
in_n, in_c, in_h, in_w = v['bottoms'][0]
out_n, out_c, out_h, out_w = v['tops'][0]
k_s = int(v['kernel_size'])
in_n, in_c, in_h, in_w = int(in_n), int(in_c), int(in_h), int(in_w)
out_n, out_c, out_h, out_w = int(out_n), int(out_c), int(out_h), int(out_w)
if int(v['kernel_size']) == 0:
# global pooling
# ops = in_c*in_h*in_w*in_n or in_c*(in_h*in_w+1)*in_n
# bytes = (in_c*in_h*in_w+out_c*out_h*out_w)*out_n*2
flops = in_c * (in_h * in_w + 1) * in_n
mem_bytes = (in_c * in_h * in_w + out_c * out_h * out_w) * out_n * 4
else:
# common pooling
# ops = out_c*out_h*out_w*k_s*k_s*out_n
# bytes = out_c*out_h*out_w*(k_s*k_s+1)*out_n*2
flops = out_c * out_h * out_w * k_s * k_s * out_n
mem_bytes = out_c * out_h * out_w * (k_s * k_s + 1) * out_n * 4
if debug:
print(flops, mem_bytes)
elif v['type'] == 'ReLU':
# ops=2*N*C*H*W
# bytes=2*N*C*H*W*2
out_n, out_c, out_h, out_w = v['tops'][0]
out_n, out_c, out_h, out_w = int(out_n), int(out_c), int(out_h), int(out_w)
flops = out_n * out_c * out_h * out_w
mem_bytes = 2 * out_n * out_c * out_h * out_w * 4
if debug:
print(flops, mem_bytes)
elif v['type'] == 'Scale':
# ops=2*N*C*H*W
# bytes=3*N*C*H*W*2
#out_n, out_c, out_h, out_w = v['tops'][0]
#out_n, out_c, out_h, out_w = int(out_n), int(out_c), int(out_h), int(out_w)
#flops = 2 * out_n * out_c * out_h * out_w
#mem_bytes = 3 * out_n * out_c * out_w * out_w * 2
product = unpack_layer_tops_product(v['tops'][0])
flops = 2 * product
mem_bytes = 3 * product * 4
if debug:
print(flops, mem_bytes)
elif v['type'] == 'Softmax':
# ops=4*N*C*H*W
# bytes=2*N*C*H*W*2
# Note: sometimes this layer only has two dimensions:
product = unpack_layer_tops_product(v['tops'][0])
flops = 4 * product
mem_bytes = 2 * product * 4
if debug:
print(flops, mem_bytes)
elif v['type'] == 'BatchNorm':
# ops=8*N*C*H*W
# bytes=4*N*C*H*W*2
#out_n, out_c, out_h, out_w = v['tops'][0]
#out_n, out_c, out_h, out_w = int(out_n), int(out_c), int(out_h), int(out_w)
#flops = 8 * out_n * out_c * out_h * out_w
#mem_bytes = 4 * out_n * out_c * out_w * out_w * 2
product = unpack_layer_tops_product(v['tops'][0])
flops = 8 * product
mem_bytes = 4 * product * 4
if debug:
print(flops, mem_bytes)
elif v['type'] == 'Dropout':
# ops=3*N*C*H*W
# bytes=3*N*C*H*W*2
out_n, out_c, out_h, out_w = v['tops'][0]
out_n, out_c, out_h, out_w = int(out_n), int(out_c), int(out_h), int(out_w)
flops = 3 * out_n * out_c * out_h * out_w
mem_bytes = 3 * out_n * out_c * out_h * out_w * 4
if debug:
print(flops, mem_bytes)
elif v['type'] == 'Concat':
# ops=0
# bytes=2*out_n*out*c*out*h*out*w*2
out_n, out_c, out_h, out_w = v['tops'][0]
out_n, out_c, out_h, out_w = int(out_n), int(out_c), int(out_h), int(out_w)
flops = 0
mem_bytes = 2 * out_n * out_c * out_h * out_w * 4
if debug:
print(flops, mem_bytes)
elif v['type'] == 'Eltwise':
# ops=2*N*C*H*W
# bytes=3*N*C*H*W*2
#out_n, out_c, out_h, out_w = v['tops'][0]
#out_n, out_c, out_h, out_w = int(out_n), int(out_c), int(out_h), int(out_w)
#flops = 2 * out_n * out_c * out_h * out_w
#mem_bytes = 3 * out_n * out_c * out_w * out_w * 2
product = unpack_layer_tops_product(v['tops'][0])
flops = 2 * product
mem_bytes = 3 * product * 4
if debug:
print(flops, mem_bytes)
elif v['type'] == 'InnerProduct':
# ops = out_h*outw*(2*in_c*k_s*k_s)*out_c*out_n
# bytes = (k_s*k_s*in_c*out_c+out_h*out_w*out_c)*in_n*2
if debug:
print('bottoms: ', v['bottoms'])
print('tops: ', v['tops'])
in_n, in_c, in_h, in_w = v['bottoms'][0]
#out_n, out_c, out_h, out_w = v['tops'][0]
if len(v['tops'][0]) == 4:
out_n, out_c, out_h, out_w = v['tops'][0]
elif len(v['tops'][0]) == 3:
out_n, out_c, out_h = v['tops'][0]
out_w = 1
elif len(v['tops'][0]) == 2:
out_n, out_c = v['tops'][0]
out_h = 1
out_w = 1
elif len(v['tops'][0]) == 1:
out_n = v['tops'][0]
out_c = 1
out_h = 1
out_w = 1
if 'kernel_size' in v:
k_s = int(v['kernel_size'])
else:
k_s = 1
in_n, in_c, in_h, in_w = int(in_n), int(in_c), int(in_h), int(in_w)
flops = out_h * out_w * (2 * in_c * k_s * k_s) * out_c * out_n
mem_bytes = (k_s * k_s * in_c * out_c + out_h * out_w * out_c) * in_n * 4
if debug:
print(flops, mem_bytes)
elif v['type'] == 'Normalize':
# ops=8*N*C*H*W
# bytes=2*N*C*H*W*2
out_n, out_c, out_h, out_w = v['tops'][0]
out_n, out_c, out_h, out_w = int(out_n), int(out_c), int(out_h), int(out_w)
flops = 8 * out_n * out_c * out_h * out_w
mem_bytes = 2 * out_n * out_c * out_h * out_w * 4
if debug:
print(flops, mem_bytes)
elif v['type'] == 'SsdDetection':
# ops=4*N*C*H*W
# bytes=(in_0+in_1+...+in_n+out)*2
out_n, out_c, out_h, out_w = v['tops'][0]
out_n, out_c, out_h, out_w = int(out_n), int(out_c), int(out_h), int(out_w)
flops = 4 * out_n * out_c * out_h * out_w
mem_bytes = 0
for n, c, h, w in v['bottoms']:
mem_bytes += n * c * h * w * 2
mem_bytes += out_n * out_c * out_h * out_w * 4
if debug:
print(flops, mem_bytes)
return flops, mem_bytes
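# Worked example (added for illustration): for a Convolution layer with input
# 1x3x224x224, output 1x64x112x112 and kernel_size 7, the formulas above give
#   flops     = 112 * 112 * (2 * 3 * 7 * 7) * 64 * 1 ≈ 2.36e8
#   mem_bytes = (7 * 7 * 3 * 64 + 112 * 112 * 64) * 1 * 4 ≈ 3.25e6
# i.e. roughly 236 MFLOPs and about 3.2 MB of traffic at 4 bytes per element.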
def extract_into_excel(layer_info_file='layer_info.txt', layer_shape_file='layer_shape.txt', debug=False):
layer_info_reader = open(layer_info_file, 'rb')
layer_info = pickle.load(layer_info_reader)
net_name = layer_info['name']
print(net_name)
layer_shape_reader = open(layer_shape_file, 'rb')
layer_shape = pickle.load(layer_shape_reader)
layer_name_time_reader = open(str(net_name) + '-each_layer_time.txt', 'rb')
layer_type_time_reader = open(str(net_name) + '-layer_type_time.txt', 'rb')
layer_name_record_dict = pickle.load(layer_name_time_reader)
layer_type_record_dict = pickle.load(layer_type_time_reader)
flops_membwd_reader = open(str(net_name) + '-flops_membwd.txt', 'rb')
flops_membwd_dict = pickle.load(flops_membwd_reader)
layer_name_list = []
layer_name_type_list = []
layer_name_time_list = []
layer_type_list = []
layer_type_time_list = []
flops_membwd_type_list = []
flops_membwd_values_list = []
max_bottoms_length = max_tops_length = 0
# multiple input and output shape info
layer_shape_list_dict = {}
kernel_size_list = []
stride_list = []
pad_list = []
try:
for k, v in list(layer_shape.items()):
if v['type'] == 'Input' or v['type'] == 'Accuracy':
del layer_shape[k]
continue
max_bottoms_length = max(max_bottoms_length, len(v['bottoms']))
max_tops_length = max(max_tops_length, len(v['tops']))
# determine the input and output tuples length
for i in range(0, max_bottoms_length):
layer_shape_list_dict['Input' + str(i) + ' N'] = []
layer_shape_list_dict['Input' + str(i) + ' C'] = []
layer_shape_list_dict['Input' + str(i) + ' H'] = []
layer_shape_list_dict['Input' + str(i) + ' W'] = []
for i in range(0, max_tops_length):
layer_shape_list_dict['Output' + str(i) + ' N'] = []
layer_shape_list_dict['Output' + str(i) + ' C'] = []
layer_shape_list_dict['Output' + str(i) + ' H'] = []
layer_shape_list_dict['Output' + str(i) + ' W'] = []
for k, v in layer_name_record_dict.items():
layer_name_list.append(str(k))
layer_name_type_list.append(str(layer_info[str(k)]))
layer_name_time_list.append(float(v))
# 'kernel_size', 'stride', 'pad'
if layer_info[str(k)] == 'Convolution' or layer_info[str(k)] == 'Pooling':
kernel_size_list.append(int(layer_shape[str(k)]['kernel_size']))
stride_list.append(int(layer_shape[str(k)]['stride']))
pad_list.append(int(layer_shape[str(k)]['pad']))
else:
kernel_size_list.append(float('nan'))
stride_list.append(float('nan'))
pad_list.append(float('nan'))
# Input and output shape
for i in range(0, len(layer_shape[str(k)]['bottoms'])):
if debug:
print('max tops len:', max_tops_length, 'max bottoms len:', max_bottoms_length)
print('layer:', str(k), 'type:', layer_shape[str(k)]['type'], 'bottoms:', layer_shape[str(k)]['bottoms'][i])
if len(layer_shape[str(k)]['bottoms'][i]) == 1:
layer_shape_list_dict['Input' + str(i) + ' N'].append(int(layer_shape[str(k)]['bottoms'][i][0]))
layer_shape_list_dict['Input' + str(i) + ' C'].append(float('nan'))
layer_shape_list_dict['Input' + str(i) + ' H'].append(float('nan'))
layer_shape_list_dict['Input' + str(i) + ' W'].append(float('nan'))
elif len(layer_shape[str(k)]['bottoms'][i]) == 2:
layer_shape_list_dict['Input' + str(i) + ' N'].append(int(layer_shape[str(k)]['bottoms'][i][0]))
layer_shape_list_dict['Input' + str(i) + ' C'].append(int(layer_shape[str(k)]['bottoms'][i][1]))
layer_shape_list_dict['Input' + str(i) + ' H'].append(float('nan'))
layer_shape_list_dict['Input' + str(i) + ' W'].append(float('nan'))
elif len(layer_shape[str(k)]['bottoms'][i]) == 3:
layer_shape_list_dict['Input' + str(i) + ' N'].append(int(layer_shape[str(k)]['bottoms'][i][0]))
layer_shape_list_dict['Input' + str(i) + ' C'].append(int(layer_shape[str(k)]['bottoms'][i][1]))
layer_shape_list_dict['Input' + str(i) + ' H'].append(int(layer_shape[str(k)]['bottoms'][i][2]))
layer_shape_list_dict['Input' + str(i) + ' W'].append(float('nan'))
elif len(layer_shape[str(k)]['bottoms'][i]) == 4:
layer_shape_list_dict['Input' + str(i) + ' N'].append(int(layer_shape[str(k)]['bottoms'][i][0]))
layer_shape_list_dict['Input' + str(i) + ' C'].append(int(layer_shape[str(k)]['bottoms'][i][1]))
layer_shape_list_dict['Input' + str(i) + ' H'].append(int(layer_shape[str(k)]['bottoms'][i][2]))
layer_shape_list_dict['Input' + str(i) + ' W'].append(int(layer_shape[str(k)]['bottoms'][i][3]))
for i in range(len(layer_shape[str(k)]['bottoms']), max_bottoms_length):
layer_shape_list_dict['Input' + str(i) + ' N'].append(float('nan'))
layer_shape_list_dict['Input' + str(i) + ' C'].append(float('nan'))
layer_shape_list_dict['Input' + str(i) + ' H'].append(float('nan'))
layer_shape_list_dict['Input' + str(i) + ' W'].append(float('nan'))
for i in range(0, len(layer_shape[str(k)]['tops'])):
if len(layer_shape[str(k)]['tops'][i]) == 1:
layer_shape_list_dict['Output' + str(i) + ' N'].append(int(layer_shape[str(k)]['tops'][i][0]))
layer_shape_list_dict['Output' + str(i) + ' C'].append(float('nan'))
layer_shape_list_dict['Output' + str(i) + ' H'].append(float('nan'))
layer_shape_list_dict['Output' + str(i) + ' W'].append(float('nan'))
elif len(layer_shape[str(k)]['tops'][i]) == 2:
layer_shape_list_dict['Output' + str(i) + ' N'].append(int(layer_shape[str(k)]['tops'][i][0]))
layer_shape_list_dict['Output' + str(i) + ' C'].append(int(layer_shape[str(k)]['tops'][i][1]))
layer_shape_list_dict['Output' + str(i) + ' H'].append(float('nan'))
layer_shape_list_dict['Output' + str(i) + ' W'].append(float('nan'))
elif len(layer_shape[str(k)]['tops'][i]) == 3:
layer_shape_list_dict['Output' + str(i) + ' N'].append(int(layer_shape[str(k)]['tops'][i][0]))
layer_shape_list_dict['Output' + str(i) + ' C'].append(int(layer_shape[str(k)]['tops'][i][1]))
layer_shape_list_dict['Output' + str(i) + ' H'].append(int(layer_shape[str(k)]['tops'][i][2]))
layer_shape_list_dict['Output' + str(i) + ' W'].append(float('nan'))
elif len(layer_shape[str(k)]['tops'][i]) == 4:
layer_shape_list_dict['Output' + str(i) + ' N'].append(int(layer_shape[str(k)]['tops'][i][0]))
layer_shape_list_dict['Output' + str(i) + ' C'].append(int(layer_shape[str(k)]['tops'][i][1]))
layer_shape_list_dict['Output' + str(i) + ' H'].append(int(layer_shape[str(k)]['tops'][i][2]))
layer_shape_list_dict['Output' + str(i) + ' W'].append(int(layer_shape[str(k)]['tops'][i][3]))
for i in range(len(layer_shape[str(k)]['tops']), max_tops_length):
if debug:
print('max tops len:', max_tops_length, 'max bottoms len:', max_bottoms_length)
print('layer:', str(k), 'type:', layer_shape[str(k)]['type'], 'tops:', layer_shape[str(k)]['tops'])
layer_shape_list_dict['Output' + str(i) + ' N'].append(float('nan'))
layer_shape_list_dict['Output' + str(i) + ' C'].append(float('nan'))
layer_shape_list_dict['Output' + str(i) + ' H'].append(float('nan'))
layer_shape_list_dict['Output' + str(i) + ' W'].append(float('nan'))
for k, v in layer_type_record_dict.items():
layer_type_list.append(str(k))
layer_type_time_list.append(float(v))
for k, v in flops_membwd_dict.items():
flops_membwd_type_list.append(str(k))
flops_membwd_values_list.append(float(v))
finally:
layer_info_reader.close()
layer_shape_reader.close()
layer_name_time_reader.close()
layer_type_time_reader.close()
flops_membwd_reader.close()
assert len(layer_name_list) == len(layer_name_time_list) and \
len(layer_name_time_list) == len(kernel_size_list) and \
len(kernel_size_list) == len(stride_list) and \
len(stride_list) == len(pad_list) and \
len(layer_type_list) == len(layer_type_time_list) and \
len(flops_membwd_type_list) == len(flops_membwd_values_list), \
" Error! Must have same records length!"
# calculate flops and memory accessing bytes
ops_list = []
mem_bytes_list = []
for layer_name in layer_name_list:
flops, mem_bytes = calculate_mlu_ops_byte(layer_name, layer_shape)
ops_list.append(flops)
mem_bytes_list.append(mem_bytes)
gflops_list = []
intensity_list = []
total_model_ops = 0.0
total_model_mem_bytes = 0.0
for i, exe_time in enumerate(layer_name_time_list):
gflops_list.append(ops_list[i] / 1e9 / (exe_time / 1e3))
intensity_list.append(float(ops_list[i] / mem_bytes_list[i]))
total_model_ops += ops_list[i]
total_model_mem_bytes += mem_bytes_list[i]
avg_model_intensity = float(total_model_ops / total_model_mem_bytes)
total_model_time = 0
for time in layer_type_time_list:
total_model_time += time
avg_model_gflops = total_model_ops / 1e9 / (total_model_time / 1e3)
# for sheet4 columns
value_list = [total_model_ops, total_model_mem_bytes, total_model_time, avg_model_gflops, avg_model_intensity]
name_list = ['model ops', 'model bytes', 'model time(ms)', 'model GFLOPS', 'model intensity']
sheet1_od = collections.OrderedDict()
sheet1_od['layer name'] = layer_name_list
sheet1_od['layer type'] = layer_name_type_list
sheet1_od['time(ms)'] = layer_name_time_list
sheet1_od['Ops'] = ops_list
sheet1_od['Bytes'] = mem_bytes_list
sheet1_od['GFLOPS'] = gflops_list
sheet1_od['Intensity'] = intensity_list
for i in range(0, max_bottoms_length):
sheet1_od['Input' + str(i) + ' N'] = layer_shape_list_dict['Input' + str(i) + ' N']
sheet1_od['Input' + str(i) + ' C'] = layer_shape_list_dict['Input' + str(i) + ' C']
sheet1_od['Input' + str(i) + ' H'] = layer_shape_list_dict['Input' + str(i) + ' H']
sheet1_od['Input' + str(i) + ' W'] = layer_shape_list_dict['Input' + str(i) + ' W']
sheet1_od['kernel size'] = kernel_size_list
sheet1_od['stride'] = stride_list
sheet1_od['pad'] = pad_list
for i in range(0, max_tops_length):
sheet1_od['Output' + str(i) + ' N'] = layer_shape_list_dict['Output' + str(i) + ' N']
sheet1_od['Output' + str(i) + ' C'] = layer_shape_list_dict['Output' + str(i) + ' C']
sheet1_od['Output' + str(i) + ' H'] = layer_shape_list_dict['Output' + str(i) + ' H']
sheet1_od['Output' + str(i) + ' W'] = layer_shape_list_dict['Output' + str(i) + ' W']
sheet1_df = | pd.DataFrame(sheet1_od) | pandas.DataFrame |
"""
contains various implementations for recommending movies
"""
import pandas as pd
import numpy as np
from utils import movies
from utils import movies,ratings,df_mov_avg_cnt, search_title,movie_to_id,id_to_movie,get_movieId
from utils import model_nmf,model_knn
from scipy.sparse import csr_matrix # creating the matrix (filling in 0 when there is no entry)
from sklearn.decomposition import NMF # Non matrix factorization for Recommneder system
def recommend_random(query, k=5):
"""
Recommends a list of k random movie ids
"""
movies_rand =movies[~movies['movieId'].isin(query)] # drops all movies in the query
rand_list=movies_rand.sample(k)['movieId'].to_list()
rand_movie_title_list=[]
for movie_id in rand_list:  # use a descriptive name instead of shadowing the built-in "list"
movie_title = movies.loc[movies['movieId']==movie_id,'title'].values[0]
rand_movie_title_list.append(movie_title)
df_rand_recommended=pd.DataFrame(rand_movie_title_list,rand_list)
return df_rand_recommended
def recommend_popular(query, k=5):
"""
Recommend a list of k movie ids that are from 40 most popular
"""
df_popular=df_mov_avg_cnt[~df_mov_avg_cnt['movieId'].isin(query)]
popular=df_popular.sort_values('popular',ascending=False)['movieId'].head(40).to_list()
rand_popular = np.random.randint(40, size=(1, k))  # draw k picks from the top-40 list
pop_movieId_list=[]
for rand_nu in range(len(rand_popular[0])):
pop_id = popular[rand_popular[0][rand_nu]]
pop_movieId_list.append(pop_id)
pop_movietitle_list=[]
for list_pop in pop_movieId_list:
movie_title_pop=movies.loc[movies['movieId']==list_pop,'title'].values[0]
pop_movietitle_list.append(movie_title_pop)
df_pop_recommended =pd.DataFrame(pop_movietitle_list,pop_movieId_list)
return df_pop_recommended
def recommend_nmf(query, k=5):
"""
Recommend a list of k movie ids based on a trained NMF model
"""
# 1. candidate generation
# user_query = disney_movies
# construct a user vector
user_vec=np.repeat(0,193610)
user_vec[query]=5
# 2. scoring
model = model_nmf
scores=model.inverse_transform(model.transform([user_vec])) # calculate the score with the NMF model
# 3. ranking
scores =pd.Series(scores[0])
scores[query] = 0  # set a zero score for movies already seen by the user
scores = scores.sort_values(ascending=False)
# return the top-k highest rated movie ids or titles
recommendations= scores.head(k).index
moiveId_r=[]
movieId_t=[]
for recs in range(len(recommendations)):
movieId_r_Var = recommendations[recs]
movieId_t_Var= movies.set_index('movieId').loc[movieId_r_Var]['title']
moiveId_r.append(movieId_r_Var)
movieId_t.append(movieId_t_Var)
df_nmf=pd.DataFrame(movieId_t,moiveId_r)
return df_nmf
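# Illustrative call (added; the movie ids are made up): the query lists movieIds the
# user liked. They are imputed as 5-star ratings in the user vector, reconstructed
# through the trained NMF model, zeroed out so they are not recommended again, and
# the k best-scoring remaining titles are returned.
#
#   recommend_nmf(query=[1, 318, 296], k=5)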
def recommend_neighbors(query, k=5):
"""
Recommend a list of k movie ids based on the most similar users
"""
# 1. candidate generation
user_vec=np.repeat(0,193610)
user_vec[query]=5 # construct a user vector
# calculates the distances to all other users in the data!
distances, userIds = model_knn.kneighbors(
X=[user_vec],
n_neighbors=10,
return_distance=True
)
# sklearn returns a list of predictions - extract the first and only value of the list
distances = distances[0]
userIds = userIds[0]
# 2. scoring
# find n neighbors
neighborhood =ratings.loc[ratings['userId'].isin(userIds)]
scores=neighborhood.groupby('movieId')['rating'].sum()
# calculate their average rating
# 3. ranking
# filter out movies already seen by the user by giving them a zero score
scores.loc[scores.index.isin(query)]=0
scores = scores.sort_values(ascending=False)
recommendations=scores.head(k).index
# return the top-k highest rated movie ids or titles
moiveId_r=[]
movieId_t=[]
for recs in range(len(recommendations)):
movieId_r_Var = recommendations[recs]
movieId_t_Var= movies.set_index('movieId').loc[movieId_r_Var]['title']
moiveId_r.append(movieId_r_Var)
movieId_t.append(movieId_t_Var)
df_knn= | pd.DataFrame(movieId_t,moiveId_r) | pandas.DataFrame |
import argparse
import pandas as pd
import tensorflow as tf
import dataset_ops
import numpy as np
import datetime
import cuda
import pandas_format # noqa
from pathlib import Path
from sklearn import linear_model
from sklearn import tree
from sklearn.model_selection import train_test_split
try:
import tqdm
except ImportError:
tqdm = None
session_start = datetime.datetime.now().strftime(r"%Y%m%d-%H%M%S")
parser = argparse.ArgumentParser()
parser.add_argument('-w', nargs='+', type=int)
parser.add_argument('-o', '--out', default=f'classic_learning_{session_start}.csv')
parser.add_argument('models', nargs='+')
args = parser.parse_args()
print(args)
cuda.initialize()
# dataset_manager = dataset_ops.MicroPilotTestsManager(dataset_dir=Path('./h5'), runs_filename='runs.hdf')
dataset_manager = dataset_ops.PaparazziTestManager(dataset_dir=Path('pprz_h5'), runs_filename='pprz_runs.hdf')
all_runs = dataset_manager.get_all_available_tests()
# selected_runs = all_runs.loc[(all_runs['Test Length'] > 200) & (all_runs['Test Length'] < 20000)]
# selected_runs = selected_runs.iloc[:40]
selected_runs = all_runs.sample(frac=1, axis=1, random_state=55)
inputs = ('SpeedFts', 'Pitch', 'Roll', 'Yaw', 'current_altitude', )
outputs= ('elev', 'ai', 'rdr', 'throttle', 'Flaps')
# max_length = selected_runs['Test Length'].max()
# max_length = 18000
# dataset_manager.preload_data(selected_runs, max_length=max_length, features=inputs + outputs)
# tfdataset = dataset_ops.TensorflowDataset(dataset_manager)
# dataset = tfdataset.get_dataset(selected_runs, batch_size=25, features=inputs+outputs, max_length=max_length)
dataset = dataset_manager.preload_data(selected_runs, features=inputs+outputs)
N_s = dataset_manager.count_states()
train, test = train_test_split(dataset, test_size=0.2, random_state=44)
# %%
# results_df = pd.DataFrame(columns=['Data Set', 'Name', 'regularization', 'd', 'w', 'Precision', 'Recall'])
# results_df = results_df.set_index(['Data Set', 'Name', 'w'])
# results_df = pd.read_csv('tree_results_20.csv', index_col=0).append(pd.read_csv('results_3_5_10_15.csv', index_col=0))
file_name = Path(args.out)
if file_name.exists():
results_df = pd.read_csv(file_name, index_col=0)
else:
results_df = pd.DataFrame(columns=['Data Set', 'Name', 'regularization', 'w', 'Precision', 'Recall'])
# %%
def class_precision_recall(y_true, y_pred):
# _, classes = y_pred.shape
# y_true = tf.math.argmax(y_true, axis=-1) # batch x L
# y_pred = tf.math.argmax(y_pred, axis=-1)
y_true = tf.constant(y_true)
y_pred = tf.constant(y_pred)
classes = N_s
y_pred.shape.assert_is_compatible_with(y_true.shape)
if y_true.dtype != y_pred.dtype:
y_pred = tf.cast(y_pred, y_true.dtype)
recall_scores, precision_scores = [], []
for C in range(classes):
C = tf.cast(C, 'int64')
trueC = tf.equal(y_true, C)
declaredC = tf.equal(y_pred, C)
correctlyC = tf.logical_and(declaredC, trueC)
trueC = tf.cast(tf.math.count_nonzero(trueC), 'float32')
declaredC = tf.cast(tf.math.count_nonzero(declaredC), 'float32')
correctlyC = tf.cast(tf.math.count_nonzero(correctlyC), 'float32')
if declaredC > 0:
precision_score = tf.math.divide_no_nan(correctlyC, declaredC)
precision_scores.append(precision_score)
if trueC > 0:
recall_score = tf.math.divide_no_nan(correctlyC, trueC)
recall_scores.append(recall_score)
P = tf.reduce_mean(tf.stack(precision_scores))
R = tf.reduce_mean(tf.stack(recall_scores))
return P, R
def iterate_window(dataframe, w):
last = dataframe.shape[0] - w + 1
for index in range(last):
yield dataframe[index:index + w]
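# Worked example (added): iterate_window yields len(data) - w + 1 windows, e.g. a
# 10-row array with w=3 gives windows [0:3], [1:4], ..., [7:10] -> 8 windows.
assert len(list(iterate_window(np.zeros((10, 2)), 3))) == 8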
def convert_to_data_points(w):
def _generator(data):
signals = data[1].to_numpy()
states = data[2].to_numpy()
signals_iter = iterate_window(signals, w)
# previous_iter = iterate_window(states, w)
next_state_iter = states[w:]
# for signals, previous, next_state in zip(signals_iter, previous_iter, next_state_iter):
for signals, next_state in zip(signals_iter, next_state_iter):
# X = np.concatenate((signals.flatten(), one_hotter[previous].flatten()))
X = signals.flatten()
# y = one_hotter[next_state]
y = next_state
yield X, y
return _generator
def create_allX_allY(dataset, w):
converter = convert_to_data_points(w)
allX, allY = [], []
for test in tqdm.tqdm(dataset):
lX, ly = [], []
for X, y in converter(test):
lX.append(X)
ly.append(y)
allX.append(np.stack(lX))
allY.append(np.stack(ly))
del lX, ly
# return allX, allY
return np.concatenate(allX), np.concatenate(allY)
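# Shape note (added for clarity): each test of length L_i contributes L_i - w
# samples (the zip with states[w:] drops the final window), so the concatenated X
# has shape (sum_i (L_i - w), w * n_features) and y has shape (sum_i (L_i - w),).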
def evaluate(model, *, name, w, regularization):
train_p, train_r = class_precision_recall(y_train, model.predict(X_train))
test_p, test_r = class_precision_recall(y_test, model.predict(X_test))
df = pd.DataFrame({
'Data Set': ['Train', 'Test'],
'Precision': [float(train_p), float(test_p)],
'Recall': [float(train_r), float(test_r)],
})
df['Name'] = name
df['w'] = w
df['regularization'] = regularization
# df = df.set_index(['Data Set', 'Name', 'w'])
return df
# %%
for w in args.w:
print('Window size', w)
X_train, y_train = create_allX_allY(train, w)
X_test, y_test = create_allX_allY(test, w)
# .set_index(['Data Set', 'Name', 'w'])
if 'ridge' in args.models:
print('Training Ridge')
model_ridge = linear_model.RidgeClassifierCV(alphas=np.logspace(-6, 6, 13))
model_ridge.fit(X_train, y_train)
results = evaluate(model_ridge, name='ridge', w=w, regularization=None)
if file_name.exists():
results_df = pd.read_csv(file_name, index_col=0)
results_df = results_df.append(results)
print(results)
try:
results_df.to_csv(file_name)
except Exception:
print('Failed to save')
if 'tree1' in args.models or 'trees' in args.models:
print('Training Tree')
model_tree = tree.DecisionTreeClassifier(max_features=None)
model_tree.fit(X_train, y_train)
results = evaluate(model_tree, name='tree', regularization=None, w=w)
if file_name.exists():
results_df = | pd.read_csv(file_name, index_col=0) | pandas.read_csv |
import pytest
import pandas as pd
from model.bitcoin.BitcoinFileManager import BitcoinFileManager
def test_create_tweet_file_manager():
# Given
# When
try:
BitcoinFileManager()
# Then
except Exception:
pytest.fail("Could not create BitcoinFileManager")
def test_get_file_name():
# Given
bitcoinFileManager = BitcoinFileManager()
args = {'date': '2021-01-13'}
# When
file_name = bitcoinFileManager.get_file_name(args)
# Then
assert file_name == "data/bitcoin/2021-01-13\\bitcoin.csv" or file_name == "data/bitcoin/2021-01-13/bitcoin.csv"
def test_get_file_name_fails_on_no_date():
# Given
bitcoinFileManager = BitcoinFileManager()
args = {}
# When
try:
bitcoinFileManager.get_file_name(args)
# Then
pytest.fail("Should throw exception")
except Exception:
assert True
def test_open_file(mocker):
# When
data = | pd.DataFrame(columns=['timestamp', 'Close']) | pandas.DataFrame |
from __future__ import annotations
import numpy as np
import pandas as pd
from sklearn import datasets
from IMLearn.metrics import mean_square_error
from IMLearn.utils import split_train_test
from IMLearn.model_selection import cross_validate
from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression
from sklearn.linear_model import Lasso
from utils import *
import plotly.graph_objects as go
from plotly.subplots import make_subplots
def select_polynomial_degree(n_samples: int = 100, noise: float = 5):
"""
Simulate data from a polynomial model and use cross-validation to select the best fitting degree
Parameters
----------
n_samples: int, default=100
Number of samples to generate
noise: float, default = 5
Noise level to simulate in responses
"""
# Question 1 - Generate dataset for model f(x)=(x+3)(x+2)(x+1)(x-1)(x-2) + eps for eps Gaussian noise
# and split into training- and testing portions
f = lambda x: (x + 3) * (x + 2) * (x + 1) * (x - 1) * (x - 2)
X = np.linspace(-1.2, 2, n_samples)
y = f(X) + np.random.normal(0, noise, size=n_samples)
X_train, y_train, X_test, y_test = split_train_test(pd.DataFrame(X),
| pd.Series(y) | pandas.Series |
###############################
#
# ADD DATE FEATURES
#
###############################
import numpy as np
import pandas as pd
import re
def add_date_features(df,
date_vars,
drop = True,
time = False):
'''
Adds basic date-based features based to the data frame.
--------------------
Arguments:
- df (pandas DF): dataset
- date_var (str): name of the date feature
- drop (bool): whether to drop the original date feature
- time (bool): whether to include time-based features
--------------------
Returns:
- pandas DF with new features
--------------------
Examples:
# create data frame
data = {'age': [27, np.nan, 30],
'height': [170, 168, 173],
'gender': ['female', 'male', np.nan],
'date_of_birth': [np.datetime64('1993-02-10'), np.nan, np.datetime64('1990-04-08')]}
df = pd.DataFrame(data)
# add date features
from dptools import add_date_features
df_new = add_date_features(df, date_vars = 'date_of_birth')
'''
# copy df
df_new = df.copy()
# store no. features
n_feats = df_new.shape[1]
# convert to list
if not isinstance(date_vars, list):
date_vars = [date_vars]
# feature engineering loop
for date_var in date_vars:
var = df_new[date_var]
var_dtype = var.dtype
if isinstance(var_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):
var_dtype = np.datetime64
if not np.issubdtype(var_dtype, np.datetime64):
df_new[date_var] = var = pd.to_datetime(var, infer_datetime_format = True)
targ_pre = re.sub('[Dd]ate$', '', date_var)
# list of day attributes
attributes = ['year', 'month', 'week', 'day',
'dayofweek', 'dayofyear',
'is_month_end', 'is_month_start',
'is_quarter_end', 'is_quarter_start',
'is_year_end', 'is_year_start']
# list of time attributes
if time:
attributes = attributes + ['hour', 'minute', 'second']
# compute features
for att in attributes:
df_new[targ_pre + '_' + att.lower()] = getattr(var.dt, att)
df_new[targ_pre + '_elapsed'] = var.astype(np.int64) // 10 ** 9
# remove original feature
if drop:
df_new.drop(date_var, axis = 1, inplace = True)
# return results
print('Added {} date-based features.'.format(df_new.shape[1] - n_feats + int(drop) * len(date_vars)))
return df_new
###############################
#
# ADD TEXT FEATURES
#
###############################
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
import scipy.sparse
def add_text_features(df,
text_vars,
tf_idf_feats = 5,
common_words = 0,
rare_words = 0,
ngram_range = (1, 1),
drop = True):
'''
Adds basic text-based features including word count, character count and
TF-IDF based features to the data frame.
--------------------
Arguments:
- df (pandas DF): dataset
- text_vars (list): list of textual features
- tf_idf_feats (int): number of TF-IDF based features
- common_words (int): number of the most common words to remove for TF-IDF
- rare_words (int): number of the most rare words to remove for TF-IDF
- ngram_range (int, int): range of n-grams for TF-IDF based features
- drop (bool): whether to drop the original textual features
--------------------
Returns:
- pandas DF with new features
--------------------
Examples:
# import dependecies
import pandas as pd
import numpy as np
# create data frame
data = {'age': [27, np.nan, 30, 25, np.nan],
'height': [170, 168, 173, 177, 165],
'gender': ['female', 'male', np.nan, 'male', 'female'],
'income': ['high', 'medium', 'low', 'low', 'no income']}
df = pd.DataFrame(data)
# add text features
from dptools import add_text_features
df_new = add_text_features(df, text_vars = ['income', 'gender'])
'''
# copy df
df_new = df.copy()
# store no. features
n_feats = df_new.shape[1]
# convert to list
if not isinstance(text_vars, list):
text_vars = [text_vars]
# feature engineering loop
for text_var in text_vars:
# replace NA with empty string
df_new[text_var].fillna('', inplace = True)
# remove the most common and most rare words (the defaults of 0 skip this step)
word_counts = pd.Series(' '.join(df_new[text_var]).split()).value_counts()
remove_words = set(word_counts[:common_words].index)
remove_words |= set(word_counts[-rare_words:].index) if rare_words > 0 else set()
if remove_words:
    df_new[text_var] = df_new[text_var].apply(lambda x: ' '.join(w for w in x.split() if w not in remove_words))
# convert to lowercase
df_new[text_var] = df_new[text_var].apply(lambda x: ' '.join(x.lower() for x in x.split()))
# remove punctuation
df_new[text_var] = df_new[text_var].str.replace('[^\w\s]','')
# word count
df_new[text_var + '_word_count'] = df_new[text_var].apply(lambda x: len(str(x).split(' ')))
df_new[text_var + '_word_count'][df_new[text_var] == ''] = 0
# character count
df_new[text_var + '_char_count'] = df_new[text_var].str.len().fillna(0).astype('int64')
# import vectorizer
tfidf = TfidfVectorizer(max_features = tf_idf_feats,
lowercase = True,
norm = 'l2',
analyzer = 'word',
stop_words = 'english',
ngram_range = ngram_range)
# compute TF-IDF
vals = tfidf.fit_transform(df_new[text_var])
vals = pd.DataFrame.sparse.from_spmatrix(vals)
vals.columns = [text_var + '_tfidf_' + str(p) for p in vals.columns]
df_new = pd.concat([df_new, vals], axis = 1)
# remove original feature
if drop:
df_new.drop(text_var, axis = 1, inplace = True)
# return results
print('Added {} text-based features.'.format(df_new.shape[1] - n_feats + int(drop) * len(text_vars)))
return df_new
###############################
#
# AGGRGEATE DATA
#
###############################
import pandas as pd
def aggregate_data(df,
group_var,
num_stats = ['mean', 'sum'],
fac_stats = ['count', 'mode'],
factors = None,
var_label = None,
sd_zeros = False):
'''
Aggregates the data by a certain categorical feature. Continuous features
are aggregated by computing summary statistcs by the grouping feature.
Categorical features are aggregated by computing the most frequent values
and number of unique value by the grouping feature.
--------------------
Arguments:
- df (pandas DF): dataset
- group_var (str): grouping feature
- num_stats (list): list of stats for aggregating numeric features
- fac_stats (list): list of stats for aggregating categorical features
- factors (list): list of categorical features names
- var_label (str): prefix for feature names after aggregation
- sd_zeros (bool): whether to replace NA with 0 for standard deviation
--------------------
Returns
- aggregated pandas DF
--------------------
Examples:
# import dependecies
import pandas as pd
import numpy as np
# create data frame
data = {'age': [27, np.nan, 30, 25, np.nan],
'height': [170, 168, 173, 177, 165],
'gender': ['female', 'male', np.nan, 'male', 'female'],
'income': ['high', 'medium', 'low', 'low', 'no income']}
df = pd.DataFrame(data)
# aggregate the data
from dptools import aggregate_data
df_new = aggregate_data(df, group_var = 'gender', num_stats = ['min', 'max'], fac_stats = 'mode')
'''
##### SEPARATE FEATURES
# display info
print('- Preparing the dataset...')
# find factors
if factors == None:
df_factors = [f for f in df.columns if df[f].dtype == 'object']
factors = [f for f in df_factors if f != group_var]
else:
df_factors = factors
df_factors.append(group_var)
# partition subsets
if type(group_var) == str:
num_df = df[[group_var] + list(set(df.columns) - set(df_factors))]
fac_df = df[df_factors]
else:
num_df = df[group_var + list(set(df.columns) - set(df_factors))]
fac_df = df[df_factors]
# display info
n_facs = fac_df.shape[1] - 1
n_nums = num_df.shape[1] - 1
print('- Extracted %.0f factors and %.0f numerics...' % (n_facs, n_nums))
##### AGGREGATION
# aggregate numerics
if n_nums > 0:
print('- Aggregating numeric features...')
num_df = num_df.groupby([group_var]).agg(num_stats)
num_df.columns = ['_'.join(col).strip() for col in num_df.columns.values]
num_df = num_df.sort_index()
# aggregate factors
if n_facs > 0:
print('- Aggregating factor features...')
if (fac_stats == ['count', 'mode']) or (fac_stats == ['mode', 'count']):
fac_df = fac_df.groupby([group_var]).agg([('count'), ('mode', lambda x: pd.Series.mode(x)[0])])
if (fac_stats == 'count') or (fac_stats == ['count']):
fac_df = fac_df.groupby([group_var]).agg([('count')])
if (fac_stats == 'mode') or (fac_stats == ['mode']):
fac_df = fac_df.groupby([group_var]).agg([('mode', lambda x: | pd.Series.mode(x) | pandas.Series.mode |
from datetime import timedelta
import pandas as pd
# Specify start and periods, the number of periods (days).
# dRan1 = pd.date_range(start='2017-01-01', periods=13, inclusive='right')
def test_something(a, b):
return {b, a}
print(test_something(1, 1))
exit(1)
for d in pd.date_range(start='2022-02-01', periods=10, inclusive='left'):
original_date = pd.to_datetime(d).date()
next_date = | pd.to_datetime(d) | pandas.to_datetime |
from __future__ import division
import numpy as np
import pandas as pd
import json
from .util import *
class Delta():
def __init__(self, df, key, baseline_run = False):
###################################################################################################
################################ basic time and key parameters ####################################
###################################################################################################
self.baseline_run = baseline_run
T = len(df)
self.key = key
self.demand_multiplier = df.demand_multiplier.values
##########################################################################################################################
################################ Gains, Old and Middle River, and San Joaquin options ####################################
##########################################################################################################################
self.OMR_sim = df.OMR_sim.values
self.netgains = df.gains_sim.values
# self.sanjoaquin = self.netgains - df.YRS_fnf.values - df.NML_fnf.values
self.sanjoaquin = df.sanjoaquin.values
self.san_joaquin_ie_amt = df.san_joaquin_ie_amt.values
#############################################################################################################
################################ extract Delta properties from json file ####################################
#############################################################################################################
for k,v in json.load(open('orca/data/json_files/Delta_properties.json')).items():
setattr(self,k,v)
############################################################################################
################################ initialize time series ####################################
############################################################################################
self.dmin = np.zeros(T)
self.min_rule = np.zeros(T)
self.gains = np.zeros(T)
self.sodd_cvp = np.zeros(T)
self.sodd_swp = np.zeros(T)
self.cvp_max = np.zeros(T)
self.swp_max = np.zeros(T)
self.TRP_pump = np.zeros(T)
self.HRO_pump = np.zeros(T)
self.inflow = np.zeros(T)
self.outflow = np.zeros(T)
self.CVP_shortage = np.zeros(T)
self.SWP_shortage = np.zeros(T)
self.SWP_shortage = np.zeros(T)
self.Delta_shortage = np.zeros(T)
# self.x2 = np.zeros(T+1)
# self.x2[1] = 82.0
# self.x2[0] = 82.0
#########################################################################################################
################################ initialize arrays for interpolation ####################################
#########################################################################################################
self.cvp_targetO = np.zeros(367)
self.swp_targetO = np.zeros(367)
self.cvp_pmaxO = np.zeros(367)
self.swp_pmaxO = np.zeros(367)
self.swp_intake_maxO = np.zeros(367)
self.cvp_intake_maxO = np.zeros(367)
self.san_joaquin_adj = np.zeros(367)
self.D1641_on_off = np.zeros(367)
self.san_joaquin_ie_used = np.zeros(367)
# self.san_joaquin_ie_amt = np.zeros(T)
self.omr_reqr_int = np.zeros(367)
############################################################################################
################################ interpolation to fill arrays ##############################
############################################################################################
# for i in range(0,T):
# self.san_joaquin_ie_amt[i] = np.interp(self.sanjoaquin[i]*tafd_cfs, self.san_joaquin_export_ratio['D1641_flow_target'],self.san_joaquin_export_ratio['D1641_export_limit']) * cfs_tafd
for i in range(0,365):
self.san_joaquin_adj[i] = np.interp(water_day(i), self.san_joaquin_add['d'], self.san_joaquin_add['mult']) * max(self.sanjoaquin[i] - 1000.0 * cfs_tafd, 0.0)
self.san_joaquin_ie_used[i] = np.interp(water_day(i), self.san_joaquin_export_ratio['d'], self.san_joaquin_export_ratio['on_off'])
self.omr_reqr_int[i] = np.interp(water_day(i), self.omr_reqr['d'], self.omr_reqr['flow']) * cfs_tafd
self.cvp_targetO[i] = np.interp(i, self.pump_max['cvp']['d'], #calculate pumping target for day of year (based on target pumping for sodd)
self.pump_max['cvp']['target']) * cfs_tafd
self.swp_targetO[i] = np.interp(i, self.pump_max['swp']['d'],
self.pump_max['swp']['target']) * cfs_tafd
self.cvp_pmaxO[i] = np.interp(i, self.pump_max['cvp']['d'],
self.pump_max['cvp']['pmax']) * cfs_tafd #calculate pumping targets (based on max allowed pumping) based on time of year
self.swp_pmaxO[i] = np.interp(i, self.pump_max['swp']['d'],
self.pump_max['swp']['pmax']) * cfs_tafd
self.swp_intake_maxO[i] = np.interp(i, self.pump_max['swp']['d'], self.pump_max['swp']['intake_limit']) * cfs_tafd
self.cvp_intake_maxO[i] = np.interp(i, self.pump_max['cvp']['d'],self.pump_max['cvp']['intake_limit']) * cfs_tafd
def find_release(self, dowy, d, t, wyt, orovilleAS, shastaAS, folsomAS):
#################################################################################################################
################################ San Joaquin river import/export ratio constraints ##############################
#################################################################################################################
san_joaquin_ie = self.san_joaquin_ie_amt[t] * self.san_joaquin_ie_used[dowy]
swp_jas_stor = (self.pump_max['swp']['pmax'][5] * cfs_tafd)/self.export_ratio[wyt][8]
cvp_jas_stor = (self.pump_max['cvp']['pmax'][5] * cfs_tafd)/self.export_ratio[wyt][8]
if dowy <= 274:
numdaysSave = 92
else:
numdaysSave = 1
if orovilleAS > numdaysSave*swp_jas_stor:
swp_max = min(max(self.swp_intake_max[d] + self.san_joaquin_adj[d], san_joaquin_ie * 0.45), self.swp_pmax[d])
else:
swp_max = 0.0
if (shastaAS + folsomAS) > numdaysSave*cvp_jas_stor:
cvp_max = min(max(self.cvp_intake_max[d], san_joaquin_ie * 0.55), self.cvp_pmax[d])
else:
cvp_max = 0.0
return cvp_max, swp_max
def calc_flow_bounds(self, t, d, m, wyt, dowy, orovilleAS, shastaAS, folsomAS):
#######################################################################################################################################################
################################ Initial flow constraints based on Delta export ration constraints and reservoir storage ##############################
#######################################################################################################################################################
gains = self.netgains[t]
self.min_rule[t] = self.min_outflow[wyt][m-1] * cfs_tafd
export_ratio = self.export_ratio[wyt][m-1]
self.cvp_max[t] = self.cvp_target[d-1]*self.demand_multiplier[t]
self.swp_max[t] = self.swp_target[d-1]*self.demand_multiplier[t]
if d == 366:
self.cvp_max[t] = self.cvp_target[d-2]*self.demand_multiplier[t]
self.swp_max[t] = self.swp_target[d-2]*self.demand_multiplier[t]
'''the sodd_* variables tell the reservoirs how much to release
for south of delta demands only
(dmin is the reservoir release needed to meet delta outflows)'''
if gains > self.min_rule[t]: # extra unstored water available for pumping. in this case dmin[t] is 0
self.sodd_cvp[t] = max((self.cvp_max[t] - 0.55*(gains - self.min_rule[t])) / export_ratio, 0) #implementing export ratio "tax"
self.sodd_swp[t] = max((self.swp_max[t] - 0.45*(gains - self.min_rule[t])) / export_ratio, 0)
else: # additional flow needed
self.dmin[t] = self.min_rule[t] - gains
'''amount of additional flow from reservoirs that does not need "export tax"
because dmin release helps to meet the export ratio requirement'''
Q = self.min_rule[t]*export_ratio/(1-export_ratio)
if self.cvp_max[t] + self.swp_max[t] < Q:
self.sodd_cvp[t] = self.cvp_max[t]
self.sodd_swp[t] = self.swp_max[t]
else:
self.sodd_cvp[t] = 0.75*Q + (self.cvp_max[t] - 0.75*Q)/export_ratio #implementing export ratio "tax"
self.sodd_swp[t] = 0.25*Q + (self.swp_max[t] - 0.25*Q)/export_ratio
#determining percentage of CVP sodd demands from both Shasta and Folsom
if folsomAS > 0.0 and shastaAS > 0.0:
self.folsomSODDPCT = folsomAS/(folsomAS + shastaAS)
elif folsomAS < 0.0:
self.folsomSODDPCT = 0.0
else:
self.folsomSODDPCT = 1.0
self.shastaSODDPCT = 1.0 - self.folsomSODDPCT
def meet_OMR_requirement(self, Tracy, Banks, t): #old and middle river requirements (hence "OMR")
#################################################################################################
################################ Old and Middle river requirements ##############################
#################################################################################################
if Tracy + Banks > self.maxTotPump:
'''maxTotPump is calculated in calc_weekly_storage, before this OMR function is called.
current simulated puming is more that the total allowed pumping based on Delta requirements
Tracy (CVP) is allocated 55% of available flow for pumping, Banks (SWP) is allocated 45%.
(assuming Delta outflow is greater than it's requirement- I still need to look into where that's determined)'''
#Tracy is pumping less that it's maximum allocated flow. Harvery should pump less flow now.
if Tracy < self.maxTotPump*0.55:
Banks = self.maxTotPump - Tracy
elif Banks < self.maxTotPump*0.45: #Banks is pumping less that it's maximum allocated flow. Tracy should pump less flow now.
Tracy = self.maxTotPump - Banks
'''in this case, both pumps would be taking their allocated percentage of flow,
but the overall flow through the pumps is still greater than the maximum allowed'''
else:
Banks = self.maxTotPump*0.45
Tracy= self.maxTotPump*0.55
return Tracy, Banks
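# Worked example (added for illustration): with maxTotPump = 10 TAF/day and
# simulated pumping Tracy = 4, Banks = 8 (total 12 > 10), Tracy is under its 55%
# share (5.5), so Banks is cut to 10 - 4 = 6 and the method returns (4, 6).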
def step_init(self, t, d, m, wyt, dowy, cvp_flows, swp_flows, orovilleAS, shastaAS, folsomAS):
##################################################################################################
################################ initial stimulation step at time t ##############################
##################################################################################################
self.gains[t] = self.netgains[t] #+ sumnodds
self.inflow[t] = max(self.gains[t] + cvp_flows + swp_flows, 0) # realinflow * cfs_tafd
self.outflow_rule = self.min_outflow[wyt][m-1] * cfs_tafd
self.min_rule[t] = max(self.outflow_rule, 0)
export_ratio = self.export_ratio[wyt][m-1]
self.cvp_max[t] = self.cvp_pmax[d-1] #max pumping allowed
self.swp_max[t] = self.swp_pmax[d-1]
omrNat = self.OMR_sim[t]* cfs_tafd
maxTotPumpInt = omrNat - self.omr_reqr_int[dowy] #- fish_trigger_adj
self.maxTotPump = max(maxTotPumpInt,0.0)
self.cvp_max[t], self.swp_max[t] = self.find_release(dowy, d, t, wyt, orovilleAS, shastaAS, folsomAS)
self.cvp_max[t], self.swp_max[t] = self.meet_OMR_requirement(self.cvp_max[t], self.swp_max[t], t)
self.required_outflow = max(self.min_rule[t], (1-export_ratio)*self.inflow[t])
self.surplus = self.gains[t] - self.required_outflow
return self.surplus
def step_pump(self, t, d, m, wyt, dowy, cvp_flows, swp_flows,surplus):
##################################################################################################
################################ second stimulation step at time t ##############################
##################################################################################################
if surplus >= 0:
#gains cover both the min_rule and the export ratio requirement.so, pump the full cvp/swp inflows
self.TRP_pump[t] = max(min(cvp_flows + 0.55 * surplus, self.cvp_max[t]),0) #Tracy pumping plant, for CVP exports
self.HRO_pump[t] = max(min(swp_flows + 0.45 * surplus, self.swp_max[t]),0) #Harvey 0. Banks pumping plant, for SWP exports
else:
'''deficit must be made up from cvp/swp flows. Assume 75/25 responsibility for these
(including meeting the export ratio requirement)'''
deficit = -surplus
cvp_pump = max(cvp_flows - 0.75 * deficit, 0)
if cvp_pump == 0:
swp_pump = max(swp_flows - (deficit - cvp_flows), 0)
else:
swp_pump = max(swp_flows - 0.25 * deficit, 0)
self.TRP_pump[t] = max(min(cvp_pump, self.cvp_max[t]),0) #overall TRP pumping
self.HRO_pump[t] = max(min(swp_pump, self.swp_max[t]),0) #overall HRO pumping
if d >= 365:
self.TRP_pump[t] = self.TRP_pump[t-1]
self.HRO_pump[t] = self.HRO_pump[t-1]
self.outflow[t] = self.inflow[t] - self.TRP_pump[t] - self.HRO_pump[t]
self.CVP_shortage[t] = max(self.cvp_max[t] - self.TRP_pump[t],0)
self.SWP_shortage[t] = max(self.swp_max[t] - self.HRO_pump[t],0)
self.Delta_shortage[t] = max(self.min_rule[t] -self.outflow[t],0)
def results_as_df(self, index):
##########################################################################################
################################ for generating output file ##############################
##########################################################################################
df = pd.DataFrame()
if self.baseline_run == False:
names = ['SODD_CVP','SODD_SWP','SWP_shortage', 'CVP_shortage']
things = [self.cvp_max,self.swp_max,self.SWP_shortage, self.CVP_shortage]
for n,t in zip(names,things):
df['%s_%s' % (self.key,n)] = | pd.Series(t, index=index) | pandas.Series |
# # # # # # # # # # # # # # # # # # # # # # # #
# #
# Module to run real time contingencies #
# By: <NAME> and <NAME> #
# 09-08-2018 #
# Version Aplha-0. 1 #
# #
# Module inputs: #
# -> File name #
# # # # # # # # # # # # # # # # # # # # # # # #
import pandapower as pp
import pandas as pd
import json
import copy
import calendar
from time import time
import datetime
from inspyred import ec
import inspyred
import math
from random import Random
# # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def Disconet_Asset(net,Asset_type,Asset_to_disc, Service=False):
net_lf = copy.deepcopy(net)
if Asset_type=='GEN': # Disconnect Generators
index = net_lf.sgen.loc[net_lf.sgen['name'] == Asset_to_disc].index[0]
net_lf.sgen.in_service[index] = Service
elif Asset_type=='TR': # Disconnect Transformers
index = net_lf.trafo.loc[net_lf.trafo['name'] == Asset_to_disc].index[0]
net_lf.trafo.in_service[index] = Service
elif Asset_type=='LN': # Disconnect Lines
index = net_lf.line.loc[net_lf.line['name'] == Asset_to_disc].index[0]
net_lf.line.in_service[index] = Service
elif Asset_type=='SW':
index = net_lf.switch.loc[net.switch['name'] == Asset_to_disc].index[0]
net_lf.switch.closed[index] = not Service
elif Asset_type=='LO':
index = net_lf.load.loc[net.load['name'] == Asset_to_disc].index[0]
net_lf.load.in_service[index] = Service
elif Asset_type=='BUS':
index = net_lf.bus.loc[net.bus['name'] == Asset_to_disc].index[0]
net_lf.bus.in_service[index] = Service
elif Asset_type=='ST':
index = net_lf.storage.loc[net.storage['name'] == Asset_to_disc].index[0]
net_lf.storage.in_service[index] = Service
else:
print('Asset to disconnect does not exist')
return net_lf
# # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def Network_Reconfiguration(net,strategy):
net_lf = copy.deepcopy(net)
for step in strategy:
l_sequence = strategy[step]
asset_type = l_sequence['Element_Type']
asset_to_disc = l_sequence['Element_Name']
net_lf = Disconet_Asset(net_lf,asset_type,asset_to_disc)
return net_lf
# # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def Load_Contingency_Strategies(File):
with open(File) as json_file:
data = json.load(json_file)
return data
# # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def Load_AM_Plan(File):
data = Load_Contingency_Strategies(File)
#with open(File) as json_file:
# data = json.load(json_file)
df = pd.DataFrame.from_dict(data, orient='index')
df['Date'] = pd.to_datetime(df['Date'])
return df
# # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Function to return the daily load growth
def Load_Growth_By_Day(L_growth):
daily_growth = pow(1+L_growth, 1/365)-1 # Daily growth rate
def f_Load_Daily_Growth(ndays): # Daily growth rate function
return pow(1+daily_growth,ndays)
return f_Load_Daily_Growth
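# Worked example (added): for 2% annual load growth, daily_growth is
# 1.02 ** (1 / 365) - 1 ≈ 5.4e-5, and f_Load_Daily_Growth(365) returns ≈ 1.02,
# recovering the annual factor.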
# # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Risk assessment
def Power_Risk_assessment(net,secure=1):
assessment = {}
load = net.res_load['p_mw'].fillna(0)*secure
load_base = net.load['p_mw']*net.load.scaling
assessment['Load'] = pd.DataFrame(
{'name':net.load.name,
'ENS':load_base - load,
'ES': load})
assessment['T_ES'] = load.sum()
assessment['T_ENS'] = load_base.sum()-load.sum()
gen_name = pd.concat([net.sgen.name, net.storage.name,net.ext_grid.name], ignore_index=True)
p_gen = pd.concat([net.res_sgen.p_mw, net.res_storage.p_mw,net.res_ext_grid.p_mw], ignore_index=True)
p_gen = p_gen.fillna(0)*secure
net.res_sgen['Type'] = 'D_Gen'
net.res_storage['Type'] = 'Storage'
net.res_ext_grid['Type'] = 'External'
p_source = pd.concat([net.res_sgen.Type, net.res_storage.Type,net.res_ext_grid.Type], ignore_index=True)
assessment['Gen'] = pd.DataFrame(
{'name':gen_name,
'source': p_source,
'gen':p_gen})
assessment['purchased_E'] = secure*net.res_ext_grid['p_mw'].values[0]
    # Delta of energy supplied
p_gen_base = | pd.concat([net.sgen.p_mw, net.storage.p_mw], ignore_index=True) | pandas.concat |
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import numpy as np
import pandas as pd
train = pd.read_csv('./input/train.csv').fillna(' ')
i=1
filtered_comment_list = []
for comment in train.comment_text:
filtered_words = [w for w in word_tokenize(comment) if not w.lower() in stopwords.words('english')]
sentence = ' '.join(word for word in filtered_words)
    filtered_comment_list.append(sentence)
i = i + 1
print(i)
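# Hedged alternative sketch (not in the original script): building the English
# stop-word set once, instead of calling stopwords.words('english') for every
# comment, gives the same filtered text but runs much faster on a large corpus.
stop_words = set(stopwords.words('english'))
def filter_comment(comment):
    return ' '.join(w for w in word_tokenize(comment) if w.lower() not in stop_words)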
output = | pd.DataFrame(columns=['comment_text']) | pandas.DataFrame |
import numpy as np
import pandas as pd
import pytest
import ray
from ray.ml.preprocessor import PreprocessorNotFittedException
from ray.ml.preprocessors import (
StandardScaler,
MinMaxScaler,
OrdinalEncoder,
OneHotEncoder,
LabelEncoder,
SimpleImputer,
Chain,
)
def test_standard_scaler():
"""Tests basic StandardScaler functionality."""
col_a = [-1, 0, 1, 2]
col_b = [1, 1, 5, 5]
col_c = [1, 1, 1, None]
in_df = pd.DataFrame.from_dict({"A": col_a, "B": col_b, "C": col_c})
ds = ray.data.from_pandas(in_df)
scaler = StandardScaler(["B", "C"])
# Transform with unfitted preprocessor.
with pytest.raises(PreprocessorNotFittedException):
scaler.transform(ds)
# Fit data.
scaler.fit(ds)
assert scaler.stats_ == {
"mean(B)": 3.0,
"mean(C)": 1.0,
"std(B)": 2.0,
"std(C)": 0.0,
}
# Transform data.
transformed = scaler.transform(ds)
out_df = transformed.to_pandas()
processed_col_a = col_a
processed_col_b = [-1.0, -1.0, 1.0, 1.0]
processed_col_c = [0.0, 0.0, 0.0, None]
expected_df = pd.DataFrame.from_dict(
{"A": processed_col_a, "B": processed_col_b, "C": processed_col_c}
)
assert out_df.equals(expected_df)
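    # Worked check (added note): standardization is (x - mean) / std, so column B
    # maps [1, 1, 5, 5] -> [-1, -1, 1, 1] with mean 3 and std 2; column C has std 0
    # and the expected frame above shows it mapped to 0.0 (not NaN), with the
    # original None preserved.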
# Transform batch.
pred_col_a = [1, 2, 3]
pred_col_b = [3, 5, 7]
pred_col_c = [0, 1, 2]
pred_in_df = pd.DataFrame.from_dict(
{"A": pred_col_a, "B": pred_col_b, "C": pred_col_c}
)
pred_out_df = scaler.transform_batch(pred_in_df)
pred_processed_col_a = pred_col_a
pred_processed_col_b = [0.0, 1.0, 2.0]
pred_processed_col_c = [-1.0, 0.0, 1.0]
pred_expected_df = pd.DataFrame.from_dict(
{
"A": pred_processed_col_a,
"B": pred_processed_col_b,
"C": pred_processed_col_c,
}
)
assert pred_out_df.equals(pred_expected_df)
def test_min_max_scaler():
"""Tests basic MinMaxScaler functionality."""
col_a = [-1, 0, 1]
col_b = [1, 3, 5]
col_c = [1, 1, None]
in_df = | pd.DataFrame.from_dict({"A": col_a, "B": col_b, "C": col_c}) | pandas.DataFrame.from_dict |
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
from matplotlib.pylab import rcParams
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.stattools import acf, pacf
from statsmodels.tsa.arima_model import ARIMA
def test_stationarity(timeseries,title,figure_nb):
    # Determine rolling statistics
    rolmean = timeseries.rolling(window=24).mean()  # moving average over a 24-sample window
    #rol_weighted_mean = pd.ewma(timeseries, span=12)  # exponentially weighted moving average
    rolstd = timeseries.rolling(window=24).std()  # rolling standard deviation of the series
    # Plot rolling statistics
plt.figure(figure_nb)
orig = plt.plot(timeseries, color='blue', label='Original')
mean = plt.plot(rolmean, color='red', label='Rolling Mean')
#weighted_mean = plt.plot(rol_weighted_mean, color='green', label='weighted Mean')
std = plt.plot(rolstd, color='black', label='Rolling Std')
plt.legend(loc='best')
plt.title('Rolling Mean & Standard Deviation')
plt.xticks(rotation=20)
plt.savefig('./Rolling mean std'+title+'.jpg')
    # Perform the Dickey-Fuller test
    print ('Result of Dickey-Fuller test')
dftest = adfuller(timeseries, autolag='AIC')
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic', 'p-value', '#Lags Used', 'Number of observations Used'])
for key, value in dftest[4].items():
dfoutput['Critical value(%s)' % key] = value
dfoutput.to_csv('result.csv',mode='a+')
print (dfoutput)
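# Hedged usage sketch with synthetic data (assumption: any pandas Series with a
# DatetimeIndex works here); a pure random walk should fail to reject the unit root.
def _example_stationarity_check():
    idx = pd.date_range('2017-09-01', periods=200, freq='H')
    ts_demo = pd.Series(np.random.randn(200).cumsum(), index=idx)
    test_stationarity(ts_demo, title='demo', figure_nb=1)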
# Decomposition into trend, seasonality and residual
def decomp(ts):
decomposition = seasonal_decompose(ts)
    trend = decomposition.trend  # trend component
    seasonal = decomposition.seasonal  # seasonal component
    residual = decomposition.resid  # residual component
plt.figure(4)
plt.subplot(411)
plt.title('Decomposition')
plt.plot(ts,label='Original')
plt.legend(loc=1); plt.xticks(rotation=20)
plt.subplot(412)
plt.plot(trend,label='Trend')
plt.legend(loc=1); plt.xticks(rotation=20)
plt.subplot(413)
plt.plot(seasonal,label='Seasonarity')
plt.legend(loc=1); plt.xticks(rotation=20)
plt.subplot(414)
plt.plot(residual,label='Residual')
plt.legend(loc=1); plt.xticks(rotation=20)
plt.tight_layout()
plt.savefig('decompo.jpg')
def acf_pacf(ts):
    # Determine the ARMA orders from the ACF/PACF plots
lag_acf = acf(ts, nlags=20)
lag_pacf = pacf(ts, nlags=20)
    # q: lag where the ACF first crosses the upper confidence bound (q = 2 here)
plt.figure(5)
plt.subplot(121)
plt.plot(lag_acf)
plt.axhline(y=0, linestyle='--', color='gray')
    plt.axhline(y=-1.96 / np.sqrt(len(ts)), linestyle='--', color='gray')  # lower confidence bound
    plt.axhline(y=1.96 / np.sqrt(len(ts)), linestyle='--', color='gray')  # upper confidence bound
plt.title('Autocorrelation Function')
    # p: lag where the PACF first crosses the upper confidence bound (p = 2 here)
plt.subplot(122)
plt.plot(lag_pacf)
plt.axhline(y=0, linestyle='--', color='gray')
plt.axhline(y=-1.96 / np.sqrt(len(ts)), linestyle='--', color='gray')
plt.axhline(y=1.96 / np.sqrt(len(ts)), linestyle='--', color='gray')
plt.title('Partial Autocorrelation Function')
plt.tight_layout()
plt.savefig('ACF & PACF')
def arma_models(ts):
    model = ARIMA(ts, order=(2, 1, 0))
result_AR = model.fit(disp=-1)
#plt.figure(6)
#plt.plot(ts)
#plt.plot(result_AR.fittedvalues, color='red')
#plt.title('AR model RSS:%.4f' % sum(result_AR.fittedvalues - ts) ** 2)
    model = ARIMA(ts, order=(0, 1, 5))
result_MA = model.fit(disp=-1)
#plt.figure(7)
#plt.plot(ts)
#plt.plot(result_MA.fittedvalues, color='red')
#plt.title('MA model RSS:%.4f' % sum(result_MA.fittedvalues - ts) ** 2)
#model = ARIMA(ts_log, order=(2, 1, 3))
#result_ARIMA = model.fit()
#plt.figure(8)
#plt.plot(ts)
#plt.plot(result_ARIMA.fittedvalues, color='red')
#plt.title('ARIMA RSS:%.4f' % sum(result_ARIMA.fittedvalues - ts) ** 2)
return result_AR, result_MA
def predict_insample(model_result,figure_nb):
predictions_diff = pd.Series(model_result.fittedvalues, copy=True)
    # print(predictions_diff.head())  # note: the first row is missing because of the order-1 differencing lag
predictions_diff_cumsum = predictions_diff.cumsum()
# print (predictions_ARIMA_diff_cumsum.head())
    predictions_log = pd.Series(ts_log.iloc[0], index=ts_log.index)
predictions_log = predictions_log.add(predictions_diff_cumsum, fill_value=0)
# print predictions_ARIMA_log.head()
predictions = np.exp(predictions_log)
plt.figure(figure_nb)
plt.plot(ts,label='origin')
plt.plot(predictions,label='prediction')
plt.xticks(rotation=20)
plt.legend(loc=0)
plt.title('predictions_ARIMA RMSE: %.4f' % np.sqrt(sum((predictions - ts) ** 2) / len(ts)))
def predict_future(result_model,start_val):
predict_diff = result_model.predict('2017-9-16 00:00:00','2017-9-23 23:00:00')
predict_diff_cumsum =predict_diff.cumsum()
predict_log=pd.Series(start_val,index=predict_diff.index)
predict_log=predict_log.add(predict_diff_cumsum,fill_value=0)
predict = np.exp(predict_log)
plt.figure(11)
plt.plot(ts,color='blue')
plt.plot(data0.loc[ | pd.Timestamp(2017,9,16) | pandas.Timestamp |