| prompt (string, 19 to 1.03M chars) | completion (string, 4 to 2.12k chars) | api (string, 8 to 90 chars) |
| --- | --- | --- |
"""Contains classes and functions required for data processing.
"""
# Loading relevant modules
import xarray as xr
import numpy as np
import glob as glob
import datetime
import itertools
import pandas as pd
import matplotlib.pyplot as plt
import scipy
import scipy.signal
# For printing headings
from modules.misc import print_heading
class dataprocessor(object):
"""Dataprocessor contains the data for processing and coordinates it's standardisation. It contains seaice data from NSIDC, ERA5 data of a variety of variables and index datasets.
each of these need the following functions to work in this class. (Required if more data is added to the system at a later date)
load_data() - to load data in.
temporal_decomposition() - to split into raw, seasonal cycle and anomalous data.
save_data() - to save data to folder.
Attributes
----------
index_data : object
Object containing climate index data.
indicies : list
Which indices to process.
load_ERA5 : bool
Should data from the ERA5 dataset be processed.
load_indicies : bool
Should data from the index datasets be processed.
load_seaice : bool
Are we processing seaice data.
processeddatafolder : str
File path for output processed data.
rawdatafolder : str
File path for source data.
seaice_data : object
Object containing seaice data.
variables : list
Which ERA5 variables to load.
"""
def __init__(self, rawdatafolder = 'data/', processeddatafolder = 'processed_data/'):
"""Generates a dataprocessor object.
Parameters
----------
rawdatafolder : str, optional
Path to raw data.
processeddatafolder : str, optional
Path to output data.
"""
heading = "Generating a data processor"
print_heading(heading)
# Saving datafolder paths to object
self.rawdatafolder = rawdatafolder
self.processeddatafolder = processeddatafolder
def load_data(self, load_seaice = False, load_indicies = False, load_ERA5 = False, indicies = ['SAM'], variables = ['t2m'], minyear = 1979, maxyear = 2020):
"""Adds raw data to the processor object.
Parameters
----------
load_seaice : bool, optional
Decides if we should load seaice data.
load_indicies : bool, optional
Decides if we should load index data.
load_ERA5 : bool, optional
Decides if we should load ERA5 data.
indicies : list, optional
Which indices to load as index data.
variables : list, optional
Which ERA5 variables to load.
Deleted Parameters
------------------
n : int, optional
Spatial resolution parameter.
"""
# Setting which datasets to load for processing
self.load_seaice = load_seaice
self.load_indicies = load_indicies
self.load_ERA5 = load_ERA5
# For datasets with multiple variables, which should be loaded.
self.indicies = indicies
self.variables = variables
if self.load_seaice:
heading = "Loading seaice data from NSIDC"
print_heading(heading)
self.seaice_data = seaice_data(rawdatafolder = self.rawdatafolder,
processeddatafolder = self.processeddatafolder)
self.seaice_data.load_data()
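# Mask out grid cells below the 15% concentration threshold (concentrations appear to be stored on a 0-250 scale, hence 0.15 * 250).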
self.seaice_data.data = self.seaice_data.data.where(self.seaice_data.data > 0.15*250, other = 0.0)
self.seaice_data.data = self.seaice_data.data.sel(time=slice(f"{minyear}-01-01", f"{maxyear}-12-31"))
if self.load_indicies:
heading = f"Loading index data"
print_heading(heading)
self.index_data = index_data(rawdatafolder = self.rawdatafolder,
processeddatafolder = self.processeddatafolder,
indicies = self.indicies)
self.index_data.load_data()
self.index_data.data = {index : self.index_data.data[index].sel(time=slice(f"{minyear}-01-01", f"{maxyear}-12-31")) for index in self.index_data.data.keys()}
if self.load_ERA5:
heading = f"Loading ECMWF ERA5 data"
print_heading(heading)
self.era5_data = era5_data(rawdatafolder = self.rawdatafolder,
processeddatafolder = self.processeddatafolder)
self.era5_data.load_data()
def decompose_and_save(self, resolutions = [1,5,10,20], temporal_resolution = ['monthly', 'seasonal', 'annual'], temporal_decomposition = ['raw', 'anomalous'], detrend = ['raw', 'detrended']):
"""Summary
Parameters
----------
resolutions : list, optional
Description
temporal_resolution : list, optional
Description
temporal_decomposition : list, optional
Description
detrend : list, optional
Description
Deleted Parameters
------------------
temporal_decomp : list, optional
Description
"""
if self.load_seaice:
self.seaice_data.decompose_and_save(resolutions = resolutions, temporal_resolution = temporal_resolution, temporal_decomposition = temporal_decomposition, detrend = detrend)
if self.load_indicies:
self.index_data.decompose_and_save(temporal_resolution = temporal_resolution, temporal_decomposition = temporal_decomposition, detrend = detrend)
if self.load_ERA5:
self.era5_data.decompose_and_save(resolutions = resolutions, temporal_resolution = temporal_resolution, temporal_decomposition = temporal_decomposition, detrend = detrend)
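# Minimal usage sketch (paths and options here are illustrative, not prescriptive):
# build a processor, load the datasets described in the class docstring, then
# write out each temporal/spatial decomposition. Not called anywhere in this module.
def _example_processing_run():
    processor = dataprocessor(rawdatafolder='data/',
                              processeddatafolder='processed_data/')
    processor.load_data(load_seaice=True, load_indicies=True,
                        indicies=['SAM'], minyear=1979, maxyear=2020)
    processor.decompose_and_save(resolutions=[1, 5],
                                 temporal_resolution=['monthly', 'annual'],
                                 temporal_decomposition=['raw', 'anomalous'],
                                 detrend=['raw', 'detrended'])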
class seaice_data:
"""Class for seaice data.
Attributes
----------
data : xarray DataArray
The data for seaice.
files : list
list of seaice raw data files.
output_folder : str
File path for output data folder.
source_folder : str
File path for source data folder.
Deleted Attributes
------------------
n : int
spatial resolution parameter.
"""
def __init__(self, rawdatafolder = 'data/', processeddatafolder = 'processeddata/', n = 5):
"""Loads the raw data.
Parameters
----------
rawdatafolder : str, optional
File path for raw data.
processeddatafolder : str, optional
File path for processed data.
n : int, optional
Spatial resolution parameter.
"""
self.source_folder = rawdatafolder + 'SIC-monthly/'
self.output_folder = processeddatafolder + 'SIC/'
self.files = glob.glob(self.source_folder+'*.bin')
def load_data(self):
"""Iterates over seaice files and loads as an object.
"""
data = []
dates = []
errorlist = []
sic_files = self.files
n = 1
for file in sic_files:
date = file.split('_')[-4]
try:
data += [self.readfile(file)[::n,::n]]
except ValueError:
print(file)
data += [data[-1]]
errorlist += [(date,file)]
# try:
# date = datetime.datetime.strptime(date, '%Y%m%d')
# except:
date = datetime.datetime.strptime(date, '%Y%m')
dates += [date]
for date, file in errorlist:
i = int(np.where(np.array(sic_files) == file)[0])
data[i] = (data[i-1]+data[i+1])/2
data = np.array(data, dtype = float)
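# Build the grid coordinates (values in metres): the 2500 step times the factor of 10
# gives 25 km spacing, matching the 332 x 316 grid returned by readfile().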
x = 10*np.arange(-395000,395000,2500)[::n]
y = 10*np.arange(435000,-395000,-2500)[::n]
x,y = np.meshgrid(x,y)
sie = data[0]
x_coastlines = x.flatten()[sie.flatten()==253]
y_coastlines = y.flatten()[sie.flatten()==253]
seaice = xr.DataArray(data,
coords={'time': dates,
'x': 10*np.arange(-395000, 395000, 2500)[::n],
'y': 10*np.arange( 435000,-395000,-2500)[::n]},
dims=['time', 'y', 'x'])
seaice = seaice.rename('seaice_concentration')
self.data = seaice
self.data = self.data.sortby('time')
def decompose_and_save(self, resolutions = [1,5,10,20], temporal_resolution = ['monthly', 'seasonal', 'annual'], temporal_decomposition = ['raw', 'anomalous'], detrend = ['raw', 'detrended']):
"""Break the data into different temporal splits.
Parameters
----------
resolutions : list, optional
Description
temporal_resolution : list, optional
Description
temporal_decomposition : list, optional
Description
detrend : list, optional
Description
"""
dataset = xr.Dataset({'source':self.data.copy()})
dataset.to_netcdf(self.output_folder+'source.nc')
heading = 'Splitting the seaice data up'
print_heading(heading)
for n, temp_res, temp_decomp, dt in itertools.product(resolutions, temporal_resolution, temporal_decomposition, detrend):
print(n, temp_res, temp_decomp, dt)
# Spatial resolution fix.
new_data = dataset.source.loc[:,::n,::n].copy()
# Temporal interpolation for missing data.
new_data = new_data.resample(time = '1MS').fillna(np.nan)
new_data = new_data.sortby(new_data.time)
new_data = new_data.groupby('time.month').apply(lambda group: group.sortby(group.time).interp(method='linear'))
if temp_res == 'seasonal':
new_data = new_data[:-1]
# If anomalous remove seasonal cycle
if temp_decomp == 'anomalous':
climatology = new_data.groupby("time.month").mean("time")
new_data = new_data.groupby("time.month") - climatology
# temporal averaging
if temp_res == 'seasonal':
new_data = new_data.resample(time="QS-DEC").mean()
elif temp_res == 'annual':
new_data = new_data.resample(time="YS").mean()
# plt.plot(new_data.mean(dim = ('x','y')))
# plt.show()
# dataset = xr.Dataset({'source':self.data.copy()})
# dataset[f'{temp_decomp}_{temp_res}_{n}'] = new_data
# Detrend
if 'detrended' == dt:
new_data = new_data.sortby(new_data.time)
new_data = detrend_data(new_data)
new_data.name = f'{temp_decomp}_{temp_res}_{n}_{dt}'
new_data.to_netcdf(self.output_folder + new_data.name +'.nc')
# self.data = dataset
print_heading('DONE')
def readfile(self, file):
"""Reads a binary data file and returns the numpy data array.
Parameters
----------
file : str
File path.
Returns
-------
data : numpy.ndarray
Data contained in the file.
"""
with open(file, "rb") as binary_file:
# Seek a specific position in the file and read N bytes
binary_file.seek(300, 0) # Skip the 300-byte file header
data = binary_file.read() # data array
data = np.array(list(data)).reshape(332, 316)
return data
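# Illustrative sketch (assumes at least one NSIDC monthly SIC .bin file is present
# in the source folder): read a single grid with readfile() and apply the same 15%
# concentration mask used in dataprocessor.load_data. Not called anywhere in this module.
def _example_read_single_file():
    sic = seaice_data(rawdatafolder='data/', processeddatafolder='processed_data/')
    grid = sic.readfile(sic.files[0])        # (332, 316) array of raw 0-250 values
    masked = np.where(grid > 0.15 * 250, grid, 0.0)
    return masked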
class index_data:
"""Class for index data.
Attributes
----------
data : dict
Dictionary mapping each index name to its loaded data.
indicies : list
Which indices to load.
output_folder : str
Path to output folder.
source_folder : str
Path to source folder.
"""
def __init__(self, rawdatafolder = 'data/', processeddatafolder = 'processeddata/', indicies = ['SAM']):
"""Loads the raw data.
Parameters
----------
rawdatafolder : str, optional
File path for raw data.
processeddatafolder : str, optional
File path for processed data.
indicies : list, optional
Which indices to load.
"""
self.source_folder = rawdatafolder + 'indicies/'
self.output_folder = processeddatafolder + 'INDICIES/'
self.indicies = indicies
def load_data(self):
"""Summary
"""
self.data = {}
if 'DMI' in self.indicies:
dmi = xr.open_dataset('Data/Indicies/dmi.nc')
self.data['DMI'] = dmi.DMI
if 'SAM' in self.indicies:
sam = np.genfromtxt('Data/Indicies/newsam.1957.2007.txt', skip_header =1, skip_footer = 1)[:,1:]
index = range(1957, 2020)
columns = range(1,13)
sam = pd.DataFrame(data = sam, columns = columns, index = index)
sam = sam.stack().reset_index()
sam.columns = ['year', 'month', 'SAM']
sam['time'] = pd.to_datetime(sam.year*100+sam.month,format='%Y%m')
sam = sam.set_index('time').SAM
sam = xr.DataArray(sam)
self.data['SAM'] = sam
if 'IPO' in self.indicies:
ipo = np.genfromtxt('Data/Indicies/tpi.timeseries.ersstv5.data', skip_header = 1, skip_footer = 11)[:,1:]
index = range(1854, 2021)
columns = range(1,13)
ipo = pd.DataFrame(data = ipo, columns = columns, index = index)
ipo = ipo.stack().reset_index()
ipo.columns = ['year', 'month', 'IPO']
ipo['time'] =

completion: pd.to_datetime(ipo.year*100+ipo.month,format='%Y%m')
api: pandas.to_datetime

from functools import partial
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Series,
concat,
isna,
notna,
)
import pandas._testing as tm
import pandas.tseries.offsets as offsets
@td.skip_if_no_scipy
@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
def test_series(series, sp_func, roll_func):
import scipy.stats
compare_func = partial(getattr(scipy.stats, sp_func), bias=False)
result = getattr(series.rolling(50), roll_func)()
assert isinstance(result, Series)
tm.assert_almost_equal(result.iloc[-1], compare_func(series[-50:]))
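# Minimal sketch of the relationship these tests assert (not itself a test):
# pandas' rolling skew/kurt agree with scipy's bias-corrected estimators
# applied directly to the same trailing window.
def _rolling_vs_scipy_demo():
    import scipy.stats
    s = Series(np.random.randn(100))
    rolled = s.rolling(50).kurt().iloc[-1]
    direct = scipy.stats.kurtosis(s.iloc[-50:], bias=False)
    return np.isclose(rolled, direct)  # expected: True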
@td.skip_if_no_scipy
@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
def test_frame(raw, frame, sp_func, roll_func):
import scipy.stats
compare_func = partial(getattr(scipy.stats, sp_func), bias=False)
result = getattr(frame.rolling(50), roll_func)()
assert isinstance(result, DataFrame)
tm.assert_series_equal(
result.iloc[-1, :],
frame.iloc[-50:, :].apply(compare_func, axis=0, raw=raw),
check_names=False,
)
@td.skip_if_no_scipy
@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
def test_time_rule_series(series, sp_func, roll_func):
import scipy.stats
compare_func = partial(getattr(scipy.stats, sp_func), bias=False)
win = 25
ser = series[::2].resample("B").mean()
series_result = getattr(ser.rolling(window=win, min_periods=10), roll_func)()
last_date = series_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_series = series[::2].truncate(prev_date, last_date)
tm.assert_almost_equal(series_result[-1], compare_func(trunc_series))
@td.skip_if_no_scipy
@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
def test_time_rule_frame(raw, frame, sp_func, roll_func):
import scipy.stats
compare_func = partial(getattr(scipy.stats, sp_func), bias=False)
win = 25
frm = frame[::2].resample("B").mean()
frame_result = getattr(frm.rolling(window=win, min_periods=10), roll_func)()
last_date = frame_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_frame = frame[::2].truncate(prev_date, last_date)
tm.assert_series_equal(
frame_result.xs(last_date),
trunc_frame.apply(compare_func, raw=raw),
check_names=False,
)
@td.skip_if_no_scipy
@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
def test_nans(sp_func, roll_func):
import scipy.stats
compare_func = partial(getattr(scipy.stats, sp_func), bias=False)
obj = Series(np.random.randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
result = getattr(obj.rolling(50, min_periods=30), roll_func)()
tm.assert_almost_equal(result.iloc[-1], compare_func(obj[10:-10]))
# min_periods is working correctly
result = getattr(obj.rolling(20, min_periods=15), roll_func)()
assert isna(result.iloc[23])
assert not isna(result.iloc[24])
assert not isna(result.iloc[-6])
assert

completion: isna(result.iloc[-5])
api: pandas.isna

from typing import Optional
import pandas as pd
from dero.ml.typing import ModelDict, AllModelResultsDict, DfDict
def model_dict_to_df(model_results: ModelDict, model_name: Optional[str] = None) -> pd.DataFrame:
df = pd.DataFrame(model_results).T
df.drop('score', inplace=True)
df['score'] = model_results['score']
if model_name is not None:
df['model'] = model_name
first_cols = ['model', 'score']
else:
first_cols = ['score']
other_cols = [col for col in df.columns if col not in first_cols]
return df[first_cols + other_cols]
def all_model_results_dict_to_df(results: AllModelResultsDict) -> pd.DataFrame:
df = pd.DataFrame()
for model_type, instance_list in results.items():
for instance in instance_list:
model_df = model_dict_to_df(instance, model_name=model_type)
df = df.append(model_df)
first_cols = ['model', 'score']
other_cols = [col for col in df.columns if col not in first_cols]
return df[first_cols + other_cols].sort_values('score', ascending=False)
def all_model_results_dict_to_model_df_dict(results: AllModelResultsDict) -> DfDict:
out_dict = {}
for model_type, instance_list in results.items():
model_df =

completion: pd.DataFrame()
api: pandas.DataFrame

from datasets import load_dataset
import streamlit as st
import pandas as pd
from googletrans import Translator
import session_state
import time
from fuzzywuzzy import fuzz,process
# Security
#passlib,hashlib,bcrypt,scrypt
import hashlib
# DB Management
import sqlite3
import os
import psycopg2
# import torch
# from transformers import PegasusForConditionalGeneration, PegasusTokenizer
state = session_state.get(question_number=0)
translator = Translator()
# model_name = 'tuner007/pegasus_paraphrase'
# torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'
# tokenizer = PegasusTokenizer.from_pretrained(model_name)
# model = PegasusForConditionalGeneration.from_pretrained(model_name).to(torch_device)
# def get_response(input_text,num_return_sequences,num_beams):
# batch = tokenizer([input_text],truncation=True,padding='longest',max_length=60, return_tensors="pt").to(torch_device)
# translated = model.generate(**batch,max_length=60,num_beams=num_beams, num_return_sequences=num_return_sequences, temperature=1.5)
# tgt_text = tokenizer.batch_decode(translated, skip_special_tokens=True)
# return tgt_text
@st.cache(suppress_st_warning=True)
def get_qa_pair_low(file, rand):
df = pd.read_csv(file, sep="\t", lineterminator='\n')
a = df.sample(1).reset_index()
st.text(df)
return {
"text": a["text"][0],
"question": a["question"][0],
"answer": a["answer\r"][0]
}
@st.cache(suppress_st_warning=True)
def get_qa_pair_mid(file, rand):
df = pd.read_csv(file,sep="\t", lineterminator='\n')
a = df.sample(1).reset_index()
return {
"text": a["text"][0],
"question": a["question"][0],
"answer": a["answer\r"][0]
}
@st.cache(suppress_st_warning=True)
def get_qa_pair_high(file, rand):
df =

completion: pd.read_csv(file,sep="\t", lineterminator='\n')
api: pandas.read_csv

'''
This is a follow up of https://letianzj.github.io/portfolio-management-one.html
It backtests four portfolios: GMV, tangent, maximum diversification and risk parity
and compare them with equally-weighted portfolio
'''
import os
import numpy as np
import pandas as pd
import pytz
from datetime import datetime, timezone
import quanttrader as qt
from scipy.optimize import minimize
import matplotlib.pyplot as plt
import empyrical as ep
import pyfolio as pf
# set browser full width
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
# ------------------ help functions -------------------------------- #
def minimum_vol_obj(wo, cov):
w = wo.reshape(-1, 1)
sig_p = np.sqrt(np.matmul(w.T, np.matmul(cov, w)))[0, 0] # portfolio sigma
return sig_p
def maximum_sharpe_negative_obj(wo, mu_cov):
w = wo.reshape(-1, 1)
mu = mu_cov[0].reshape(-1, 1)
cov = mu_cov[1]
obj = np.matmul(w.T, mu)[0, 0]
sig_p = np.sqrt(np.matmul(w.T, np.matmul(cov, w)))[0, 0] # portfolio sigma
obj = -1 * obj/sig_p
return obj
def maximum_diversification_negative_obj(wo, cov):
w = wo.reshape(-1, 1)
w_vol = np.matmul(w.T, np.sqrt(np.diag(cov).reshape(-1, 1)))[0, 0]
port_vol = np.sqrt(np.matmul(w.T, np.matmul(cov, w)))[0, 0]
ratio = w_vol / port_vol
return -ratio
# this is also used to verify rc from optimal w
def calc_risk_contribution(wo, cov):
w = wo.reshape(-1, 1)
sigma = np.sqrt(np.matmul(w.T, np.matmul(cov, w)))[0, 0]
mrc = np.matmul(cov, w)
rc = (w * mrc) / sigma # element-wise multiplication
return rc
def risk_budget_obj(wo, cov_wb):
w = wo.reshape(-1, 1)
cov = cov_wb[0]
wb = cov_wb[1].reshape(-1, 1) # target/budget in percent of portfolio risk
sig_p = np.sqrt(np.matmul(w.T, np.matmul(cov, w)))[0, 0] # portfolio sigma
risk_target = sig_p * wb
asset_rc = calc_risk_contribution(w, cov)
f = np.sum(np.square(asset_rc - risk_target.T)) # sum of squared error
return f
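# Illustrative sketch (the covariance numbers below are made up): solve an
# equal-risk-budget portfolio on a toy 3-asset covariance matrix and verify,
# via calc_risk_contribution, that each asset contributes roughly one third of
# total portfolio risk. Not called by the backtest below.
def _risk_parity_example():
    cov = np.array([[0.04, 0.01, 0.00],
                    [0.01, 0.09, 0.02],
                    [0.00, 0.02, 0.16]])
    n = cov.shape[0]
    w0 = np.ones(n) / n
    wb = np.ones(n) / n                      # equal risk budget
    cons = ({'type': 'eq', 'fun': lambda x: np.sum(x) - 1.0},
            {'type': 'ineq', 'fun': lambda x: x})
    res = minimize(risk_budget_obj, w0, args=[cov, wb],
                   method='SLSQP', constraints=cons, tol=1e-12)
    rc = calc_risk_contribution(res.x, cov)
    print('weights:', np.round(res.x, 4))
    print('risk contribution shares:', np.round((rc / rc.sum()).flatten(), 4))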
class PortfolioOptimization(qt.StrategyBase):
def __init__(self, nlookback=200, model='gmv'):
super(PortfolioOptimization, self).__init__()
self.nlookback = nlookback
self.model = model
self.current_time = None
def on_tick(self, tick_event):
self.current_time = tick_event.timestamp
# print('Processing {}'.format(self.current_time))
# wait for enough bars
for symbol in self.symbols:
df_hist = self._data_board.get_hist_price(symbol, self.current_time)
if df_hist.shape[0] < self.nlookback:
return
# wait for month end
time_index = self._data_board.get_hist_time_index()
time_loc = time_index.get_loc(self.current_time)
if (time_loc != len(time_index)-1) & (time_index[time_loc].month == time_index[time_loc+1].month):
return
npv = self._position_manager.current_total_capital
n_stocks = len(self.symbols)
TOL = 1e-12
prices = None
for symbol in self.symbols:
price = self._data_board.get_hist_price(symbol, self.current_time)['Close'].iloc[-self.nlookback:]
price = np.array(price)
if prices is None:
prices = price
else:
prices = np.c_[prices, price]
rets = prices[1:,:]/prices[0:-1, :]-1.0
mu = np.mean(rets, axis=0)
cov = np.cov(rets.T)
w = np.ones(n_stocks) / n_stocks # default
try:
if self.model == 'gmv':
w0 = np.ones(n_stocks) / n_stocks
cons = ({'type': 'eq', 'fun': lambda w: np.sum(w) - 1.0}, {'type': 'ineq', 'fun': lambda w: w})
res = minimize(minimum_vol_obj, w0, args=cov, method='SLSQP', constraints=cons, tol=TOL, options={'disp': True})
if not res.success:
print(f'{self.model} Optimization failed')
w = res.x
elif self.model == 'sharpe':
w0 = np.ones(n_stocks) / n_stocks
cons = ({'type': 'eq', 'fun': lambda w: np.sum(w) - 1.0}, {'type': 'ineq', 'fun': lambda w: w})
res = minimize(maximum_sharpe_negative_obj, w0, args=[mu, cov], method='SLSQP', constraints=cons, tol=TOL, options={'disp': True})
w = res.x
elif self.model == 'diversified':
w0 = np.ones(n_stocks) / n_stocks
cons = ({'type': 'eq', 'fun': lambda x: np.sum(x) - 1.0}) # weights sum to one
bnds = tuple([(0, 1)] * n_stocks)
res = minimize(maximum_diversification_negative_obj, w0, bounds=bnds, args=cov, method='SLSQP', constraints=cons, tol=TOL, options={'disp': True})
w = res.x
elif self.model == 'risk_parity':
w0 = np.ones(n_stocks) / n_stocks
w_b = np.ones(n_stocks) / n_stocks # risk budget/target, percent of total portfolio risk (in this case equal risk)
# bnds = ((0,1),(0,1),(0,1),(0,1)) # alternative, use bounds for weights, one for each stock
cons = ({'type': 'eq', 'fun': lambda x: np.sum(x) - 1.0}, {'type': 'ineq', 'fun': lambda x: x})
res = minimize(risk_budget_obj, w0, args=[cov, w_b], method='SLSQP', constraints=cons, tol=TOL, options={'disp': True})
w = res.x
except Exception as e:
print(f'{self.model} Optimization failed; {str(e)}')
i = 0
for sym in self.symbols:
current_size = self._position_manager.get_position_size(sym)
current_price = self._data_board.get_hist_price(sym, self.current_time)['Close'].iloc[-1]
target_size = (int)(npv * w[i] / current_price)
self.adjust_position(sym, size_from=current_size, size_to=target_size, timestamp=self.current_time)
print('REBALANCE ORDER SENT, %s, Price: %.2f, Percentage: %.2f, Target Size: %.2f' %
(sym,
current_price,
w[i],
target_size))
i += 1
if __name__ == '__main__':
etfs = ['SPY', 'EFA', 'TIP', 'GSG', 'VNQ']
models = ['gmv', 'sharpe', 'diversified', 'risk_parity']
benchmark = etfs
init_capital = 100_000.0
test_start_date = datetime(2010,1,1, 8, 30, 0, 0, pytz.timezone('America/New_York'))
test_end_date = datetime(2019,12,31, 6, 0, 0, 0, pytz.timezone('America/New_York'))
dict_results = dict()
for model in models:
dict_results[model] = dict()
# SPY: S&P 500
# EFA: MSCI EAFE
# TIP: UST
# GSG: GSCI
# VNQ: REITs
strategy = PortfolioOptimization()
strategy.set_capital(init_capital)
strategy.set_symbols(etfs)
strategy.set_params({'nlookback': 200, 'model': model})
backtest_engine = qt.BacktestEngine(test_start_date, test_end_date)
backtest_engine.set_capital(init_capital) # capital or portfolio >= capital for one strategy
for symbol in etfs:
data = qt.util.read_ohlcv_csv(os.path.join('../data/', f'{symbol}.csv'))
backtest_engine.add_data(symbol, data)
backtest_engine.set_strategy(strategy)
ds_equity, df_positions, df_trades = backtest_engine.run()
# save to excel
qt.util.save_one_run_results('./output', ds_equity, df_positions, df_trades, batch_tag=model)
ds_ret = ds_equity.pct_change().dropna()
ds_ret.name = model
dict_results[model]['equity'] = ds_equity
dict_results[model]['return'] = ds_ret
dict_results[model]['positions'] = df_positions
dict_results[model]['transactions'] = df_trades
# ------------------------- Evaluation and Plotting -------------------------------------- #
bm = pd.DataFrame()
for s in etfs:
df_temp = qt.util.read_ohlcv_csv(os.path.join('../data/', f'{s}.csv'))
df_temp = df_temp['Close']
df_temp.name = s
bm =

completion: pd.concat([bm, df_temp], axis=1)
api: pandas.concat

import numpy as np
import pytest
import pandas as pd
from pandas import Series
class TestSeriesConcat:
@pytest.mark.parametrize(
"dtype", ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"]
)
def test_concat_empty_series_dtypes_match_roundtrips(self, dtype):
dtype = np.dtype(dtype)
result = pd.concat([Series(dtype=dtype)])
assert result.dtype == dtype
result = pd.concat([Series(dtype=dtype), Series(dtype=dtype)])
assert result.dtype == dtype
def test_concat_empty_series_dtypes_roundtrips(self):
# round-tripping with self & like self
dtypes = list(map(np.dtype, ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"]))
def int_result_type(dtype, dtype2):
typs = {dtype.kind, dtype2.kind}
if not len(typs - {"i", "u", "b"}) and (
dtype.kind == "i" or dtype2.kind == "i"
):
return "i"
elif not len(typs - {"u", "b"}) and (
dtype.kind == "u" or dtype2.kind == "u"
):
return "u"
return None
def float_result_type(dtype, dtype2):
typs = {dtype.kind, dtype2.kind}
if not len(typs - {"f", "i", "u"}) and (
dtype.kind == "f" or dtype2.kind == "f"
):
return "f"
return None
def get_result_type(dtype, dtype2):
result = float_result_type(dtype, dtype2)
if result is not None:
return result
result = int_result_type(dtype, dtype2)
if result is not None:
return result
return "O"
for dtype in dtypes:
for dtype2 in dtypes:
if dtype == dtype2:
continue
expected = get_result_type(dtype, dtype2)
result = pd.concat([Series(dtype=dtype), Series(dtype=dtype2)]).dtype
assert result.kind == expected
@pytest.mark.parametrize(
"left,right,expected",
[
# booleans
(np.bool_, np.int32, np.int32),
(np.bool_, np.float32, np.object_),
# datetime-like
("m8[ns]", np.bool, np.object_),
("m8[ns]", np.int64, np.object_),
("M8[ns]", np.bool, np.object_),
("M8[ns]", np.int64, np.object_),
# categorical
("category", "category", "category"),
("category", "object", "object"),
],
)
def test_concat_empty_series_dtypes(self, left, right, expected):
result = pd.concat([Series(dtype=left), Series(dtype=right)])
assert result.dtype == expected
def test_concat_empty_series_dtypes_triple(self):
assert (
pd.concat(
[Series(dtype="M8[ns]"), Series(dtype=np.bool_), Series(dtype=np.int64)]
).dtype
== np.object_
)
def test_concat_empty_series_dtype_category_with_array(self):
# GH 18515
assert (
pd.concat(
[Series(np.array([]), dtype="category"), Series(dtype="float64")]
).dtype
== "float64"
)
def test_concat_empty_series_dtypes_sparse(self):
result = pd.concat(
[
Series(dtype="float64").astype("Sparse"),
Series(dtype="float64").astype("Sparse"),
]
)
assert result.dtype == "Sparse[float64]"
result = pd.concat(
[Series(dtype="float64").astype("Sparse"), Series(dtype="float64")]
)
# TODO: release-note: concat sparse dtype
expected = pd.SparseDtype(np.float64)
assert result.dtype == expected
result = pd.concat(
[

completion: Series(dtype="float64")
api: pandas.Series

import json
import io
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import dash
from dash import html
from dash import dcc
import dash_bootstrap_components as dbc
import pandas as pd
import numpy as np
import plotly.express as px
from dash.dependencies import Output, Input, State
from datetime import datetime, timedelta
from server import app
import plotly.graph_objects as go
import plotly.express as px
from sqlalchemy import create_engine
from flask import send_file
import os
from joblib import Parallel, delayed
from dash.exceptions import PreventUpdate
import time
import re
# ----------------------------------------------------------------------------------------------------- Level-1 figure 1 ----------------------------------------------------------------------------------------------------------------------
# Get the data for level-1 figure 1 (monthly record counts for biochemistry, exams and temperature)
def get_first_lev_first_fig_date(engine):
res = pd.DataFrame(columns=['业务类型', 'num', 'month' ])
# Problem categories, problem record counts, and full-data counts
bus_dic = {
'生化': "select '生化' as 业务类型 ,count(1) as num ,substr(REQUESTTIME,1,7) as month from ROUTINE2 where REQUESTTIME is not null group by substr(REQUESTTIME,1,7)",
'检查': " select '检查' as 业务类型 , count(1) as num ,substr(EXAM_DATE,1,7) as month from EXAM where EXAM_DATE is not null group by substr(EXAM_DATE,1,7) ",
'体温': " select '体温' as 业务类型 , count(1) as num ,substr(RECORDDATE,1,7) as month from TEMPERATURE where RECORDDATE is not null group by substr(RECORDDATE,1,7) ",
}
for bus in bus_dic:
res = res.append(pd.read_sql(bus_dic[bus],con=engine))
return res
# Update level-1 figure 1
@app.callback(
Output('rout_exam_temp_first_level_first_fig','figure'),
Output('rout_exam_temp_first_level_first_fig_data','data'),
Input('rout_exam_temp_first_level_first_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_first_level_first_fig(rout_exam_temp_first_level_first_fig_data,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
engine = create_engine(db_con_url['db'])
if rout_exam_temp_first_level_first_fig_data is None:
rout_exam_temp_first_level_first_fig_data = {}
rout_exam_temp_first_level_first_fig = get_first_lev_first_fig_date(engine)
rout_exam_temp_first_level_first_fig_data['rout_exam_temp_first_level_first_fig'] = rout_exam_temp_first_level_first_fig.to_json(orient='split', date_format='iso')
rout_exam_temp_first_level_first_fig_data['hosname'] = db_con_url['hosname']
rout_exam_temp_first_level_first_fig_data['btime'] = btime
rout_exam_temp_first_level_first_fig_data['etime'] = etime
rout_exam_temp_first_level_first_fig_data = json.dumps(rout_exam_temp_first_level_first_fig_data)
else:
rout_exam_temp_first_level_first_fig_data = json.loads(rout_exam_temp_first_level_first_fig_data)
if db_con_url['hosname'] != rout_exam_temp_first_level_first_fig_data['hosname']:
rout_exam_temp_first_level_first_fig = get_first_lev_first_fig_date(engine)
rout_exam_temp_first_level_first_fig_data['rout_exam_temp_first_level_first_fig'] = rout_exam_temp_first_level_first_fig.to_json(orient='split',date_format='iso')
rout_exam_temp_first_level_first_fig_data['hosname'] = db_con_url['hosname']
rout_exam_temp_first_level_first_fig_data = json.dumps(rout_exam_temp_first_level_first_fig_data)
else:
rout_exam_temp_first_level_first_fig = pd.read_json(rout_exam_temp_first_level_first_fig_data['rout_exam_temp_first_level_first_fig'], orient='split')
rout_exam_temp_first_level_first_fig_data = dash.no_update
#
rout_exam_temp_first_level_first_fig = rout_exam_temp_first_level_first_fig[(rout_exam_temp_first_level_first_fig['month']>=btime) & (rout_exam_temp_first_level_first_fig['month']<=etime)]
rout_exam_temp_first_level_first_fig = rout_exam_temp_first_level_first_fig.sort_values(['month','业务类型'])
fig1 = px.line(rout_exam_temp_first_level_first_fig, x='month', y='num', color='业务类型',
color_discrete_sequence=px.colors.qualitative.Dark24)
# Set a horizontal legend and its position
fig1.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
fig1.update_yaxes(title_text="业务数据量")
fig1.update_xaxes(title_text="时间")
return fig1,rout_exam_temp_first_level_first_fig_data
# ----------------------------------------------------------------------------------------------------- Level-1 figure 2 ----------------------------------------------------------------------------------------------------------------------
# Get the data for level-1 figure 2
def get_first_lev_second_fig_date(engine):
res = pd.DataFrame(columns=['问题类型', 'num' ])
# Problem categories, problem record counts, and full-data counts
bus_dic = {
'体温测量时间缺失': f"select '体温测量时间缺失' as 问题类型 ,count(1) as num from TEMPERATURE where RECORDDATE is null ",
'生化检验申请时间缺失': f"select '生化检验申请时间缺失' as 问题类型 ,count(1) as num from ROUTINE2 where REQUESTTIME is null ",
'生化检验报告时间缺失': f"select '生化检验报告时间缺失' as 问题类型 ,count(1) as num from ROUTINE2 where REPORTTIME is null",
'检查时间为空': f"select '检查时间为空' as 问题类型 ,count(1) as num from exam where EXAM_DATE is null ",
}
for bus in bus_dic:
res = res.append(pd.read_sql(bus_dic[bus],con=engine))
return res
# Update level-1 figure 2
@app.callback(
Output('rout_exam_temp_first_level_second_fig','figure'),
Output('rout_exam_temp_first_level_second_fig_data','data'),
Input('rout_exam_temp_first_level_second_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_first_level_first_fig(rout_exam_temp_first_level_second_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if rout_exam_temp_first_level_second_fig_data is None:
rout_exam_temp_first_level_second_fig = get_first_lev_second_fig_date(engine)
rout_exam_temp_first_level_second_fig_data = {}
rout_exam_temp_first_level_second_fig_data['rout_exam_temp_first_level_second_fig'] = rout_exam_temp_first_level_second_fig.to_json( orient='split', date_format='iso')
rout_exam_temp_first_level_second_fig_data['hosname'] = db_con_url['hosname']
rout_exam_temp_first_level_second_fig_data = json.dumps(rout_exam_temp_first_level_second_fig_data)
else:
rout_exam_temp_first_level_second_fig_data = json.loads(rout_exam_temp_first_level_second_fig_data)
if db_con_url['hosname'] != rout_exam_temp_first_level_second_fig_data['hosname']:
rout_exam_temp_first_level_second_fig = get_first_lev_second_fig_date(engine)
rout_exam_temp_first_level_second_fig_data = {}
rout_exam_temp_first_level_second_fig_data[ 'rout_exam_temp_first_level_second_fig'] = rout_exam_temp_first_level_second_fig.to_json( orient='split', date_format='iso')
rout_exam_temp_first_level_second_fig_data['hosname'] = db_con_url['hosname']
rout_exam_temp_first_level_second_fig_data = json.dumps(rout_exam_temp_first_level_second_fig_data)
else:
rout_exam_temp_first_level_second_fig = pd.read_json( rout_exam_temp_first_level_second_fig_data['rout_exam_temp_first_level_second_fig'], orient='split')
rout_exam_temp_first_level_second_fig_data = dash.no_update
fig = go.Figure()
# fig = px.bar(rout_exam_temp_first_level_second_fig,x='问题类型',y='num',color_discrete_sequence=px.colors.qualitative.Dark24 )
fig.add_trace(
go.Bar(x=rout_exam_temp_first_level_second_fig['问题类型'], y=rout_exam_temp_first_level_second_fig['num'], name="问题类型",
marker_color=px.colors.qualitative.Dark24, )
)
fig.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
)
)
fig.update_yaxes(title_text="问题数量")
fig.update_xaxes(title_text="月份")
return fig, rout_exam_temp_first_level_second_fig_data
# Download the detail data behind level-1 figure 2
@app.callback(
Output('rout_exam_temp_first_level_second_fig_detail', 'data'),
Input('rout_exam_temp_first_level_second_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def download_first_level_third_fig_data_detail(n_clicks,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
bus_dic = {
'体温测量时间缺失': f"select * from TEMPERATURE where RECORDDATE is null ",
'生化检验申请时间缺失': f"select * from ROUTINE2 where REQUESTTIME is null ",
'生化检验报告时间缺失': f"select * from ROUTINE2 where REPORTTIME is null",
'检查时间为空': f"select * from exam where EXAM_DATE is null ",
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
for key in bus_dic.keys():
try:
temp = pd.read_sql(bus_dic[key], con=engine)
if temp.shape[0] > 0:
temp.to_excel(writer, sheet_name=key)
except:
error_df = pd.DataFrame(['明细数据获取出错'], columns=[key])
error_df.to_excel(writer, sheet_name=key)
writer.save()
data = output.getvalue()
hosName = db_con_url['hosname']
return dcc.send_bytes(data, f'{hosName}时间缺失数据明细.xlsx')
else:
return dash.no_update
# # ----------------------------------------------------------------------------------------------------- Level-2 figure 1 ----------------------------------------------------------------------------------------------------------------------
# # Get the data for the first level-2 temperature figure
def get_second_lev_first_fig_date(engine,btime,etime):
res = pd.DataFrame(columns=['问题类型','num','month'])
bus_dic = {
'体温测量值异常': f"select '体温测量值异常' as 问题类型 ,count(1) as num ,substr(RECORDDATE,1,7) as month from TEMPERATURE where (VALUE >46 or VALUE<34) and substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' group by substr(RECORDDATE,1,7)",
'体温测量值缺失': f"select '体温测量值缺失' as 问题类型 ,count(1) as num ,substr(RECORDDATE,1,7) as month from TEMPERATURE where VALUE is null and substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' group by substr(RECORDDATE,1,7)",
'科室缺失': f"select '科室缺失' as 问题类型 ,count(1) as num ,substr(RECORDDATE,1,7) as month from TEMPERATURE where DEPT is null and substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' group by substr(RECORDDATE,1,7)",
'体温测量时机缺失': f"select '体温测量时机缺失' as 问题类型 ,count(1) as num ,substr(RECORDDATE,1,7) as month from TEMPERATURE where OUTSIDE is null and substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' group by substr(RECORDDATE,1,7)",
'体温测量时间无时间点': f"select '检验测量时间无时间点' as 问题类型 ,count(1) as num ,substr(RECORDDATE,1,7) as month from TEMPERATURE where length(RECORDDATE)<19 and substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' group by substr(RECORDDATE,1,7)",
'体温测量时间在出入院时间之外': f""" select '体温测量时间在出入院时间之外' as 问题类型,count(1) as num ,substr(RECORDDATE,1,7) as month from TEMPERATURE t1,overall t2 where
( t1.RECORDDATE is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.RECORDDATE<t2.IN_TIME or t1.RECORDDATE > t2.OUT_TIME )
and (substr(t1.RECORDDATE,1,7)>='{btime}' and substr(t1.RECORDDATE,1,7)<='{etime}')
group by substr(RECORDDATE,1,7)
""",
}
for bus in bus_dic:
res = res.append(pd.read_sql(bus_dic[bus],con=engine))
return res
# Update level-2 figure 1
@app.callback(
Output('temp_second_level_first_fig','figure'),
Output('temp_second_level_first_fig_data','data'),
Input('temp_second_level_first_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
)
def update_first_level_second_fig(temp_second_level_first_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if temp_second_level_first_fig_data is None:
temp_second_level_first_fig_data = {}
temp_second_level_first_fig = get_second_lev_first_fig_date(engine, btime, etime)
temp_second_level_first_fig_data['temp_second_level_first_fig'] = temp_second_level_first_fig.to_json( orient='split', date_format='iso')
temp_second_level_first_fig_data['hosname'] = db_con_url['hosname']
temp_second_level_first_fig_data['btime'] = btime
temp_second_level_first_fig_data['etime'] = etime
temp_second_level_first_fig_data = json.dumps(temp_second_level_first_fig_data)
else:
temp_second_level_first_fig_data = json.loads(temp_second_level_first_fig_data)
if db_con_url['hosname'] != temp_second_level_first_fig_data['hosname']:
temp_second_level_first_fig = get_second_lev_first_fig_date(engine, btime, etime)
temp_second_level_first_fig_data['temp_second_level_first_fig'] = temp_second_level_first_fig.to_json( orient='split', date_format='iso')
temp_second_level_first_fig_data['hosname'] = db_con_url['hosname']
temp_second_level_first_fig_data['btime'] = btime
temp_second_level_first_fig_data['etime'] = etime
temp_second_level_first_fig_data = json.dumps(temp_second_level_first_fig_data)
else:
if temp_second_level_first_fig_data['btime'] != btime or temp_second_level_first_fig_data[ 'etime'] != etime:
temp_second_level_first_fig = get_second_lev_first_fig_date(engine, btime, etime)
temp_second_level_first_fig_data[ 'temp_second_level_first_fig'] = temp_second_level_first_fig.to_json( orient='split', date_format='iso')
temp_second_level_first_fig_data['btime'] = btime
temp_second_level_first_fig_data['etime'] = etime
temp_second_level_first_fig_data = json.dumps(temp_second_level_first_fig_data)
else:
temp_second_level_first_fig = pd.read_json( temp_second_level_first_fig_data['temp_second_level_first_fig'], orient='split')
temp_second_level_first_fig_data = dash.no_update
temp_second_level_first_fig = temp_second_level_first_fig.sort_values(['month'])
fig = px.line(temp_second_level_first_fig, x="month", y="num", color='问题类型',
color_discrete_sequence=px.colors.qualitative.Dark24)
fig.update_layout(
margin=dict(l=30, r=30, t=30, b=30),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
),
)
fig.update_yaxes(title_text="体温测量数量", )
fig.update_xaxes(title_text="月份", )
return fig, temp_second_level_first_fig_data
# Download the detail data behind level-2 figure 1
@app.callback(
Output('temp_second_level_first_fig_detail', 'data'),
Input('temp_second_level_first_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def download_first_level_third_fig_data_detail(n_clicks,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
engine = create_engine(db_con_url['db'])
bus_dic = {
'体温测量值异常': f"select * from TEMPERATURE where (VALUE >46 or VALUE<34) and substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' ",
'体温测量值缺失': f"select * from TEMPERATURE where VALUE is null and substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' ",
'科室缺失': f"select * from TEMPERATURE where DEPT is null and substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' ",
'体温测量时机缺失': f"select * from TEMPERATURE where OUTSIDE is null and substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' ",
'体温测量时间无时间点': f"select * from TEMPERATURE where length(RECORDDATE)<19 and substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' ",
'体温测量时间在出入院时间之外': f""" select t1.*,t2.in_time as 入院时间,t2.out_time as 出院时间 from TEMPERATURE t1,overall t2 where
( t1.RECORDDATE is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.RECORDDATE<t2.IN_TIME or t1.RECORDDATE > t2.OUT_TIME )
and (substr(t1.RECORDDATE,1,7)>='{btime}' and substr(t1.RECORDDATE,1,7)<='{etime}')
""",
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
for key in bus_dic.keys():
try:
temp = pd.read_sql(bus_dic[key], con=engine)
if temp.shape[0] > 0:
temp.to_excel(writer, sheet_name=key)
except:
error_df = pd.DataFrame(['明细数据获取出错'], columns=[key])
error_df.to_excel(writer, sheet_name=key)
writer.save()
data = output.getvalue()
hosName = db_con_url['hosname']
return dcc.send_bytes(data, f'{hosName}体温问题数据明细.xlsx')
else:
return dash.no_update
#
# # # ----------------------------------------------------------------------------------------------------- Level-3 figure 1 ----------------------------------------------------------------------------------------------------------------------
# Get the data for the first level-3 biochemical-test figure
def get_third_lev_first_fig_date(engine,btime,etime):
res_数据时间缺失及汇总 = pd.DataFrame(columns=['问题类型', 'num', 'month' ])
# Problem categories, problem record counts, and full-data counts
bus_dic = {
'标本缺失': f"select '标本缺失' as 问题类型 ,count(1) as num ,substr(REQUESTTIME,1,7) as month from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and SPECIMEN is null group by substr(REQUESTTIME,1,7)",
'检验项目缺失': f"select '检验项目缺失' as 问题类型 ,count(1) as num ,substr(REQUESTTIME,1,7) as month from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and RTYPE is null group by substr(REQUESTTIME,1,7)",
'检验结果缺失': f"select '检验结果缺失' as 问题类型 ,count(1) as num ,substr(REQUESTTIME,1,7) as month from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and RVALUE is null group by substr(REQUESTTIME,1,7)",
'院内外标识缺失': f"select '院内外标识缺失' as 问题类型 ,count(1) as num ,substr(REQUESTTIME,1,7) as month from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and OUTSIDE is null group by substr(REQUESTTIME,1,7)",
'检验子项缺失': f"select '检验子项缺失' as 问题类型 ,count(1) as num ,substr(REQUESTTIME,1,7) as month from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and RITEM is null group by substr(REQUESTTIME,1,7)",
'定性结果缺失': f"select '定性结果缺失' as 问题类型 ,count(1) as num ,substr(REQUESTTIME,1,7) as month from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and ABNORMAL is null group by substr(REQUESTTIME,1,7)",
'申请时间大于等于报告时间': f"select '申请时间大于等于报告时间' as 问题类型 ,count(1) as num ,substr(REQUESTTIME,1,7) as month from ROUTINE2 where REQUESTTIME >= REPORTTIME and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' group by substr(REQUESTTIME,1,7)",
'申请时间在出入院时间之外': f""" select '申请时间在出入院时间之外' as 问题类型,count(1) as num ,substr(REQUESTTIME,1,7) as month from ROUTINE2 t1,overall t2 where
( t1.REQUESTTIME is not null and t1.REPORTTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.REQUESTTIME<t2.IN_TIME or t1.REQUESTTIME > t2.OUT_TIME )
and (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}')
group by substr(REQUESTTIME,1,7)
""",
}
for bus in bus_dic:
res_数据时间缺失及汇总 = res_数据时间缺失及汇总.append(pd.read_sql(bus_dic[bus],con=engine))
return res_数据时间缺失及汇总
# Update level-3 figure 1
@app.callback(
Output('rout_third_level_first_fig','figure'),
Output('rout_third_level_first_fig_data','data'),
Input('rout_third_level_first_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_first_level_first_fig(rout_third_level_first_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if rout_third_level_first_fig_data is None:
rout_third_level_first_fig_data = {}
rout_third_level_first_fig = get_third_lev_first_fig_date(engine, btime, etime)
rout_third_level_first_fig_data['rout_third_level_first_fig'] = rout_third_level_first_fig.to_json( orient='split', date_format='iso')
rout_third_level_first_fig_data['hosname'] = db_con_url['hosname']
rout_third_level_first_fig_data['btime'] = btime
rout_third_level_first_fig_data['etime'] = etime
rout_third_level_first_fig_data = json.dumps(rout_third_level_first_fig_data)
else:
rout_third_level_first_fig_data = json.loads(rout_third_level_first_fig_data)
if db_con_url['hosname'] != rout_third_level_first_fig_data['hosname']:
rout_third_level_first_fig = get_third_lev_first_fig_date(engine, btime, etime)
rout_third_level_first_fig_data['rout_third_level_first_fig'] = rout_third_level_first_fig.to_json( orient='split', date_format='iso')
rout_third_level_first_fig_data['hosname'] = db_con_url['hosname']
rout_third_level_first_fig_data['btime'] = btime
rout_third_level_first_fig_data['etime'] = etime
rout_third_level_first_fig_data = json.dumps(rout_third_level_first_fig_data)
else:
if rout_third_level_first_fig_data['btime'] != btime or rout_third_level_first_fig_data[ 'etime'] != etime:
rout_third_level_first_fig = get_third_lev_first_fig_date(engine, btime, etime)
rout_third_level_first_fig_data[ 'rout_third_level_first_fig'] = rout_third_level_first_fig.to_json(orient='split', date_format='iso')
rout_third_level_first_fig_data['btime'] = btime
rout_third_level_first_fig_data['etime'] = etime
rout_third_level_first_fig_data = json.dumps(rout_third_level_first_fig_data)
else:
rout_third_level_first_fig = pd.read_json( rout_third_level_first_fig_data['rout_third_level_first_fig'], orient='split')
rout_third_level_first_fig_data = dash.no_update
rout_third_level_first_fig = rout_third_level_first_fig.sort_values(['month'])
fig = px.line(rout_third_level_first_fig,x='month',y='num',color='问题类型',color_discrete_sequence=px.colors.qualitative.Dark24 )
fig.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
)
)
fig.update_yaxes(title_text="问题数量")
fig.update_xaxes(title_text="月份")
return fig, rout_third_level_first_fig_data
# Download the detail data behind level-3 figure 1
@app.callback(
Output('rout_third_level_first_fig_detail', 'data'),
Input('rout_third_level_first_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def download_first_level_third_fig_data_detail(n_clicks,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
bus_dic = {
'标本缺失': f"select * from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and SPECIMEN is null ",
'检验项目缺失': f"select * from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and RTYPE is null ",
'检验结果缺失': f"select * from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and RVALUE is null ",
'院内外标识缺失': f"select * from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and OUTSIDE is null ",
'检验子项缺失': f"select * from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and RITEM is null ",
'定性结果缺失': f"select * from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and ABNORMAL is null ",
'申请时间大于等于报告时间': f"select * from ROUTINE2 where REQUESTTIME >= REPORTTIME and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' ",
'申请时间在出入院时间之外': f""" select t1.* ,t2.in_time as 入院时间,t2.out_time as 出院时间 from ROUTINE2 t1,overall t2 where
( t1.REQUESTTIME is not null and t1.REPORTTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.REQUESTTIME<t2.IN_TIME or t1.REQUESTTIME > t2.OUT_TIME )
and (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}')
""",
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
for key in bus_dic.keys():
try:
temp = pd.read_sql(bus_dic[key], con=engine)
if temp.shape[0] > 0:
temp.to_excel(writer, sheet_name=key)
except:
error_df = pd.DataFrame(['明细数据获取出错'], columns=[key])
error_df.to_excel(writer, sheet_name=key)
writer.save()
data = output.getvalue()
hosName = db_con_url['hosname']
return dcc.send_bytes(data, f'{hosName}生化检验问题数据明细.xlsx')
else:
return dash.no_update
# # # ----------------------------------------------------------------------------------------------------- Level-3 figure 2 ----------------------------------------------------------------------------------------------------------------------
# Get the data for level-3 figure 2 (biochemistry)
def get_third_level_second_fig_date(engine,btime,etime):
res = pd.read_sql(f"select RTYPE as 生化检验类型,count(distinct CASEID||TESTNO||RTYPE) as num ,substr(REQUESTTIME,1,7) as month from ROUTINE2 where RTYPE is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' group by RTYPE,substr(REQUESTTIME,1,7)",con=engine)
return res
# Update level-3 figure 2 (biochemistry)
@app.callback(
Output('rout_third_level_second_fig','figure'),
Output('rout_third_level_second_fig_data','data'),
Input('rout_third_level_second_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_second_level_fig(rout_third_level_second_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if rout_third_level_second_fig_data is None:
rout_third_level_second_fig_data = {}
rout_third_level_second_fig = get_third_level_second_fig_date(engine, btime, etime)
rout_third_level_second_fig_data['rout_third_level_second_fig'] = rout_third_level_second_fig.to_json(orient='split', date_format='iso')
rout_third_level_second_fig_data['hosname'] = db_con_url['hosname']
rout_third_level_second_fig_data['btime'] = btime
rout_third_level_second_fig_data['etime'] = etime
rout_third_level_second_fig_data = json.dumps(rout_third_level_second_fig_data)
else:
rout_third_level_second_fig_data = json.loads(rout_third_level_second_fig_data)
if db_con_url['hosname'] != rout_third_level_second_fig_data['hosname']:
rout_third_level_second_fig = get_third_level_second_fig_date(engine, btime, etime)
rout_third_level_second_fig_data['rout_third_level_second_fig'] = rout_third_level_second_fig.to_json(orient='split',date_format='iso')
rout_third_level_second_fig_data['hosname'] = db_con_url['hosname']
rout_third_level_second_fig_data['btime'] = btime
rout_third_level_second_fig_data['etime'] = etime
rout_third_level_second_fig_data = json.dumps(rout_third_level_second_fig_data)
else:
if rout_third_level_second_fig_data['btime'] != btime or rout_third_level_second_fig_data['etime'] != etime:
rout_third_level_second_fig = get_third_level_second_fig_date(engine, btime, etime)
rout_third_level_second_fig_data['rout_third_level_second_fig'] = rout_third_level_second_fig.to_json(orient='split',date_format='iso')
rout_third_level_second_fig_data['btime'] = btime
rout_third_level_second_fig_data['etime'] = etime
rout_third_level_second_fig_data = json.dumps(rout_third_level_second_fig_data)
else:
rout_third_level_second_fig = pd.read_json(rout_third_level_second_fig_data['rout_third_level_second_fig'], orient='split')
rout_third_level_second_fig_data = dash.no_update
rout_third_level_second_fig = rout_third_level_second_fig.sort_values(['month'])
# fig = px.line(rout_third_level_second_fig,x='month',y='num',color='生化检验类型',color_discrete_sequence=px.colors.qualitative.Dark24)
fig = px.bar(rout_third_level_second_fig,x='month',y='num',color='生化检验类型',color_discrete_sequence=px.colors.qualitative.Dark24)
fig.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
)
)
fig.update_yaxes(title_text="生化检验数量", )
fig.update_xaxes(title_text="月份", )
return fig,rout_third_level_second_fig_data
#
# # ----------------------------------------------------------------------------------------------------- Level-4 figure 1 ----------------------------------------------------------------------------------------------------------------------
# Get the data for the first level-4 exam figure
def get_fourth_level_first_fig_date(engine,btime,etime):
res = pd.DataFrame(columns=['问题类型', 'num', 'month'])
# Problem categories, problem record counts, and full-data counts
bus_dic = {
'检查类别缺失': f"select '检查类别缺失' as 问题类型 ,count(1) as num ,substr(EXAM_DATE,1,7) as month from EXAM where EXAM_DATE is not null and substr(EXAM_DATE,1,7)>='{btime}' and substr(EXAM_DATE,1,7)<='{etime}' and EXAM_CLASS is null group by substr(EXAM_DATE,1,7)",
'检查部位缺失': f"select '检验部位缺失' as 问题类型 ,count(1) as num ,substr(EXAM_DATE,1,7) as month from EXAM where EXAM_DATE is not null and substr(EXAM_DATE,1,7)>='{btime}' and substr(EXAM_DATE,1,7)<='{etime}' and EXAM_PARA is null group by substr(EXAM_DATE,1,7)",
'检查所见缺失': f"select '检查所见缺失' as 问题类型 ,count(1) as num ,substr(EXAM_DATE,1,7) as month from EXAM where EXAM_DATE is not null and substr(EXAM_DATE,1,7)>='{btime}' and substr(EXAM_DATE,1,7)<='{etime}' and DESCRIPTION is null group by substr(EXAM_DATE,1,7)",
'检查印象缺失': f"select '检查印象缺失' as 问题类型 ,count(1) as num ,substr(EXAM_DATE,1,7) as month from EXAM where EXAM_DATE is not null and substr(EXAM_DATE,1,7)>='{btime}' and substr(EXAM_DATE,1,7)<='{etime}' and IMPRESSION is null group by substr(EXAM_DATE,1,7)",
'检查时间在出入院时间之外': f""" select '检查时间在出入院时间之外' as 问题类型,count(1) as num ,substr(EXAM_DATE,1,7) as month from EXAM t1,overall t2 where
( t1.EXAM_DATE is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.EXAM_DATE<t2.IN_TIME or t1.EXAM_DATE > t2.OUT_TIME )
and (substr(t1.EXAM_DATE,1,7)>='{btime}' and substr(t1.EXAM_DATE,1,7)<='{etime}')
group by substr(EXAM_DATE,1,7)
""",
}
for bus in bus_dic:
res = res.append(pd.read_sql(bus_dic[bus], con=engine))
return res
# Update level-4 figure 1
@app.callback(
Output('exam_fourth_level_first_fig','figure'),
Output('exam_fourth_level_first_fig_data', 'data'),
Input('exam_fourth_level_first_fig_data', 'data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
)
def update_third_level_first_fig(exam_fourth_level_first_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if exam_fourth_level_first_fig_data is None:
exam_fourth_level_first_fig_data = {}
exam_fourth_level_first_fig = get_fourth_level_first_fig_date(engine, btime, etime)
exam_fourth_level_first_fig_data['exam_fourth_level_first_fig'] = exam_fourth_level_first_fig.to_json( orient='split', date_format='iso')
exam_fourth_level_first_fig_data['hosname'] = db_con_url['hosname']
exam_fourth_level_first_fig_data['btime'] = btime
exam_fourth_level_first_fig_data['etime'] = etime
exam_fourth_level_first_fig_data = json.dumps(exam_fourth_level_first_fig_data)
else:
exam_fourth_level_first_fig_data = json.loads(exam_fourth_level_first_fig_data)
if db_con_url['hosname'] != exam_fourth_level_first_fig_data['hosname']:
exam_fourth_level_first_fig = get_fourth_level_first_fig_date(engine, btime, etime)
exam_fourth_level_first_fig_data['exam_fourth_level_first_fig'] = exam_fourth_level_first_fig.to_json(orient='split', date_format='iso')
exam_fourth_level_first_fig_data['hosname'] = db_con_url['hosname']
exam_fourth_level_first_fig_data['btime'] = btime
exam_fourth_level_first_fig_data['etime'] = etime
exam_fourth_level_first_fig_data = json.dumps(exam_fourth_level_first_fig_data)
else:
if exam_fourth_level_first_fig_data['btime'] != btime or exam_fourth_level_first_fig_data['etime'] != etime:
exam_fourth_level_first_fig = get_fourth_level_first_fig_date(engine, btime, etime)
exam_fourth_level_first_fig_data['exam_fourth_level_first_fig'] = exam_fourth_level_first_fig.to_json(orient='split', date_format='iso')
exam_fourth_level_first_fig_data['btime'] = btime
exam_fourth_level_first_fig_data['etime'] = etime
exam_fourth_level_first_fig_data = json.dumps(exam_fourth_level_first_fig_data)
else:
exam_fourth_level_first_fig = pd.read_json( exam_fourth_level_first_fig_data['exam_fourth_level_first_fig'], orient='split')
exam_fourth_level_first_fig_data = dash.no_update
exam_fourth_level_first_fig = exam_fourth_level_first_fig.sort_values(['month'])
fig = px.line(exam_fourth_level_first_fig, x="month", y="num", color='问题类型', color_discrete_sequence=px.colors.qualitative.Dark24)
fig.update_layout(
margin=dict(l=30, r=30, t=30, b=30),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
),
)
fig.update_yaxes(title_text="问题数量", )
fig.update_xaxes(title_text="月份", )
return fig,exam_fourth_level_first_fig_data
# Download the detail records behind fourth-level figure 1
@app.callback(
Output('exam_fourth_level_first_fig_detail', 'data'),
Input('exam_fourth_level_first_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def download_fourth_level_first_fig_data_detail(n_clicks,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
bus_dic = {
'检查类别缺失': f"select * from EXAM where EXAM_DATE is not null and substr(EXAM_DATE,1,7)>='{btime}' and substr(EXAM_DATE,1,7)<='{etime}' and EXAM_CLASS is null ",
'检查部位缺失': f"select * from EXAM where EXAM_DATE is not null and substr(EXAM_DATE,1,7)>='{btime}' and substr(EXAM_DATE,1,7)<='{etime}' and EXAM_PARA is null ",
'检查所见缺失': f"select * from EXAM where EXAM_DATE is not null and substr(EXAM_DATE,1,7)>='{btime}' and substr(EXAM_DATE,1,7)<='{etime}' and DESCRIPTION is null ",
'检查印象缺失': f"select * from EXAM where EXAM_DATE is not null and substr(EXAM_DATE,1,7)>='{btime}' and substr(EXAM_DATE,1,7)<='{etime}' and IMPRESSION is null ",
'检查时间在出入院时间之外': f""" select t1.* ,t2.in_time as 入院时间,t2.out_time as 出院时间 from EXAM t1,overall t2 where
( t1.EXAM_DATE is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.EXAM_DATE<t2.IN_TIME or t1.EXAM_DATE > t2.OUT_TIME )
and (substr(t1.EXAM_DATE,1,7)>='{btime}' and substr(t1.EXAM_DATE,1,7)<='{etime}')
""",
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
for key in bus_dic.keys():
try:
temp = pd.read_sql(bus_dic[key], con=engine)
if temp.shape[0] > 0:
temp.to_excel(writer, sheet_name=key)
except:
error_df = pd.DataFrame(['明细数据获取出错'], columns=[key])
error_df.to_excel(writer, sheet_name=key)
writer.save()
data = output.getvalue()
hosName = db_con_url['hosname']
return dcc.send_bytes(data, f'{hosName}检查问题数据明细.xlsx')
else:
return dash.no_update
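# writer.save() above is deprecated (and removed in pandas 2.x); newer pandas
# prefers pd.ExcelWriter as a context manager. A hedged sketch of the same
# "many frames -> one in-memory workbook" step (hypothetical helper name):
def _frames_to_xlsx_bytes(frames: dict) -> bytes:
    buffer = io.BytesIO()
    with pd.ExcelWriter(buffer, engine='xlsxwriter') as writer:
        for sheet_name, frame in frames.items():
            frame.to_excel(writer, sheet_name=sheet_name)
    return buffer.getvalue()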
# # ----------------------------------------------------------------------------------------------------- Fourth-level figure 2 ----------------------------------------------------------------------------------------------------------------------
# Fetch the data for the second fourth-level exam figure
def get_fourth_level_second_fig_date(engine,btime,etime):
res = pd.read_sql(f"select EXAM_CLASS as 检查类别,count(1) as num ,substr(EXAM_DATE,1,7) as month from EXAM where EXAM_CLASS is not null and substr(EXAM_DATE,1,7)>='{btime}' and substr(EXAM_DATE,1,7)<='{etime}' group by substr(EXAM_DATE,1,7),EXAM_CLASS ",con=engine)
return res
# Update the second fourth-level figure
@app.callback(
Output('exam_fourth_level_second_fig','figure'),
Output('exam_fourth_level_second_fig_data', 'data'),
Input('exam_fourth_level_second_fig_data', 'data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
)
def update_fourth_level_second_fig(exam_fourth_level_second_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if exam_fourth_level_second_fig_data is None:
exam_fourth_level_second_fig_data = {}
exam_fourth_level_second_fig = get_fourth_level_second_fig_date(engine, btime, etime)
exam_fourth_level_second_fig_data['exam_fourth_level_second_fig'] = exam_fourth_level_second_fig.to_json( orient='split', date_format='iso')
exam_fourth_level_second_fig_data['hosname'] = db_con_url['hosname']
exam_fourth_level_second_fig_data['btime'] = btime
exam_fourth_level_second_fig_data['etime'] = etime
exam_fourth_level_second_fig_data = json.dumps(exam_fourth_level_second_fig_data)
else:
exam_fourth_level_second_fig_data = json.loads(exam_fourth_level_second_fig_data)
if db_con_url['hosname'] != exam_fourth_level_second_fig_data['hosname']:
exam_fourth_level_second_fig = get_fourth_level_second_fig_date(engine, btime, etime)
exam_fourth_level_second_fig_data['exam_fourth_level_second_fig'] = exam_fourth_level_second_fig.to_json(orient='split', date_format='iso')
exam_fourth_level_second_fig_data['hosname'] = db_con_url['hosname']
exam_fourth_level_second_fig_data['btime'] = btime
exam_fourth_level_second_fig_data['etime'] = etime
exam_fourth_level_second_fig_data = json.dumps(exam_fourth_level_second_fig_data)
else:
if exam_fourth_level_second_fig_data['btime'] != btime or exam_fourth_level_second_fig_data['etime'] != etime:
exam_fourth_level_second_fig = get_fourth_level_second_fig_date(engine, btime, etime)
exam_fourth_level_second_fig_data['exam_fourth_level_second_fig'] = exam_fourth_level_second_fig.to_json(orient='split', date_format='iso')
exam_fourth_level_second_fig_data['btime'] = btime
exam_fourth_level_second_fig_data['etime'] = etime
exam_fourth_level_second_fig_data = json.dumps(exam_fourth_level_second_fig_data)
else:
exam_fourth_level_second_fig = pd.read_json( exam_fourth_level_second_fig_data['exam_fourth_level_second_fig'], orient='split')
exam_fourth_level_second_fig_data = dash.no_update
exam_fourth_level_second_fig = exam_fourth_level_second_fig.sort_values(['month'])
fig = px.bar(exam_fourth_level_second_fig, x="month", y="num", color='检查类别', color_discrete_sequence=px.colors.qualitative.Dark24)
fig.update_layout(
margin=dict(l=30, r=30, t=30, b=30),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
),
)
fig.update_yaxes(title_text="检查数量", )
fig.update_xaxes(title_text="月份", )
return fig,exam_fourth_level_second_fig_data
# # # ----------------------------------------------------------------------------------------------------- Download all ----------------------------------------------------------------------------------------------------------------------
# Download the aggregated statistics shown on this page
@app.callback(
Output("down-rout-exam-temp", "data"),
Input("rout-exam-temp-all-count-data-down", "n_clicks"),
Input("rout_exam_temp_first_level_first_fig_data", "data"),
Input("rout_exam_temp_first_level_second_fig_data", "data"),
Input("temp_second_level_first_fig_data", "data"),
Input("rout_third_level_first_fig_data", "data"),
Input("rout_third_level_second_fig_data", "data"),
Input("exam_fourth_level_first_fig_data", "data"),
Input("exam_fourth_level_second_fig_data", "data"),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def get_all_count_data(n_clicks, rout_exam_temp_first_level_first_fig_data,
rout_exam_temp_first_level_second_fig_data,
temp_second_level_first_fig_data,
rout_third_level_first_fig_data,
rout_third_level_second_fig_data,
exam_fourth_level_first_fig_data,
exam_fourth_level_second_fig_data,
db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
hosName = db_con_url['hosname']
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
now_time = str(datetime.now())[0:19].replace(' ', '_').replace(':', '_')
if rout_exam_temp_first_level_first_fig_data is not None and rout_exam_temp_first_level_second_fig_data is not None and temp_second_level_first_fig_data is not None and \
rout_third_level_first_fig_data is not None and rout_third_level_second_fig_data is not None and exam_fourth_level_first_fig_data is not None and exam_fourth_level_second_fig_data is not None :
rout_exam_temp_first_level_first_fig_data = json.loads(rout_exam_temp_first_level_first_fig_data )
rout_exam_temp_first_level_second_fig_data = json.loads(rout_exam_temp_first_level_second_fig_data )
temp_second_level_first_fig_data = json.loads(temp_second_level_first_fig_data )
rout_third_level_first_fig_data = json.loads(rout_third_level_first_fig_data )
rout_third_level_second_fig_data = json.loads(rout_third_level_second_fig_data )
exam_fourth_level_first_fig_data = json.loads(exam_fourth_level_first_fig_data )
exam_fourth_level_second_fig_data = json.loads(exam_fourth_level_second_fig_data )
if rout_exam_temp_first_level_first_fig_data['hosname'] == hosName and \
rout_exam_temp_first_level_second_fig_data['hosname'] == hosName and \
temp_second_level_first_fig_data['hosname'] == hosName and temp_second_level_first_fig_data['btime'] == btime and temp_second_level_first_fig_data['etime'] == etime and \
rout_third_level_first_fig_data['hosname'] == hosName and rout_third_level_first_fig_data['btime'] == btime and rout_third_level_first_fig_data['etime'] == etime and\
rout_third_level_second_fig_data['hosname'] == hosName and rout_third_level_second_fig_data['btime'] == btime and rout_third_level_second_fig_data['etime'] == etime and \
exam_fourth_level_first_fig_data['hosname'] == hosName and exam_fourth_level_first_fig_data['btime'] == btime and exam_fourth_level_first_fig_data['etime'] == etime and \
exam_fourth_level_second_fig_data['hosname'] == hosName and exam_fourth_level_second_fig_data['btime'] == btime and exam_fourth_level_second_fig_data['etime'] == etime :
rout_exam_temp_first_level_first_fig_data = pd.read_json( rout_exam_temp_first_level_first_fig_data['rout_exam_temp_first_level_first_fig'], orient='split')
rout_exam_temp_first_level_first_fig_data = rout_exam_temp_first_level_first_fig_data[ (rout_exam_temp_first_level_first_fig_data['month'] >= btime) & (
rout_exam_temp_first_level_first_fig_data['month'] <= etime)]
rout_exam_temp_first_level_second_fig_data = pd.read_json( rout_exam_temp_first_level_second_fig_data['rout_exam_temp_first_level_second_fig'], orient='split')
temp_second_level_first_fig_data = pd.read_json( temp_second_level_first_fig_data['temp_second_level_first_fig'], orient='split')
rout_third_level_first_fig_data = pd.read_json( rout_third_level_first_fig_data['rout_third_level_first_fig'], orient='split')
rout_third_level_second_fig_data =
|
pd.read_json( rout_third_level_second_fig_data['rout_third_level_second_fig'], orient='split')
|
pandas.read_json
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
#open csv
df = pd.read_csv('cereal.csv')
#find negative values and replace with null
df = df.replace(-1, np.NaN)
#fill null values with mean values
for column in ['carbo','sugars','potass']:
df[column] = df[column].fillna(df[column].mean())
#apply mean method to selected columns
cereal_means = df.loc[:,'protein':'cups'].apply(np.mean)
cereal_std = df.loc[:,'protein':'cups'].apply(np.std)
#open the output file and append to it
f = open('cereal data.txt','a+')
f.write('Cereal means: \n')
f.write(cereal_means.to_string())
f.write('\n\nCereal Standard Deviation: \n ')
f.write(cereal_std.to_string())
#print to console
print('Cereal Means: \n{}' '\n\nCereal Standard Deviation: \n{}'.format(cereal_means,cereal_std))
#for each selected column, find the row with the maximum value and keep that cereal's name
calories = list(df[df['calories'] == max(df['calories'])]['name'])[0]
protein = list(df[df['protein'] == max(df['protein'])]['name'])[0]
fat = list(df[df['fat'] == max(df['fat'])]['name'])[0]
sodium = list(df[df['sodium'] == max(df['sodium'])]['name'])[0]
fiber = list(df[df['fiber'] == max(df['fiber'])]['name'])[0]
max_cereals = str('\nCereal with the most calories: {}' '\nCereal with the most protein: {}' '\nCereal with the most fat: {}'
'\nCereal with the most sodium: {}''\nCereal with the most fiber: {}'.format(calories,protein,fat,sodium,fiber))
print(max_cereals)
f.write('\n\nCereal Max Values: \n')
f.write(max_cereals)
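# The list/boolean-mask pattern above works, but pandas has a direct idiom for
# "name of the row holding a column's maximum". A small sketch of the same
# lookup using idxmax (same df and columns as above; helper not called here):
def _cereal_with_max(frame, column):
    # idxmax returns the index label of the largest value in `column`;
    # .loc then pulls the cereal name from that row.
    return frame.loc[frame[column].idxmax(), 'name']
# e.g. _cereal_with_max(df, 'calories') matches the `calories` value above.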
#get the mfr column
manufactors = df.loc[:,'mfr']
manufactors = manufactors.reset_index().melt(id_vars='index')
#plot with kind= count
sns.catplot(
x='value',
data=manufactors,
kind='count',
)
plt.xlabel('Manufacturer')
plt.ylabel('Counts')
plt.title('Cereal Totals by Manufacturer')
plt.savefig('manufactors.png')
plt.show()
#plot the distribution of calories per serving and mark the mean
cps = df.loc[:,'calories']
cps = cps.reset_index().melt(id_vars='index')
#plot with the distribution line
sns.displot(
df['calories'],
kde=True,
bins=10
)
plt.axvline(df['calories'].mean(), color='red', linestyle='--')  # mark the mean calories per serving
plt.xlabel('calories')
plt.ylabel('Counts')
plt.title('Calorie Distribution')
plt.savefig('calories.png')
plt.show()
#boxplot calories per manufactor
cb =
|
pd.DataFrame(df.loc[:,['calories','name','mfr']])
|
pandas.DataFrame
|
from sklearn.cluster import MeanShift, estimate_bandwidth
import pandas as pd
import glob
from pathlib import Path
from spatiotemporal.util import sampling
def load_data_nrel(path, resampling=None):
## some resampling options: 'H' - hourly, '15min' - 15 minutes, 'M' - monthly
## more options at:
## http://benalexkeen.com/resampling-time-series-data-with-pandas/
allFiles = glob.iglob(path + "/**/*.txt", recursive=True)
frame = pd.DataFrame()
list_ = []
for file_ in allFiles:
#print("Reading: ",file_)
df = pd.read_csv(file_,index_col="datetime",parse_dates=['datetime'], header=0, sep=",")
# note: `frame` is rebuilt from the collected files by pd.concat below,
# so no per-file column bookkeeping is needed here
list_.append(df)
frame = pd.concat(list_)
if resampling is not None:
frame = frame.resample(resampling).mean()
frame = frame.fillna(method='ffill')
frame.columns = ['DHHL_3', 'DHHL_4', 'DHHL_5', 'DHHL_10', 'DHHL_11', 'DHHL_9', 'DHHL_2', 'DHHL_1', 'DHHL_1_Tilt',
'AP_6', 'AP_6_Tilt', 'AP_1', 'AP_3', 'AP_5', 'AP_4', 'AP_7', 'DHHL_6', 'DHHL_7', 'DHHL_8']
return frame
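def _load_data_nrel_demo(path="data/nrel_oahu"):
    # Hedged usage sketch for the loader above (the path is a placeholder):
    # hourly resampling averages each sensor column within the hour, and the
    # forward fill above plugs gaps left by periods with no samples.
    hourly = load_data_nrel(path, resampling='H')
    quarter_hourly = load_data_nrel(path, resampling='15min')
    return hourly, quarter_hourly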
def create_spatio_temporal_data_oahu(oahu_df):
lat = [21.31236,21.31303,21.31357,21.31183,21.31042,21.31268,21.31451,21.31533,21.30812,21.31276,21.31281,21.30983,21.31141,21.31478,21.31179,21.31418,21.31034]
lon = [-158.08463,-158.08505,-158.08424,-158.08554,-158.0853,-158.08688,-158.08534,-158.087,-158.07935,-158.08389,-158.08163,-158.08249,-158.07947,-158.07785,-158.08678,-158.08685,-158.08675]
additional_info = pd.DataFrame({'station': oahu_df.columns, 'latitude': lat, 'longitude': lon })
ll = []
for ind, row in oahu_df.iterrows():
for col in oahu_df.columns:
lat = additional_info[(additional_info.station == col)].latitude.values[0]
lon = additional_info[(additional_info.station == col)].longitude.values[0]
irradiance = row[col]
ll.append([lat, lon, irradiance])
return pd.DataFrame(columns=['latitude','longitude','irradiance'], data=ll)
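# The nested loops above scale poorly for long series; a hedged sketch of the
# same wide-to-long reshape using melt plus a merge on the station metadata
# (assumes the same additional_info frame built inside the function; note the
# resulting row order differs from the loop version):
def _to_long_format(oahu_df, additional_info):
    long_df = (
        oahu_df.reset_index(drop=True)
               .melt(var_name='station', value_name='irradiance')
               .merge(additional_info, on='station', how='left')
    )
    return long_df[['latitude', 'longitude', 'irradiance']]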
def load_oahu_dataset(start_date = "2010-04-01", end_date = "2011-10-31"):
"""
Dataset used in
"Impact of network layout and time resolution on spatio-temporal solar forecasting" - <NAME>, <NAME>. - Solar Energy 2018
:param start_date: time series start date, formatted as yyyy-mm-dd
:param end_date: time series end date, formatted as yyyy-mm-dd
:return: dataset as a dataframe
"""
# read raw dataset
df =
|
pd.read_csv('https://query.data.world/s/76ohtd4zd6a6fhiwwe742y23fiplgk')
|
pandas.read_csv
|
import unittest
import qteasy as qt
import pandas as pd
from pandas import Timestamp
import numpy as np
import math
from numpy import int64
import itertools
import datetime
from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list
from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day
from qteasy.utilfuncs import next_market_trade_day, unify, mask_to_signal, list_or_slice, labels_to_dict
from qteasy.utilfuncs import weekday_name, prev_market_trade_day, is_number_like, list_truncate, input_to_list
from qteasy.space import Space, Axis, space_around_centre, ResultPool
from qteasy.core import apply_loop
from qteasy.built_in import SelectingFinanceIndicator, TimingDMA, TimingMACD, TimingCDL, TimingTRIX
from qteasy.tsfuncs import income, indicators, name_change, get_bar
from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index
from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite
from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily
from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic, stock_company
from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv
from qteasy.evaluate import eval_info_ratio, eval_max_drawdown, eval_sharp
from qteasy.evaluate import eval_volatility
from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point
from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr
from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc
from qteasy.tafuncs import macdfix, mfi, minus_di, minus_dm, mom, plus_di, plus_dm
from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi
from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange
from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod
from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows
from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside
from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby
from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu
from qteasy.tafuncs import cdlconcealbabyswall, cdlcounterattack, cdldarkcloudcover
from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing
from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite
from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami
from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod
from qteasy.tafuncs import cdlhomingpigeon, cdlidentical3crows, cdlinneck
from qteasy.tafuncs import cdlinvertedhammer, cdlkicking, cdlkickingbylength
from qteasy.tafuncs import cdlladderbottom, cdllongleggeddoji, cdllongline, cdlmarubozu
from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar
from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlrisefall3methods
from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop
from qteasy.tafuncs import cdlstalledpattern, cdlsticksandwich, cdltakuri, cdltasukigap
from qteasy.tafuncs import cdlthrusting, cdltristar, cdlunique3river, cdlupsidegap2crows
from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle
from qteasy.tafuncs import linearreg_intercept, linearreg_slope, stddev, tsf, var, acos
from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh
from qteasy.tafuncs import sqrt, tan, tanh, add, div, max, maxindex, min, minindex, minmax
from qteasy.tafuncs import minmaxindex, mult, sub, sum
from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data
from qteasy.history import stack_dataframes, dataframe_to_hp, HistoryPanel
from qteasy.database import DataSource
from qteasy.strategy import Strategy, SimpleTiming, RollingTiming, SimpleSelecting, FactoralSelecting
from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs
from qteasy.blender import _exp_to_token, blender_parser, signal_blend
class TestCost(unittest.TestCase):
def setUp(self):
self.amounts = np.array([10000., 20000., 10000.])
self.op = np.array([0., 1., -0.33333333])
self.amounts_to_sell = np.array([0., 0., -3333.3333])
self.cash_to_spend = np.array([0., 20000., 0.])
self.prices = np.array([10., 20., 10.])
self.r = qt.Cost(0.0)
def test_rate_creation(self):
"""测试对象生成"""
print('testing rates objects\n')
self.assertIsInstance(self.r, qt.Cost, 'Type should be Rate')
self.assertEqual(self.r.buy_fix, 0)
self.assertEqual(self.r.sell_fix, 0)
def test_rate_operations(self):
"""测试交易费率对象"""
self.assertEqual(self.r['buy_fix'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['sell_fix'], 0.0, 'Item got is wrong')
self.assertEqual(self.r['buy_rate'], 0.003, 'Item got is incorrect')
self.assertEqual(self.r['sell_rate'], 0.001, 'Item got is incorrect')
self.assertEqual(self.r['buy_min'], 5., 'Item got is incorrect')
self.assertEqual(self.r['sell_min'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['slipage'], 0.0, 'Item got is incorrect')
self.assertEqual(np.allclose(self.r.calculate(self.amounts),
[0.003, 0.003, 0.003]),
True,
'fee calculation wrong')
def test_rate_fee(self):
"""测试买卖交易费率"""
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 0.
self.r.sell_min = 0.
self.r.slipage = 0.
print('\nSell result with fixed rate = 0.001 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1.))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 1:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 1))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect')
def test_min_fee(self):
"""测试最低交易费用"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 300
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33033.333)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33030)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 32700)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
def test_rate_with_min(self):
"""测试最低交易费用对其他交易费率参数的影响"""
self.r.buy_rate = 0.0153
self.r.sell_rate = 0.01
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 333
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 301.3887520929774, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32999.99967)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.33333)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32996.7)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.3)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32667.0)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.0)
def test_fixed_fee(self):
"""测试固定交易费用"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 200
self.r.sell_fix = 150
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nselling result of fixed cost with fixed fee = 150 and moq=0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 0))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], 33183.333, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150.0, msg='result incorrect')
print('\nselling result of fixed cost with fixed fee = 150 and moq=100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3300.]), True,
f'result incorrect, {test_fixed_fee_result[0]} does not equal [0, 0, -3300]')
self.assertAlmostEqual(test_fixed_fee_result[1], 32850., msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150., msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 990., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18200.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
def test_slipage(self):
"""测试交易滑点"""
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.slipage = 1E-9
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
print('\nselling result with fixed rate = 0.001 and slipage = 1E-10:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True,
f'{test_fixed_fee_result[0]} does not equal to [0, 0, -10000]')
self.assertAlmostEqual(test_fixed_fee_result[1], 33298.88855591,
msg=f'{test_fixed_fee_result[1]} does not equal to 99890.')
self.assertAlmostEqual(test_fixed_fee_result[2], 34.44444409,
msg=f'{test_fixed_fee_result[2]} does not equal to -36.666663.')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 996.98909294, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 60.21814121353513, msg='result incorrect')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18054.36, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 54.36, msg='result incorrect')
class TestSpace(unittest.TestCase):
def test_creation(self):
"""
test if creation of space object is fine
"""
# first group of inputs, output Space with two discr axis from [0,10]
print('testing space objects\n')
# pars_list = [[(0, 10), (0, 10)],
# [[0, 10], [0, 10]]]
#
# types_list = ['discr',
# ['discr', 'discr']]
#
# input_pars = itertools.product(pars_list, types_list)
# for p in input_pars:
# # print(p)
# s = qt.Space(*p)
# b = s.boes
# t = s.types
# # print(s, t)
# self.assertIsInstance(s, qt.Space)
# self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
# self.assertEqual(t, ['discr', 'discr'], 'types incorrect')
#
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = ['foo, bar',
['foo', 'bar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['enum', 'enum'], 'types incorrect')
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = [['discr', 'foobar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['discr', 'enum'], 'types incorrect')
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types='conti, enum')
self.assertEqual(s.types, ['conti', 'enum'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 2))
self.assertEqual(s.shape, (np.inf, 2))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(1, 2), (2, 3), (3, 4)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['discr', 'discr', 'discr'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (2, 2, 2))
self.assertEqual(s.shape, (2, 2, 2))
self.assertEqual(s.count, 8)
self.assertEqual(s.boes, [(1, 2), (2, 3), (3, 4)])
pars_list = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
pars_list = [((1, 2, 3), (2, 3, 4), (3, 4, 5))]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum'])
self.assertEqual(s.dim, 1)
self.assertEqual(s.size, (3,))
self.assertEqual(s.shape, (3,))
self.assertEqual(s.count, 3)
pars_list = ((1, 2, 3), (2, 3, 4), (3, 4, 5))
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
def test_extract(self):
"""
:return:
"""
pars_list = [(0, 10), (0, 10)]
types_list = ['discr', 'discr']
s = Space(pars=pars_list, par_types=types_list)
extracted_int, count = s.extract(3, 'interval')
extracted_int_list = list(extracted_int)
print('extracted int\n', extracted_int_list)
self.assertEqual(count, 16, 'extraction count wrong!')
self.assertEqual(extracted_int_list, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
extracted_rand, count = s.extract(10, 'rand')
extracted_rand_list = list(extracted_rand)
self.assertEqual(count, 10, 'extraction count wrong!')
print('extracted rand\n', extracted_rand_list)
for point in list(extracted_rand_list):
self.assertEqual(len(point), 2)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
extracted_int2, count = s.extract(3, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list2 = list(extracted_int2)
self.assertEqual(extracted_int_list2, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
print('extracted int list 2\n', extracted_int_list2)
self.assertIsInstance(extracted_int_list2[0][0], float)
self.assertIsInstance(extracted_int_list2[0][1], (int, int64))
extracted_rand2, count = s.extract(10, 'rand')
self.assertEqual(count, 10, 'extraction count wrong!')
extracted_rand_list2 = list(extracted_rand2)
print('extracted rand list 2:\n', extracted_rand_list2)
for point in extracted_rand_list2:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], float)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], (int, int64))
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), ('a', 'b')]
s = Space(pars=pars_list, par_types='enum, enum')
extracted_int3, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list3 = list(extracted_int3)
self.assertEqual(extracted_int_list3, [(0., 'a'), (0., 'b'), (10, 'a'), (10, 'b')],
'space extraction wrong!')
print('extracted int list 3\n', extracted_int_list3)
self.assertIsInstance(extracted_int_list3[0][0], float)
self.assertIsInstance(extracted_int_list3[0][1], str)
extracted_rand3, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list3 = list(extracted_rand3)
print('extracted rand list 3:\n', extracted_rand_list3)
for point in extracted_rand_list3:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (float, int))
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], str)
self.assertIn(point[1], ['a', 'b'])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14))]
s = Space(pars=pars_list, par_types='enum')
extracted_int4, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list4 = list(extracted_int4)
it = list(zip(extracted_int_list4, [(0, 10), (1, 'c'), ('a', 'b'), (1, 14)]))  # materialise so the assertion below can re-iterate it
for item, item2 in it:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 4\n', extracted_int_list4)
self.assertIsInstance(extracted_int_list4[0], tuple)
extracted_rand4, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list4 = list(extracted_rand4)
print('extracted rand list 4:\n', extracted_rand_list4)
for point in extracted_rand_list4:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (int, str))
self.assertIn(point[0], [0, 1, 'a'])
self.assertIsInstance(point[1], (int, str))
self.assertIn(point[1], [10, 14, 'b', 'c'])
self.assertIn(point, [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14)), (1, 4)]
s = Space(pars=pars_list, par_types='enum, discr')
extracted_int5, count = s.extract(1, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list5 = list(extracted_int5)
for item, item2 in extracted_int_list5:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 5\n', extracted_int_list5)
self.assertIsInstance(extracted_int_list5[0], tuple)
extracted_rand5, count = s.extract(5, 'rand')
self.assertEqual(count, 5, 'extraction count wrong!')
extracted_rand_list5 = list(extracted_rand5)
print('extracted rand list 5:\n', extracted_rand_list5)
for point in extracted_rand_list5:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], tuple)
print(f'type of point[1] is {type(point[1])}')
self.assertIsInstance(point[1], (int, np.int64))
self.assertIn(point[0], [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
print(f'test incremental extraction')
pars_list = [(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]
s = Space(pars_list)
ext, count = s.extract(64, 'interval')
self.assertEqual(count, 4096)
points = list(ext)
# all the points have been extracted; build ten subspaces around ten of them
# check each subspace is a Space contained in s, extract a point set with interval 32, and verify the counts
for point in points[1000:1010]:
subspace = s.from_point(point, 64)
self.assertIsInstance(subspace, Space)
self.assertTrue(subspace in s)
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
ext, count = subspace.extract(32)
points = list(ext)
self.assertGreaterEqual(count, 512)
self.assertLessEqual(count, 4096)
print(f'\n---------------------------------'
f'\nthe space created around point <{point}> is'
f'\n{subspace.boes}'
f'\nand extracted {count} points, the first 5 are:'
f'\n{points[:5]}')
def test_axis_extract(self):
# test axis object with conti type
axis = Axis((0., 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'conti')
self.assertEqual(axis.axis_boe, (0., 5.))
self.assertEqual(axis.count, np.inf)
self.assertEqual(axis.size, 5.0)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [0., 1., 2., 3., 4.]))
self.assertTrue(np.allclose(axis.extract(0.5, 'int'), [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]))
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(0 <= item <= 5) for item in extracted]))
# test axis object with discrete type
axis = Axis((1, 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'discr')
self.assertEqual(axis.axis_boe, (1, 5))
self.assertEqual(axis.count, 5)
self.assertEqual(axis.size, 5)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [1, 2, 3, 4, 5]))
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 2, 3, 4, 5]) for item in extracted]))
# test axis object with enumerate type
axis = Axis((1, 5, 7, 10, 'A', 'F'))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'enum')
self.assertEqual(axis.axis_boe, (1, 5, 7, 10, 'A', 'F'))
self.assertEqual(axis.count, 6)
self.assertEqual(axis.size, 6)
self.assertEqual(axis.extract(1, 'int'), [1, 5, 7, 10, 'A', 'F'])
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 5, 7, 10, 'A', 'F']) for item in extracted]))
def test_from_point(self):
"""测试从一个点生成一个space"""
# 生成一个space,指定space中的一个点以及distance,生成一个sub-space
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10., 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
print('create subspace from a point in space')
p = (3, 3)
distance = 2
subspace = s.from_point(p, distance)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'discr'])
self.assertEqual(subspace.dim, 2)
self.assertEqual(subspace.size, (4.0, 5))
self.assertEqual(subspace.shape, (np.inf, 5))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(1, 5), (1, 5)])
print('create subspace from a 6 dimensional discrete space')
s = Space(pars=[(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 65345616)
self.assertEqual(subspace.size, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.shape, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.count, 65345616)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace from a 6 dimensional continuous space')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 48000000)
self.assertEqual(subspace.size, (15.0, 20.0, 20.0, 20.0, 20.0, 20.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace with different distances on each dimension')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = [10, 5, 5, 10, 10, 5]
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 6000000)
self.assertEqual(subspace.size, (15.0, 10.0, 10.0, 20.0, 20.0, 10.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (195, 205), (145, 155), (140, 160), (140, 160), (145, 155)])
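# Illustrative sketch (not part of the original suite): the Space behaviour
# exercised in TestSpace.test_from_point, written as plain API usage.
def _space_from_point_demo():
    demo_space = Space(pars=[(0., 10), (0, 10)])   # axes inferred as ['conti', 'discr']
    demo_sub = demo_space.from_point((3, 3), 2)    # sub-space of radius 2 around (3, 3)
    return demo_sub.boes                           # [(1, 5), (1, 5)] per the test above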
class TestCashPlan(unittest.TestCase):
def setUp(self):
self.cp1 = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1)
self.cp1.info()
self.cp2 = qt.CashPlan(['20100501'], 10000)
self.cp2.info()
self.cp3 = qt.CashPlan(pd.date_range(start='2019-01-01',
freq='Y',
periods=12),
[i * 1000 + 10000 for i in range(12)],
0.035)
self.cp3.info()
def test_creation(self):
self.assertIsInstance(self.cp1, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp2, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp3, qt.CashPlan, 'CashPlan object creation wrong')
# test __repr__()
print(self.cp1)
print(self.cp2)
print(self.cp3)
# test __str__()
self.cp1.info()
self.cp2.info()
self.cp3.info()
# test assertion errors
self.assertRaises(AssertionError, qt.CashPlan, '2016-01-01', [10000, 10000])
self.assertRaises(KeyError, qt.CashPlan, '2020-20-20', 10000)
def test_properties(self):
self.assertEqual(self.cp1.amounts, [20000, 10000], 'property wrong')
self.assertEqual(self.cp1.first_day, Timestamp('2010-01-01'))
self.assertEqual(self.cp1.last_day, Timestamp('2012-01-01'))
self.assertEqual(self.cp1.investment_count, 2)
self.assertEqual(self.cp1.period, 730)
self.assertEqual(self.cp1.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01')])
self.assertEqual(self.cp1.ir, 0.1)
self.assertAlmostEqual(self.cp1.closing_value, 34200)
self.assertAlmostEqual(self.cp2.closing_value, 10000)
self.assertAlmostEqual(self.cp3.closing_value, 220385.3483685)
self.assertIsInstance(self.cp1.plan, pd.DataFrame)
self.assertIsInstance(self.cp2.plan, pd.DataFrame)
self.assertIsInstance(self.cp3.plan, pd.DataFrame)
def test_operation(self):
cp_self_add = self.cp1 + self.cp1
cp_add = self.cp1 + self.cp2
cp_add_int = self.cp1 + 10000
cp_mul_int = self.cp1 * 2
cp_mul_float = self.cp2 * 1.5
cp_mul_time = 3 * self.cp2
cp_mul_time2 = 2 * self.cp1
cp_mul_time3 = 2 * self.cp3
cp_mul_float2 = 2. * self.cp3
self.assertIsInstance(cp_self_add, qt.CashPlan)
self.assertEqual(cp_self_add.amounts, [40000, 20000])
self.assertEqual(cp_add.amounts, [20000, 10000, 10000])
self.assertEqual(cp_add_int.amounts, [30000, 20000])
self.assertEqual(cp_mul_int.amounts, [40000, 20000])
self.assertEqual(cp_mul_float.amounts, [15000])
self.assertEqual(cp_mul_float.dates, [Timestamp('2010-05-01')])
self.assertEqual(cp_mul_time.amounts, [10000, 10000, 10000])
self.assertEqual(cp_mul_time.dates, [Timestamp('2010-05-01'),
Timestamp('2011-05-01'),
Timestamp('2012-04-30')])
self.assertEqual(cp_mul_time2.amounts, [20000, 10000, 20000, 10000])
self.assertEqual(cp_mul_time2.dates, [Timestamp('2010-01-01'),
Timestamp('2012-01-01'),
Timestamp('2014-01-01'),
Timestamp('2016-01-01')])
self.assertEqual(cp_mul_time3.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31'),
Timestamp('2031-12-29'),
Timestamp('2032-12-29'),
Timestamp('2033-12-29'),
Timestamp('2034-12-29'),
Timestamp('2035-12-29'),
Timestamp('2036-12-29'),
Timestamp('2037-12-29'),
Timestamp('2038-12-29'),
Timestamp('2039-12-29'),
Timestamp('2040-12-29'),
Timestamp('2041-12-29'),
Timestamp('2042-12-29')])
self.assertEqual(cp_mul_float2.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31')])
self.assertEqual(cp_mul_float2.amounts, [20000.0,
22000.0,
24000.0,
26000.0,
28000.0,
30000.0,
32000.0,
34000.0,
36000.0,
38000.0,
40000.0,
42000.0])
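# Illustrative sketch (not part of the original suite): the CashPlan algebra
# exercised in test_operation above, written as plain API usage.
def _cashplan_demo():
    base = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1)
    doubled = base * 2          # doubles every investment amount
    repeated = 2 * base         # repeats the whole plan forward in time
    return doubled.amounts, repeated.dates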
class TestPool(unittest.TestCase):
def setUp(self):
self.p = ResultPool(5)
self.items = ['first', 'second', (1, 2, 3), 'this', 24]
self.perfs = [1, 2, 3, 4, 5]
self.additional_result1 = ('abc', 12)
self.additional_result2 = ([1, 2], -1)
self.additional_result3 = (12, 5)
def test_create(self):
self.assertIsInstance(self.p, ResultPool)
def test_operation(self):
self.p.in_pool(self.additional_result1[0], self.additional_result1[1])
self.p.cut()
self.assertEqual(self.p.item_count, 1)
self.assertEqual(self.p.items, ['abc'])
for item, perf in zip(self.items, self.perfs):
self.p.in_pool(item, perf)
self.assertEqual(self.p.item_count, 6)
self.assertEqual(self.p.items, ['abc', 'first', 'second', (1, 2, 3), 'this', 24])
self.p.cut()
self.assertEqual(self.p.items, ['second', (1, 2, 3), 'this', 24, 'abc'])
self.assertEqual(self.p.perfs, [2, 3, 4, 5, 12])
self.p.in_pool(self.additional_result2[0], self.additional_result2[1])
self.p.in_pool(self.additional_result3[0], self.additional_result3[1])
self.assertEqual(self.p.item_count, 7)
self.p.cut(keep_largest=False)
self.assertEqual(self.p.items, [[1, 2], 'second', (1, 2, 3), 'this', 24])
self.assertEqual(self.p.perfs, [-1, 2, 3, 4, 5])
class TestCoreSubFuncs(unittest.TestCase):
"""Test all functions in core.py"""
def setUp(self):
pass
def test_input_to_list(self):
print('Testing input_to_list() function')
input_str = 'first'
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 3), ['first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 4), ['first', 'first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 2, None), ['first', 'first'])
input_list = ['first', 'second']
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 3), ['first', 'second', None])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 4, 'padder'), ['first', 'second', 'padder', 'padder'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 1), ['first', 'second'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, -5), ['first', 'second'])
def test_point_in_space(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
p2 = (-1, 3, 10)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
self.assertFalse(p2 in sp)
print(f'point {p2} is not in space {sp}')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)], 'conti, conti, enum')
p1 = (5.5, 3.2, 8)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
def test_space_in_space(self):
print('test if a space is in another space')
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
sp2 = Space([(0., 10.), (0., 10.), (0., 10.)])
self.assertTrue(sp2 in sp)
self.assertTrue(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is in space {sp2}\n'
f'they are equal to each other\n')
sp2 = Space([(0, 5.), (2, 7.), (3., 9.)])
self.assertTrue(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'{sp2} is a sub space of {sp}\n')
sp2 = Space([(0, 5), (2, 7), (3., 9)])
self.assertFalse(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)])
self.assertFalse(sp in sp2)
self.assertFalse(sp2 in sp)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
def test_space_around_centre(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
ssp = space_around_centre(space=sp, centre=p1, radius=1.2)
print(ssp.boes)
print('\ntest multiple radii:')
self.assertEqual(ssp.boes, [(4.3, 6.7), (2.0, 4.4), (5.8, 8.2)])
ssp = space_around_centre(space=sp, centre=p1, radius=[1, 2, 1])
print(ssp.boes)
self.assertEqual(ssp.boes, [(4.5, 6.5), (1.2000000000000002, 5.2), (6.0, 8.0)])
print('\ntest points on edge:')
p2 = (5.5, 3.2, 10)
ssp = space_around_centre(space=sp, centre=p1, radius=3.9)
print(ssp.boes)
self.assertEqual(ssp.boes, [(1.6, 9.4), (0.0, 7.1), (3.1, 10.0)])
print('\ntest enum spaces')
sp = Space([(0, 100), range(40, 3, -2)], 'discr, enum')
p1 = [34, 12]
ssp = space_around_centre(space=sp, centre=p1, radius=5, ignore_enums=False)
self.assertEqual(ssp.boes, [(29, 39), (22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(ssp.boes)
print('\ntest enum space and ignore enum axis')
ssp = space_around_centre(space=sp, centre=p1, radius=5)
self.assertEqual(ssp.boes, [(29, 39),
(40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(sp.boes)
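# The expected .boes above indicate that space_around_centre() builds the
# sub-space by clipping [centre - radius, centre + radius] to the parent bounds
# on each continuous or discrete axis (radius may be a scalar or one value per
# axis), while enum axes are returned whole by default (ignore_enums=True) or,
# with ignore_enums=False, narrowed to the entries within `radius` positions of
# the centre value.  A sketch of the per-axis clipping under those assumptions:
def _clip_axis(bounds, centre, radius):
    lower, upper = bounds
    # shrink the axis to centre +/- radius without exceeding the parent bounds
    return (max(lower, centre - radius), min(upper, centre + radius))
# e.g. _clip_axis((0., 10.), 7, 3.9) -> (3.1, 10.0), matching the edge case above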
def test_get_stock_pool(self):
print(f'start testing the stock pool building function\n')
share_basics = stock_basic(fields='ts_code,symbol,name,area,industry,market,list_date,exchange')
print(f'\nselect all stocks by area')
stock_pool = qt.get_stock_pool(area='上海')
print(f'{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "上海"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].eq('上海').all())
print(f'\nselect all stocks by multiple areas')
stock_pool = qt.get_stock_pool(area='贵州,北京,天津')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are in the list ["贵州", "北京", "天津"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['贵州',
'北京',
'天津']).all())
print(f'\nselect all stocks by area and industry')
stock_pool = qt.get_stock_pool(area='四川', industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "四川" and industries are in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['四川']).all())
print(f'\nselect all stocks by industry')
stock_pool = qt.get_stock_pool(industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock industries are in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
print(f'\nselect all stocks by market')
stock_pool = qt.get_stock_pool(market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock markets are "主板"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
print(f'\nselect all stocks by market and list date')
stock_pool = qt.get_stock_pool(date='2000-01-01', market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock markets are "主板" and list dates are on or before "2000-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('2000-01-01').all())
print(f'\nselect all stocks by list date')
stock_pool = qt.get_stock_pool(date='1997-01-01')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all list dates are on or before "1997-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1997-01-01').all())
print(f'\nselect all stocks by exchange')
stock_pool = qt.get_stock_pool(exchange='SSE')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['exchange'].eq('SSE').all())
print(f'\nselect all stocks by industry, area and list date')
industry_list = ['银行', '全国地产', '互联网', '环境保护', '区域地产',
'酒店餐饮', '运输设备', '综合类', '建筑工程', '玻璃',
'家用电器', '文教休闲', '其他商业', '元器件', 'IT设备',
'其他建材', '汽车服务', '火力发电', '医药商业', '汽车配件',
'广告包装', '轻工机械', '新型电力', '多元金融', '饲料']
area_list = ['深圳', '北京', '吉林', '江苏', '辽宁', '广东',
'安徽', '四川', '浙江', '湖南', '河北', '新疆',
'山东', '河南', '山西', '江西', '青海', '湖北',
'内蒙', '海南', '重庆', '陕西', '福建', '广西',
'上海']
stock_pool = qt.get_stock_pool(date='19980101',
industry=industry_list,
area=area_list)
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all list dates are on or before "1998-01-01" and industries and areas are within the given lists\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1998-01-01').all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(industry_list).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(area_list).all())
self.assertRaises(KeyError, qt.get_stock_pool, industry=25)
self.assertRaises(KeyError, qt.get_stock_pool, share_name='000300.SH')
self.assertRaises(KeyError, qt.get_stock_pool, markets='SSE')
class TestEvaluations(unittest.TestCase):
"""Test all evaluation functions in core.py"""
# The manually calculated reference results below were worked out in an Excel file
def setUp(self):
"""用np.random生成测试用数据,使用cumsum()模拟股票走势"""
self.test_data1 = pd.DataFrame([5.34892759, 5.65768696, 5.79227076, 5.56266871, 5.88189632,
6.24795001, 5.92755558, 6.38748165, 6.31331899, 5.86001665,
5.61048472, 5.30696736, 5.40406792, 5.03180571, 5.37886353,
5.78608307, 6.26540339, 6.59348026, 6.90943801, 6.70911677,
6.33015954, 6.06697417, 5.9752499, 6.45786408, 6.95273763,
6.7691991, 6.70355481, 6.28048969, 6.61344541, 6.24620003,
6.47409983, 6.4522311, 6.8773094, 6.99727832, 6.59262674,
6.59014938, 6.63758237, 6.38331869, 6.09902105, 6.35390109,
6.51993567, 6.87244592, 6.83963485, 7.08797815, 6.88003144,
6.83657323, 6.97819483, 7.01600276, 7.12554256, 7.58941523,
7.61014457, 7.21224091, 7.48174399, 7.66490854, 7.51371968,
7.11586198, 6.97147399, 6.67453301, 6.2042138, 6.33967015,
6.22187938, 5.98426993, 6.37096079, 6.55897161, 6.26422645,
6.69363762, 7.12668015, 6.83232926, 7.30524081, 7.4262041,
7.54031383, 7.17545919, 7.20659257, 7.44886016, 7.37094393,
6.88011022, 7.08142491, 6.74992833, 6.5967097, 6.21336693,
6.35565105, 6.82347596, 6.44773408, 6.84538053, 6.47966466,
6.09699528, 5.63927014, 6.01081024, 6.20585303, 6.60528206,
7.01594726, 7.03684251, 6.76574977, 7.08740846, 6.65336462,
7.07126686, 6.80058956, 6.79241977, 6.47843472, 6.39245474],
columns=['value'])
self.test_data2 = pd.DataFrame([5.09276527, 4.83828592, 4.6000911, 4.63170487, 4.63566451,
4.50546921, 4.96390044, 4.64557907, 4.25787855, 3.76585551,
3.38826334, 3.76243422, 4.06365426, 3.87084726, 3.91400935,
4.13438822, 4.27064542, 4.56776104, 5.03800296, 5.31070529,
5.39902276, 5.21186286, 5.05683114, 4.68842046, 5.11895168,
5.27151571, 5.72294993, 6.09961056, 6.26569635, 6.48806151,
6.16058885, 6.2582459, 6.38934791, 6.57831057, 6.19508831,
5.70155153, 5.20435735, 5.36538825, 5.40450056, 5.2227697,
5.37828693, 5.53058991, 6.02996797, 5.76802181, 5.66166713,
6.07988994, 5.61794367, 5.63218151, 6.10728013, 6.0324168,
6.27164431, 6.27551239, 6.52329665, 7.00470007, 7.34163113,
7.33699083, 7.67661334, 8.09395749, 7.68086668, 7.58341161,
7.46219819, 7.58671899, 7.19348298, 7.40088323, 7.47562005,
7.93342043, 8.2286081, 8.3521632, 8.43590025, 8.34977395,
8.57563095, 8.81586328, 9.08738649, 9.01542031, 8.8653815,
9.21763111, 9.04233017, 8.59533999, 8.47590075, 8.70857222,
8.78890756, 8.92697606, 9.35743773, 9.68280866, 10.15622021,
10.55908549, 10.6337894, 10.55197128, 10.65435176, 10.54611045,
10.19432562, 10.48320884, 10.36176768, 10.03186854, 10.23656092,
10.0062843, 10.13669686, 10.30758958, 9.87904176, 10.05126375],
columns=['value'])
self.test_data3 = pd.DataFrame([5.02851874, 5.20700348, 5.02410709, 5.49836387, 5.06834371,
5.10956737, 5.15314979, 5.02256472, 5.09746382, 5.23909247,
4.93410336, 4.96316186, 5.40026682, 5.7353255, 5.53438319,
5.79092139, 5.67528173, 5.89840855, 5.75379463, 6.10855386,
5.77322365, 5.84538021, 5.6103973, 5.7518655, 5.49729695,
5.13610628, 5.30524121, 5.68093462, 5.73251319, 6.04420783,
6.26929843, 6.59610234, 6.09872345, 6.25475121, 6.72927396,
6.91395783, 7.00693283, 7.36217783, 7.71516676, 7.67580263,
7.62477511, 7.73600568, 7.53457914, 7.46170277, 7.83658014,
8.11481319, 8.03705544, 7.64948845, 7.52043731, 7.67247943,
7.46511982, 7.43541798, 7.58856517, 7.9392717, 8.25406287,
7.77031632, 8.03223447, 7.86799055, 7.57630999, 7.33230519,
7.22378732, 6.85972264, 7.17548456, 7.5387846, 7.2392632,
6.8455644, 6.59557185, 6.6496796, 6.73685623, 7.18598015,
7.13619128, 6.88060157, 7.1399681, 7.30308077, 6.94942434,
7.0247815, 7.37567798, 7.50080197, 7.59719284, 7.14520561,
7.29913484, 7.79551341, 8.15497781, 8.40456095, 8.86516528,
8.53042688, 8.94268762, 8.52048006, 8.80036284, 8.91602364,
9.19953385, 8.70828953, 8.24613093, 8.18770453, 7.79548389,
7.68627967, 7.23205036, 6.98302636, 7.06515819, 6.95068113],
columns=['value'])
self.test_data4 = pd.DataFrame([4.97926539, 5.44016005, 5.45122915, 5.74485615, 5.45600553,
5.44858945, 5.2435413, 5.47315161, 5.58464303, 5.36179749,
5.38236326, 5.29614981, 5.76523508, 5.75102892, 6.15316618,
6.03852528, 6.01442228, 5.70510182, 5.22748133, 5.46762379,
5.78926267, 5.8221362, 5.61236849, 5.30615725, 5.24200611,
5.41042642, 5.59940342, 5.28306781, 4.99451932, 5.08799266,
5.38865647, 5.58229139, 5.33492845, 5.48206276, 5.09721379,
5.39190493, 5.29965087, 5.0374415, 5.50798022, 5.43107577,
5.22759507, 4.991809, 5.43153084, 5.39966868, 5.59916352,
5.66412137, 6.00611838, 5.63564902, 5.66723484, 5.29863863,
4.91115153, 5.3749929, 5.75082334, 6.08308148, 6.58091182,
6.77848803, 7.19588758, 7.64862286, 7.99818347, 7.91824794,
8.30341071, 8.45984973, 7.98700002, 8.18924931, 8.60755649,
8.66233396, 8.91018407, 9.0782739, 9.33515448, 8.95870245,
8.98426422, 8.50340317, 8.64916085, 8.93592407, 8.63145745,
8.65322862, 8.39543204, 8.37969997, 8.23394504, 8.04062872,
7.91259763, 7.57252171, 7.72670114, 7.74486117, 8.06908188,
7.99166889, 7.92155906, 8.39956136, 8.80181323, 8.47464091,
8.06557064, 7.87145573, 8.0237959, 8.39481998, 8.68525692,
8.81185461, 8.98632237, 9.0989835, 8.89787405, 8.86508591],
columns=['value'])
self.test_data5 = pd.DataFrame([4.50258923, 4.35142568, 4.07459514, 3.87791297, 3.73715985,
3.98455684, 4.07587908, 4.00042472, 4.28276612, 4.01362051,
4.13713565, 4.49312372, 4.48633159, 4.4641207, 4.13444605,
3.79107217, 4.22941629, 4.56548511, 4.92472163, 5.27723158,
5.67409193, 6.00176917, 5.88889928, 5.55256103, 5.39308314,
5.2610492, 5.30738908, 5.22222408, 4.90332238, 4.57499908,
4.96097146, 4.81531011, 4.39115442, 4.63200662, 5.04588813,
4.67866025, 5.01705123, 4.83562258, 4.60381702, 4.66187576,
4.41292828, 4.86604507, 4.42280124, 4.07517294, 4.16317319,
4.10316596, 4.42913598, 4.06609666, 3.96725913, 4.15965746,
4.12379564, 4.04054068, 3.84342851, 3.45902867, 3.17649855,
3.09773586, 3.5502119, 3.66396995, 3.66306483, 3.29131401,
2.79558533, 2.88319542, 3.03671098, 3.44645857, 3.88167161,
3.57961874, 3.60180276, 3.96702102, 4.05429995, 4.40056979,
4.05653231, 3.59600456, 3.60792477, 4.09989922, 3.73503663,
4.01892626, 3.94597242, 3.81466605, 3.71417992, 3.93767156,
4.42806557, 4.06988106, 4.03713636, 4.34408673, 4.79810156,
5.18115011, 4.89798406, 5.3960077, 5.72504875, 5.61894017,
5.1958197, 4.85275896, 5.17550207, 4.71548987, 4.62408567,
4.55488535, 4.36532649, 4.26031979, 4.25225607, 4.58627048],
columns=['value'])
self.test_data6 = pd.DataFrame([5.08639513, 5.05761083, 4.76160923, 4.62166504, 4.62923183,
4.25070173, 4.13447513, 3.90890013, 3.76687608, 3.43342482,
3.67648224, 3.6274775, 3.9385404, 4.39771627, 4.03199346,
3.93265288, 3.50059789, 3.3851961, 3.29743973, 3.2544872,
2.93692949, 2.70893003, 2.55461976, 2.20922332, 2.29054475,
2.2144714, 2.03726827, 2.39007617, 2.29866155, 2.40607111,
2.40440444, 2.79374649, 2.66541922, 2.27018079, 2.08505127,
2.55478864, 2.22415625, 2.58517923, 2.58802256, 2.94870959,
2.69301739, 2.19991535, 2.69473146, 2.64704637, 2.62753542,
2.14240825, 2.38565154, 1.94592117, 2.32243877, 2.69337246,
2.51283854, 2.62484451, 2.15559054, 2.35410875, 2.31219177,
1.96018265, 2.34711266, 2.58083322, 2.40290041, 2.20439791,
2.31472425, 2.16228248, 2.16439749, 2.20080737, 1.73293206,
1.9264407, 2.25089861, 2.69269101, 2.59296687, 2.1420998,
1.67819153, 1.98419023, 2.14479494, 1.89055376, 1.96720648,
1.9916694, 2.37227761, 2.14446036, 2.34573903, 1.86162546,
2.1410721, 2.39204939, 2.52529064, 2.47079939, 2.9299031,
3.09452923, 2.93276708, 3.21731309, 3.06248964, 2.90413406,
2.67844632, 2.45621213, 2.41463398, 2.7373913, 3.14917045,
3.4033949, 3.82283446, 4.02285451, 3.7619638, 4.10346795],
columns=['value'])
self.test_data7 = pd.DataFrame([4.75233583, 4.47668283, 4.55894263, 4.61765848, 4.622892,
4.58941116, 4.32535872, 3.88112797, 3.47237806, 3.50898953,
3.82530406, 3.6718017, 3.78918195, 4.1800752, 4.01818557,
4.40822582, 4.65474654, 4.89287256, 4.40879274, 4.65505126,
4.36876403, 4.58418934, 4.75687172, 4.3689799, 4.16126498,
4.0203982, 3.77148242, 3.38198096, 3.07261764, 2.9014741,
2.5049543, 2.756105, 2.28779058, 2.16986991, 1.8415962,
1.83319008, 2.20898291, 2.00128981, 1.75747025, 1.26676663,
1.40316876, 1.11126484, 1.60376367, 1.22523829, 1.58816681,
1.49705679, 1.80244138, 1.55128293, 1.35339409, 1.50985759,
1.0808451, 1.05892796, 1.43414812, 1.43039101, 1.73631655,
1.43940867, 1.82864425, 1.71088265, 2.12015154, 2.45417128,
2.84777618, 2.7925612, 2.90975121, 3.25920745, 3.13801182,
3.52733677, 3.65468491, 3.69395211, 3.49862035, 3.24786017,
3.64463138, 4.00331929, 3.62509565, 3.78013949, 3.4174012,
3.76312271, 3.62054004, 3.67206716, 3.60596058, 3.38636199,
3.42580676, 3.32921095, 3.02976759, 3.28258676, 3.45760838,
3.24917528, 2.94618304, 2.86980011, 2.63191259, 2.39566759,
2.53159917, 2.96273967, 3.25626185, 2.97425402, 3.16412191,
3.58280763, 3.23257727, 3.62353556, 3.12806399, 2.92532313],
columns=['value'])
# Build a test series of 500 data points to exercise the evaluation process when there are more than 250 observations
self.long_data = pd.DataFrame([9.879, 9.916, 10.109, 10.214, 10.361, 10.768, 10.594, 10.288,
10.082, 9.994, 10.125, 10.126, 10.384, 10.734, 10.4, 10.87,
11.338, 11.061, 11.415, 11.724, 12.077, 12.196, 12.064, 12.423,
12.19, 11.729, 11.677, 11.448, 11.485, 10.989, 11.242, 11.239,
11.113, 11.075, 11.471, 11.745, 11.754, 11.782, 12.079, 11.97,
12.178, 11.95, 12.438, 12.612, 12.804, 12.952, 12.612, 12.867,
12.832, 12.832, 13.015, 13.315, 13.249, 12.904, 12.776, 12.64,
12.543, 12.287, 12.225, 11.844, 11.985, 11.945, 11.542, 11.871,
12.245, 12.228, 12.362, 11.899, 11.962, 12.374, 12.816, 12.649,
12.252, 12.579, 12.3, 11.988, 12.177, 12.312, 12.744, 12.599,
12.524, 12.82, 12.67, 12.876, 12.986, 13.271, 13.606, 13.82,
14.161, 13.833, 13.831, 14.137, 13.705, 13.414, 13.037, 12.759,
12.642, 12.948, 13.297, 13.483, 13.836, 14.179, 13.709, 13.655,
13.198, 13.508, 13.953, 14.387, 14.043, 13.987, 13.561, 13.391,
12.923, 12.555, 12.503, 12.292, 11.877, 12.34, 12.141, 11.687,
11.992, 12.458, 12.131, 11.75, 11.739, 11.263, 11.762, 11.976,
11.578, 11.854, 12.136, 12.422, 12.311, 12.56, 12.879, 12.861,
12.973, 13.235, 13.53, 13.531, 13.137, 13.166, 13.31, 13.103,
13.007, 12.643, 12.69, 12.216, 12.385, 12.046, 12.321, 11.9,
11.772, 11.816, 11.871, 11.59, 11.518, 11.94, 11.803, 11.924,
12.183, 12.136, 12.361, 12.406, 11.932, 11.684, 11.292, 11.388,
11.874, 12.184, 12.002, 12.16, 11.741, 11.26, 11.123, 11.534,
11.777, 11.407, 11.275, 11.679, 11.62, 11.218, 11.235, 11.352,
11.366, 11.061, 10.661, 10.582, 10.899, 11.352, 11.792, 11.475,
11.263, 11.538, 11.183, 10.936, 11.399, 11.171, 11.214, 10.89,
10.728, 11.191, 11.646, 11.62, 11.195, 11.178, 11.18, 10.956,
11.205, 10.87, 11.098, 10.639, 10.487, 10.507, 10.92, 10.558,
10.119, 9.882, 9.573, 9.515, 9.845, 9.852, 9.495, 9.726,
10.116, 10.452, 10.77, 11.225, 10.92, 10.824, 11.096, 11.542,
11.06, 10.568, 10.585, 10.884, 10.401, 10.068, 9.964, 10.285,
10.239, 10.036, 10.417, 10.132, 9.839, 9.556, 9.084, 9.239,
9.304, 9.067, 8.587, 8.471, 8.007, 8.321, 8.55, 9.008,
9.138, 9.088, 9.434, 9.156, 9.65, 9.431, 9.654, 10.079,
10.411, 10.865, 10.51, 10.205, 10.519, 10.367, 10.855, 10.642,
10.298, 10.622, 10.173, 9.792, 9.995, 9.904, 9.771, 9.597,
9.506, 9.212, 9.688, 10.032, 9.723, 9.839, 9.918, 10.332,
10.236, 9.989, 10.192, 10.685, 10.908, 11.275, 11.72, 12.158,
12.045, 12.244, 12.333, 12.246, 12.552, 12.958, 13.11, 13.53,
13.123, 13.138, 13.57, 13.389, 13.511, 13.759, 13.698, 13.744,
13.467, 13.795, 13.665, 13.377, 13.423, 13.772, 13.295, 13.073,
12.718, 12.388, 12.399, 12.185, 11.941, 11.818, 11.465, 11.811,
12.163, 11.86, 11.935, 11.809, 12.145, 12.624, 12.768, 12.321,
12.277, 11.889, 12.11, 12.606, 12.943, 12.945, 13.112, 13.199,
13.664, 14.051, 14.189, 14.339, 14.611, 14.656, 15.112, 15.086,
15.263, 15.021, 15.346, 15.572, 15.607, 15.983, 16.151, 16.215,
16.096, 16.089, 16.32, 16.59, 16.657, 16.752, 16.583, 16.743,
16.373, 16.662, 16.243, 16.163, 16.491, 16.958, 16.977, 17.225,
17.637, 17.344, 17.684, 17.892, 18.036, 18.182, 17.803, 17.588,
17.101, 17.538, 17.124, 16.787, 17.167, 17.138, 16.955, 17.148,
17.135, 17.635, 17.718, 17.675, 17.622, 17.358, 17.754, 17.729,
17.576, 17.772, 18.239, 18.441, 18.729, 18.319, 18.608, 18.493,
18.069, 18.122, 18.314, 18.423, 18.709, 18.548, 18.384, 18.391,
17.988, 17.986, 17.653, 17.249, 17.298, 17.06, 17.36, 17.108,
17.348, 17.596, 17.46, 17.635, 17.275, 17.291, 16.933, 17.337,
17.231, 17.146, 17.148, 16.751, 16.891, 17.038, 16.735, 16.64,
16.231, 15.957, 15.977, 16.077, 16.054, 15.797, 15.67, 15.911,
16.077, 16.17, 15.722, 15.258, 14.877, 15.138, 15., 14.811,
14.698, 14.407, 14.583, 14.704, 15.153, 15.436, 15.634, 15.453,
15.877, 15.696, 15.563, 15.927, 16.255, 16.696, 16.266, 16.698,
16.365, 16.493, 16.973, 16.71, 16.327, 16.605, 16.486, 16.846,
16.935, 17.21, 17.389, 17.546, 17.773, 17.641, 17.485, 17.794,
17.354, 16.904, 16.675, 16.43, 16.898, 16.819, 16.921, 17.201,
17.617, 17.368, 17.864, 17.484],
columns=['value'])
self.long_bench = pd.DataFrame([9.7, 10.179, 10.321, 9.855, 9.936, 10.096, 10.331, 10.662,
10.59, 11.031, 11.154, 10.945, 10.625, 10.233, 10.284, 10.252,
10.221, 10.352, 10.444, 10.773, 10.904, 11.104, 10.797, 10.55,
10.943, 11.352, 11.641, 11.983, 11.696, 12.138, 12.365, 12.379,
11.969, 12.454, 12.947, 13.119, 13.013, 12.763, 12.632, 13.034,
12.681, 12.561, 12.938, 12.867, 13.202, 13.132, 13.539, 13.91,
13.456, 13.692, 13.771, 13.904, 14.069, 13.728, 13.97, 14.228,
13.84, 14.041, 13.963, 13.689, 13.543, 13.858, 14.118, 13.987,
13.611, 14.028, 14.229, 14.41, 14.74, 15.03, 14.915, 15.207,
15.354, 15.665, 15.877, 15.682, 15.625, 15.175, 15.105, 14.893,
14.86, 15.097, 15.178, 15.293, 15.238, 15., 15.283, 14.994,
14.907, 14.664, 14.888, 15.297, 15.313, 15.368, 14.956, 14.802,
14.506, 14.257, 14.619, 15.019, 15.049, 14.625, 14.894, 14.978,
15.434, 15.578, 16.038, 16.107, 16.277, 16.365, 16.204, 16.465,
16.401, 16.895, 17.057, 16.621, 16.225, 16.075, 15.863, 16.292,
16.551, 16.724, 16.817, 16.81, 17.192, 16.86, 16.745, 16.707,
16.552, 16.133, 16.301, 16.08, 15.81, 15.75, 15.909, 16.127,
16.457, 16.204, 16.329, 16.748, 16.624, 17.011, 16.548, 16.831,
16.653, 16.791, 16.57, 16.778, 16.928, 16.932, 17.22, 16.876,
17.301, 17.422, 17.689, 17.316, 17.547, 17.534, 17.409, 17.669,
17.416, 17.859, 17.477, 17.307, 17.245, 17.352, 17.851, 17.412,
17.144, 17.138, 17.085, 16.926, 16.674, 16.854, 17.064, 16.95,
16.609, 16.957, 16.498, 16.552, 16.175, 15.858, 15.697, 15.781,
15.583, 15.36, 15.558, 16.046, 15.968, 15.905, 16.358, 16.783,
17.048, 16.762, 17.224, 17.363, 17.246, 16.79, 16.608, 16.423,
15.991, 15.527, 15.147, 14.759, 14.792, 15.206, 15.148, 15.046,
15.429, 14.999, 15.407, 15.124, 14.72, 14.713, 15.022, 15.092,
14.982, 15.001, 14.734, 14.713, 14.841, 14.562, 15.005, 15.483,
15.472, 15.277, 15.503, 15.116, 15.12, 15.442, 15.476, 15.789,
15.36, 15.764, 16.218, 16.493, 16.642, 17.088, 16.816, 16.645,
16.336, 16.511, 16.2, 15.994, 15.86, 15.929, 16.316, 16.416,
16.746, 17.173, 17.531, 17.627, 17.407, 17.49, 17.768, 17.509,
17.795, 18.147, 18.63, 18.945, 19.021, 19.518, 19.6, 19.744,
19.63, 19.32, 18.933, 19.297, 19.598, 19.446, 19.236, 19.198,
19.144, 19.159, 19.065, 19.032, 18.586, 18.272, 18.119, 18.3,
17.894, 17.744, 17.5, 17.083, 17.092, 16.864, 16.453, 16.31,
16.681, 16.342, 16.447, 16.715, 17.068, 17.067, 16.822, 16.673,
16.675, 16.592, 16.686, 16.397, 15.902, 15.597, 15.357, 15.162,
15.348, 15.603, 15.283, 15.257, 15.082, 14.621, 14.366, 14.039,
13.957, 14.141, 13.854, 14.243, 14.414, 14.033, 13.93, 14.104,
14.461, 14.249, 14.053, 14.165, 14.035, 14.408, 14.501, 14.019,
14.265, 14.67, 14.797, 14.42, 14.681, 15.16, 14.715, 14.292,
14.411, 14.656, 15.094, 15.366, 15.055, 15.198, 14.762, 14.294,
13.854, 13.811, 13.549, 13.927, 13.897, 13.421, 13.037, 13.32,
13.721, 13.511, 13.999, 13.529, 13.418, 13.881, 14.326, 14.362,
13.987, 14.015, 13.599, 13.343, 13.307, 13.689, 13.851, 13.404,
13.577, 13.395, 13.619, 13.195, 12.904, 12.553, 12.294, 12.649,
12.425, 11.967, 12.062, 11.71, 11.645, 12.058, 12.136, 11.749,
11.953, 12.401, 12.044, 11.901, 11.631, 11.396, 11.036, 11.244,
10.864, 11.207, 11.135, 11.39, 11.723, 12.084, 11.8, 11.471,
11.33, 11.504, 11.295, 11.3, 10.901, 10.494, 10.825, 11.054,
10.866, 10.713, 10.875, 10.846, 10.947, 11.422, 11.158, 10.94,
10.521, 10.36, 10.411, 10.792, 10.472, 10.305, 10.525, 10.853,
10.556, 10.72, 10.54, 10.583, 10.299, 10.061, 10.004, 9.903,
9.796, 9.472, 9.246, 9.54, 9.456, 9.177, 9.484, 9.557,
9.493, 9.968, 9.536, 9.39, 8.922, 8.423, 8.518, 8.686,
8.771, 9.098, 9.281, 8.858, 9.027, 8.553, 8.784, 8.996,
9.379, 9.846, 9.855, 9.502, 9.608, 9.761, 9.409, 9.4,
9.332, 9.34, 9.284, 8.844, 8.722, 8.376, 8.775, 8.293,
8.144, 8.63, 8.831, 8.957, 9.18, 9.601, 9.695, 10.018,
9.841, 9.743, 9.292, 8.85, 9.316, 9.288, 9.519, 9.738,
9.289, 9.785, 9.804, 10.06, 10.188, 10.095, 9.739, 9.881,
9.7, 9.991, 10.391, 10.002],
columns=['value'])
def test_performance_stats(self):
"""test the function performance_statistics()
"""
pass
def test_fv(self):
print(f'test with test data and empty DataFrame')
self.assertAlmostEqual(eval_fv(self.test_data1), 6.39245474)
self.assertAlmostEqual(eval_fv(self.test_data2), 10.05126375)
self.assertAlmostEqual(eval_fv(self.test_data3), 6.95068113)
self.assertAlmostEqual(eval_fv(self.test_data4), 8.86508591)
self.assertAlmostEqual(eval_fv(self.test_data5), 4.58627048)
self.assertAlmostEqual(eval_fv(self.test_data6), 4.10346795)
self.assertAlmostEqual(eval_fv(self.test_data7), 2.92532313)
self.assertAlmostEqual(eval_fv(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
def test_max_drawdown(self):
print(f'test with test data and empty DataFrame')
self.assertAlmostEqual(eval_max_drawdown(self.test_data1)[0], 0.264274308)
self.assertEqual(eval_max_drawdown(self.test_data1)[1], 53)
self.assertEqual(eval_max_drawdown(self.test_data1)[2], 86)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data1)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data2)[0], 0.334690849)
self.assertEqual(eval_max_drawdown(self.test_data2)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data2)[2], 10)
self.assertEqual(eval_max_drawdown(self.test_data2)[3], 19)
self.assertAlmostEqual(eval_max_drawdown(self.test_data3)[0], 0.244452899)
self.assertEqual(eval_max_drawdown(self.test_data3)[1], 90)
self.assertEqual(eval_max_drawdown(self.test_data3)[2], 99)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data3)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data4)[0], 0.201849684)
self.assertEqual(eval_max_drawdown(self.test_data4)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4)[2], 50)
self.assertEqual(eval_max_drawdown(self.test_data4)[3], 54)
self.assertAlmostEqual(eval_max_drawdown(self.test_data5)[0], 0.534206456)
self.assertEqual(eval_max_drawdown(self.test_data5)[1], 21)
self.assertEqual(eval_max_drawdown(self.test_data5)[2], 60)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data5)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data6)[0], 0.670062689)
self.assertEqual(eval_max_drawdown(self.test_data6)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data6)[2], 70)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data6)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data7)[0], 0.783577449)
self.assertEqual(eval_max_drawdown(self.test_data7)[1], 17)
self.assertEqual(eval_max_drawdown(self.test_data7)[2], 51)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data7)[3]))
self.assertEqual(eval_max_drawdown(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_max_drawdown, 15)
self.assertRaises(KeyError,
                  eval_max_drawdown,
                  pd.DataFrame([1, 2, 3], columns=['non_value']))
# test a value series that crosses zero, so the drawdown denominator (the running peak) comes close to zero:
# TODO: investigate how the peak-relative drawdown should be handled when the divisor approaches zero
#  (see the reference sketch after this test)
self.assertAlmostEqual(eval_max_drawdown(self.test_data4 - 5)[0], 1.0770474121951792)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[2], 50)
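# A plain peak-relative max drawdown is (peak - trough) / peak measured from each
# running peak; on test_data1 this reproduces the asserted 0.264274 (trough at 86
# against the peak at 53), and on test_data4 - 5 the barely-positive peak makes
# the ratio exceed 1.0 (the asserted 1.0770), which is what the divide-by-zero
# TODO above refers to.  A minimal sketch of that definition (illustrative only,
# not necessarily how eval_max_drawdown() is implemented; it returns the ratio
# without the peak/trough/recovery positions):
def _reference_max_drawdown(df):
    import numpy as np
    v = df['value'].values
    running_peak = np.maximum.accumulate(v)        # highest value seen so far
    drawdown = (running_peak - v) / running_peak   # peak-relative loss at each step
    return np.nanmax(drawdown)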
def test_info_ratio(self):
reference = self.test_data1
self.assertAlmostEqual(eval_info_ratio(self.test_data2, reference, 'value'), 0.075553316)
self.assertAlmostEqual(eval_info_ratio(self.test_data3, reference, 'value'), 0.018949457)
self.assertAlmostEqual(eval_info_ratio(self.test_data4, reference, 'value'), 0.056328143)
self.assertAlmostEqual(eval_info_ratio(self.test_data5, reference, 'value'), -0.004270068)
self.assertAlmostEqual(eval_info_ratio(self.test_data6, reference, 'value'), 0.009198027)
self.assertAlmostEqual(eval_info_ratio(self.test_data7, reference, 'value'), -0.000890283)
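# The information ratio is conventionally the mean active return (portfolio
# return minus reference return, per period) divided by the standard deviation
# of that active return.  The sketch below only illustrates that textbook
# definition; whether eval_info_ratio() annualizes or uses exactly this
# estimator is not asserted by the values above.
def _reference_info_ratio(df, reference):
    ret = df['value'].pct_change()
    bench_ret = reference['value'].pct_change()
    active = ret - bench_ret                 # per-period active return
    return active.mean() / active.std()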
def test_volatility(self):
self.assertAlmostEqual(eval_volatility(self.test_data1), 0.748646166)
self.assertAlmostEqual(eval_volatility(self.test_data2), 0.75527442)
self.assertAlmostEqual(eval_volatility(self.test_data3), 0.654188853)
self.assertAlmostEqual(eval_volatility(self.test_data4), 0.688375814)
self.assertAlmostEqual(eval_volatility(self.test_data5), 1.089989522)
self.assertAlmostEqual(eval_volatility(self.test_data6), 1.775419308)
self.assertAlmostEqual(eval_volatility(self.test_data7), 1.962758406)
self.assertAlmostEqual(eval_volatility(self.test_data1, logarithm=False), 0.750993311)
self.assertAlmostEqual(eval_volatility(self.test_data2, logarithm=False), 0.75571473)
self.assertAlmostEqual(eval_volatility(self.test_data3, logarithm=False), 0.655331424)
self.assertAlmostEqual(eval_volatility(self.test_data4, logarithm=False), 0.692683021)
self.assertAlmostEqual(eval_volatility(self.test_data5, logarithm=False), 1.09602969)
self.assertAlmostEqual(eval_volatility(self.test_data6, logarithm=False), 1.774789504)
self.assertAlmostEqual(eval_volatility(self.test_data7, logarithm=False), 2.003329156)
self.assertEqual(eval_volatility(pd.DataFrame()), -np.inf)
self.assertRaises(AssertionError, eval_volatility, [1, 2, 3])
# test the volatility calculation on long data (a sketch of the assumed rolling computation follows this test)
expected_volatility = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
0.39955371, 0.39974258, 0.40309866, 0.40486593, 0.4055514,
0.40710639, 0.40708157, 0.40609006, 0.4073625, 0.40835305,
0.41155304, 0.41218193, 0.41207489, 0.41300276, 0.41308415,
0.41292392, 0.41207645, 0.41238397, 0.41229291, 0.41164056,
0.41316317, 0.41348842, 0.41462249, 0.41474574, 0.41652625,
0.41649176, 0.41701556, 0.4166593, 0.41684221, 0.41491689,
0.41435209, 0.41549087, 0.41849338, 0.41998049, 0.41959106,
0.41907311, 0.41916103, 0.42120773, 0.42052391, 0.42111225,
0.42124589, 0.42356445, 0.42214672, 0.42324022, 0.42476639,
0.42621689, 0.42549439, 0.42533678, 0.42539414, 0.42545038,
0.42593637, 0.42652095, 0.42665489, 0.42699563, 0.42798159,
0.42784512, 0.42898006, 0.42868781, 0.42874188, 0.42789631,
0.4277768, 0.42776827, 0.42685216, 0.42660989, 0.42563155,
0.42618281, 0.42606281, 0.42505222, 0.42653242, 0.42555378,
0.42500842, 0.42561939, 0.42442059, 0.42395414, 0.42384356,
0.42319135, 0.42397497, 0.42488579, 0.42449729, 0.42508766,
0.42509878, 0.42456616, 0.42535577, 0.42681884, 0.42688552,
0.42779918, 0.42706058, 0.42792887, 0.42762114, 0.42894045,
0.42977398, 0.42919859, 0.42829041, 0.42780946, 0.42825318,
0.42858952, 0.42858315, 0.42805601, 0.42764751, 0.42744107,
0.42775518, 0.42707283, 0.4258592, 0.42615335, 0.42526286,
0.4248906, 0.42368986, 0.4232565, 0.42265079, 0.42263954,
0.42153046, 0.42132051, 0.41995353, 0.41916605, 0.41914271,
0.41876945, 0.41740175, 0.41583884, 0.41614026, 0.41457908,
0.41472411, 0.41310876, 0.41261041, 0.41212369, 0.41211677,
0.4100645, 0.40852504, 0.40860297, 0.40745338, 0.40698661,
0.40644546, 0.40591375, 0.40640744, 0.40620663, 0.40656649,
0.40727154, 0.40797605, 0.40807137, 0.40808913, 0.40809676,
0.40711767, 0.40724628, 0.40713077, 0.40772698, 0.40765157,
0.40658297, 0.4065991, 0.405011, 0.40537645, 0.40432626,
0.40390177, 0.40237701, 0.40291623, 0.40301797, 0.40324145,
0.40312864, 0.40328316, 0.40190955, 0.40246506, 0.40237663,
0.40198407, 0.401969, 0.40185623, 0.40198313, 0.40005643,
0.39940743, 0.39850438, 0.39845398, 0.39695093, 0.39697295,
0.39663201, 0.39675444, 0.39538699, 0.39331959, 0.39326074,
0.39193287, 0.39157266, 0.39021327, 0.39062591, 0.38917591,
0.38976991, 0.38864187, 0.38872158, 0.38868096, 0.38868377,
0.38842057, 0.38654784, 0.38649517, 0.38600464, 0.38408115,
0.38323049, 0.38260215, 0.38207663, 0.38142669, 0.38003262,
0.37969367, 0.37768092, 0.37732108, 0.37741991, 0.37617779,
0.37698504, 0.37606784, 0.37499276, 0.37533731, 0.37350437,
0.37375172, 0.37385382, 0.37384003, 0.37338938, 0.37212288,
0.37273075, 0.370559, 0.37038506, 0.37062153, 0.36964661,
0.36818564, 0.3656634, 0.36539259, 0.36428672, 0.36502487,
0.3647148, 0.36551435, 0.36409919, 0.36348181, 0.36254383,
0.36166601, 0.36142665, 0.35954942, 0.35846915, 0.35886759,
0.35813867, 0.35642888, 0.35375231, 0.35061783, 0.35078463,
0.34995508, 0.34688918, 0.34548257, 0.34633158, 0.34622833,
0.34652111, 0.34622774, 0.34540951, 0.34418809, 0.34276593,
0.34160916, 0.33811193, 0.33822709, 0.3391685, 0.33883381])
test_volatility = eval_volatility(self.long_data)
test_volatility_roll = self.long_data['volatility'].values
self.assertAlmostEqual(test_volatility, np.nanmean(expected_volatility))
self.assertTrue(np.allclose(expected_volatility, test_volatility_roll, equal_nan=True))
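# The first 250 entries of expected_volatility are NaN and the remaining values
# sit around 0.40, which is consistent with an annualized rolling volatility:
# the standard deviation of (log) returns over a 250-observation window scaled
# by sqrt(250).  eval_volatility() is compared against np.nanmean of that
# rolling series above, and it also appears to write the rolling values back
# into the frame as a 'volatility' column.  A sketch under those assumptions;
# the window length and scaling factor are inferred, not taken from the
# library source:
def _reference_rolling_volatility(df, window=250, logarithm=True):
    import numpy as np
    if logarithm:
        ret = np.log(df['value']).diff()       # log returns
    else:
        ret = df['value'].pct_change()         # simple returns
    return ret.rolling(window).std() * np.sqrt(window)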
def test_sharp(self):
self.assertAlmostEqual(eval_sharp(self.test_data1, 5, 0), 0.06135557)
self.assertAlmostEqual(eval_sharp(self.test_data2, 5, 0), 0.167858667)
self.assertAlmostEqual(eval_sharp(self.test_data3, 5, 0), 0.09950547)
self.assertAlmostEqual(eval_sharp(self.test_data4, 5, 0), 0.154928241)
self.assertAlmostEqual(eval_sharp(self.test_data5, 5, 0.002), 0.007868673)
self.assertAlmostEqual(eval_sharp(self.test_data6, 5, 0.002), 0.018306537)
self.assertAlmostEqual(eval_sharp(self.test_data7, 5, 0.002), 0.006259971)
# test the Sharpe ratio calculation on long data (see the sketch after this test)
expected_sharp = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.02346815, -0.02618783, -0.03763912, -0.03296276, -0.03085698,
-0.02851101, -0.02375842, -0.02016746, -0.01107885, -0.01426613,
-0.00787204, -0.01135784, -0.01164232, -0.01003481, -0.00022512,
-0.00046792, -0.01209378, -0.01278892, -0.01298135, -0.01938214,
-0.01671044, -0.02120509, -0.0244281, -0.02416067, -0.02763238,
-0.027579, -0.02372774, -0.02215294, -0.02467094, -0.02091266,
-0.02590194, -0.03049876, -0.02077131, -0.01483653, -0.02488144,
-0.02671638, -0.02561547, -0.01957986, -0.02479803, -0.02703162,
-0.02658087, -0.01641755, -0.01946472, -0.01647757, -0.01280889,
-0.00893643, -0.00643275, -0.00698457, -0.00549962, -0.00654677,
-0.00494757, -0.0035633, -0.00109037, 0.00750654, 0.00451208,
0.00625502, 0.01221367, 0.01326454, 0.01535037, 0.02269538,
0.02028715, 0.02127712, 0.02333264, 0.02273159, 0.01670643,
0.01376513, 0.01265342, 0.02211647, 0.01612449, 0.00856706,
-0.00077147, -0.00268848, 0.00210993, -0.00443934, -0.00411912,
-0.0018756, -0.00867461, -0.00581601, -0.00660835, -0.00861137,
-0.00678614, -0.01188408, -0.00589617, -0.00244323, -0.00201891,
-0.01042846, -0.01471016, -0.02167034, -0.02258554, -0.01306809,
-0.00909086, -0.01233746, -0.00595166, -0.00184208, 0.00750497,
0.01481886, 0.01761972, 0.01562886, 0.01446414, 0.01285826,
0.01357719, 0.00967613, 0.01636272, 0.01458437, 0.02280183,
0.02151903, 0.01700276, 0.01597368, 0.02114336, 0.02233297,
0.02585631, 0.02768459, 0.03519235, 0.04204535, 0.04328161,
0.04672855, 0.05046191, 0.04619848, 0.04525853, 0.05381529,
0.04598861, 0.03947394, 0.04665006, 0.05586077, 0.05617728,
0.06495018, 0.06205172, 0.05665466, 0.06500615, 0.0632062,
0.06084328, 0.05851466, 0.05659229, 0.05159347, 0.0432977,
0.0474047, 0.04231723, 0.03613176, 0.03618391, 0.03591012,
0.03885674, 0.0402686, 0.03846423, 0.04534014, 0.04721458,
0.05130912, 0.05026281, 0.05394312, 0.05529349, 0.05949243,
0.05463304, 0.06195165, 0.06767606, 0.06880985, 0.07048996,
0.07078815, 0.07420767, 0.06773439, 0.0658441, 0.06470875,
0.06302349, 0.06456876, 0.06411282, 0.06216669, 0.067094,
0.07055075, 0.07254976, 0.07119253, 0.06173308, 0.05393352,
0.05681246, 0.05250643, 0.06099845, 0.0655544, 0.06977334,
0.06636514, 0.06177949, 0.06869908, 0.06719767, 0.06178738,
0.05915714, 0.06882277, 0.06756821, 0.06507994, 0.06489791,
0.06553941, 0.073123, 0.07576757, 0.06805446, 0.06063571,
0.05033801, 0.05206971, 0.05540306, 0.05249118, 0.05755587,
0.0586174, 0.05051288, 0.0564852, 0.05757284, 0.06358355,
0.06130082, 0.04925482, 0.03834472, 0.04163981, 0.04648316,
0.04457858, 0.04324626, 0.04328791, 0.04156207, 0.04818652,
0.04972634, 0.06024123, 0.06489556, 0.06255485, 0.06069815,
0.06466389, 0.07081163, 0.07895358, 0.0881782, 0.09374151,
0.08336506, 0.08764795, 0.09080174, 0.08808926, 0.08641158,
0.07811943, 0.06885318, 0.06479503, 0.06851185, 0.07382819,
0.07047903, 0.06658251, 0.07638379, 0.08667974, 0.08867918,
0.08245323, 0.08961866, 0.09905298, 0.0961908, 0.08562706,
0.0839014, 0.0849072, 0.08338395, 0.08783487, 0.09463609,
0.10332336, 0.11806497, 0.11220297, 0.11589097, 0.11678405])
test_sharp = eval_sharp(self.long_data, 5, 0.00035)
self.assertAlmostEqual(np.nanmean(expected_sharp), test_sharp)
self.assertTrue(np.allclose(self.long_data['sharp'].values, expected_sharp, equal_nan=True))
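# A Sharpe ratio divides the excess return over the risk-free rate by the
# volatility of returns; the 250 leading NaNs in expected_sharp suggest the same
# 250-observation rolling window as in the volatility test, with eval_sharp()
# again compared to the np.nanmean of the rolling series.  The sketch below is
# only the textbook per-window computation; how eval_sharp() interprets its
# second positional argument (the 5 above) and annualizes is not pinned down by
# these assertions.
def _reference_rolling_sharpe(df, riskfree=0.0, window=250):
    ret = df['value'].pct_change()                     # per-period simple returns
    excess = ret.rolling(window).mean() - riskfree     # mean excess return per window
    return excess / ret.rolling(window).std()          # scale by return volatility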
def test_beta(self):
reference = self.test_data1
self.assertAlmostEqual(eval_beta(self.test_data2, reference, 'value'), -0.017148939)
self.assertAlmostEqual(eval_beta(self.test_data3, reference, 'value'), -0.042204233)
self.assertAlmostEqual(eval_beta(self.test_data4, reference, 'value'), -0.15652986)
self.assertAlmostEqual(eval_beta(self.test_data5, reference, 'value'), -0.049195532)
self.assertAlmostEqual(eval_beta(self.test_data6, reference, 'value'), -0.026995082)
self.assertAlmostEqual(eval_beta(self.test_data7, reference, 'value'), -0.01147809)
self.assertRaises(TypeError, eval_beta, [1, 2, 3], reference, 'value')
self.assertRaises(TypeError, eval_beta, self.test_data3, [1, 2, 3], 'value')
self.assertRaises(KeyError, eval_beta, self.test_data3, reference, 'not_found_value')
# test the beta calculation on long data (see the sketch after this test)
expected_beta = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.04988841, -0.05127618, -0.04692104, -0.04272652, -0.04080598,
-0.0493347, -0.0460858, -0.0416761, -0.03691527, -0.03724924,
-0.03678865, -0.03987324, -0.03488321, -0.02567672, -0.02690303,
-0.03010128, -0.02437967, -0.02571932, -0.02455681, -0.02839811,
-0.03358653, -0.03396697, -0.03466321, -0.03050966, -0.0247583,
-0.01629325, -0.01880895, -0.01480403, -0.01348783, -0.00544294,
-0.00648176, -0.00467036, -0.01135331, -0.0156841, -0.02340763,
-0.02615705, -0.02730771, -0.02906174, -0.02860664, -0.02412914,
-0.02066416, -0.01744816, -0.02185133, -0.02145285, -0.02681765,
-0.02827694, -0.02394581, -0.02744096, -0.02778825, -0.02703065,
-0.03160023, -0.03615371, -0.03681072, -0.04265126, -0.04344738,
-0.04232421, -0.04705272, -0.04533344, -0.04605934, -0.05272737,
-0.05156463, -0.05134196, -0.04730733, -0.04425352, -0.03869831,
-0.04159571, -0.04223998, -0.04346747, -0.04229844, -0.04740093,
-0.04992507, -0.04621232, -0.04477644, -0.0486915, -0.04598224,
-0.04943463, -0.05006391, -0.05362256, -0.04994067, -0.05464769,
-0.05443275, -0.05513493, -0.05173594, -0.04500994, -0.04662891,
-0.03903505, -0.0419592, -0.04307773, -0.03925718, -0.03711574,
-0.03992631, -0.0433058, -0.04533641, -0.0461183, -0.05600344,
-0.05758377, -0.05959874, -0.05605942, -0.06002859, -0.06253002,
-0.06747014, -0.06427915, -0.05931947, -0.05769974, -0.04791515,
-0.05175088, -0.05748039, -0.05385232, -0.05072975, -0.05052637,
-0.05125567, -0.05005785, -0.05325104, -0.04977727, -0.04947867,
-0.05148544, -0.05739156, -0.05742069, -0.06047279, -0.0558414,
-0.06086126, -0.06265151, -0.06411129, -0.06828052, -0.06781762,
-0.07083409, -0.07211207, -0.06799162, -0.06913295, -0.06775162,
-0.0696265, -0.06678248, -0.06867502, -0.06581961, -0.07055823,
-0.06448184, -0.06097973, -0.05795587, -0.0618383, -0.06130145,
-0.06050652, -0.05936661, -0.05749424, -0.0499, -0.05050495,
-0.04962687, -0.05033439, -0.05070116, -0.05422009, -0.05369759,
-0.05548943, -0.05907353, -0.05933035, -0.05927918, -0.06227663,
-0.06011455, -0.05650432, -0.05828134, -0.05620949, -0.05715323,
-0.05482478, -0.05387113, -0.05095559, -0.05377999, -0.05334267,
-0.05220438, -0.04001521, -0.03892434, -0.03660782, -0.04282708,
-0.04324623, -0.04127048, -0.04227559, -0.04275226, -0.04347049,
-0.04125853, -0.03806295, -0.0330632, -0.03155531, -0.03277152,
-0.03304518, -0.03878731, -0.03830672, -0.03727434, -0.0370571,
-0.04509224, -0.04207632, -0.04116198, -0.04545179, -0.04584584,
-0.05287341, -0.05417433, -0.05175836, -0.05005509, -0.04268674,
-0.03442321, -0.03457309, -0.03613426, -0.03524391, -0.03629479,
-0.04361312, -0.02626705, -0.02406115, -0.03046384, -0.03181044,
-0.03375164, -0.03661673, -0.04520779, -0.04926951, -0.05726738,
-0.0584486, -0.06220608, -0.06800563, -0.06797431, -0.07562211,
-0.07481996, -0.07731229, -0.08413381, -0.09031826, -0.09691925,
-0.11018071, -0.11952675, -0.10826026, -0.11173895, -0.10756359,
-0.10775916, -0.11664559, -0.10505051, -0.10606547, -0.09855355,
-0.10004159, -0.10857084, -0.12209301, -0.11605758, -0.11105113,
-0.1155195, -0.11569505, -0.10513348, -0.09611072, -0.10719791,
-0.10843965, -0.11025856, -0.10247839, -0.10554044, -0.10927647,
-0.10645088, -0.09982498, -0.10542734, -0.09631372, -0.08229695])
test_beta_mean = eval_beta(self.long_data, self.long_bench, 'value')
test_beta_roll = self.long_data['beta'].values
self.assertAlmostEqual(test_beta_mean, np.nanmean(expected_beta))
self.assertTrue(np.allclose(test_beta_roll, expected_beta, equal_nan=True))
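# Beta is the covariance of the portfolio's returns with the benchmark's returns
# divided by the variance of the benchmark's returns; the 250 leading NaNs again
# point to a 250-observation rolling window, and eval_beta() is compared to the
# np.nanmean of the rolling series.  A sketch of the rolling textbook estimator
# under those assumptions (not the library's actual code path):
def _reference_rolling_beta(df, bench, window=250):
    ret = df['value'].pct_change()
    bench_ret = bench['value'].pct_change()
    # rolling covariance with the benchmark over rolling benchmark variance
    return ret.rolling(window).cov(bench_ret) / bench_ret.rolling(window).var()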
def test_alpha(self):
reference = self.test_data1
self.assertAlmostEqual(eval_alpha(self.test_data2, 5, reference, 'value', 0.5), 11.63072977)
self.assertAlmostEqual(eval_alpha(self.test_data3, 5, reference, 'value', 0.5), 1.886590071)
self.assertAlmostEqual(eval_alpha(self.test_data4, 5, reference, 'value', 0.5), 6.827021872)
self.assertAlmostEqual(eval_alpha(self.test_data5, 5, reference, 'value', 0.92), -1.192265168)
self.assertAlmostEqual(eval_alpha(self.test_data6, 5, reference, 'value', 0.92), -1.437142359)
self.assertAlmostEqual(eval_alpha(self.test_data7, 5, reference, 'value', 0.92), -1.781311545)
# test the alpha calculation on long data (see the sketch after this test)
expected_alpha = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.09418119, -0.11188463, -0.17938358, -0.15588172, -0.1462678,
-0.13089586, -0.10780125, -0.09102891, -0.03987585, -0.06075686,
-0.02459503, -0.04104284, -0.0444565, -0.04074585, 0.02191275,
0.02255955, -0.05583375, -0.05875539, -0.06055551, -0.09648245,
-0.07913737, -0.10627829, -0.12320965, -0.12368335, -0.1506743,
-0.15768033, -0.13638829, -0.13065298, -0.14537834, -0.127428,
-0.15504529, -0.18184636, -0.12652146, -0.09190138, -0.14847221,
-0.15840648, -0.1525789, -0.11859418, -0.14700954, -0.16295761,
-0.16051645, -0.10364859, -0.11961134, -0.10258267, -0.08090148,
-0.05727746, -0.0429945, -0.04672356, -0.03581408, -0.0439215,
-0.03429495, -0.0260362, -0.01075022, 0.04931808, 0.02779388,
0.03984083, 0.08311951, 0.08995566, 0.10522428, 0.16159058,
0.14238174, 0.14759783, 0.16257712, 0.158908, 0.11302115,
0.0909566, 0.08272888, 0.15261884, 0.10546376, 0.04990313,
-0.01284111, -0.02720704, 0.00454725, -0.03965491, -0.03818265,
-0.02186992, -0.06574751, -0.04846454, -0.05204211, -0.06316498,
-0.05095099, -0.08502656, -0.04681162, -0.02362027, -0.02205091,
-0.07706374, -0.10371841, -0.14434688, -0.14797935, -0.09055402,
-0.06739549, -0.08824959, -0.04855888, -0.02291244, 0.04027138,
0.09370505, 0.11472939, 0.10243593, 0.0921445, 0.07662648,
0.07946651, 0.05450718, 0.10497677, 0.09068334, 0.15462924,
0.14231034, 0.10544952, 0.09980256, 0.14035223, 0.14942974,
0.17624102, 0.19035477, 0.2500807, 0.30724652, 0.31768915,
0.35007521, 0.38412975, 0.34356521, 0.33614463, 0.41206165,
0.33999177, 0.28045963, 0.34076789, 0.42220356, 0.42314636,
0.50790423, 0.47713348, 0.42520169, 0.50488411, 0.48705211,
0.46252601, 0.44325578, 0.42640573, 0.37986783, 0.30652822,
0.34503393, 0.2999069, 0.24928617, 0.24730218, 0.24326897,
0.26657905, 0.27861168, 0.26392824, 0.32552649, 0.34177792,
0.37837011, 0.37025267, 0.4030612, 0.41339361, 0.45076809,
0.40383354, 0.47093422, 0.52505036, 0.53614256, 0.5500943,
0.55319293, 0.59021451, 0.52358459, 0.50605947, 0.49359168,
0.47895956, 0.49320243, 0.4908336, 0.47310767, 0.51821564,
0.55105932, 0.57291504, 0.5599809, 0.46868842, 0.39620087,
0.42086934, 0.38317217, 0.45934108, 0.50048866, 0.53941991,
0.50676751, 0.46500915, 0.52993663, 0.51668366, 0.46405428,
0.44100603, 0.52726147, 0.51565458, 0.49186248, 0.49001081,
0.49367648, 0.56422294, 0.58882785, 0.51334664, 0.44386256,
0.35056709, 0.36490029, 0.39205071, 0.3677061, 0.41134736,
0.42315067, 0.35356394, 0.40324562, 0.41340007, 0.46503322,
0.44355762, 0.34854314, 0.26412842, 0.28633753, 0.32335224,
0.30761141, 0.29709569, 0.29570487, 0.28000063, 0.32802547,
0.33967726, 0.42511212, 0.46252357, 0.44244974, 0.42152907,
0.45436727, 0.50482359, 0.57339198, 0.6573356, 0.70912003,
0.60328917, 0.6395092, 0.67015805, 0.64241557, 0.62779142,
0.55028063, 0.46448736, 0.43709245, 0.46777983, 0.51789439,
0.48594916, 0.4456216, 0.52008189, 0.60548684, 0.62792473,
0.56645031, 0.62766439, 0.71829315, 0.69481356, 0.59550329,
0.58133754, 0.59014148, 0.58026655, 0.61719273, 0.67373203,
0.75573056, 0.89501633, 0.8347253, 0.87964685, 0.89015835])
test_alpha_mean = eval_alpha(self.long_data, 100, self.long_bench, 'value')
test_alpha_roll = self.long_data['alpha'].values
self.assertAlmostEqual(test_alpha_mean, np.nanmean(expected_alpha))
self.assertTrue(np.allclose(test_alpha_roll, expected_alpha, equal_nan=True))
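# Jensen's alpha measures the return earned above what CAPM predicts:
# alpha = R_p - (R_f + beta * (R_m - R_f)), with R_p the portfolio return, R_m
# the benchmark return and R_f the risk-free rate.  The sketch below is only
# that textbook formula; how eval_alpha() derives annualized returns from the
# price series and what its second positional argument (5 or 100 above)
# controls is not asserted here.
def _reference_alpha(portfolio_return, benchmark_return, beta, riskfree=0.0):
    # excess of the realised return over the CAPM-expected return
    return portfolio_return - (riskfree + beta * (benchmark_return - riskfree))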
def test_calmar(self):
"""test evaluate function eval_calmar()"""
pass
def test_benchmark(self):
reference = self.test_data1
tr, yr = eval_benchmark(self.test_data2, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data3, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data4, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data5, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data6, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data7, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
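# Every call above returns the same pair regardless of the first argument, so
# eval_benchmark() evidently reports the reference series' own total and yearly
# return: 6.39245474 / 5.34892759 - 1 == 0.19509091 reproduces the first figure
# exactly.  The annualization convention behind 0.929155 is not pinned down by
# the test; the sketch below shows the total-return part plus a generic
# compounding step whose day count (365 calendar days per year) is an assumption.
def _reference_benchmark_return(reference, periods_per_year=365):
    first = reference['value'].iloc[0]
    last = reference['value'].iloc[-1]
    total_return = last / first - 1.0
    n_periods = len(reference) - 1               # intervals between observations
    yearly_return = (1.0 + total_return) ** (periods_per_year / n_periods) - 1.0
    return total_return, yearly_return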
def test_evaluate(self):
pass
class TestLoop(unittest.TestCase):
"""通过一个假设但精心设计的例子来测试loop_step以及loop方法的正确性"""
def setUp(self):
# carefully designed mock share names, trading dates and share prices
self.shares = ['share1', 'share2', 'share3', 'share4', 'share5', 'share6', 'share7']
self.dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08',
'2016/09/09', '2016/09/12', '2016/09/13', '2016/09/14', '2016/09/15',
'2016/09/16', '2016/09/19', '2016/09/20', '2016/09/21', '2016/09/22',
'2016/09/23', '2016/09/26', '2016/09/27', '2016/09/28', '2016/09/29',
'2016/09/30', '2016/10/10', '2016/10/11', '2016/10/12', '2016/10/13',
'2016/10/14', '2016/10/17', '2016/10/18', '2016/10/19', '2016/10/20',
'2016/10/21', '2016/10/23', '2016/10/24', '2016/10/25', '2016/10/26',
'2016/10/27', '2016/10/29', '2016/10/30', '2016/10/31', '2016/11/01',
'2016/11/02', '2016/11/05', '2016/11/06', '2016/11/07', '2016/11/08',
'2016/11/09', '2016/11/12', '2016/11/13', '2016/11/14', '2016/11/15',
'2016/11/16', '2016/11/19', '2016/11/20', '2016/11/21', '2016/11/22']
self.dates = [pd.Timestamp(date_text) for date_text in self.dates]
self.prices = np.array([[5.35, 5.09, 5.03, 4.98, 4.50, 5.09, 4.75],
[5.66, 4.84, 5.21, 5.44, 4.35, 5.06, 4.48],
[5.79, 4.60, 5.02, 5.45, 4.07, 4.76, 4.56],
[5.56, 4.63, 5.50, 5.74, 3.88, 4.62, 4.62],
[5.88, 4.64, 5.07, 5.46, 3.74, 4.63, 4.62],
[6.25, 4.51, 5.11, 5.45, 3.98, 4.25, 4.59],
[5.93, 4.96, 5.15, 5.24, 4.08, 4.13, 4.33],
[6.39, 4.65, 5.02, 5.47, 4.00, 3.91, 3.88],
[6.31, 4.26, 5.10, 5.58, 4.28, 3.77, 3.47],
[5.86, 3.77, 5.24, 5.36, 4.01, 3.43, 3.51],
[5.61, 3.39, 4.93, 5.38, 4.14, 3.68, 3.83],
[5.31, 3.76, 4.96, 5.30, 4.49, 3.63, 3.67],
[5.40, 4.06, 5.40, 5.77, 4.49, 3.94, 3.79],
[5.03, 3.87, 5.74, 5.75, 4.46, 4.40, 4.18],
[5.38, 3.91, 5.53, 6.15, 4.13, 4.03, 4.02],
[5.79, 4.13, 5.79, 6.04, 3.79, 3.93, 4.41],
[6.27, 4.27, 5.68, 6.01, 4.23, 3.50, 4.65],
[6.59, 4.57, 5.90, 5.71, 4.57, 3.39, 4.89],
[6.91, 5.04, 5.75, 5.23, 4.92, 3.30, 4.41],
[6.71, 5.31, 6.11, 5.47, 5.28, 3.25, 4.66],
[6.33, 5.40, 5.77, 5.79, 5.67, 2.94, 4.37],
[6.07, 5.21, 5.85, 5.82, 6.00, 2.71, 4.58],
[5.98, 5.06, 5.61, 5.61, 5.89, 2.55, 4.76],
[6.46, 4.69, 5.75, 5.31, 5.55, 2.21, 4.37],
[6.95, 5.12, 5.50, 5.24, 5.39, 2.29, 4.16],
[6.77, 5.27, 5.14, 5.41, 5.26, 2.21, 4.02],
[6.70, 5.72, 5.31, 5.60, 5.31, 2.04, 3.77],
[6.28, 6.10, 5.68, 5.28, 5.22, 2.39, 3.38],
[6.61, 6.27, 5.73, 4.99, 4.90, 2.30, 3.07],
[6.25, 6.49, 6.04, 5.09, 4.57, 2.41, 2.90],
[6.47, 6.16, 6.27, 5.39, 4.96, 2.40, 2.50],
[6.45, 6.26, 6.60, 5.58, 4.82, 2.79, 2.76],
[6.88, 6.39, 6.10, 5.33, 4.39, 2.67, 2.29],
[7.00, 6.58, 6.25, 5.48, 4.63, 2.27, 2.17],
[6.59, 6.20, 6.73, 5.10, 5.05, 2.09, 1.84],
[6.59, 5.70, 6.91, 5.39, 4.68, 2.55, 1.83],
[6.64, 5.20, 7.01, 5.30, 5.02, 2.22, 2.21],
[6.38, 5.37, 7.36, 5.04, 4.84, 2.59, 2.00],
[6.10, 5.40, 7.72, 5.51, 4.60, 2.59, 1.76],
[6.35, 5.22, 7.68, 5.43, 4.66, 2.95, 1.27],
[6.52, 5.38, 7.62, 5.23, 4.41, 2.69, 1.40],
[6.87, 5.53, 7.74, 4.99, 4.87, 2.20, 1.11],
[6.84, 6.03, 7.53, 5.43, 4.42, 2.69, 1.60],
[7.09, 5.77, 7.46, 5.40, 4.08, 2.65, 1.23],
[6.88, 5.66, 7.84, 5.60, 4.16, 2.63, 1.59],
[6.84, 6.08, 8.11, 5.66, 4.10, 2.14, 1.50],
[6.98, 5.62, 8.04, 6.01, 4.43, 2.39, 1.80],
[7.02, 5.63, 7.65, 5.64, 4.07, 1.95, 1.55],
[7.13, 6.11, 7.52, 5.67, 3.97, 2.32, 1.35],
[7.59, 6.03, 7.67, 5.30, 4.16, 2.69, 1.51],
[7.61, 6.27, 7.47, 4.91, 4.12, 2.51, 1.08],
[7.21, 6.28, 7.44, 5.37, 4.04, 2.62, 1.06],
[7.48, 6.52, 7.59, 5.75, 3.84, 2.16, 1.43],
[7.66, 7.00, 7.94, 6.08, 3.46, 2.35, 1.43],
[7.51, 7.34, 8.25, 6.58, 3.18, 2.31, 1.74],
[7.12, 7.34, 7.77, 6.78, 3.10, 1.96, 1.44],
[6.97, 7.68, 8.03, 7.20, 3.55, 2.35, 1.83],
[6.67, 8.09, 7.87, 7.65, 3.66, 2.58, 1.71],
[6.20, 7.68, 7.58, 8.00, 3.66, 2.40, 2.12],
[6.34, 7.58, 7.33, 7.92, 3.29, 2.20, 2.45],
[6.22, 7.46, 7.22, 8.30, 2.80, 2.31, 2.85],
[5.98, 7.59, 6.86, 8.46, 2.88, 2.16, 2.79],
[6.37, 7.19, 7.18, 7.99, 3.04, 2.16, 2.91],
[6.56, 7.40, 7.54, 8.19, 3.45, 2.20, 3.26],
[6.26, 7.48, 7.24, 8.61, 3.88, 1.73, 3.14],
[6.69, 7.93, 6.85, 8.66, 3.58, 1.93, 3.53],
[7.13, 8.23, 6.60, 8.91, 3.60, 2.25, 3.65],
[6.83, 8.35, 6.65, 9.08, 3.97, 2.69, 3.69],
[7.31, 8.44, 6.74, 9.34, 4.05, 2.59, 3.50],
[7.43, 8.35, 7.19, 8.96, 4.40, 2.14, 3.25],
[7.54, 8.58, 7.14, 8.98, 4.06, 1.68, 3.64],
[7.18, 8.82, 6.88, 8.50, 3.60, 1.98, 4.00],
[7.21, 9.09, 7.14, 8.65, 3.61, 2.14, 3.63],
[7.45, 9.02, 7.30, 8.94, 4.10, 1.89, 3.78],
[7.37, 8.87, 6.95, 8.63, 3.74, 1.97, 3.42],
[6.88, 9.22, 7.02, 8.65, 4.02, 1.99, 3.76],
[7.08, 9.04, 7.38, 8.40, 3.95, 2.37, 3.62],
[6.75, 8.60, 7.50, 8.38, 3.81, 2.14, 3.67],
[6.60, 8.48, 7.60, 8.23, 3.71, 2.35, 3.61],
[6.21, 8.71, 7.15, 8.04, 3.94, 1.86, 3.39],
[6.36, 8.79, 7.30, 7.91, 4.43, 2.14, 3.43],
[6.82, 8.93, 7.80, 7.57, 4.07, 2.39, 3.33],
[6.45, 9.36, 8.15, 7.73, 4.04, 2.53, 3.03],
[6.85, 9.68, 8.40, 7.74, 4.34, 2.47, 3.28],
[6.48, 10.16, 8.87, 8.07, 4.80, 2.93, 3.46],
[6.10, 10.56, 8.53, 7.99, 5.18, 3.09, 3.25],
[5.64, 10.63, 8.94, 7.92, 4.90, 2.93, 2.95],
[6.01, 10.55, 8.52, 8.40, 5.40, 3.22, 2.87],
[6.21, 10.65, 8.80, 8.80, 5.73, 3.06, 2.63],
[6.61, 10.55, 8.92, 8.47, 5.62, 2.90, 2.40],
[7.02, 10.19, 9.20, 8.07, 5.20, 2.68, 2.53],
[7.04, 10.48, 8.71, 7.87, 4.85, 2.46, 2.96],
[6.77, 10.36, 8.25, 8.02, 5.18, 2.41, 3.26],
[7.09, 10.03, 8.19, 8.39, 4.72, 2.74, 2.97],
[6.65, 10.24, 7.80, 8.69, 4.62, 3.15, 3.16],
[7.07, 10.01, 7.69, 8.81, 4.55, 3.40, 3.58],
[6.80, 10.14, 7.23, 8.99, 4.37, 3.82, 3.23],
[6.79, 10.31, 6.98, 9.10, 4.26, 4.02, 3.62],
[6.48, 9.88, 7.07, 8.90, 4.25, 3.76, 3.13],
[6.39, 10.05, 6.95, 8.87, 4.59, 4.10, 2.93]])
# Carefully designed simulated PT (position-target) signals:
self.pt_signals = np.array([[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.000, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.250, 0.150, 0.000, 0.300, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.370, 0.193, 0.120, 0.072, 0.072, 0.072, 0.096],
[0.000, 0.222, 0.138, 0.222, 0.083, 0.222, 0.111],
[0.000, 0.222, 0.138, 0.222, 0.083, 0.222, 0.111],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.047, 0.380, 0.238, 0.000, 0.142, 0.000, 0.190],
[0.047, 0.380, 0.238, 0.000, 0.142, 0.000, 0.190],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.045, 0.454, 0.227, 0.000, 0.000, 0.000, 0.272],
[0.045, 0.454, 0.227, 0.000, 0.000, 0.000, 0.272],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300]])
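# Interpretation of the PT convention (assumed from the values, not from library docs):
# each PT row is read as a target position per share, presumably a fraction of total
# assets, that stays in force until the next row changes it. For example, the fourth
# row above targets 20% in each of the first two shares, 25% in the fifth, 10% in the
# sixth and 15% in the seventh, with the remainder held as cash.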
# Carefully designed simulated PS (proportional trading) signals, closely mirroring the simulated PT signals
self.ps_signals = np.array([[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.100, 0.150],
[0.200, 0.200, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.100, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, -0.750, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.333, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, -0.500, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, -1.000],
[0.000, 0.000, 0.000, 0.000, 0.200, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.500, 0.000, 0.000, 0.150, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.200, 0.000, -1.000, 0.200, 0.000],
[0.500, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.200, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, -0.500, 0.200],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.200, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.150, 0.000, 0.000],
[-1.000, 0.000, 0.000, 0.250, 0.000, 0.250, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.250, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, -1.000, 0.000, -1.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.800, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.100, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, -1.000, 0.000, 0.100],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, -1.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-1.000, 0.000, 0.150, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000]])
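# Interpretation of the PS convention (assumed from the signal values): PS entries look
# like proportional trade orders rather than position targets — a positive value buys
# using that proportion of assets, while a negative value sells that proportion of the
# current holding (e.g. -1.000 clears the position, -0.500 halves it).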
# Carefully designed simulated VS (volume/share) trading signals, similar to the simulated PS signals
self.vs_signals = np.array([[000, 000, 000, 000, 500, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 300, 300],
[400, 400, 000, 000, 000, 000, 000],
[000, 000, 250, 000, 000, 000, 000],
[000, 000, 000, 000, -400, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, -200, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, -300],
[000, 000, 000, 000, 500, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 000, 300, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 400, 000, -300, 600, 000],
[500, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[600, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, -400, 600],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 500, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 300, 000, 000],
[-500, 000, 000, 500, 000, 200, 000],
[000, 000, 000, 000, 000, 000, 000],
[500, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, -700, 000, -600, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-400, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 300, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, -600, 000, 300],
[000, 000, 000, 000, 000, 000, 000],
[000, -300, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 700, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000]])
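# Interpretation of the VS convention (assumed from the magnitudes): VS entries are
# absolute share volumes — positive numbers are shares to buy and negative numbers
# shares to sell (e.g. 500 buys 500 shares, -400 sells 400 shares).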
# Carefully designed simulated multi-price trading signals: 50 trading days of operations on three shares
self.multi_shares = ['000010', '000030', '000039']
self.multi_dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08']
self.multi_dates = [pd.Timestamp(date_text) for date_text in self.multi_dates]
# The trading prices used for these operations include the open, high and close prices
self.multi_prices_open = np.array([[10.02, 9.88, 7.26],
[10.00, 9.88, 7.00],
[9.98, 9.89, 6.88],
[9.97, 9.75, 6.91],
[9.99, 9.74, np.nan],
[10.01, 9.80, 6.81],
[10.04, 9.62, 6.63],
[10.06, 9.65, 6.45],
[10.06, 9.58, 6.16],
[10.11, 9.67, 6.24],
[10.11, 9.81, 5.96],
[10.07, 9.80, 5.97],
[10.06, 10.00, 5.96],
[10.09, 9.95, 6.20],
[10.03, 10.10, 6.35],
[10.02, 10.06, 6.11],
[10.06, 10.14, 6.37],
[10.08, 9.90, 5.58],
[9.99, 10.20, 5.65],
[10.00, 10.29, 5.65],
[10.03, 9.86, 5.19],
[10.02, 9.48, 5.42],
[10.06, 10.01, 6.30],
[10.03, 10.24, 6.15],
[9.97, 10.26, 6.05],
[9.94, 10.24, 5.89],
[9.83, 10.12, 5.22],
[9.78, 10.65, 5.20],
[9.77, 10.64, 5.07],
[9.91, 10.56, 6.04],
[9.92, 10.42, 6.12],
[9.97, 10.43, 5.85],
[9.91, 10.29, 5.67],
[9.90, 10.30, 6.02],
[9.88, 10.44, 6.04],
[9.91, 10.60, 7.07],
[9.63, 10.67, 7.64],
[9.64, 10.46, 7.99],
[9.57, 10.39, 7.59],
[9.55, 10.90, 8.73],
[9.58, 11.01, 8.72],
[9.61, 11.01, 8.97],
[9.62, np.nan, 8.58],
[9.55, np.nan, 8.71],
[9.57, 10.82, 8.77],
[9.61, 11.02, 8.40],
[9.63, 10.96, 7.95],
[9.64, 11.55, 7.76],
[9.61, 11.74, 8.25],
[9.56, 11.80, 7.51]])
self.multi_prices_high = np.array([[10.07, 9.91, 7.41],
[10.00, 10.04, 7.31],
[10.00, 9.93, 7.14],
[10.00, 10.04, 7.00],
[10.03, 9.84, np.nan],
[10.03, 9.88, 6.82],
[10.04, 9.99, 6.96],
[10.09, 9.70, 6.85],
[10.10, 9.67, 6.50],
[10.14, 9.71, 6.34],
[10.11, 9.85, 6.04],
[10.10, 9.90, 6.02],
[10.09, 10.00, 6.12],
[10.09, 10.20, 6.38],
[10.10, 10.11, 6.43],
[10.05, 10.18, 6.46],
[10.07, 10.21, 6.43],
[10.09, 10.26, 6.27],
[10.10, 10.38, 5.77],
[10.00, 10.47, 6.01],
[10.04, 10.42, 5.67],
[10.04, 10.07, 5.67],
[10.06, 10.24, 6.35],
[10.09, 10.27, 6.32],
[10.05, 10.38, 6.43],
[9.97, 10.43, 6.36],
[9.96, 10.39, 5.79],
[9.86, 10.65, 5.47],
[9.77, 10.84, 5.65],
[9.92, 10.65, 6.04],
[9.94, 10.73, 6.14],
[9.97, 10.63, 6.23],
[9.97, 10.51, 5.83],
[9.92, 10.35, 6.25],
[9.92, 10.46, 6.27],
[9.92, 10.63, 7.12],
[9.93, 10.74, 7.82],
[9.64, 10.76, 8.14],
[9.58, 10.54, 8.27],
[9.60, 11.02, 8.92],
[9.58, 11.12, 8.76],
[9.62, 11.17, 9.15],
[9.62, np.nan, 8.90],
[9.64, np.nan, 9.01],
[9.59, 10.92, 9.16],
[9.62, 11.15, 9.00],
[9.63, 11.11, 8.27],
[9.70, 11.55, 7.99],
[9.66, 11.95, 8.33],
[9.64, 11.93, 8.25]])
self.multi_prices_close = np.array([[10.04, 9.68, 6.64],
[10.00, 9.87, 7.26],
[10.00, 9.86, 7.03],
[9.99, 9.87, 6.87],
[9.97, 9.79, np.nan],
[9.99, 9.82, 6.64],
[10.03, 9.80, 6.85],
[10.03, 9.66, 6.70],
[10.06, 9.62, 6.39],
[10.06, 9.58, 6.22],
[10.11, 9.69, 5.92],
[10.09, 9.78, 5.91],
[10.07, 9.75, 6.11],
[10.06, 9.96, 5.91],
[10.09, 9.90, 6.23],
[10.03, 10.04, 6.28],
[10.03, 10.06, 6.28],
[10.06, 10.08, 6.27],
[10.08, 10.24, 5.70],
[10.00, 10.24, 5.56],
[9.99, 10.24, 5.67],
[10.03, 9.86, 5.16],
[10.03, 10.13, 5.69],
[10.06, 10.12, 6.32],
[10.03, 10.10, 6.14],
[9.97, 10.25, 6.25],
[9.94, 10.24, 5.79],
[9.83, 10.22, 5.26],
[9.77, 10.75, 5.05],
[9.84, 10.64, 5.45],
[9.91, 10.56, 6.06],
[9.93, 10.60, 6.21],
[9.96, 10.42, 5.69],
[9.91, 10.25, 5.46],
[9.91, 10.24, 6.02],
[9.88, 10.49, 6.69],
[9.91, 10.57, 7.43],
[9.64, 10.63, 7.72],
[9.56, 10.48, 8.16],
[9.57, 10.37, 7.83],
[9.55, 10.96, 8.70],
[9.57, 11.02, 8.71],
[9.61, np.nan, 8.88],
[9.61, np.nan, 8.54],
[9.55, 10.88, 8.87],
[9.57, 10.87, 8.87],
[9.63, 11.01, 8.18],
[9.64, 11.01, 7.80],
[9.65, 11.58, 7.97],
[9.62, 11.80, 8.25]])
# There are three sets of trading signals, applied to the open, high and close prices respectively.
# The key point here is the handling of the stock delivery period: when it is non-zero, delivery is settled in units of trading days.
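# Assumed behaviour of the backtest core under a non-zero delivery period: shares bought
# on day T only become available for selling N trading days later (T+N), and an analogous
# rule applies to cash from sales. For example, with the 2-day stock / 0-day cash setting
# used for pt_res_sb20 below, sale proceeds can be reinvested on the same day, but newly
# bought shares cannot be sold for another two trading days.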
self.multi_signals = []
# The first set of multi_signals applies to the open price
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.000, 0.000],
[0.000, -0.500, 0.000],
[0.000, -0.500, 0.000],
[0.000, 0.000, 0.000],
[0.150, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.300, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.300],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.350, 0.250],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.100, 0.000, 0.350],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.050, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
# The second set of signals applies to the high price
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.150, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, -0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.200],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
# The third set of signals applies to the close price
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-0.500, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, -0.800],
[0.000, 0.000, 0.000],
[0.000, -1.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-0.750, 0.000, 0.000],
[0.000, 0.000, -0.850],
[0.000, 0.000, 0.000],
[0.000, -0.700, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, -1.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-1.000, 0.000, 0.000],
[0.000, -1.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
# The backtest also needs three sets of prices: open, high and close
self.multi_histories = []
# The first set of price histories holds the open prices
self.multi_histories.append(
pd.DataFrame(self.multi_prices_open,
columns=self.multi_shares,
index=self.multi_dates
)
)
# The second set of price histories holds the high prices
self.multi_histories.append(
pd.DataFrame(self.multi_prices_high,
columns=self.multi_shares,
index=self.multi_dates
)
)
# The third set of price histories holds the close prices
self.multi_histories.append(
pd.DataFrame(self.multi_prices_close,
columns=self.multi_shares,
index=self.multi_dates
)
)
# Set up the backtest parameters
self.cash = qt.CashPlan(['2016/07/01', '2016/08/12', '2016/09/23'], [10000, 10000, 10000])
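# self.cash delivers three equal tranches of 10,000 (30,000 in total) on the dates
# above, so the backtest starts with 10,000 and receives two top-ups mid-run.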
self.rate = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=0,
sell_min=0,
slipage=0)
self.rate2 = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=10,
sell_min=5,
slipage=0)
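# self.rate is a frictionless cost model (all fees, minimums and slippage set to 0);
# self.rate2 is identical except for minimum per-trade fees of 10 on buys and 5 on
# sells, so the effect of fixed minimum fees can be tested separately.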
self.pt_signal_hp = dataframe_to_hp(
pd.DataFrame(self.pt_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.ps_signal_hp = dataframe_to_hp(
pd.DataFrame(self.ps_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.vs_signal_hp = dataframe_to_hp(
pd.DataFrame(self.vs_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.multi_signal_hp = stack_dataframes(
self.multi_signals,
stack_along='htypes',
htypes='open, high, close'
)
self.history_list = dataframe_to_hp(
pd.DataFrame(self.prices, index=self.dates, columns=self.shares),
htypes='close'
)
self.multi_history_list = stack_dataframes(
self.multi_histories,
stack_along='htypes',
htypes='open, high, close'
)
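# Descriptions inferred from the call signatures used here (not from library docs):
# dataframe_to_hp appears to wrap a single-htype DataFrame into a history panel keyed
# by 'close', while stack_dataframes stacks several DataFrames along the htype axis to
# produce a panel with 'open', 'high' and 'close' layers.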
# Expected backtest results for the simulated PT signals
# PT signals, sell before buy, delivery period 0 days
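# Assumed column layout of each expected-result row below: the seven share holdings
# first, followed by available cash, an accumulated-fee column (always 0.0 here,
# consistent with a zero-cost rate) and total portfolio value — e.g. the first row
# holds 555.5556 shares of the fifth stock, 7500 cash and a total value of 10000.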
self.pt_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 6035.8333, 0.0000, 9761.1111],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9674.8209],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9712.5872],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9910.7240],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9919.3782],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9793.0692],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9513.8217],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9123.5935],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9000.5995],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9053.4865],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9248.7142],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9161.1372],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9197.3369],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9504.6981],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9875.2461],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10241.5400],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10449.2398],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10628.3269],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10500.7893],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 0.0000, 5233.1396, 0.0000, 10449.2776],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10338.2857],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10194.3474],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10471.0008],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10411.2629],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10670.0618],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10652.4799],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10526.1488],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10458.6614],
[101.4983, 417.9188, 821.7315, 288.6672, 0.0000, 2576.1284, 0.0000, 4487.0722, 0.0000, 20609.0270],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21979.4972],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21880.9628],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21630.0454],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 20968.0007],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21729.9339],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21107.6400],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21561.1745],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21553.0916],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22316.9366],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22084.2862],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21777.3543],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22756.8225],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22843.4697],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22762.1766],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22257.0973],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 23136.5259],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 21813.7852],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22395.3204],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 23717.6858],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22715.4263],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 22498.3254],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 23341.1733],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24162.3941],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24847.1508],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 23515.9755],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24555.8997],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24390.6372],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24073.3309],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24394.6500],
[2076.3314, 903.0334, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 3487.5655, 0.0000, 34904.8150],
[0.0000, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 4608.8037, 0.0000, 34198.4475],
[0.0000, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 4608.8037, 0.0000, 33753.0190],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 34953.8178],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 33230.2498],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 35026.7819],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 36976.2649],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 38673.8147],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 38717.3429],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 36659.0854],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 35877.9607],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36874.4840],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37010.2695],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 38062.3510],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36471.1357],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37534.9927],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37520.2569],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36747.7952],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36387.9409],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 35925.9715],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36950.7028],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 37383.2463],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 37761.2724],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 39548.2653],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41435.1291],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41651.6261],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41131.9920],
[644.7274, 1657.3981, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 0.0000, 0.0000, 41286.4702],
[644.7274, 1657.3981, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 0.0000, 0.0000, 40978.7259],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 40334.5453],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 41387.9172],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42492.6707],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42953.7188],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42005.1092],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42017.9106],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 43750.2824],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 41766.8679],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 42959.1150],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 41337.9320],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 40290.3688]])
# PT signals, buy before sell, delivery period 0 days
self.pt_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 6035.8333, 0.0000, 9761.1111],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9674.8209],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9712.5872],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9910.7240],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9919.3782],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9793.0692],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9513.8217],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9123.5935],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9000.5995],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9053.4865],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9248.7142],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9161.1372],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9197.3369],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9504.6981],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9875.2461],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10241.5400],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10449.2398],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10628.3269],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10500.7893],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 0.0000, 5233.1396, 0.0000, 10449.2776],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10338.2857],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10194.3474],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10471.0008],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10411.2629],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10670.0618],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10652.4799],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10526.1488],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10458.6614],
[101.4983, 417.9188, 821.7315, 288.6672, 0.0000, 2576.1284, 0.0000, 4487.0722, 0.0000, 20609.0270],
[797.1684, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 2703.5808, 0.0000, 21979.4972],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21700.7241],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21446.6630],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 20795.3593],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21557.2924],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 20933.6887],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21392.5581],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21390.2918],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22147.7562],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21910.9053],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21594.2980],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22575.4380],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22655.8312],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22578.4365],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22073.2661],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22955.2367],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 21628.1647],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22203.4237],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 23516.2598],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 22505.8428],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 22199.1042],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23027.9302],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23848.5806],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24540.8871],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23205.6838],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24267.6685],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24115.3796],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23814.3667],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24133.6611],
[2061.6837, 896.6628, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 3285.8830, 0.0000, 34658.5742],
[0.0000, 896.6628, 507.6643, 466.6033, 0.0000, 1523.7106, 1467.7407, 12328.8684, 0.0000, 33950.7917],
[0.0000, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 4380.3797, 0.0000, 33711.4045],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 34922.0959],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 33237.1081],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 35031.8071],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 36976.3376],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 38658.5245],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 38712.2854],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 36655.3125],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 35904.3692],
[644.1423, 902.2617, 514.8253, 0.0000, 15.5990, 0.0000, 1467.7407, 14821.9004, 0.0000, 36873.9080],
[644.1423, 902.2617, 514.8253, 0.0000, 1220.8683, 0.0000, 1467.7407, 10470.8781, 0.0000, 36727.7895],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37719.9840],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36138.1277],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37204.0760],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37173.1201],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36398.2298],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36034.2178],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 35583.6399],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36599.2645],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 37013.3408],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 37367.7449],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 39143.8273],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 41007.3074],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 41225.4657],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 40685.9525],
[644.1423, 1646.4805, 1033.4242, 0.0000, 0.0000, 0.0000, 1467.7407, 6592.6891, 0.0000, 40851.5435],
[644.1423, 1646.4805, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 0.0000, 0.0000, 41082.1210],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 40385.0135],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 41455.1513],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42670.6769],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 43213.7233],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42205.2480],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42273.9386],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 44100.0777],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42059.7208],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 43344.9653],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 41621.0324],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 40528.0648]])
# PT signals, sell before buy, delivery period 2 days for stock and 0 days for cash, so the cash from earlier sales can be used immediately for further buying
self.pt_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 321.089, 6035.833, 0.000, 9761.111],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9674.821],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9712.587],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9910.724],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9919.378],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9793.069],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9513.822],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9123.593],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9000.600],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9053.487],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9248.714],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9161.137],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9197.337],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9504.698],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9875.246],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10241.540],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10449.240],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10628.327],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10500.789],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 0.000, 5233.140, 0.000, 10449.278],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10338.286],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10194.347],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10471.001],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10411.263],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10670.062],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10652.480],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10526.149],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10458.661],
[101.498, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 4487.072, 0.000, 20609.027],
[797.168, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 0.000, 0.000, 21979.497],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21584.441],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21309.576],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 20664.323],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21445.597],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 20806.458],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21288.441],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21294.365],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 22058.784],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21805.540],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21456.333],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22459.720],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22611.602],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22470.912],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21932.634],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22425.864],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21460.103],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22376.968],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23604.295],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 22704.826],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 22286.293],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23204.755],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24089.017],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24768.185],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23265.196],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24350.540],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24112.706],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23709.076],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24093.545],
[2060.275, 896.050, 504.579, 288.667, 0.000, 763.410, 1577.904, 2835.944, 0.000, 34634.888],
[578.327, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 732.036, 0.000, 33912.261],
[0.000, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 4415.981, 0.000, 33711.951],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 34951.433],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 33224.596],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 35065.209],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 37018.699],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 38706.035],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 38724.569],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 36647.268],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 35928.930],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36967.229],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37056.598],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 38129.862],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36489.333],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37599.602],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37566.823],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36799.280],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36431.196],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 35940.942],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36973.050],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 37393.292],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 37711.276],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 39515.991],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41404.440],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41573.523],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41011.613],
[644.683, 1606.361, 1074.629, 0.000, 0.000, 0.000, 3896.406, 0.000, 0.000, 41160.181],
[644.683, 1606.361, 1074.629, 0.000, 0.000, 0.000, 3896.406, 0.000, 0.000, 40815.512],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 40145.531],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41217.281],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 42379.061],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 42879.589],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41891.452],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41929.003],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 43718.052],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41685.916],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 42930.410],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 41242.589],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 40168.084]])
# PT signals, buy before sell, delivery period 2 days for stock and 1 day for cash
self.pt_res_bs21 = np.array([
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 321.089, 6035.833, 0.000, 9761.111],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9674.821],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9712.587],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9910.724],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9919.378],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9793.069],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9513.822],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9123.593],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9000.600],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9053.487],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9248.714],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9161.137],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9197.337],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9504.698],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9875.246],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10241.540],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10449.240],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10628.327],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10500.789],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 0.000, 5233.140, 0.000, 10449.278],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10338.286],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10194.347],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10471.001],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10411.263],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10670.062],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10652.480],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10526.149],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10458.661],
[101.498, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 4487.072, 0.000, 20609.027],
[797.168, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 0.000, 0.000, 21979.497],
[797.168, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 2475.037, 0.000, 21584.441],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21266.406],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 20623.683],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21404.957],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 20765.509],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21248.748],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21256.041],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 22018.958],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21764.725],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21413.241],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22417.021],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22567.685],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22427.699],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21889.359],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22381.938],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21416.358],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22332.786],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 0.000, 2386.698, 0.000, 23557.595],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 23336.992],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 22907.742],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24059.201],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24941.902],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25817.514],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24127.939],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25459.688],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25147.370],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25005.842],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 1086.639, 2752.004, 0.000, 25598.700],
[2138.154, 929.921, 503.586, 288.667, 0.000, 761.900, 1086.639, 4818.835, 0.000, 35944.098],
[661.356, 929.921, 503.586, 553.843, 0.000, 1954.237, 1086.639, 8831.252, 0.000, 35237.243],
[0.000, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 9460.955, 0.000, 35154.442],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 36166.632],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 34293.883],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 35976.901],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 37848.552],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 39512.574],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 39538.024],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 37652.984],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 36687.909],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37749.277],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37865.518],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38481.190],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37425.087],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38051.341],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38065.478],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37429.495],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37154.479],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 36692.717],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 37327.055],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 37937.630],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 38298.645],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 39689.369],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 40992.397],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 41092.265],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 40733.622],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 3726.579, 0.000, 0.000, 40708.515],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 3726.579, 0.000, 0.000, 40485.321],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 39768.059],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 40519.595],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 41590.937],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 42354.983],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 41175.149],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 41037.902],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 42706.213],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 40539.205],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 41608.692],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 39992.148],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 39134.828]])
# Simulated backtest results for PS signals
# PS signals, sell first then buy, settlement period: 0 days
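# (Assumption: PS signals are interpreted as proportions of portfolio value to trade;
# "sell first then buy" means same-day sell orders are filled before buy orders, and
# a settlement period of 0 makes sale proceeds available immediately.)
# Minimal sketch of how such a fixture would typically be compared with a computed
# loop result (names below are hypothetical, not this test's actual API):
#     res = run_loop(signal_type='PS', sell_first=True, delivery_days=(0, 0))
#     assert np.allclose(res, self.ps_res_sb00, atol=0.01)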
self.ps_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 5059.7222, 0.0000, 9761.1111],
[346.9824, 416.6787, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 1201.2775, 0.0000, 9646.1118],
[346.9824, 416.6787, 191.0372, 0.0000, 555.5556, 205.0654, 321.0892, 232.7189, 0.0000, 9685.5858],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9813.2184],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9803.1288],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9608.0198],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9311.5727],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8883.6246],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8751.3900],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8794.1811],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9136.5704],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9209.3588],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9093.8294],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9387.5537],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9585.9589],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 9928.7771],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10060.3806],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10281.0021],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10095.5613],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 0.0000, 4506.3926, 0.0000, 10029.9571],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9875.6133],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9614.9463],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9824.1722],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9732.5743],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9968.3391],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 10056.1579],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9921.4925],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9894.1621],
[115.7186, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 6179.7742, 0.0000, 20067.9370],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21133.5080],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20988.8485],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20596.7429],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 19910.7730],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20776.7070],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20051.7969],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20725.3884],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20828.8795],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21647.1811],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21310.1687],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20852.0993],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21912.3952],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21937.8282],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21962.4576],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21389.4018],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22027.4535],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 20939.9992],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21250.0636],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22282.7812],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21407.0658],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21160.2373],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21826.7682],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22744.9403],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23466.1185],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22017.8821],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23191.4662],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23099.0822],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22684.7671],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22842.1346],
[1073.8232, 416.6787, 735.6442, 269.8496, 1785.2055, 938.6967, 1339.2073, 5001.4246, 0.0000,
33323.8359],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 32820.2901],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 32891.2308],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 34776.5296],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 33909.0325],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 34560.1906],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 36080.4552],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 38618.4454],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 38497.9230],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 37110.0991],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 35455.2467],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35646.1860],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35472.3020],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36636.4694],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35191.7035],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36344.2242],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36221.6005],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35943.5708],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35708.2608],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35589.0286],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36661.0285],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 36310.5909],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 36466.7637],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 37784.4918],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 39587.6766],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 40064.0191],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 39521.6439],
[0.0000, 823.2923, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 17142.1018, 0.0000, 39932.2761],
[0.0000, 823.2923, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 17142.1018, 0.0000, 39565.2475],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 38943.1632],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39504.1184],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40317.8004],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40798.5768],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39962.5711],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40194.4793],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 41260.4003],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39966.3024],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 40847.3160],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 39654.5445],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 38914.8151]])
# PS signals, buy first then sell, settlement period: 0 days
self.ps_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 5059.7222, 0.0000, 9761.1111],
[346.9824, 416.6787, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 1201.2775, 0.0000, 9646.1118],
[346.9824, 416.6787, 191.0372, 0.0000, 555.5556, 205.0654, 321.0892, 232.7189, 0.0000, 9685.5858],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9813.2184],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9803.1288],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9608.0198],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9311.5727],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8883.6246],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8751.3900],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8794.1811],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9136.5704],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9209.3588],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9093.8294],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9387.5537],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9585.9589],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 9928.7771],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10060.3806],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10281.0021],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10095.5613],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 0.0000, 4506.3926, 0.0000, 10029.9571],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9875.6133],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9614.9463],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9824.1722],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9732.5743],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9968.3391],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 10056.1579],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9921.4925],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9894.1621],
[115.7186, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 6179.7742, 0.0000, 20067.9370],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21133.5080],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20988.8485],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20596.7429],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 19910.7730],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20776.7070],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20051.7969],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20725.3884],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20828.8795],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21647.1811],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21310.1687],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20852.0993],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21912.3952],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21937.8282],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21962.4576],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21389.4018],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21625.6913],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 20873.0389],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21450.9447],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 22269.3892],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21969.5329],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21752.6924],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22000.6088],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23072.5655],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23487.5201],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22441.0460],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23201.2700],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23400.9485],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22306.2008],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21989.5913],
[1073.8232, 737.0632, 735.6442, 269.8496, 1708.7766, 938.6967, 0.0000, 5215.4255, 0.0000, 31897.1636],
[0.0000, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 6421.4626, 0.0000, 31509.5059],
[0.0000, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 6421.4626, 0.0000, 31451.7888],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32773.4592],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32287.0318],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32698.1938],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 34031.5183],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 35537.8336],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 36212.6487],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 36007.5294],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 34691.3797],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 33904.8810],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34341.6098],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 35479.9505],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34418.4455],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34726.7182],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34935.0407],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34136.7505],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 33804.1575],
[195.7763, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 14025.8697, 0.0000, 33653.8970],
[195.7763, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 14025.8697, 0.0000, 34689.8757],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 34635.7841],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 35253.2755],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 36388.1051],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 37987.4204],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 38762.2103],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 38574.0544],
[195.7763, 1124.9219, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 15879.4935, 0.0000, 39101.9156],
[195.7763, 1124.9219, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 15879.4935, 0.0000, 39132.5587],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 38873.2941],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39336.6594],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39565.9568],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39583.4317],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39206.8350],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39092.6551],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39666.1834],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 38798.0749],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 39143.5561],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 38617.8779],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 38156.1701]])
# PS signals, sell first then buy, settlement period: 2 days (stocks), 1 day (cash)
self.ps_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 205.065, 321.089, 5059.722, 0.000, 9761.111],
[346.982, 416.679, 0.000, 0.000, 555.556, 205.065, 321.089, 1201.278, 0.000, 9646.112],
[346.982, 416.679, 191.037, 0.000, 555.556, 205.065, 321.089, 232.719, 0.000, 9685.586],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9813.218],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9803.129],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9608.020],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9311.573],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8883.625],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8751.390],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8794.181],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9136.570],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9209.359],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9093.829],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9387.554],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9585.959],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 9928.777],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10060.381],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10281.002],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10095.561],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 0.000, 4506.393, 0.000, 10029.957],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9875.613],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9614.946],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9824.172],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9732.574],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9968.339],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 10056.158],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9921.492],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9894.162],
[115.719, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 6179.774, 0.000, 20067.937],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21133.508],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20988.848],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20596.743],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 19910.773],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20776.707],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20051.797],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20725.388],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20828.880],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21647.181],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21310.169],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20852.099],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21912.395],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21937.828],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21962.458],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21389.402],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22027.453],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 20939.999],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21250.064],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22282.781],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21407.066],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21160.237],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21826.768],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22744.940],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23466.118],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22017.882],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23191.466],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23099.082],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22684.767],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22842.135],
[1073.823, 416.679, 735.644, 269.850, 1785.205, 938.697, 1339.207, 5001.425, 0.000, 33323.836],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 32820.290],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 32891.231],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 34776.530],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 33909.032],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 34560.191],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 36080.455],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 38618.445],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 38497.923],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 37110.099],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 35455.247],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35646.186],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35472.302],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36636.469],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35191.704],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36344.224],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36221.601],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35943.571],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35708.261],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35589.029],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36661.029],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 36310.591],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 36466.764],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 37784.492],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 39587.677],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 40064.019],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 39521.644],
[0.000, 823.292, 735.644, 0.000, 0.000, 0.000, 2730.576, 17142.102, 0.000, 39932.276],
[0.000, 823.292, 735.644, 0.000, 0.000, 0.000, 2730.576, 17142.102, 0.000, 39565.248],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 38943.163],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39504.118],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40317.800],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40798.577],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39962.571],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40194.479],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 41260.400],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39966.302],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 40847.316],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 39654.544],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 38914.815]])
# PS signals, buy first then sell, settlement period: 2 days (stocks), 1 day (cash)
self.ps_res_bs21 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 208.333, 326.206, 5020.833, 0.000, 9761.111],
[351.119, 421.646, 0.000, 0.000, 555.556, 208.333, 326.206, 1116.389, 0.000, 9645.961],
[351.119, 421.646, 190.256, 0.000, 555.556, 208.333, 326.206, 151.793, 0.000, 9686.841],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9813.932],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9803.000],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9605.334],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9304.001],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8870.741],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8738.282],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8780.664],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9126.199],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9199.746],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9083.518],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9380.932],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9581.266],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 9927.154],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10059.283],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10281.669],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10093.263],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 0.000, 4453.525, 0.000, 10026.289],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9870.523],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9606.437],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9818.691],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9726.556],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9964.547],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 10053.449],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9917.440],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9889.495],
[117.098, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 6189.948, 0.000, 20064.523],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21124.484],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20827.077],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20396.124],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 19856.445],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20714.156],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 19971.485],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20733.948],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20938.903],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21660.772],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21265.298],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20684.378],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21754.770],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21775.215],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21801.488],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21235.427],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21466.714],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 20717.431],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21294.450],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 22100.247],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21802.552],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21593.608],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21840.028],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22907.725],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23325.945],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22291.942],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23053.050],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23260.084],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22176.244],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21859.297],
[1055.763, 740.051, 729.561, 272.237, 1706.748, 932.896, 0.000, 5221.105, 0.000, 31769.617],
[0.000, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 6313.462, 0.000, 31389.961],
[0.000, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 6313.462, 0.000, 31327.498],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32647.140],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32170.095],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32577.742],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 33905.444],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 35414.492],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 36082.120],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 35872.293],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 34558.132],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 33778.138],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34213.578],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 35345.791],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34288.014],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34604.406],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34806.850],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34012.232],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 33681.345],
[192.484, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 13958.345, 0.000, 33540.463],
[192.484, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 13958.345, 0.000, 34574.280],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 34516.781],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 35134.412],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 36266.530],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 37864.376],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 38642.633],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 38454.227],
[192.484, 1127.221, 729.561, 0.000, 0.000, 0.000, 1339.869, 15871.934, 0.000, 38982.227],
[192.484, 1127.221, 729.561, 0.000, 0.000, 0.000, 1339.869, 15871.934, 0.000, 39016.154],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38759.803],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39217.182],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39439.690],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39454.081],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39083.341],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38968.694],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39532.030],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38675.507],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 39013.741],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 38497.668],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 38042.410]])
# Simulated backtest results for VS signals
# VS signals, sell first then buy, settlement period: 0 days
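# (Assumption: VS signals are interpreted as absolute trade volumes, i.e. the number
# of shares to buy or sell on each day, rather than target positions or proportions.)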
self.vs_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750.0000, 0.0000, 9925.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 4954.0000, 0.0000, 9785.0000],
[400.0000, 400.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 878.0000, 0.0000, 9666.0000],
[400.0000, 400.0000, 173.1755, 0.0000, 500.0000, 300.0000, 300.0000, 0.0000, 0.0000, 9731.0000],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9830.9270],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9785.8540],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9614.3412],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9303.1953],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8834.4398],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8712.7554],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8717.9507],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9079.1479],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9166.0276],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9023.6607],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9291.6864],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9411.6371],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9706.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9822.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9986.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9805.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 0.0000, 4993.7357, 0.0000, 9704.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9567.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9209.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9407.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9329.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9545.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9652.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9414.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9367.7357],
[0.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 9319.7357, 0.0000, 19556.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20094.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19849.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19802.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19487.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19749.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19392.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19671.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19756.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20111.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19867.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19775.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20314.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20310.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20253.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20044.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20495.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 19798.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20103.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20864.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20425.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20137.8405],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20711.3567],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21470.3891],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21902.9538],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20962.9538],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21833.5184],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21941.8169],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21278.5184],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21224.4700],
[1100.0000, 710.4842, 400.0000, 300.0000, 600.0000, 500.0000, 600.0000, 9160.0000, 0.0000, 31225.2119],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488.0000, 0.0000, 30894.5748],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488.0000, 0.0000, 30764.3811],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 31815.5828],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 31615.4215],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 32486.1394],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 33591.2847],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34056.5428],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34756.4863],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34445.5428],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34433.9541],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
33870.4703],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
34014.3010],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
34680.5671],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
33890.9945],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
34004.6640],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
34127.7768],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
33421.1638],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
33120.9057],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830.0000, 0.0000, 32613.3171],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830.0000, 0.0000, 33168.1558],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
33504.6236],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
33652.1318],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
34680.4867],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
35557.5191],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
35669.7128],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
35211.4466],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530.0000, 0.0000, 35550.6079],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530.0000, 0.0000, 35711.6563],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35682.6079],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35880.8336],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36249.8740],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36071.6159],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35846.1562],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35773.3578],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36274.9465],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35739.3094],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 36135.0917],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 35286.5835],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 35081.3658]])
# VS signals, buy first then sell, settlement period: 0 days
self.vs_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750, 0.0000, 10000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750, 0.0000, 9925],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 4954, 0.0000, 9785],
[400.0000, 400.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 878, 0.0000, 9666],
[400.0000, 400.0000, 173.1755, 0.0000, 500.0000, 300.0000, 300.0000, 0, 0.0000, 9731],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9830.927022],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9785.854043],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9614.341223],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9303.195266],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8834.439842],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8712.755424],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8717.95069],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9079.147929],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9166.027613],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9023.66075],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9291.686391],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9411.637081],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9706.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9822.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9986.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9805.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 0.0000, 4993.7357, 0.0000, 9704.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9567.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9209.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9407.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9329.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9545.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9652.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9414.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9367.7357],
[0.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 9319.7357, 0.0000, 19556.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20094.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19849.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19802.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19487.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19749.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19392.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19671.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19756.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20111.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19867.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19775.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20314.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20310.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20253.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20044.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20495.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 19798.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20103.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20864.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20425.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20137.84054],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20711.35674],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21470.38914],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21902.95375],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20962.95375],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21833.51837],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21941.81688],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21278.51837],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21224.46995],
[1100.0000, 710.4842, 400.0000, 300.0000, 600.0000, 500.0000, 600.0000, 9160, 0.0000, 31225.21185],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488, 0.0000, 30894.57479],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488, 0.0000, 30764.38113],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 31815.5828],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 31615.42154],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 32486.13941],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 33591.28466],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34056.54276],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34756.48633],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34445.54276],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34433.95412],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33870.47032],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34014.30104],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34680.56715],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33890.99452],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34004.66398],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34127.77683],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33421.1638],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33120.9057],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830, 0.0000, 32613.31706],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830, 0.0000, 33168.15579],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 33504.62357],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 33652.13176],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 34680.4867],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35557.51909],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35669.71276],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35211.44665],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530, 0.0000, 35550.60792],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530, 0.0000, 35711.65633],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35682.60792],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35880.83362],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36249.87403],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36071.61593],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35846.15615],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35773.35783],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36274.94647],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35739.30941],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 36135.09172],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 35286.58353],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 35081.36584]])
# VS signals, sell first then buy, delivery period = 2 days (stock) / 1 day (cash)
self.vs_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 9925.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 300.000, 300.000, 4954.000, 0.000, 9785.000],
[400.000, 400.000, 0.000, 0.000, 500.000, 300.000, 300.000, 878.000, 0.000, 9666.000],
[400.000, 400.000, 173.176, 0.000, 500.000, 300.000, 300.000, 0.000, 0.000, 9731.000],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9830.927],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9785.854],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9614.341],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9303.195],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8834.440],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8712.755],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8717.951],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9079.148],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9166.028],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9023.661],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9291.686],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9411.637],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9706.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9822.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9986.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9805.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 0.000, 4993.736, 0.000, 9704.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9567.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9209.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9407.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9329.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9545.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9652.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9414.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9367.736],
[0.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 9319.736, 0.000, 19556.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20094.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19849.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19802.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19487.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19749.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19392.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19671.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19756.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20111.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19867.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19775.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20314.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20310.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20253.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20044.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20495.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 19798.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20103.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20864.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20425.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20137.841],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20711.357],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21470.389],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21902.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20962.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21833.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21941.817],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21278.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21224.470],
[1100.000, 710.484, 400.000, 300.000, 600.000, 500.000, 600.000, 9160.000, 0.000, 31225.212],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30894.575],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30764.381],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31815.583],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31615.422],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 32486.139],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 33591.285],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34056.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34756.486],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34445.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34433.954],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33870.470],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34014.301],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34680.567],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33890.995],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34004.664],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34127.777],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33421.164],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33120.906],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 32613.317],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 33168.156],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33504.624],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33652.132],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 34680.487],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35557.519],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35669.713],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35211.447],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35550.608],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35711.656],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35682.608],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35880.834],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36249.874],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36071.616],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35846.156],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35773.358],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36274.946],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35739.309],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 36135.092],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35286.584],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35081.366]])
# VS signals, buy first then sell, delivery period = 2 days (stock) / 1 day (cash)
self.vs_res_bs21 = np.array(
[[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 9925.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 300.000, 300.000, 4954.000, 0.000, 9785.000],
[400.000, 400.000, 0.000, 0.000, 500.000, 300.000, 300.000, 878.000, 0.000, 9666.000],
[400.000, 400.000, 173.176, 0.000, 500.000, 300.000, 300.000, 0.000, 0.000, 9731.000],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9830.927],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9785.854],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9614.341],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9303.195],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8834.440],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8712.755],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8717.951],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9079.148],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9166.028],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9023.661],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9291.686],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9411.637],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9706.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9822.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9986.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9805.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 0.000, 4993.736, 0.000, 9704.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9567.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9209.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9407.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9329.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9545.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9652.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9414.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9367.736],
[0.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 9319.736, 0.000, 19556.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20094.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19849.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19802.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19487.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19749.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19392.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19671.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19756.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20111.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19867.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19775.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20314.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20310.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20253.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20044.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20495.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 19798.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20103.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20864.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20425.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20137.841],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20711.357],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21470.389],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21902.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20962.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21833.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21941.817],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21278.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21224.470],
[1100.000, 710.484, 400.000, 300.000, 600.000, 500.000, 600.000, 9160.000, 0.000, 31225.212],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30894.575],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30764.381],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31815.583],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31615.422],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 32486.139],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 33591.285],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34056.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34756.486],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34445.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34433.954],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33870.470],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34014.301],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34680.567],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33890.995],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34004.664],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34127.777],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33421.164],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33120.906],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 32613.317],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 33168.156],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33504.624],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33652.132],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 34680.487],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35557.519],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35669.713],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35211.447],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35550.608],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35711.656],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35682.608],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35880.834],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36249.874],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36071.616],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35846.156],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35773.358],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36274.946],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35739.309],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 36135.092],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35286.584],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35081.366]])
# Multi-signal processing results, sell first then buy, purchases funded by sale proceeds, delivery period = 2 days (stock) / 0 days (cash)
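# These multi-signal rows have only 6 columns; by analogy with the tables above they appear
# to hold three share amounts (columns 0-2), cash (column 3), fee (column 4) and total value
# (column 5) -- this layout is an assumption and is not asserted by the tests in this section.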
self.multi_res = np.array(
[[0.0000, 357.2545, 0.0000, 6506.9627, 0.0000, 9965.1867],
[0.0000, 357.2545, 0.0000, 6506.9627, 0.0000, 10033.0650],
[0.0000, 178.6273, 0.0000, 8273.5864, 0.0000, 10034.8513],
[0.0000, 178.6273, 0.0000, 8273.5864, 0.0000, 10036.6376],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10019.3404],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10027.7062],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10030.1477],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10005.1399],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10002.5054],
[150.3516, 489.4532, 0.0000, 3765.8877, 0.0000, 9967.3860],
[75.1758, 391.5625, 0.0000, 5490.1377, 0.0000, 10044.4059],
[75.1758, 391.5625, 0.0000, 5490.1377, 0.0000, 10078.1430],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10138.2709],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10050.4768],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10300.0711],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10392.6970],
[75.1758, 391.5625, 169.2705, 4644.3773, 0.0000, 10400.5282],
[75.1758, 391.5625, 169.2705, 4644.3773, 0.0000, 10408.9220],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10376.5914],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10346.8794],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10364.7474],
[75.1758, 381.1856, 645.5014, 2459.1665, 0.0000, 10302.4570],
[18.7939, 381.1856, 645.5014, 3024.6764, 0.0000, 10747.4929],
[18.7939, 381.1856, 96.8252, 6492.3097, 0.0000, 11150.9107],
[18.7939, 381.1856, 96.8252, 6492.3097, 0.0000, 11125.2946],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11191.9956],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11145.7486],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11090.0768],
[132.5972, 114.3557, 864.3802, 4223.9548, 0.0000, 11113.8733],
[132.5972, 114.3557, 864.3802, 4223.9548, 0.0000, 11456.3281],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21983.7333],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 22120.6165],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21654.5327],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21429.6550],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21912.5643],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 22516.3100],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23169.0777],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23390.8080],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23743.3742],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 23210.7311],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 24290.4375],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 24335.3279],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 18317.3553],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 18023.4660],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24390.0527],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24389.6421],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24483.5953],
[0.0000, 559.9112, 0.0000, 18321.5674, 0.0000, 24486.1895],
[0.0000, 0.0000, 0.0000, 24805.3389, 0.0000, 24805.3389],
[0.0000, 0.0000, 0.0000, 24805.3389, 0.0000, 24805.3389]])
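# The test methods below all follow the same pattern: take row N-1 of one of the expected-
# results tables above as the account state, run qt.core._loop_step() with the signal and
# prices of day N, apply the returned cash and amount changes, and compare the outcome with
# row N of the same table. signal_type=0, 1 and 2 select the PT, PS and VS signal types,
# matching the pt_/ps_/vs_ table prefixes, and maximize_cash_usage=True corresponds to the
# "sell first" variants. A minimal sketch of that per-day check (illustration only; the
# helper name verify_day is hypothetical and is not used by the tests themselves):
#
#     def verify_day(self, signal_type, signals, res, day, sell_first):
#         c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=signal_type,
#                                                      own_cash=res[day - 1][7],
#                                                      own_amounts=res[day - 1][0:7],
#                                                      available_cash=res[day - 1][7],
#                                                      available_amounts=res[day - 1][0:7],
#                                                      op=signals[day],
#                                                      prices=self.prices[day],
#                                                      rate=self.rate,
#                                                      pt_buy_threshold=0.1,
#                                                      pt_sell_threshold=0.1,
#                                                      maximize_cash_usage=sell_first,
#                                                      allow_sell_short=False,
#                                                      moq_buy=0,
#                                                      moq_sell=0,
#                                                      print_log=False)
#         self.assertAlmostEqual(res[day - 1][7] + c_g + c_s, res[day][7], 2)
#         self.assertTrue(np.allclose(res[day - 1][0:7] + a_p + a_s, res[day][0:7]))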
def test_loop_step_pt_sb00(self):
""" test loop step PT-signal, sell first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.pt_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
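# day 1 expectation: starting from 10,000 in cash with no holdings, only the share at
# index 4 is bought -- cash should fall to 7,500 with roughly 555.56 units acquired.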
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[2][7],
own_amounts=self.pt_res_sb00[2][0:7],
available_cash=self.pt_res_sb00[2][7],
available_amounts=self.pt_res_sb00[2][0:7],
op=self.pt_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[2][7] + c_g + c_s
amounts = self.pt_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[30][7],
own_amounts=self.pt_res_sb00[30][0:7],
available_cash=self.pt_res_sb00[30][7],
available_amounts=self.pt_res_sb00[30][0:7],
op=self.pt_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[30][7] + c_g + c_s
amounts = self.pt_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[31][0:7]))
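# day 61: an extra 10,000 in cash is added to own_cash and available_cash before the step
# (and to the expected cash below), apparently reflecting additional capital invested on
# that day; the same injection appears in every test method of this section.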
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[59][7] + 10000,
own_amounts=self.pt_res_sb00[59][0:7],
available_cash=self.pt_res_sb00[59][7] + 10000,
available_amounts=self.pt_res_sb00[59][0:7],
op=self.pt_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.pt_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[95][7],
own_amounts=self.pt_res_sb00[95][0:7],
available_cash=self.pt_res_sb00[95][7],
available_amounts=self.pt_res_sb00[95][0:7],
op=self.pt_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[95][7] + c_g + c_s
amounts = self.pt_res_sb00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[97][0:7]))
def test_loop_step_pt_bs00(self):
""" test loop step PT-signal, buy first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.pt_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
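# day 1 expectation: with maximize_cash_usage=False (buy first) the result is identical to
# the sell-first case above, since there is nothing to sell on the first day.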
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[2][7],
own_amounts=self.pt_res_bs00[2][0:7],
available_cash=self.pt_res_bs00[2][7],
available_amounts=self.pt_res_bs00[2][0:7],
op=self.pt_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[2][7] + c_g + c_s
amounts = self.pt_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[30][7],
own_amounts=self.pt_res_bs00[30][0:7],
available_cash=self.pt_res_bs00[30][7],
available_amounts=self.pt_res_bs00[30][0:7],
op=self.pt_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[30][7] + c_g + c_s
amounts = self.pt_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[59][7] + 10000,
own_amounts=self.pt_res_bs00[59][0:7],
available_cash=self.pt_res_bs00[59][7] + 10000,
available_amounts=self.pt_res_bs00[59][0:7],
op=self.pt_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.pt_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[95][7],
own_amounts=self.pt_res_bs00[95][0:7],
available_cash=self.pt_res_bs00[95][7],
available_amounts=self.pt_res_bs00[95][0:7],
op=self.pt_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[95][7] + c_g + c_s
amounts = self.pt_res_bs00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[97][0:7]))
def test_loop_step_ps_sb00(self):
""" test loop step PS-signal, sell first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.ps_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[2][7],
own_amounts=self.ps_res_sb00[2][0:7],
available_cash=self.ps_res_sb00[2][7],
available_amounts=self.ps_res_sb00[2][0:7],
op=self.ps_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[2][7] + c_g + c_s
amounts = self.ps_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[30][7],
own_amounts=self.ps_res_sb00[30][0:7],
available_cash=self.ps_res_sb00[30][7],
available_amounts=self.ps_res_sb00[30][0:7],
op=self.ps_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[30][7] + c_g + c_s
amounts = self.ps_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[59][7] + 10000,
own_amounts=self.ps_res_sb00[59][0:7],
available_cash=self.ps_res_sb00[59][7] + 10000,
available_amounts=self.ps_res_sb00[59][0:7],
op=self.ps_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.ps_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[95][7],
own_amounts=self.ps_res_sb00[95][0:7],
available_cash=self.ps_res_sb00[95][7],
available_amounts=self.ps_res_sb00[95][0:7],
op=self.ps_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[95][7] + c_g + c_s
amounts = self.ps_res_sb00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[97][0:7]))
def test_loop_step_ps_bs00(self):
""" test loop step PS-signal, buy first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.ps_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[2][7],
                                             own_amounts=self.ps_res_bs00[2][0:7],
available_cash=self.ps_res_bs00[2][7],
available_amounts=self.ps_res_bs00[2][0:7],
op=self.ps_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[2][7] + c_g + c_s
amounts = self.ps_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[30][7],
                                             own_amounts=self.ps_res_bs00[30][0:7],
available_cash=self.ps_res_bs00[30][7],
available_amounts=self.ps_res_bs00[30][0:7],
op=self.ps_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[30][7] + c_g + c_s
amounts = self.ps_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[59][7] + 10000,
own_amounts=self.ps_res_bs00[59][0:7],
available_cash=self.ps_res_bs00[59][7] + 10000,
available_amounts=self.ps_res_bs00[59][0:7],
op=self.ps_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.ps_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[95][7],
own_amounts=self.ps_res_bs00[95][0:7],
available_cash=self.ps_res_bs00[95][7],
available_amounts=self.ps_res_bs00[95][0:7],
op=self.ps_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[95][7] + c_g + c_s
amounts = self.ps_res_bs00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[97][0:7]))
def test_loop_step_vs_sb00(self):
"""test loop step of Volume Signal type of signals"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.vs_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
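# day 1 expectation: the VS signal buys exactly 500 units of the share at index 4, so cash
# should fall from 10,000 to 7,750.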
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7750)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 500., 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[2][7],
own_amounts=self.vs_res_sb00[2][0:7],
available_cash=self.vs_res_sb00[2][7],
available_amounts=self.vs_res_sb00[2][0:7],
op=self.vs_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[2][7] + c_g + c_s
amounts = self.vs_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[30][7],
own_amounts=self.vs_res_sb00[30][0:7],
available_cash=self.vs_res_sb00[30][7],
available_amounts=self.vs_res_sb00[30][0:7],
op=self.vs_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[30][7] + c_g + c_s
amounts = self.vs_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[59][7] + 10000,
own_amounts=self.vs_res_sb00[59][0:7],
available_cash=self.vs_res_sb00[59][7] + 10000,
available_amounts=self.vs_res_sb00[59][0:7],
op=self.vs_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.vs_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[95][7],
own_amounts=self.vs_res_sb00[95][0:7],
available_cash=self.vs_res_sb00[95][7],
available_amounts=self.vs_res_sb00[95][0:7],
op=self.vs_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[95][7] + c_g + c_s
amounts = self.vs_res_sb00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[97][0:7]))
def test_loop_step_vs_bs00(self):
"""test loop step of Volume Signal type of signals"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.vs_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7750)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 500., 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[2][7],
own_amounts=self.vs_res_bs00[2][0:7],
available_cash=self.vs_res_bs00[2][7],
available_amounts=self.vs_res_bs00[2][0:7],
op=self.vs_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[2][7] + c_g + c_s
amounts = self.vs_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[30][7],
own_amounts=self.vs_res_bs00[30][0:7],
available_cash=self.vs_res_bs00[30][7],
available_amounts=self.vs_res_bs00[30][0:7],
op=self.vs_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[30][7] + c_g + c_s
amounts = self.vs_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[59][7] + 10000,
own_amounts=self.vs_res_bs00[59][0:7],
available_cash=self.vs_res_bs00[59][7] + 10000,
available_amounts=self.vs_res_bs00[59][0:7],
op=self.vs_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.vs_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[95][7],
own_amounts=self.vs_res_bs00[95][0:7],
available_cash=self.vs_res_bs00[95][7],
available_amounts=self.vs_res_bs00[95][0:7],
op=self.vs_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[96][7] + c_g + c_s
amounts = self.vs_res_bs00[96][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[97][0:7]))
def test_loop_pt(self):
""" Test looping of PT proportion target signals, with
stock delivery delay = 0 days
cash delivery delay = 0 day
buy-sell sequence = sell first
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 0 days \n'
'cash delivery delay = 0 day \n'
'buy-sell sequence = sell first')
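# apply_loop runs the full signal history through the back-test loop and returns
# a DataFrame of daily holdings and cash, compared here against the precomputed
# reference result self.pt_res_bs00. op_type=0 selects PT (proportion target)
# signals; the later tests use 1 (PS) and 2 (VS).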
res = apply_loop(op_type=0,
op_list=self.pt_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
# print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res, self.pt_res_bs00, 2))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=0,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
# print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_pt_with_delay(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 1 day
use_sell_cash = False
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize_cash = False (buy and sell at the same time)')
res = apply_loop(
op_type=0,
op_list=self.pt_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
cash_delivery_period=1,
stock_delivery_period=2,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.pt_res_bs21[i]))
print()
self.assertTrue(np.allclose(res, self.pt_res_bs21, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_pt_with_delay_use_cash(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 0 day
use sell cash = True (sell stock first to use cash when possible
(not possible when cash delivery period != 0))
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 0 day \n'
'maximize cash usage = True \n'
'(applicable because cash delivery period == 0)')
res = apply_loop(
op_type=0,
op_list=self.pt_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
inflation_rate=0,
max_cash_usage=True,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.pt_res_sb20[i]))
print()
self.assertTrue(np.allclose(res, self.pt_res_sb20, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=1,
stock_delivery_period=2,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_ps(self):
""" Test looping of PS Proportion Signal type of signals
"""
res = apply_loop(op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res, self.ps_res_bs00, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_ps_with_delay(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 1 day
use_sell_cash = False
"""
print('Test looping of PS proportion signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize_cash = False (buy and sell at the same time)')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
cash_delivery_period=1,
stock_delivery_period=2,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.ps_res_bs21[i]))
print()
self.assertTrue(np.allclose(res, self.ps_res_bs21, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_ps_with_delay_use_cash(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 0 day
use sell cash = True (sell stock first to use cash when possible
(not possible when cash delivery period != 0))
"""
print('Test looping of PS proportion signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 0 day \n'
'maximize cash usage = True \n'
'(applicable because cash delivery period == 0)')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
inflation_rate=0,
max_cash_usage=True,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.ps_res_sb20[i]))
print()
self.assertTrue(np.allclose(res, self.ps_res_sb20, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=1,
stock_delivery_period=2,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_vs(self):
""" Test looping of VS Volume Signal type of signals
"""
res = apply_loop(op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res, self.vs_res_bs00, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_vs_with_delay(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 1 day
use_sell_cash = False
"""
print('Test looping of VS volume signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize_cash = False (buy and sell at the same time)')
res = apply_loop(
op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
cash_delivery_period=1,
stock_delivery_period=2,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.vs_res_bs21[i]))
print()
self.assertTrue(np.allclose(res, self.vs_res_bs21, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_vs_with_delay_use_cash(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 0 day
use sell cash = True (sell stock first to use cash when possible
(not possible when cash delivery period != 0))
"""
print('Test looping of VS volume signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 0 day \n'
'maximize cash usage = True \n'
'(applicable because cash delivery period == 0)')
res = apply_loop(
op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
inflation_rate=0,
max_cash_usage=True,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.vs_res_sb20[i]))
print()
self.assertTrue(np.allclose(res, self.vs_res_sb20, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=1,
stock_delivery_period=2,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_multiple_signal(self):
""" Test looping of PS Proportion Signal type of signals
"""
res = apply_loop(op_type=1,
op_list=self.multi_signal_hp,
history_list=self.multi_history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
max_cash_usage=True,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.multi_res[i]))
print()
self.assertTrue(np.allclose(res, self.multi_res, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=1,
op_list=self.multi_signal_hp,
history_list=self.multi_history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=0,
stock_delivery_period=2,
max_cash_usage=False,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
class TestStrategy(unittest.TestCase):
""" test all properties and methods of strategy base class"""
def setUp(self) -> None:
pass
class TestLSStrategy(RollingTiming):
"""用于test测试的简单多空蒙板生成策略。基于RollingTiming滚动择时方法生成
该策略有两个参数,N与Price
N用于计算OHLC价格平均值的N日简单移动平均,判断,当移动平均值大于等于Price时,状态为看多,否则为看空
"""
def __init__(self):
super().__init__(stg_name='test_LS',
stg_text='test long/short strategy',
par_count=2,
par_types='discr, conti',
par_bounds_or_enums=([1, 5], [2, 10]),
data_types='close, open, high, low',
data_freq='d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
n, price = params
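# Minimal rolling-timing rule: average the four OHLC series, take an n-day
# simple moving average, and return 1 (long) when the latest MA value is at or
# above the price threshold, otherwise 0.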
h = hist_data.T
avg = (h[0] + h[1] + h[2] + h[3]) / 4
ma = sma(avg, n)
if ma[-1] < price:
return 0
else:
return 1
class TestSelStrategy(SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='high, low, close',
data_freq='d',
sample_freq='10d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
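# Rank the three shares by today's change rate relative to their average price
# (skipping NaN closes by taking the last valid difference per share) and give
# the two highest-ranked shares an equal weight of 0.5 each.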
avg = np.nanmean(hist_data, axis=(1, 2))
dif = (hist_data[:, :, 2] - np.roll(hist_data[:, :, 2], 1, 1))
dif_no_nan = np.array([arr[~np.isnan(arr)][-1] for arr in dif])
difper = dif_no_nan / avg
large2 = difper.argsort()[1:]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSelStrategyDiffTime(SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
# TODO: This strategy is not working, find out why and improve
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='close, low, open',
data_freq='d',
sample_freq='w',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
avg = hist_data.mean(axis=1).squeeze()
difper = (hist_data[:, :, 0] - np.roll(hist_data[:, :, 0], 1))[:, -1] / avg
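# Note: argsort() is ascending, so [0:2] below keeps the two *lowest* change
# rates (TestSelStrategy keeps the top two via [1:]); this may be related to
# the TODO above about the strategy not working.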
large2 = difper.argsort()[0:2]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSigStrategy(SimpleTiming):
"""用于Test测试的简单信号生成策略,基于SimpleTiming策略生成
策略有三个参数,第一个参数为ratio,另外两个参数为price1以及price2
ratio是k线形状比例的阈值,定义为abs((C-O)/(H-L))。当这个比值小于ratio阈值时,判断该K线为十字交叉(其实还有丁字等多种情形,但这里做了
简化处理。
信号生成的规则如下:
1,当某个K线出现十字交叉,且昨收与今收之差大于price1时,买入信号
2,当某个K线出现十字交叉,且昨收与今收之差小于price2时,卖出信号
"""
def __init__(self):
super().__init__(stg_name='test_SIG',
stg_text='test signal creation strategy',
par_count=3,
par_types='conti, conti, conti',
par_bounds_or_enums=([2, 10], [0, 3], [0, 3]),
data_types='close, open, high, low',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
r, price1, price2 = params
h = hist_data.T
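# ratio measures the candle body relative to its full range, abs((C-O)/(H-L));
# a value below r marks a doji-like cross, and diff (today's close minus the
# previous close) then decides between a buy (1) and a sell (-1) signal.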
ratio = np.abs((h[0] - h[1]) / (h[3] - h[2]))
diff = h[0] - np.roll(h[0], 1)
sig = np.where((ratio < r) & (diff > price1),
1,
np.where((ratio < r) & (diff < price2), -1, 0))
return sig
class MyStg(qt.RollingTiming):
"""自定义双均线择时策略策略"""
def __init__(self):
"""这个均线择时策略只有三个参数:
- SMA 慢速均线,所选择的股票
- FMA 快速均线
- M 边界值
策略的其他说明
"""
"""
必须初始化的关键策略参数清单:
"""
super().__init__(
pars=(20, 100, 0.01),
par_count=3,
par_types=['discr', 'discr', 'conti'],
par_bounds_or_enums=[(10, 250), (10, 250), (0.0, 0.5)],
stg_name='CUSTOM ROLLING TIMING STRATEGY',
stg_text='Customized Rolling Timing Strategy for Testing',
data_types='close',
window_length=100,
)
print(f'=====================\n====================\n'
f'custom strategy initialized, \npars: {self.pars}\npar_count:{self.par_count}\npar_types:'
f'{self.par_types}\n'
f'{self.info()}')
# The concrete implementation of the strategy is written in its _realize() function
# This function always takes two arguments: hist_price, the historical data of the given portfolio, and params, the concrete strategy parameters
def _realize(self, hist_price, params):
"""策略的具体实现代码:
s:短均线计算日期;l:长均线计算日期;m:均线边界宽度;hesitate:均线跨越类型"""
f, s, m = params
# 临时处理措施,在策略实现层对传入的数据切片,后续应该在策略实现层以外事先对数据切片,保证传入的数据符合data_types参数即可
h = hist_price.T
# Compute the current values of the fast and slow moving averages
s_ma = qt.sma(h[0], s)[-1]
f_ma = qt.sma(h[0], f)[-1]
# Compute the stop boundary around the slow MA; when the fast MA is inside this boundary, close positions and emit no signal
s_ma_u = s_ma * (1 + m)
s_ma_l = s_ma * (1 - m)
# Produce long/short/empty flags depending on where the fast MA sits relative to the boundary
if f_ma > s_ma_u:  # when the fast MA is above the slow MA's stop range, hold a long position
return 1
elif s_ma_l < f_ma < s_ma_u:  # when the fast MA is inside the stop boundary, close positions
return 0
else:  # f_ma < s_ma_l: when the fast MA is below the slow MA's stop range, hold a short position
return -1
class TestOperator(unittest.TestCase):
"""全面测试Operator对象的所有功能。包括:
1, Strategy 参数的设置
2, 历史数据的获取与分配提取
3, 策略优化参数的批量设置和优化空间的获取
4, 策略输出值的正确性验证
5, 策略结果的混合结果确认
"""
def setUp(self):
"""prepare data for Operator test"""
print('start testing HistoryPanel object\n')
# build up test data: a 4-type, 3-share, 50-day matrix of prices that contains nan values in some days
# for some share_pool
# for share1:
data_rows = 50
share1_close = [10.04, 10, 10, 9.99, 9.97, 9.99, 10.03, 10.03, 10.06, 10.06, 10.11,
10.09, 10.07, 10.06, 10.09, 10.03, 10.03, 10.06, 10.08, 10, 9.99,
10.03, 10.03, 10.06, 10.03, 9.97, 9.94, 9.83, 9.77, 9.84, 9.91, 9.93,
9.96, 9.91, 9.91, 9.88, 9.91, 9.64, 9.56, 9.57, 9.55, 9.57, 9.61, 9.61,
9.55, 9.57, 9.63, 9.64, 9.65, 9.62]
share1_open = [10.02, 10, 9.98, 9.97, 9.99, 10.01, 10.04, 10.06, 10.06, 10.11,
10.11, 10.07, 10.06, 10.09, 10.03, 10.02, 10.06, 10.08, 9.99, 10,
10.03, 10.02, 10.06, 10.03, 9.97, 9.94, 9.83, 9.78, 9.77, 9.91, 9.92,
9.97, 9.91, 9.9, 9.88, 9.91, 9.63, 9.64, 9.57, 9.55, 9.58, 9.61, 9.62,
9.55, 9.57, 9.61, 9.63, 9.64, 9.61, 9.56]
share1_high = [10.07, 10, 10, 10, 10.03, 10.03, 10.04, 10.09, 10.1, 10.14, 10.11, 10.1,
10.09, 10.09, 10.1, 10.05, 10.07, 10.09, 10.1, 10, 10.04, 10.04, 10.06,
10.09, 10.05, 9.97, 9.96, 9.86, 9.77, 9.92, 9.94, 9.97, 9.97, 9.92, 9.92,
9.92, 9.93, 9.64, 9.58, 9.6, 9.58, 9.62, 9.62, 9.64, 9.59, 9.62, 9.63,
9.7, 9.66, 9.64]
share1_low = [9.99, 10, 9.97, 9.97, 9.97, 9.98, 9.99, 10.03, 10.03, 10.04, 10.11, 10.07,
10.05, 10.03, 10.03, 10.01, 9.99, 10.03, 9.95, 10, 9.95, 10, 10.01, 9.99,
9.96, 9.89, 9.83, 9.77, 9.77, 9.8, 9.9, 9.91, 9.89, 9.89, 9.87, 9.85, 9.6,
9.64, 9.53, 9.55, 9.54, 9.55, 9.58, 9.54, 9.53, 9.53, 9.63, 9.64, 9.59, 9.56]
# for share2:
share2_close = [9.68, 9.87, 9.86, 9.87, 9.79, 9.82, 9.8, 9.66, 9.62, 9.58, 9.69, 9.78, 9.75,
9.96, 9.9, 10.04, 10.06, 10.08, 10.24, 10.24, 10.24, 9.86, 10.13, 10.12,
10.1, 10.25, 10.24, 10.22, 10.75, 10.64, 10.56, 10.6, 10.42, 10.25, 10.24,
10.49, 10.57, 10.63, 10.48, 10.37, 10.96, 11.02, np.nan, np.nan, 10.88, 10.87, 11.01,
11.01, 11.58, 11.8]
share2_open = [9.88, 9.88, 9.89, 9.75, 9.74, 9.8, 9.62, 9.65, 9.58, 9.67, 9.81, 9.8, 10,
9.95, 10.1, 10.06, 10.14, 9.9, 10.2, 10.29, 9.86, 9.48, 10.01, 10.24, 10.26,
10.24, 10.12, 10.65, 10.64, 10.56, 10.42, 10.43, 10.29, 10.3, 10.44, 10.6,
10.67, 10.46, 10.39, 10.9, 11.01, 11.01, np.nan, np.nan, 10.82, 11.02, 10.96,
11.55, 11.74, 11.8]
share2_high = [9.91, 10.04, 9.93, 10.04, 9.84, 9.88, 9.99, 9.7, 9.67, 9.71, 9.85, 9.9, 10,
10.2, 10.11, 10.18, 10.21, 10.26, 10.38, 10.47, 10.42, 10.07, 10.24, 10.27,
10.38, 10.43, 10.39, 10.65, 10.84, 10.65, 10.73, 10.63, 10.51, 10.35, 10.46,
10.63, 10.74, 10.76, 10.54, 11.02, 11.12, 11.17, np.nan, np.nan, 10.92, 11.15,
11.11, 11.55, 11.95, 11.93]
share2_low = [9.63, 9.84, 9.81, 9.74, 9.67, 9.72, 9.57, 9.54, 9.51, 9.47, 9.68, 9.63, 9.75,
9.65, 9.9, 9.93, 10.03, 9.8, 10.14, 10.09, 9.78, 9.21, 9.11, 9.68, 10.05,
10.12, 9.89, 9.89, 10.59, 10.43, 10.34, 10.32, 10.21, 10.2, 10.18, 10.36,
10.51, 10.41, 10.32, 10.37, 10.87, 10.95, np.nan, np.nan, 10.65, 10.71, 10.75,
10.91, 11.31, 11.58]
# for share3:
share3_close = [6.64, 7.26, 7.03, 6.87, np.nan, 6.64, 6.85, 6.7, 6.39, 6.22, 5.92, 5.91, 6.11,
5.91, 6.23, 6.28, 6.28, 6.27, np.nan, 5.56, 5.67, 5.16, 5.69, 6.32, 6.14, 6.25,
5.79, 5.26, 5.05, 5.45, 6.06, 6.21, 5.69, 5.46, 6.02, 6.69, 7.43, 7.72, 8.16,
7.83, 8.7, 8.71, 8.88, 8.54, 8.87, 8.87, 8.18, 7.8, 7.97, 8.25]
share3_open = [7.26, 7, 6.88, 6.91, np.nan, 6.81, 6.63, 6.45, 6.16, 6.24, 5.96, 5.97, 5.96,
6.2, 6.35, 6.11, 6.37, 5.58, np.nan, 5.65, 5.19, 5.42, 6.3, 6.15, 6.05, 5.89,
5.22, 5.2, 5.07, 6.04, 6.12, 5.85, 5.67, 6.02, 6.04, 7.07, 7.64, 7.99, 7.59,
8.73, 8.72, 8.97, 8.58, 8.71, 8.77, 8.4, 7.95, 7.76, 8.25, 7.51]
share3_high = [7.41, 7.31, 7.14, 7, np.nan, 6.82, 6.96, 6.85, 6.5, 6.34, 6.04, 6.02, 6.12, 6.38,
6.43, 6.46, 6.43, 6.27, np.nan, 6.01, 5.67, 5.67, 6.35, 6.32, 6.43, 6.36, 5.79,
5.47, 5.65, 6.04, 6.14, 6.23, 5.83, 6.25, 6.27, 7.12, 7.82, 8.14, 8.27, 8.92,
8.76, 9.15, 8.9, 9.01, 9.16, 9, 8.27, 7.99, 8.33, 8.25]
share3_low = [6.53, 6.87, 6.83, 6.7, np.nan, 6.63, 6.57, 6.41, 6.15, 6.07, 5.89, 5.82, 5.73, 5.81,
6.1, 6.06, 6.16, 5.57, np.nan, 5.51, 5.19, 5.12, 5.69, 6.01, 5.97, 5.86, 5.18, 5.19,
4.96, 5.45, 5.84, 5.85, 5.28, 5.42, 6.02, 6.69, 7.28, 7.64, 7.25, 7.83, 8.41, 8.66,
8.53, 8.54, 8.73, 8.27, 7.95, 7.67, 7.8, 7.51]
# for sel_finance test
shares_eps = np.array([[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, 0.2, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[0.1, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.3, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.15, np.nan, np.nan],
[np.nan, 0.1, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.2, np.nan, np.nan],
[np.nan, 0.5, np.nan],
[0.4, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[0.9, np.nan, np.nan],
[np.nan, np.nan, 0.1]])
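# shares_eps above is mostly NaN: each non-NaN entry is the EPS value reported
# for the corresponding share on that date, used to build the sel_finance test
# data below.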
self.date_indices = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14', '2016-07-15', '2016-07-18',
'2016-07-19', '2016-07-20', '2016-07-21', '2016-07-22',
'2016-07-25', '2016-07-26', '2016-07-27', '2016-07-28',
'2016-07-29', '2016-08-01', '2016-08-02', '2016-08-03',
'2016-08-04', '2016-08-05', '2016-08-08', '2016-08-09',
'2016-08-10', '2016-08-11', '2016-08-12', '2016-08-15',
'2016-08-16', '2016-08-17', '2016-08-18', '2016-08-19',
'2016-08-22', '2016-08-23', '2016-08-24', '2016-08-25',
'2016-08-26', '2016-08-29', '2016-08-30', '2016-08-31',
'2016-09-01', '2016-09-02', '2016-09-05', '2016-09-06',
'2016-09-07', '2016-09-08']
self.shares = ['000010', '000030', '000039']
self.types = ['close', 'open', 'high', 'low']
self.sel_finance_tyeps = ['eps']
self.test_data_3D = np.zeros((3, data_rows, 4))
self.test_data_2D = np.zeros((data_rows, 3))
self.test_data_2D2 = np.zeros((data_rows, 4))
self.test_data_sel_finance = np.empty((3, data_rows, 1))
# Build up 3D data
self.test_data_3D[0, :, 0] = share1_close
self.test_data_3D[0, :, 1] = share1_open
self.test_data_3D[0, :, 2] = share1_high
self.test_data_3D[0, :, 3] = share1_low
self.test_data_3D[1, :, 0] = share2_close
self.test_data_3D[1, :, 1] = share2_open
self.test_data_3D[1, :, 2] = share2_high
self.test_data_3D[1, :, 3] = share2_low
self.test_data_3D[2, :, 0] = share3_close
self.test_data_3D[2, :, 1] = share3_open
self.test_data_3D[2, :, 2] = share3_high
self.test_data_3D[2, :, 3] = share3_low
self.test_data_sel_finance[:, :, 0] = shares_eps.T
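# Wrap the 3D price block and the EPS block into HistoryPanel objects
# (levels = shares, columns = data types, rows = trading dates) for use in the
# timing and selection-finance tests below.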
self.hp1 = qt.HistoryPanel(values=self.test_data_3D,
levels=self.shares,
columns=self.types,
rows=self.date_indices)
print(f'in test Operator, history panel is created for timing test')
self.hp1.info()
self.hp2 = qt.HistoryPanel(values=self.test_data_sel_finance,
levels=self.shares,
columns=self.sel_finance_tyeps,
rows=self.date_indices)
print(f'in test_Operator, history panel is created for selection finance test:')
self.hp2.info()
self.op = qt.Operator(strategies='dma', signal_type='PS')
self.op2 = qt.Operator(strategies='dma, macd, trix')
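# self.op and self.op2 are reused by several property tests below
# (e.g. strategy_count and get_strategy_count_by_price_type).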
def test_init(self):
""" test initialization of Operator class"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.signal_type, 'pt')
self.assertIsInstance(op.strategies, list)
self.assertEqual(len(op.strategies), 0)
op = qt.Operator('dma')
self.assertIsInstance(op, qt.Operator)
self.assertIsInstance(op.strategies, list)
self.assertIsInstance(op.strategies[0], TimingDMA)
op = qt.Operator('dma, macd')
self.assertIsInstance(op, qt.Operator)
op = qt.Operator(['dma', 'macd'])
self.assertIsInstance(op, qt.Operator)
def test_repr(self):
""" test basic representation of Opeartor class"""
op = qt.Operator()
self.assertEqual(op.__repr__(), 'Operator()')
op = qt.Operator('macd, dma, trix, random, avg_low')
self.assertEqual(op.__repr__(), 'Operator(macd, dma, trix, random, avg_low)')
self.assertEqual(op['dma'].__repr__(), 'Q-TIMING(DMA)')
self.assertEqual(op['macd'].__repr__(), 'R-TIMING(MACD)')
self.assertEqual(op['trix'].__repr__(), 'R-TIMING(TRIX)')
self.assertEqual(op['random'].__repr__(), 'SELECT(RANDOM)')
self.assertEqual(op['avg_low'].__repr__(), 'FACTOR(AVG LOW)')
def test_info(self):
"""Test information output of Operator"""
print(f'test printing information of operator object')
self.op.info()
def test_get_strategy_by_id(self):
""" test get_strategy_by_id()"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
self.assertEqual(op.strategy_ids, ['macd', 'dma', 'trix'])
self.assertIs(op.get_strategy_by_id('macd'), op.strategies[0])
self.assertIs(op.get_strategy_by_id(1), op.strategies[1])
self.assertIs(op.get_strategy_by_id('trix'), op.strategies[2])
def test_get_items(self):
""" test method __getitem__(), it should be the same as geting strategies by id"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
self.assertEqual(op.strategy_ids, ['macd', 'dma', 'trix'])
self.assertIs(op['macd'], op.strategies[0])
self.assertIs(op['trix'], op.strategies[2])
self.assertIs(op[1], op.strategies[1])
self.assertIs(op[3], op.strategies[2])
def test_get_strategies_by_price_type(self):
""" test get_strategies_by_price_type"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategies_by_price_type('close')
stg_open = op.get_strategies_by_price_type('open')
stg_high = op.get_strategies_by_price_type('high')
self.assertIsInstance(stg_close, list)
self.assertIsInstance(stg_open, list)
self.assertIsInstance(stg_high, list)
self.assertEqual(stg_close, [op.strategies[1]])
self.assertEqual(stg_open, [op.strategies[0], op.strategies[2]])
self.assertEqual(stg_high, [])
stg_wrong = op.get_strategies_by_price_type(123)
self.assertIsInstance(stg_wrong, list)
self.assertEqual(stg_wrong, [])
def test_get_strategy_count_by_price_type(self):
""" test get_strategy_count_by_price_type"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategy_count_by_price_type('close')
stg_open = op.get_strategy_count_by_price_type('open')
stg_high = op.get_strategy_count_by_price_type('high')
self.assertIsInstance(stg_close, int)
self.assertIsInstance(stg_open, int)
self.assertIsInstance(stg_high, int)
self.assertEqual(stg_close, 1)
self.assertEqual(stg_open, 2)
self.assertEqual(stg_high, 0)
stg_wrong = op.get_strategy_count_by_price_type(123)
self.assertIsInstance(stg_wrong, int)
self.assertEqual(stg_wrong, 0)
def test_get_strategy_names_by_price_type(self):
""" test get_strategy_names_by_price_type"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategy_names_by_price_type('close')
stg_open = op.get_strategy_names_by_price_type('open')
stg_high = op.get_strategy_names_by_price_type('high')
self.assertIsInstance(stg_close, list)
self.assertIsInstance(stg_open, list)
self.assertIsInstance(stg_high, list)
self.assertEqual(stg_close, ['DMA'])
self.assertEqual(stg_open, ['MACD', 'TRIX'])
self.assertEqual(stg_high, [])
stg_wrong = op.get_strategy_names_by_price_type(123)
self.assertIsInstance(stg_wrong, list)
self.assertEqual(stg_wrong, [])
def test_get_strategy_id_by_price_type(self):
""" test get_strategy_IDs_by_price_type"""
print('-----Test get strategy IDs by price type------\n')
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategy_id_by_price_type('close')
stg_open = op.get_strategy_id_by_price_type('open')
stg_high = op.get_strategy_id_by_price_type('high')
self.assertIsInstance(stg_close, list)
self.assertIsInstance(stg_open, list)
self.assertIsInstance(stg_high, list)
self.assertEqual(stg_close, ['dma'])
self.assertEqual(stg_open, ['macd', 'trix'])
self.assertEqual(stg_high, [])
op.add_strategies('dma, macd')
op.set_parameter('dma_1', price_type='open')
op.set_parameter('macd', price_type='open')
op.set_parameter('macd_1', price_type='high')
op.set_parameter('trix', price_type='close')
print(f'Operator strategy id:\n'
f'{op.strategies} on memory pos:\n'
f'{[id(stg) for stg in op.strategies]}')
stg_close = op.get_strategy_id_by_price_type('close')
stg_open = op.get_strategy_id_by_price_type('open')
stg_high = op.get_strategy_id_by_price_type('high')
stg_all = op.get_strategy_id_by_price_type()
print(f'All IDs of strategies:\n'
f'{stg_all}\n'
f'All price types of strategies:\n'
f'{[stg.price_type for stg in op.strategies]}')
self.assertEqual(stg_close, ['dma', 'trix'])
self.assertEqual(stg_open, ['macd', 'dma_1'])
self.assertEqual(stg_high, ['macd_1'])
stg_wrong = op.get_strategy_id_by_price_type(123)
self.assertIsInstance(stg_wrong, list)
self.assertEqual(stg_wrong, [])
def test_property_strategies(self):
""" test property strategies"""
print(f'created a new simple Operator with only one strategy: DMA')
op = qt.Operator('dma')
strategies = op.strategies
self.assertIsInstance(strategies, list)
op.info()
print(f'created the second simple Operator with three strategies')
self.assertIsInstance(strategies[0], TimingDMA)
op = qt.Operator('dma, macd, cdl')
strategies = op.strategies
op.info()
self.assertIsInstance(strategies, list)
self.assertIsInstance(strategies[0], TimingDMA)
self.assertIsInstance(strategies[1], TimingMACD)
self.assertIsInstance(strategies[2], TimingCDL)
def test_property_strategy_count(self):
""" test Property strategy_count, and the method get_strategy_count_by_price_type()"""
self.assertEqual(self.op.strategy_count, 1)
self.assertEqual(self.op2.strategy_count, 3)
self.assertEqual(self.op.get_strategy_count_by_price_type(), 1)
self.assertEqual(self.op2.get_strategy_count_by_price_type(), 3)
self.assertEqual(self.op.get_strategy_count_by_price_type('close'), 1)
self.assertEqual(self.op.get_strategy_count_by_price_type('high'), 0)
self.assertEqual(self.op2.get_strategy_count_by_price_type('close'), 3)
self.assertEqual(self.op2.get_strategy_count_by_price_type('open'), 0)
def test_property_strategy_names(self):
""" test property strategy_ids"""
op = qt.Operator('dma')
self.assertIsInstance(op.strategy_ids, list)
names = op.strategy_ids[0]
print(f'names are {names}')
self.assertEqual(names, 'dma')
op = qt.Operator('dma, macd, trix, cdl')
self.assertIsInstance(op.strategy_ids, list)
self.assertEqual(op.strategy_ids[0], 'dma')
self.assertEqual(op.strategy_ids[1], 'macd')
self.assertEqual(op.strategy_ids[2], 'trix')
self.assertEqual(op.strategy_ids[3], 'cdl')
op = qt.Operator('dma, macd, trix, dma, dma')
self.assertIsInstance(op.strategy_ids, list)
self.assertEqual(op.strategy_ids[0], 'dma')
self.assertEqual(op.strategy_ids[1], 'macd')
self.assertEqual(op.strategy_ids[2], 'trix')
self.assertEqual(op.strategy_ids[3], 'dma_1')
self.assertEqual(op.strategy_ids[4], 'dma_2')
def test_property_strategy_blenders(self):
""" test property strategy blenders including property setter,
and test the method get_blender()"""
print(f'------- Test property strategy blenders ---------')
op = qt.Operator()
self.assertIsInstance(op.strategy_blenders, dict)
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'pt')
# test adding blender to empty operator
op.strategy_blenders = '1 + 2'
op.signal_type = 'proportion signal'
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'ps')
op.add_strategy('dma')
op.strategy_blenders = '1+2'
self.assertEqual(op.strategy_blenders, {'close': ['+', '2', '1']})
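# Blenders are stored per price type in postfix (RPN) order, so the infix
# expression '1+2' becomes the token list ['+', '2', '1'].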
op.clear_strategies()
self.assertEqual(op.strategy_blenders, {})
op.add_strategies('dma, trix, macd, dma')
op.set_parameter('dma', price_type='open')
op.set_parameter('trix', price_type='high')
op.set_blender('open', '1+2')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '2', '1'])
self.assertEqual(blender_close, None)
self.assertEqual(blender_high, None)
op.set_blender('open', '1+2+3')
op.set_blender('abc', '1+2+3')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
blender_abc = op.get_blender('abc')
self.assertEqual(op.strategy_blenders, {'open': ['+', '3', '+', '2', '1']})
self.assertEqual(blender_open, ['+', '3', '+', '2', '1'])
self.assertEqual(blender_close, None)
self.assertEqual(blender_high, None)
self.assertEqual(blender_abc, None)
op.set_blender('open', 123)
blender_open = op.get_blender('open')
self.assertEqual(blender_open, [])
op.set_blender(None, '1+1')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(op.bt_price_types, ['close', 'high', 'open'])
self.assertEqual(op.get_blender(), {'close': ['+', '1', '1'],
'open': ['+', '1', '1'],
'high': ['+', '1', '1']})
self.assertEqual(blender_open, ['+', '1', '1'])
self.assertEqual(blender_close, ['+', '1', '1'])
self.assertEqual(blender_high, ['+', '1', '1'])
op.set_blender(None, ['1+1', '3+4'])
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '3'])
self.assertEqual(blender_close, ['+', '1', '1'])
self.assertEqual(blender_high, ['+', '4', '3'])
self.assertEqual(op.view_blender('open'), '3+4')
self.assertEqual(op.view_blender('close'), '1+1')
self.assertEqual(op.view_blender('high'), '3+4')
op.strategy_blenders = (['1+2', '2*3', '1+4'])
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '1'])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
self.assertEqual(op.view_blender('open'), '1+4')
self.assertEqual(op.view_blender('close'), '1+2')
self.assertEqual(op.view_blender('high'), '2*3')
# test error inputs:
# wrong type of price_type
self.assertRaises(TypeError, op.set_blender, 1, '1+3')
# price_type not found, no change is made
op.set_blender('volume', '1+3')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '1'])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
# price_type not valid, no change is made
op.set_blender('closee', '1+2')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '1'])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
# wrong type of blender, set to empty list
op.set_blender('open', 55)
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, [])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
# wrong type of blender, set to empty list
op.set_blender('close', ['1+2'])
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, [])
self.assertEqual(blender_close, [])
self.assertEqual(blender_high, ['*', '3', '2'])
# can't parse blender, set to empty list
op.set_blender('high', 'a+bc')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, [])
self.assertEqual(blender_close, [])
self.assertEqual(blender_high, [])
def test_property_signal_type(self):
""" test property signal_type"""
op = qt.Operator()
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.signal_type, 'pt')
op = qt.Operator(signal_type='ps')
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.signal_type, 'ps')
op = qt.Operator(signal_type='PS')
self.assertEqual(op.signal_type, 'ps')
op = qt.Operator(signal_type='proportion signal')
self.assertEqual(op.signal_type, 'ps')
print(f'"pt" will be the default type if wrong value is given')
op = qt.Operator(signal_type='wrong value')
self.assertEqual(op.signal_type, 'pt')
print(f'test signal_type.setter')
op.signal_type = 'ps'
self.assertEqual(op.signal_type, 'ps')
print(f'test error raising')
self.assertRaises(TypeError, setattr, op, 'signal_type', 123)
self.assertRaises(ValueError, setattr, op, 'signal_type', 'wrong value')
def test_property_op_data_types(self):
""" test property op_data_types"""
op = qt.Operator()
self.assertIsInstance(op.op_data_types, list)
self.assertEqual(op.op_data_types, [])
op = qt.Operator('macd, dma, trix')
dt = op.op_data_types
self.assertEqual(dt[0], 'close')
op = qt.Operator('macd, cdl')
dt = op.op_data_types
self.assertEqual(dt[0], 'close')
self.assertEqual(dt[1], 'high')
self.assertEqual(dt[2], 'low')
self.assertEqual(dt[3], 'open')
self.assertEqual(dt, ['close', 'high', 'low', 'open'])
op.add_strategy('dma')
dt = op.op_data_types
self.assertEqual(dt[0], 'close')
self.assertEqual(dt[1], 'high')
self.assertEqual(dt[2], 'low')
self.assertEqual(dt[3], 'open')
self.assertEqual(dt, ['close', 'high', 'low', 'open'])
def test_property_op_data_type_count(self):
""" test property op_data_type_count"""
op = qt.Operator()
self.assertIsInstance(op.op_data_type_count, int)
self.assertEqual(op.op_data_type_count, 0)
op = qt.Operator('macd, dma, trix')
dtn = op.op_data_type_count
self.assertEqual(dtn, 1)
op = qt.Operator('macd, cdl')
dtn = op.op_data_type_count
self.assertEqual(dtn, 4)
op.add_strategy('dma')
dtn = op.op_data_type_count
self.assertEqual(dtn, 4)
def test_property_op_data_freq(self):
""" test property op_data_freq"""
op = qt.Operator()
self.assertIsInstance(op.op_data_freq, str)
self.assertEqual(len(op.op_data_freq), 0)
self.assertEqual(op.op_data_freq, '')
op = qt.Operator('macd, dma, trix')
dtf = op.op_data_freq
self.assertIsInstance(dtf, str)
self.assertEqual(dtf[0], 'd')
op.set_parameter('macd', data_freq='m')
dtf = op.op_data_freq
self.assertIsInstance(dtf, list)
self.assertEqual(len(dtf), 2)
self.assertEqual(dtf[0], 'd')
self.assertEqual(dtf[1], 'm')
def test_property_bt_price_types(self):
""" test property bt_price_types"""
print('------test property bt_price_types-------')
op = qt.Operator()
self.assertIsInstance(op.bt_price_types, list)
self.assertEqual(len(op.bt_price_types), 0)
self.assertEqual(op.bt_price_types, [])
op = qt.Operator('macd, dma, trix')
btp = op.bt_price_types
self.assertIsInstance(btp, list)
self.assertEqual(btp[0], 'close')
op.set_parameter('macd', price_type='open')
btp = op.bt_price_types
btpc = op.bt_price_type_count
print(f'price_types are \n{btp}')
self.assertIsInstance(btp, list)
self.assertEqual(len(btp), 2)
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'open')
self.assertEqual(btpc, 2)
op.add_strategies(['dma', 'macd'])
op.set_parameter('dma_1', price_type='high')
btp = op.bt_price_types
btpc = op.bt_price_type_count
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'high')
self.assertEqual(btp[2], 'open')
self.assertEqual(btpc, 3)
op.remove_strategy('dma_1')
btp = op.bt_price_types
btpc = op.bt_price_type_count
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'open')
self.assertEqual(btpc, 2)
op.remove_strategy('macd_1')
btp = op.bt_price_types
btpc = op.bt_price_type_count
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'open')
self.assertEqual(btpc, 2)
def test_property_op_data_type_list(self):
""" test property op_data_type_list"""
op = qt.Operator()
self.assertIsInstance(op.op_data_type_list, list)
self.assertEqual(len(op.op_data_type_list), 0)
self.assertEqual(op.op_data_type_list, [])
op = qt.Operator('macd, dma, trix, cdl')
ohd = op.op_data_type_list
print(f'ohd is {ohd}')
self.assertIsInstance(ohd, list)
self.assertEqual(ohd[0], ['close'])
op.set_parameter('macd', data_types='open, close')
ohd = op.op_data_type_list
print(f'ohd is {ohd}')
self.assertIsInstance(ohd, list)
self.assertEqual(len(ohd), 4)
self.assertEqual(ohd[0], ['open', 'close'])
self.assertEqual(ohd[1], ['close'])
self.assertEqual(ohd[2], ['close'])
self.assertEqual(ohd[3], ['open', 'high', 'low', 'close'])
def test_property_op_history_data(self):
""" Test this important function to get operation history data that shall be used in
signal generation
these data are stored in list of nd-arrays, each ndarray represents the data
that is needed for each and every strategy
"""
print(f'------- Test getting operation history data ---------')
op = qt.Operator()
self.assertIsInstance(op.strategy_blenders, dict)
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.op_history_data, {})
self.assertEqual(op.signal_type, 'pt')
def test_property_opt_space_par(self):
""" test property opt_space_par"""
print(f'-----test property opt_space_par--------:\n')
op = qt.Operator()
self.assertIsInstance(op.opt_space_par, tuple)
self.assertIsInstance(op.opt_space_par[0], list)
self.assertIsInstance(op.opt_space_par[1], list)
self.assertEqual(len(op.opt_space_par), 2)
self.assertEqual(op.opt_space_par, ([], []))
op = qt.Operator('macd, dma, trix, cdl')
osp = op.opt_space_par
print(f'before setting opt_tags opt_space_par is empty:\n'
f'osp is {osp}\n')
self.assertIsInstance(osp, tuple)
self.assertEqual(osp[0], [])
self.assertEqual(osp[1], [])
op.set_parameter('macd', opt_tag=1)
op.set_parameter('dma', opt_tag=1)
osp = op.opt_space_par
print(f'after setting opt_tags opt_space_par is not empty:\n'
f'osp is {osp}\n')
self.assertIsInstance(osp, tuple)
self.assertEqual(len(osp), 2)
self.assertIsInstance(osp[0], list)
self.assertIsInstance(osp[1], list)
self.assertEqual(len(osp[0]), 6)
self.assertEqual(len(osp[1]), 6)
self.assertEqual(osp[0], [(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
self.assertEqual(osp[1], ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
def test_property_opt_types(self):
""" test property opt_tags"""
print(f'-----test property opt_tags--------:\n')
op = qt.Operator()
self.assertIsInstance(op.opt_tags, list)
self.assertEqual(len(op.opt_tags), 0)
self.assertEqual(op.opt_tags, [])
op = qt.Operator('macd, dma, trix, cdl')
otp = op.opt_tags
print(f'before setting opt_tags opt_space_par is empty:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, list)
self.assertEqual(otp, [0, 0, 0, 0])
op.set_parameter('macd', opt_tag=1)
op.set_parameter('dma', opt_tag=1)
otp = op.opt_tags
print(f'after setting opt_tags opt_space_par is not empty:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, list)
self.assertEqual(len(otp), 4)
self.assertEqual(otp, [1, 1, 0, 0])
def test_property_max_window_length(self):
""" test property max_window_length"""
print(f'-----test property max window length--------:\n')
op = qt.Operator()
self.assertIsInstance(op.max_window_length, int)
self.assertEqual(op.max_window_length, 0)
op = qt.Operator('macd, dma, trix, cdl')
mwl = op.max_window_length
print(f'before setting window_length the value is 270:\n'
f'mwl is {mwl}\n')
self.assertIsInstance(mwl, int)
self.assertEqual(mwl, 270)
op.set_parameter('macd', window_length=300)
op.set_parameter('dma', window_length=350)
mwl = op.max_window_length
print(f'after setting window_length the value is new set value:\n'
f'mwl is {mwl}\n')
self.assertIsInstance(mwl, int)
self.assertEqual(mwl, 350)
def test_property_bt_price_type_count(self):
""" test property bt_price_type_count"""
print(f'-----test property bt_price_type_count--------:\n')
op = qt.Operator()
self.assertIsInstance(op.bt_price_type_count, int)
self.assertEqual(op.bt_price_type_count, 0)
op = qt.Operator('macd, dma, trix, cdl')
otp = op.bt_price_type_count
print(f'before setting price_type the price count is 1:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, int)
self.assertEqual(otp, 1)
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='open')
otp = op.bt_price_type_count
print(f'after setting price_type the price type count is 2:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, int)
self.assertEqual(otp, 2)
def test_property_set(self):
""" test all property setters:
setting following properties:
- strategy_blenders
- signal_type
other properties can not be set"""
print(f'------- Test setting properties ---------')
op = qt.Operator()
self.assertIsInstance(op.strategy_blenders, dict)
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'pt')
op.strategy_blenders = '1 + 2'
op.signal_type = 'proportion signal'
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'ps')
op = qt.Operator('macd, dma, trix, cdl')
# TODO: modify set_parameter() so that the usage below becomes valid
# a_to_sell.set_parameter('dma, cdl', price_type='open')
op.set_parameter('dma', price_type='open')
op.set_parameter('cdl', price_type='open')
sb = op.strategy_blenders
st = op.signal_type
self.assertIsInstance(sb, dict)
print(f'before setting: strategy_blenders={sb}')
self.assertEqual(sb, {})
op.strategy_blenders = '1+2 * 3'
sb = op.strategy_blenders
print(f'after setting strategy_blender={sb}')
self.assertEqual(sb, {'close': ['+', '*', '3', '2', '1'],
'open': ['+', '*', '3', '2', '1']})
op.strategy_blenders = ['1+2', '3-4']
sb = op.strategy_blenders
print(f'after setting strategy_blender={sb}')
self.assertEqual(sb, {'close': ['+', '2', '1'],
'open': ['-', '4', '3']})
def test_operator_ready(self):
"""test the method ready of Operator"""
op = qt.Operator()
print(f'operator is ready? "{op.ready}"')
def test_operator_add_strategy(self):
"""test adding strategies to Operator"""
op = qt.Operator('dma, all, urgent')
self.assertIsInstance(op, qt.Operator)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[1], qt.SelectingAll)
self.assertIsInstance(op.strategies[2], qt.RiconUrgent)
self.assertIsInstance(op[0], qt.TimingDMA)
self.assertIsInstance(op[1], qt.SelectingAll)
self.assertIsInstance(op[2], qt.RiconUrgent)
self.assertIsInstance(op['dma'], qt.TimingDMA)
self.assertIsInstance(op['all'], qt.SelectingAll)
self.assertIsInstance(op['urgent'], qt.RiconUrgent)
self.assertEqual(op.strategy_count, 3)
print(f'test adding strategies into existing op')
print('test adding strategy by string')
op.add_strategy('macd')
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[3], qt.TimingMACD)
self.assertEqual(op.strategy_count, 4)
op.add_strategy('random')
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[4], qt.SelectingRandom)
self.assertEqual(op.strategy_count, 5)
test_ls = TestLSStrategy()
op.add_strategy(test_ls)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[5], TestLSStrategy)
self.assertEqual(op.strategy_count, 6)
print(f'Test that different instances of the same strategy class are added to the operator')
op.add_strategy('dma')
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[6], qt.TimingDMA)
self.assertIsNot(op.strategies[0], op.strategies[6])
def test_operator_add_strategies(self):
""" etst adding multiple strategies to Operator"""
op = qt.Operator('dma, all, urgent')
self.assertEqual(op.strategy_count, 3)
print('test adding multiple strategies -- adding strategy by list of strings')
op.add_strategies(['dma', 'macd'])
self.assertEqual(op.strategy_count, 5)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[3], qt.TimingDMA)
self.assertIsInstance(op.strategies[4], qt.TimingMACD)
print('test adding multiple strategies -- adding strategy by comma separated strings')
op.add_strategies('dma, macd')
self.assertEqual(op.strategy_count, 7)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[5], qt.TimingDMA)
self.assertIsInstance(op.strategies[6], qt.TimingMACD)
print('test adding multiple strategies -- adding strategy by list of strategies')
op.add_strategies([qt.TimingDMA(), qt.TimingMACD()])
self.assertEqual(op.strategy_count, 9)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[7], qt.TimingDMA)
self.assertIsInstance(op.strategies[8], qt.TimingMACD)
print('test adding multiple strategies -- adding strategy by list of strategy and str')
op.add_strategies(['DMA', qt.TimingMACD()])
self.assertEqual(op.strategy_count, 11)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[9], qt.TimingDMA)
self.assertIsInstance(op.strategies[10], qt.TimingMACD)
self.assertIsNot(op.strategies[0], op.strategies[9])
self.assertIs(type(op.strategies[0]), type(op.strategies[9]))
print('test adding faulty data')
self.assertRaises(AssertionError, op.add_strategies, 123)
self.assertRaises(AssertionError, op.add_strategies, None)
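# Note: as the removal test below illustrates, strategies added under an
# already-used name appear to receive numeric suffixes in their ids ('dma',
# 'dma_1', 'dma_2', ...), and custom strategy instances are registered under
# the id 'custom'; remove_strategy() works on these ids rather than on the
# object instances themselves.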
def test_operator_remove_strategy(self):
""" test the method remove_strategy"""
op = qt.Operator('dma, all, urgent')
op.add_strategies(['dma', 'macd'])
op.add_strategies(['DMA', TestLSStrategy()])
self.assertEqual(op.strategy_count, 7)
print('test removing strategies from Operator')
op.remove_strategy('dma')
self.assertEqual(op.strategy_count, 6)
self.assertEqual(op.strategy_ids, ['all', 'urgent', 'dma_1', 'macd', 'dma_2', 'custom'])
self.assertEqual(op.strategies[0], op['all'])
self.assertEqual(op.strategies[1], op['urgent'])
self.assertEqual(op.strategies[2], op['dma_1'])
self.assertEqual(op.strategies[3], op['macd'])
self.assertEqual(op.strategies[4], op['dma_2'])
self.assertEqual(op.strategies[5], op['custom'])
op.remove_strategy('dma_1')
self.assertEqual(op.strategy_count, 5)
self.assertEqual(op.strategy_ids, ['all', 'urgent', 'macd', 'dma_2', 'custom'])
self.assertEqual(op.strategies[0], op['all'])
self.assertEqual(op.strategies[1], op['urgent'])
self.assertEqual(op.strategies[2], op['macd'])
self.assertEqual(op.strategies[3], op['dma_2'])
self.assertEqual(op.strategies[4], op['custom'])
def test_operator_clear_strategies(self):
""" test the method clear_strategies of Operator"""
op = qt.Operator('dma, all, urgent')
op.add_strategies(['dma', 'macd'])
op.add_strategies(['DMA', TestLSStrategy()])
self.assertEqual(op.strategy_count, 7)
print('test clearing all strategies from Operator')
op.clear_strategies()
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op.add_strategy('dma', pars=(12, 123, 25))
self.assertEqual(op.strategy_count, 1)
self.assertEqual(op.strategy_ids, ['dma'])
self.assertEqual(type(op.strategies[0]), TimingDMA)
self.assertEqual(op.strategies[0].pars, (12, 123, 25))
op.clear_strategies()
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
def test_operator_prepare_data(self):
"""test processes that related to prepare data"""
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sig = TestSigStrategy()
self.op = qt.Operator(strategies=[test_ls, test_sel, test_sig])
too_early_cash = qt.CashPlan(dates='2016-01-01', amounts=10000)
early_cash = qt.CashPlan(dates='2016-07-01', amounts=10000)
on_spot_cash = qt.CashPlan(dates='2016-07-08', amounts=10000)
no_trade_cash = qt.CashPlan(dates='2016-07-08, 2016-07-30, 2016-08-11, 2016-09-03',
amounts=[10000, 10000, 10000, 10000])
# calling prepare_data before all strategy parameters are set raises an AssertionError
self.assertRaises(AssertionError,
self.op.prepare_data,
hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
late_cash = qt.CashPlan(dates='2016-12-31', amounts=10000)
multi_cash = qt.CashPlan(dates='2016-07-08, 2016-08-08', amounts=[10000, 10000])
self.op.set_parameter(stg_id='custom',
pars={'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.assertEqual(self.op.strategies[0].pars, {'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.op.set_parameter(stg_id='custom_1',
pars=())
self.assertEqual(self.op.strategies[1].pars, ())
self.op.set_parameter(stg_id='custom_2',
pars=(0.2, 0.02, -0.02))
self.assertEqual(self.op.strategies[2].pars, (0.2, 0.02, -0.02))
self.op.prepare_data(hist_data=self.hp1,
cash_plan=on_spot_cash)
self.assertIsInstance(self.op._op_history_data, dict)
self.assertEqual(len(self.op._op_history_data), 3)
# test if automatic strategy blenders are set
self.assertEqual(self.op.strategy_blenders,
{'close': ['+', '2', '+', '1', '0']})
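# Note: when no blender has been set explicitly, prepare_data() appears to
# install a default blender that simply sums the outputs of all strategies
# sharing a price type; '0+1+2' stored in reversed-RPN form is exactly the
# ['+', '2', '+', '1', '0'] list asserted above.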
tim_hist_data = self.op._op_history_data['custom']
sel_hist_data = self.op._op_history_data['custom_1']
ric_hist_data = self.op._op_history_data['custom_2']
print(f'in test_prepare_data in TestOperator:')
print('selecting history data:\n', sel_hist_data)
print('originally passed data in correct sequence:\n', self.test_data_3D[:, 3:, [2, 3, 0]])
print('difference is \n', sel_hist_data - self.test_data_3D[:, :, [2, 3, 0]])
self.assertTrue(np.allclose(sel_hist_data, self.test_data_3D[:, :, [2, 3, 0]], equal_nan=True))
self.assertTrue(np.allclose(tim_hist_data, self.test_data_3D, equal_nan=True))
self.assertTrue(np.allclose(ric_hist_data, self.test_data_3D[:, 3:, :], equal_nan=True))
# raises ValueError if an empty history panel is given
empty_hp = qt.HistoryPanel()
correct_hp = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 4)),
columns=self.types,
levels=self.shares,
rows=self.date_indices)
too_many_shares = qt.HistoryPanel(values=np.random.randint(10, size=(5, 50, 4)))
too_many_types = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 5)))
# raises Error when history panel is empty
self.assertRaises(ValueError,
self.op.prepare_data,
empty_hp,
on_spot_cash)
# raises Error when first investment date is too early
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
early_cash)
# raises Error when last investment date is too late
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
late_cash)
# raises Error when some of the investment dates are on no-trade-days
self.assertRaises(ValueError,
self.op.prepare_data,
correct_hp,
no_trade_cash)
# raises Error when number of shares in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_shares,
on_spot_cash)
# raises Error when too early cash investment date
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
too_early_cash)
# raises Error when number of d_types in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_types,
on_spot_cash)
# test the effect of data type sequence in strategy definition
def test_operator_generate(self):
""" Test signal generation process of operator objects
:return:
"""
# create trading strategies from the custom strategy classes defined in this test module
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sel2 = TestSelStrategyDiffTime()
test_sig = TestSigStrategy()
print('--Test PT type signal generation--')
# test PT (proportion target) signal generation:
# create an Operator object whose signal type is PT (proportion target signal)
# the Operator holds two strategies, an LS-Strategy and a Sel-Strategy, for timing and stock selection respectively
# each strategy generates its own PT signal and the two are blended into a single output
self.op = qt.Operator(strategies=[test_ls, test_sel])
self.op.set_parameter(stg_id='custom',
pars={'000010': (5, 10.),
'000030': (5, 10.),
'000039': (5, 6.)})
self.op.set_parameter(stg_id=1,
pars=())
# self.a_to_sell.set_blender(blender='0+1+2')
self.op.prepare_data(hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
print('--test operator information in normal mode--')
self.op.info()
self.assertEqual(self.op.strategy_blenders,
{'close': ['+', '1', '0']})
self.op.set_blender(None, '0*1')
self.assertEqual(self.op.strategy_blenders,
{'close': ['*', '1', '0']})
print('--test operation signal created in Proportional Target (PT) Mode--')
op_list = self.op.create_signal(hist_data=self.hp1)
self.assertTrue(isinstance(op_list, HistoryPanel))
backtest_price_types = op_list.htypes
self.assertEqual(backtest_price_types[0], 'close')
self.assertEqual(op_list.shape, (3, 45, 1))
reduced_op_list = op_list.values.squeeze().T
print(f'op_list created; it is a 3-share/45-day/1-htype array. To make the comparison easier,\n'
f'it is squeezed into a 2-d array and compared share-wise:\n'
f'{reduced_op_list}')
target_op_values = np.array([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0]])
self.assertTrue(np.allclose(target_op_values, reduced_op_list, equal_nan=True))
print('--Test two separate signal generation for different price types--')
# test generation of two sets of PT signals:
# add two more strategies to the Operator, of the same types as before but with different parameters, and set their back-test price type to "open"
# the Operator should then generate two sets of trade signals, one for each of the two price types "close" and "open"
# two brand-new strategy objects must be created here, otherwise the op's strategies list would hold duplicate references to the same objects and cause errors
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
self.op.add_strategies([test_ls, test_sel])
self.op.set_parameter(stg_id='custom_2',
price_type='open')
self.op.set_parameter(stg_id='custom_3',
price_type='open')
self.assertEqual(self.op['custom'].price_type, 'close')
self.assertEqual(self.op['custom_2'].price_type, 'open')
self.op.set_parameter(stg_id='custom_2',
pars={'000010': (5, 10.),
'000030': (5, 10.),
'000039': (5, 6.)})
self.op.set_parameter(stg_id='custom_3',
pars=())
self.op.set_blender(blender='0 or 1', price_type='open')
self.op.prepare_data(hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
print('--test how operator information is printed out--')
self.op.info()
self.assertEqual(self.op.strategy_blenders,
{'close': ['*', '1', '0'],
'open': ['or', '1', '0']})
print('--test operation signal created in Proportional Target (PT) Mode--')
op_list = self.op.create_signal(hist_data=self.hp1)
self.assertTrue(isinstance(op_list, HistoryPanel))
signal_close = op_list['close'].squeeze().T
signal_open = op_list['open'].squeeze().T
self.assertEqual(signal_close.shape, (45, 3))
self.assertEqual(signal_open.shape, (45, 3))
target_op_close = np.array([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0]])
target_op_open = np.array([[0.5, 0.5, 1.0],
[0.5, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 0.0],
[1.0, 1.0, 0.0],
[1.0, 1.0, 0.0],
[1.0, 0.5, 0.0],
[1.0, 0.5, 0.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.5, 1.0, 0.0],
[0.5, 1.0, 0.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0]])
signal_pairs = [[list(sig1), list(sig2), sig1 == sig2]
for sig1, sig2
in zip(list(target_op_close), list(signal_close))]
print(f'signals side by side:\n'
f'{signal_pairs}')
self.assertTrue(np.allclose(target_op_close, signal_close, equal_nan=True))
signal_pairs = [[list(sig1), list(sig2), sig1 == sig2]
for sig1, sig2
in zip(list(target_op_open), list(signal_open))]
print(f'signals side by side:\n'
f'{signal_pairs}')
self.assertTrue(np.allclose(target_op_open, signal_open, equal_nan=True))
print('--Test two separate signal generation for different price types--')
# more test cases to be added
def test_stg_parameter_setting(self):
""" test setting parameters of strategies
test the method set_parameter
:return:
"""
op = qt.Operator(strategies='dma, all, urgent')
print(op.strategies, '\n', [qt.TimingDMA, qt.SelectingAll, qt.RiconUrgent])
print(f'info of Timing strategy in new op: \n{op.strategies[0].info()}')
# TODO: allow set_parameters to a list of strategies or str-listed strategies
# TODO: allow set_parameters to all strategies of specific bt price type
print(f'Set up strategy parameters by strategy id')
op.set_parameter('dma',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
op.set_parameter('all',
window_length=20)
op.set_parameter('all', price_type='high')
print(f'Can also set up strategy parameters by strategy index')
op.set_parameter(2, price_type='open')
op.set_parameter(2,
opt_tag=1,
pars=(9, -0.09),
window_length=10)
self.assertEqual(op.strategies[0].pars, (5, 10, 5))
self.assertEqual(op.strategies[0].par_boes, ((5, 10), (5, 15), (10, 15)))
self.assertEqual(op.strategies[2].pars, (9, -0.09))
self.assertEqual(op.op_data_freq, 'd')
self.assertEqual(op.op_data_types, ['close', 'high', 'open'])
self.assertEqual(op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (1, 40), (-0.5, 0.5)],
['discr', 'discr', 'discr', 'discr', 'conti']))
self.assertEqual(op.max_window_length, 20)
print(f'KeyError will be raised if wrong strategy id is given')
self.assertRaises(KeyError, op.set_parameter, stg_id='t-1', pars=(1, 2))
self.assertRaises(KeyError, op.set_parameter, stg_id='wrong_input', pars=(1, 2))
print(f'ValueError will be raised if the parameter cannot be set (invalid input)')
self.assertRaises(ValueError, op.set_parameter, stg_id=0, pars=('wrong input', 'wrong input'))
# test blenders of different price types
# test setting blenders to different price types
# TODO: allow operators like "and", "or", "not", "xor"
# a_to_sell.set_blender('close', '0 and 1 or 2')
# self.assertEqual(a_to_sell.get_blender('close'), 'str-1.2')
self.assertEqual(op.bt_price_types, ['close', 'high', 'open'])
op.set_blender('open', '0 & 1 | 2')
self.assertEqual(op.get_blender('open'), ['|', '2', '&', '1', '0'])
op.set_blender('high', '(0|1) & 2')
self.assertEqual(op.get_blender('high'), ['&', '2', '|', '1', '0'])
op.set_blender('close', '0 & 1 | 2')
self.assertEqual(op.get_blender(), {'close': ['|', '2', '&', '1', '0'],
'high': ['&', '2', '|', '1', '0'],
'open': ['|', '2', '&', '1', '0']})
self.assertEqual(op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (1, 40), (-0.5, 0.5)],
['discr', 'discr', 'discr', 'discr', 'conti']))
self.assertEqual(op.opt_tags, [1, 0, 1])
def test_signal_blend(self):
self.assertEqual(blender_parser('0 & 1'), ['&', '1', '0'])
self.assertEqual(blender_parser('0 or 1'), ['or', '1', '0'])
self.assertEqual(blender_parser('0 & 1 | 2'), ['|', '2', '&', '1', '0'])
blender = blender_parser('0 & 1 | 2')
self.assertEqual(signal_blend([1, 1, 1], blender), 1)
self.assertEqual(signal_blend([1, 0, 1], blender), 1)
self.assertEqual(signal_blend([1, 1, 0], blender), 1)
self.assertEqual(signal_blend([0, 1, 1], blender), 1)
self.assertEqual(signal_blend([0, 0, 1], blender), 1)
self.assertEqual(signal_blend([1, 0, 0], blender), 0)
self.assertEqual(signal_blend([0, 1, 0], blender), 0)
self.assertEqual(signal_blend([0, 0, 0], blender), 0)
# parse: '0 & ( 1 | 2 )'
self.assertEqual(blender_parser('0 & ( 1 | 2 )'), ['&', '|', '2', '1', '0'])
blender = blender_parser('0 & ( 1 | 2 )')
self.assertEqual(signal_blend([1, 1, 1], blender), 1)
self.assertEqual(signal_blend([1, 0, 1], blender), 1)
self.assertEqual(signal_blend([1, 1, 0], blender), 1)
self.assertEqual(signal_blend([0, 1, 1], blender), 0)
self.assertEqual(signal_blend([0, 0, 1], blender), 0)
self.assertEqual(signal_blend([1, 0, 0], blender), 0)
self.assertEqual(signal_blend([0, 1, 0], blender), 0)
self.assertEqual(signal_blend([0, 0, 0], blender), 0)
# parse: '(1-2)/3 + 0'
self.assertEqual(blender_parser('(1-2)/3 + 0'), ['+', '0', '/', '3', '-', '2', '1'])
blender = blender_parser('(1-2)/3 + 0')
self.assertEqual(signal_blend([5, 9, 1, 4], blender), 7)
# pars: '(0*1/2*(3+4))+5*(6+7)-8'
self.assertEqual(blender_parser('(0*1/2*(3+4))+5*(6+7)-8'), ['-', '8', '+', '*', '+', '7', '6', '5', '*',
'+', '4', '3', '/', '2', '*', '1', '0'])
blender = blender_parser('(0*1/2*(3+4))+5*(6+7)-8')
self.assertEqual(signal_blend([1, 1, 1, 1, 1, 1, 1, 1, 1], blender), 3)
self.assertEqual(signal_blend([2, 1, 4, 3, 5, 5, 2, 2, 10], blender), 14)
# parse: '0/max(2,1,3 + 5)+4'
self.assertEqual(blender_parser('0/max(2,1,3 + 5)+4'), ['+', '4', '/', 'max(3)', '+', '5', '3', '1', '2', '0'])
blender = blender_parser('0/max(2,1,3 + 5)+4')
self.assertEqual(signal_blend([8.0, 4, 3, 5.0, 0.125, 5], blender), 0.925)
self.assertEqual(signal_blend([2, 1, 4, 3, 5, 5, 2, 2, 10], blender), 5.25)
print('speed test')
import time
st = time.time()
blender = blender_parser('0+max(1,2,(3+4)*5, max(6, (7+8)*9), 10-11) * (12+13)')
res = []
for i in range(10000):
res = signal_blend([1, 1, 2, 3, 4, 5, 3, 4, 5, 6, 7, 8, 2, 3], blender)
et = time.time()
print(f'total time for RPN processing: {et - st}, got result: {res}')
blender = blender_parser("0 + 1 * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 7)
blender = blender_parser("(0 + 1) * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 9)
blender = blender_parser("(0+1) * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 9)
blender = blender_parser("(0 + 1) * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 9)
# TODO: expressions like -(1+2), i.e. a unary minus applied to a parenthesized expression, cannot be handled yet
# self.a_to_sell.set_blender('selecting', "-(0 + 1) * 2")
# self.assertEqual(self.a_to_sell.signal_blend([1, 2, 3]), -9)
blender = blender_parser("(0-1)/2 + 3")
print(f'RPN of notation: "(0-1)/2 + 3" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([1, 2, 3, 0.0], blender), -0.33333333)
blender = blender_parser("0 + 1 / 2")
print(f'RPN of notation: "0 + 1 / 2" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([1, math.pi, 4], blender), 1.78539816)
blender = blender_parser("(0 + 1) / 2")
print(f'RPN of notation: "(0 + 1) / 2" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(signal_blend([1, 2, 3], blender), 1)
blender = blender_parser("(0 + 1 * 2) / 3")
print(f'RPN of notation: "(0 + 1 * 2) / 3" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([3, math.e, 10, 10], blender), 3.0182818284590454)
blender = blender_parser("0 / 1 * 2")
print(f'RPN of notation: "0 / 1 * 2" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(signal_blend([1, 3, 6], blender), 2)
blender = blender_parser("(0 - 1 + 2) * 4")
print(f'RPN of notation: "(0 - 1 + 2) * 4" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([1, 1, -1, np.nan, math.pi], blender), -3.141592653589793)
blender = blender_parser("0 * 1")
print(f'RPN of notation: "0 * 1" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([math.pi, math.e], blender), 8.539734222673566)
blender = blender_parser('abs(3-sqrt(2) / cos(1))')
print(f'RPN of notation: "abs(3-sqrt(2) / cos(1))" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['abs(1)', '-', '/', 'cos(1)', '1', 'sqrt(1)', '2', '3'])
blender = blender_parser('0/max(2,1,3 + 5)+4')
print(f'RPN of notation: "0/max(2,1,3 + 5)+4" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['+', '4', '/', 'max(3)', '+', '5', '3', '1', '2', '0'])
blender = blender_parser('1 + sum(1,2,3+3, sum(1, 2) + 3) *5')
print(f'RPN of notation: "1 + sum(1,2,3+3, sum(1, 2) + 3) *5" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['+', '*', '5', 'sum(4)', '+', '3', 'sum(2)', '2', '1',
'+', '3', '3', '2', '1', '1'])
blender = blender_parser('1+sum(1,2,(3+5)*4, sum(3, (4+5)*6), 7-8) * (2+3)')
print(f'RPN of notation: "1+sum(1,2,(3+5)*4, sum(3, (4+5)*6), 7-8) * (2+3)" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['+', '*', '+', '3', '2', 'sum(5)', '-', '8', '7',
'sum(2)', '*', '6', '+', '5', '4', '3', '*', '4',
'+', '5', '3', '2', '1', '1'])
# TODO: ndarray type of signals to be tested:
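# Note: the blender lists above are evaluated by signal_blend() as reversed
# RPN expressions. Purely as an illustration (not qteasy's actual
# implementation), such a list can be evaluated by walking it from the end,
# pushing operands onto a stack and applying each operator or n-ary function
# token to values popped from that stack. A worked example taken from the
# assertions above: blender_parser('(1-2)/3 + 0') == ['+', '0', '/', '3', '-', '2', '1'],
# and with inputs [5, 9, 1, 4] this evaluates to (9 - 1) / 4 + 5 == 7.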
def test_set_opt_par(self):
""" test setting opt pars in batch"""
print(f'--------- Testing setting Opt Pars: set_opt_par -------')
op = qt.Operator('dma, random, crossline')
op.set_parameter('dma',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
self.assertEqual(op.strategies[0].pars, (5, 10, 5))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (35, 120, 10, 'buy'))
self.assertEqual(op.opt_tags, [1, 0, 0])
op.set_opt_par((5, 12, 9))
self.assertEqual(op.strategies[0].pars, (5, 12, 9))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (35, 120, 10, 'buy'))
op.set_parameter('crossline',
pars=(5, 10, 5, 'sell'),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15), ('buy', 'sell', 'none')),
window_length=10,
data_types=['close', 'open', 'high'])
self.assertEqual(op.opt_tags, [1, 0, 1])
op.set_opt_par((5, 12, 9, 8, 26, 9, 'buy'))
self.assertEqual(op.strategies[0].pars, (5, 12, 9))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (8, 26, 9, 'buy'))
op.set_opt_par((9, 200, 155, 8, 26, 9, 'buy', 5, 12, 9))
self.assertEqual(op.strategies[0].pars, (9, 200, 155))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (8, 26, 9, 'buy'))
# test set_opt_par when opt_tag is set to be 2 (enumerate type of parameters)
op.set_parameter('crossline',
pars=(5, 10, 5, 'sell'),
opt_tag=2,
par_boes=((5, 10), (5, 15), (10, 15), ('buy', 'sell', 'none')),
window_length=10,
data_types=['close', 'open', 'high'])
self.assertEqual(op.opt_tags, [1, 0, 2])
self.assertEqual(op.strategies[0].pars, (9, 200, 155))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (5, 10, 5, 'sell'))
op.set_opt_par((5, 12, 9, (8, 26, 9, 'buy')))
self.assertEqual(op.strategies[0].pars, (5, 12, 9))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (8, 26, 9, 'buy'))
# Test Errors
# Not enough values for parameter
op.set_parameter('crossline', opt_tag=1)
self.assertRaises(ValueError, op.set_opt_par, (5, 12, 9, 8))
# wrong type of input
self.assertRaises(AssertionError, op.set_opt_par, [5, 12, 9, 7, 15, 12, 'sell'])
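# Note: based on the assertions above, set_opt_par() distributes the given
# flat tuple over the strategies in order, touching only those whose opt_tag
# is non-zero: an opt_tag==1 strategy consumes as many values as it has
# parameters, while an opt_tag==2 (enumerated) strategy consumes a single
# element that is itself the complete parameter tuple. Strategies with
# opt_tag==0 keep their existing parameters, and surplus trailing values seem
# to be ignored.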
def test_stg_attribute_get_and_set(self):
self.stg = qt.TimingCrossline()
self.stg_type = 'R-TIMING'
self.stg_name = "CROSSLINE"
self.stg_text = 'Moving average crossline strategy, determine long/short position according to the cross ' \
'point' \
' of long and short term moving average prices '
self.pars = (35, 120, 10, 'buy')
self.par_boes = [(10, 250), (10, 250), (1, 100), ('buy', 'sell', 'none')]
self.par_count = 4
self.par_types = ['discr', 'discr', 'conti', 'enum']
self.opt_tag = 0
self.data_types = ['close']
self.data_freq = 'd'
self.sample_freq = 'd'
self.window_length = 270
self.assertEqual(self.stg.stg_type, self.stg_type)
self.assertEqual(self.stg.stg_name, self.stg_name)
self.assertEqual(self.stg.stg_text, self.stg_text)
self.assertEqual(self.stg.pars, self.pars)
self.assertEqual(self.stg.par_types, self.par_types)
self.assertEqual(self.stg.par_boes, self.par_boes)
self.assertEqual(self.stg.par_count, self.par_count)
self.assertEqual(self.stg.opt_tag, self.opt_tag)
self.assertEqual(self.stg.data_freq, self.data_freq)
self.assertEqual(self.stg.sample_freq, self.sample_freq)
self.assertEqual(self.stg.data_types, self.data_types)
self.assertEqual(self.stg.window_length, self.window_length)
self.stg.stg_name = 'NEW NAME'
self.stg.stg_text = 'NEW TEXT'
self.assertEqual(self.stg.stg_name, 'NEW NAME')
self.assertEqual(self.stg.stg_text, 'NEW TEXT')
self.stg.pars = (1, 2, 3, 4)
self.assertEqual(self.stg.pars, (1, 2, 3, 4))
self.stg.par_count = 3
self.assertEqual(self.stg.par_count, 3)
self.stg.par_boes = [(1, 10), (1, 10), (1, 10), (1, 10)]
self.assertEqual(self.stg.par_boes, [(1, 10), (1, 10), (1, 10), (1, 10)])
self.stg.par_types = ['conti', 'conti', 'discr', 'enum']
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'enum'])
self.stg.par_types = 'conti, conti, discr, conti'
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'conti'])
self.stg.data_types = 'close, open'
self.assertEqual(self.stg.data_types, ['close', 'open'])
self.stg.data_types = ['close', 'high', 'low']
self.assertEqual(self.stg.data_types, ['close', 'high', 'low'])
self.stg.data_freq = 'w'
self.assertEqual(self.stg.data_freq, 'w')
self.stg.window_length = 300
self.assertEqual(self.stg.window_length, 300)
def test_rolling_timing(self):
stg = TestLSStrategy()
stg_pars = {'000100': (5, 10),
'000200': (5, 10),
'000300': (5, 6)}
stg.set_pars(stg_pars)
history_data = self.hp1.values
output = stg.generate(hist_data=history_data)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
lsmask = np.array([[0., 0., 1.],
[0., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 0.],
[1., 1., 0.],
[1., 1., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.]])
# TODO: Issue to be solved: the np.nan values are converted to 0 in lsmask, which may have unintended consequences
# TODO: the handling of nan values needs to be resolved
self.assertEqual(output.shape, lsmask.shape)
self.assertTrue(np.allclose(output, lsmask, equal_nan=True))
def test_sel_timing(self):
stg = TestSelStrategy()
stg_pars = ()
stg.set_pars(stg_pars)
history_data = self.hp1['high, low, close', :, :]
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
def test_simple_timing(self):
stg = TestSigStrategy()
stg_pars = (0.2, 0.02, -0.02)
stg.set_pars(stg_pars)
history_data = self.hp1['close, open, high, low', :, 3:50]
output = stg.generate(hist_data=history_data, shares=self.shares, dates=self.date_indices)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
sigmatrix = np.array([[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, -1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0]])
side_by_side_array = np.array([[i, out_line, sig_line]
for
i, out_line, sig_line
in zip(range(len(output)), output, sigmatrix)])
print(f'output and signal matrix lined up side by side is \n'
f'{side_by_side_array}')
self.assertEqual(sigmatrix.shape, output.shape)
self.assertTrue(np.allclose(output, sigmatrix))
def test_sel_finance(self):
"""Test selecting_finance strategy, test all built-in strategy parameters"""
stg = SelectingFinanceIndicator()
stg_pars = (False, 'even', 'greater', 0, 0, 0.67)
stg.set_pars(stg_pars)
stg.window_length = 5
stg.data_freq = 'd'
stg.sample_freq = '10d'
stg.sort_ascending = False
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg._poq = 0.67
history_data = self.hp2.values
print(f'Start to test financial selection parameter {stg_pars}')
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get minimum factor
stg_pars = (True, 'even', 'less', 1, 1, 0.67)
stg.sort_ascending = True
stg.condition = 'less'
stg.lbound = 1
stg.ubound = 1
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor in linear weight
stg_pars = (False, 'linear', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'linear'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor with proportion weighting
stg_pars = (False, 'proportion', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'proportion'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
# test single factor, get max factor with even weighting and threshold 0.2
stg_pars = (False, 'even', 'greater', 0.2, 0.2, 0.67)
stg.sort_ascending = False
stg.weighting = 'even'
stg.condition = 'greater'
stg.lbound = 0.2
stg.ubound = 0.2
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
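# Note: judging from the attribute assignments above, the six-element pars
# tuple of SelectingFinanceIndicator corresponds to
# (sort_ascending, weighting, condition, lbound, ubound, _poq), where _poq
# (0.67 in these cases) appears to control the proportion of shares selected.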
def test_tokenizer(self):
self.assertListEqual(_exp_to_token('1+1'),
['1', '+', '1'])
print(_exp_to_token('1+1'))
self.assertListEqual(_exp_to_token('1 & 1'),
['1', '&', '1'])
print(_exp_to_token('1&1'))
self.assertListEqual(_exp_to_token('1 and 1'),
['1', 'and', '1'])
print(_exp_to_token('1 and 1'))
self.assertListEqual(_exp_to_token('1 or 1'),
['1', 'or', '1'])
print(_exp_to_token('1 or 1'))
self.assertListEqual(_exp_to_token('(1 - 1 + -1) * pi'),
['(', '1', '-', '1', '+', '-1', ')', '*', 'pi'])
print(_exp_to_token('(1 - 1 + -1) * pi'))
self.assertListEqual(_exp_to_token('abs(5-sqrt(2) / cos(pi))'),
['abs(', '5', '-', 'sqrt(', '2', ')', '/', 'cos(', 'pi', ')', ')'])
print(_exp_to_token('abs(5-sqrt(2) / cos(pi))'))
self.assertListEqual(_exp_to_token('sin(pi) + 2.14'),
['sin(', 'pi', ')', '+', '2.14'])
print(_exp_to_token('sin(pi) + 2.14'))
self.assertListEqual(_exp_to_token('(1-2)/3.0 + 0.0000'),
['(', '1', '-', '2', ')', '/', '3.0', '+', '0.0000'])
print(_exp_to_token('(1-2)/3.0 + 0.0000'))
self.assertListEqual(_exp_to_token('-(1. + .2) * max(1, 3, 5)'),
['-', '(', '1.', '+', '.2', ')', '*', 'max(', '1', ',', '3', ',', '5', ')'])
print(_exp_to_token('-(1. + .2) * max(1, 3, 5)'))
self.assertListEqual(_exp_to_token('(x + e * 10) / 10'),
['(', 'x', '+', 'e', '*', '10', ')', '/', '10'])
print(_exp_to_token('(x + e * 10) / 10'))
self.assertListEqual(_exp_to_token('8.2/((-.1+abs3(3,4,5))*0.12)'),
['8.2', '/', '(', '(', '-.1', '+', 'abs3(', '3', ',', '4', ',', '5', ')', ')', '*', '0.12',
')'])
print(_exp_to_token('8.2/((-.1+abs3(3,4,5))*0.12)'))
self.assertListEqual(_exp_to_token('8.2/abs3(3,4,25.34 + 5)*0.12'),
['8.2', '/', 'abs3(', '3', ',', '4', ',', '25.34', '+', '5', ')', '*', '0.12'])
print(_exp_to_token('8.2/abs3(3,4,25.34 + 5)*0.12'))
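# Note: the tokenizer cases above illustrate a few conventions of
# _exp_to_token(): a function call is emitted as a single 'name(' token with
# its closing ')' kept separate, a unary sign directly preceding a number is
# folded into the numeric literal ('-1', '-.1'), and word operators such as
# 'and' / 'or' are preserved as tokens alongside '&' and '|'.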
class TestLog(unittest.TestCase):
def test_init(self):
pass
class TestConfig(unittest.TestCase):
"""测试Config对象以及QT_CONFIG变量的设置和获取值"""
def test_init(self):
pass
def test_invest(self):
pass
def test_pars_string_to_type(self):
_parse_string_kwargs('000300', 'asset_pool', _valid_qt_kwargs())
class TestHistoryPanel(unittest.TestCase):
def setUp(self):
print('start testing HistoryPanel object\n')
self.data = np.random.randint(10, size=(5, 10, 4))
self.index = pd.date_range(start='20200101', freq='d', periods=10)
self.index2 = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14']
self.index3 = '2016-07-01, 2016-07-04, 2016-07-05, 2016-07-06, 2016-07-07, ' \
'2016-07-08, 2016-07-11, 2016-07-12, 2016-07-13, 2016-07-14'
self.shares = '000100,000101,000102,000103,000104'
self.htypes = 'close,open,high,low'
self.data2 = np.random.randint(10, size=(10, 5))
self.data3 = np.random.randint(10, size=(10, 4))
self.data4 = np.random.randint(10, size=(10))
self.hp = qt.HistoryPanel(values=self.data, levels=self.shares, columns=self.htypes, rows=self.index)
self.hp2 = qt.HistoryPanel(values=self.data2, levels=self.shares, columns='close', rows=self.index)
self.hp3 = qt.HistoryPanel(values=self.data3, levels='000100', columns=self.htypes, rows=self.index2)
self.hp4 = qt.HistoryPanel(values=self.data4, levels='000100', columns='close', rows=self.index3)
self.hp5 = qt.HistoryPanel(values=self.data)
self.hp6 = qt.HistoryPanel(values=self.data, levels=self.shares, rows=self.index3)
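# Note: a HistoryPanel is a 3-D container whose axes are levels (axis 0, the
# shares), rows (axis 1, the datetime index or hdates) and columns (axis 2,
# the htypes such as close/open/high/low); the (5, 10, 4) test data above
# therefore describes 5 shares over 10 days with 4 price types.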
def test_properties(self):
""" test all properties of HistoryPanel
"""
self.assertFalse(self.hp.is_empty)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.shape, (5, 10, 4))
self.assertSequenceEqual(self.hp.htypes, ['close', 'open', 'high', 'low'])
self.assertSequenceEqual(self.hp.shares, ['000100', '000101', '000102', '000103', '000104'])
self.assertSequenceEqual(list(self.hp.hdates), list(self.index))
self.assertDictEqual(self.hp.columns, {'close': 0, 'open': 1, 'high': 2, 'low': 3})
self.assertDictEqual(self.hp.levels, {'000100': 0, '000101': 1, '000102': 2, '000103': 3, '000104': 4})
row_dict = {Timestamp('2020-01-01 00:00:00', freq='D'): 0,
Timestamp('2020-01-02 00:00:00', freq='D'): 1,
Timestamp('2020-01-03 00:00:00', freq='D'): 2,
Timestamp('2020-01-04 00:00:00', freq='D'): 3,
Timestamp('2020-01-05 00:00:00', freq='D'): 4,
Timestamp('2020-01-06 00:00:00', freq='D'): 5,
Timestamp('2020-01-07 00:00:00', freq='D'): 6,
Timestamp('2020-01-08 00:00:00', freq='D'): 7,
Timestamp('2020-01-09 00:00:00', freq='D'): 8,
Timestamp('2020-01-10 00:00:00', freq='D'): 9}
self.assertDictEqual(self.hp.rows, row_dict)
def test_len(self):
""" test the function len(HistoryPanel)
:return:
"""
self.assertEqual(len(self.hp), 10)
def test_empty_history_panel(self):
"""测试空HP或者特殊HP如维度标签为纯数字的HP"""
test_hp = qt.HistoryPanel(self.data)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
self.assertEqual(test_hp.level_count, 5)
self.assertEqual(test_hp.row_count, 10)
self.assertEqual(test_hp.column_count, 4)
self.assertEqual(test_hp.shares, list(range(5)))
self.assertEqual(test_hp.hdates, list(pd.date_range(start='20200730', periods=10, freq='d')))
self.assertEqual(test_hp.htypes, list(range(4)))
self.assertTrue(np.allclose(test_hp.values, self.data))
print(f'shares: {test_hp.shares}\nhtypes: {test_hp.htypes}')
print(test_hp)
# HistoryPanel should be empty if no value is given
empty_hp = qt.HistoryPanel()
self.assertTrue(empty_hp.is_empty)
self.assertIsInstance(empty_hp, qt.HistoryPanel)
self.assertEqual(empty_hp.shape[0], 0)
self.assertEqual(empty_hp.shape[1], 0)
self.assertEqual(empty_hp.shape[2], 0)
self.assertEqual(empty_hp.level_count, 0)
self.assertEqual(empty_hp.row_count, 0)
self.assertEqual(empty_hp.column_count, 0)
# HistoryPanel should also be empty if empty value (np.array([])) is given
empty_hp = qt.HistoryPanel(np.empty((5, 0, 4)), levels=self.shares, columns=self.htypes)
self.assertTrue(empty_hp.is_empty)
self.assertIsInstance(empty_hp, qt.HistoryPanel)
self.assertEqual(empty_hp.shape[0], 0)
self.assertEqual(empty_hp.shape[1], 0)
self.assertEqual(empty_hp.shape[2], 0)
self.assertEqual(empty_hp.level_count, 0)
self.assertEqual(empty_hp.row_count, 0)
self.assertEqual(empty_hp.column_count, 0)
def test_create_history_panel(self):
""" test the creation of a HistoryPanel object by passing all data explicitly
"""
self.assertIsInstance(self.hp, qt.HistoryPanel)
self.assertEqual(self.hp.shape[0], 5)
self.assertEqual(self.hp.shape[1], 10)
self.assertEqual(self.hp.shape[2], 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(list(self.hp.levels.keys()), self.shares.split(','))
self.assertEqual(list(self.hp.columns.keys()), self.htypes.split(','))
self.assertEqual(list(self.hp.rows.keys())[0], pd.Timestamp('20200101'))
self.assertIsInstance(self.hp2, qt.HistoryPanel)
self.assertEqual(self.hp2.shape[0], 5)
self.assertEqual(self.hp2.shape[1], 10)
self.assertEqual(self.hp2.shape[2], 1)
self.assertEqual(self.hp2.level_count, 5)
self.assertEqual(self.hp2.row_count, 10)
self.assertEqual(self.hp2.column_count, 1)
self.assertEqual(list(self.hp2.levels.keys()), self.shares.split(','))
self.assertEqual(list(self.hp2.columns.keys()), ['close'])
self.assertEqual(list(self.hp2.rows.keys())[0], pd.Timestamp('20200101'))
self.assertIsInstance(self.hp3, qt.HistoryPanel)
self.assertEqual(self.hp3.shape[0], 1)
self.assertEqual(self.hp3.shape[1], 10)
self.assertEqual(self.hp3.shape[2], 4)
self.assertEqual(self.hp3.level_count, 1)
self.assertEqual(self.hp3.row_count, 10)
self.assertEqual(self.hp3.column_count, 4)
self.assertEqual(list(self.hp3.levels.keys()), ['000100'])
self.assertEqual(list(self.hp3.columns.keys()), self.htypes.split(','))
self.assertEqual(list(self.hp3.rows.keys())[0], pd.Timestamp('2016-07-01'))
self.assertIsInstance(self.hp4, qt.HistoryPanel)
self.assertEqual(self.hp4.shape[0], 1)
self.assertEqual(self.hp4.shape[1], 10)
self.assertEqual(self.hp4.shape[2], 1)
self.assertEqual(self.hp4.level_count, 1)
self.assertEqual(self.hp4.row_count, 10)
self.assertEqual(self.hp4.column_count, 1)
self.assertEqual(list(self.hp4.levels.keys()), ['000100'])
self.assertEqual(list(self.hp4.columns.keys()), ['close'])
self.assertEqual(list(self.hp4.rows.keys())[0], pd.Timestamp('2016-07-01'))
self.hp5.info()
self.assertIsInstance(self.hp5, qt.HistoryPanel)
self.assertTrue(np.allclose(self.hp5.values, self.data))
self.assertEqual(self.hp5.shape[0], 5)
self.assertEqual(self.hp5.shape[1], 10)
self.assertEqual(self.hp5.shape[2], 4)
self.assertEqual(self.hp5.level_count, 5)
self.assertEqual(self.hp5.row_count, 10)
self.assertEqual(self.hp5.column_count, 4)
self.assertEqual(list(self.hp5.levels.keys()), [0, 1, 2, 3, 4])
self.assertEqual(list(self.hp5.columns.keys()), [0, 1, 2, 3])
self.assertEqual(list(self.hp5.rows.keys())[0], pd.Timestamp('2020-07-30'))
self.hp6.info()
self.assertIsInstance(self.hp6, qt.HistoryPanel)
self.assertTrue(np.allclose(self.hp6.values, self.data))
self.assertEqual(self.hp6.shape[0], 5)
self.assertEqual(self.hp6.shape[1], 10)
self.assertEqual(self.hp6.shape[2], 4)
self.assertEqual(self.hp6.level_count, 5)
self.assertEqual(self.hp6.row_count, 10)
self.assertEqual(self.hp6.column_count, 4)
self.assertEqual(list(self.hp6.levels.keys()), ['000100', '000101', '000102', '000103', '000104'])
self.assertEqual(list(self.hp6.columns.keys()), [0, 1, 2, 3])
self.assertEqual(list(self.hp6.rows.keys())[0], pd.Timestamp('2016-07-01'))
print('test creating HistoryPanel with very limited data')
print('test creating HistoryPanel with 2D data')
temp_data = np.random.randint(10, size=(7, 3)).astype('float')
temp_hp = qt.HistoryPanel(temp_data)
# Error testing during HistoryPanel creation
# shape does not match
self.assertRaises(AssertionError,
qt.HistoryPanel,
self.data,
levels=self.shares, columns='close', rows=self.index)
# values is not an np.ndarray
self.assertRaises(TypeError,
qt.HistoryPanel,
list(self.data))
# dimension/shape does not match
self.assertRaises(AssertionError,
qt.HistoryPanel,
self.data2,
levels='000100', columns=self.htypes, rows=self.index)
# value dimension over 3
self.assertRaises(AssertionError,
qt.HistoryPanel,
np.random.randint(10, size=(5, 10, 4, 2)))
# label value is not valid
self.assertRaises(ValueError,
qt.HistoryPanel,
self.data2,
levels=self.shares, columns='close',
rows='a,b,c,d,e,f,g,h,i,j')
def test_history_panel_slicing(self):
"""测试HistoryPanel的各种切片方法
包括通过标签名称切片,通过数字切片,通过逗号分隔的标签名称切片,通过冒号分隔的标签名称切片等切片方式"""
self.assertTrue(np.allclose(self.hp['close'], self.data[:, :, 0:1]))
self.assertTrue(np.allclose(self.hp['close,open'], self.data[:, :, 0:2]))
self.assertTrue(np.allclose(self.hp[['close', 'open']], self.data[:, :, 0:2]))
self.assertTrue(np.allclose(self.hp['close:high'], self.data[:, :, 0:3]))
self.assertTrue(np.allclose(self.hp['close,high'], self.data[:, :, [0, 2]]))
self.assertTrue(np.allclose(self.hp[:, '000100'], self.data[0:1, :, ]))
self.assertTrue(np.allclose(self.hp[:, '000100,000101'], self.data[0:2, :]))
self.assertTrue(np.allclose(self.hp[:, ['000100', '000101']], self.data[0:2, :]))
self.assertTrue(np.allclose(self.hp[:, '000100:000102'], self.data[0:3, :]))
self.assertTrue(np.allclose(self.hp[:, '000100,000102'], self.data[[0, 2], :]))
self.assertTrue(np.allclose(self.hp['close,open', '000100,000102'], self.data[[0, 2], :, 0:2]))
print('start testing HistoryPanel')
data = np.random.randint(10, size=(10, 5))
# index = pd.date_range(start='20200101', freq='d', periods=10)
shares = '000100,000101,000102,000103,000104'
dtypes = 'close'
df = pd.DataFrame(data)
print('=========================\nTesting HistoryPanel creation from DataFrame')
hp = qt.dataframe_to_hp(df=df, shares=shares, htypes=dtypes)
hp.info()
hp = qt.dataframe_to_hp(df=df, shares='000100', htypes='close, open, high, low, middle', column_type='htypes')
hp.info()
print('=========================\nTesting HistoryPanel creation from initialization')
data = np.random.randint(10, size=(5, 10, 4)).astype('float')
index = pd.date_range(start='20200101', freq='d', periods=10)
dtypes = 'close, open, high,low'
data[0, [5, 6, 9], [0, 1, 3]] = np.nan
data[1:4, [4, 7, 6, 2], [1, 1, 3, 0]] = np.nan
data[4:5, [2, 9, 1, 2], [0, 3, 2, 1]] = np.nan
hp = qt.HistoryPanel(data, levels=shares, columns=dtypes, rows=index)
hp.info()
print('==========================\noutput all history data of the close htype\n')
self.assertTrue(np.allclose(hp['close', :, :], data[:, :, 0:1], equal_nan=True))
print(f'==========================\noutput all history data of the close and open htypes\n')
self.assertTrue(np.allclose(hp[[0, 1], :, :], data[:, :, 0:2], equal_nan=True))
print(f'==========================\noutput history data of all htypes for the first share\n')
self.assertTrue(np.allclose(hp[:, [0], :], data[0:1, :, :], equal_nan=True))
print('==========================\noutput history data of all shares for htypes 0, 1 and 2\n')
self.assertTrue(np.allclose(hp[[0, 1, 2]], data[:, :, 0:3], equal_nan=True))
print('==========================\noutput all history data of the close and high htypes\n')
self.assertTrue(np.allclose(hp[['close', 'high']], data[:, :, [0, 2]], equal_nan=True))
print('==========================\noutput all history data of htypes 0 and 1\n')
self.assertTrue(np.allclose(hp[[0, 1]], data[:, :, 0:2], equal_nan=True))
print('==========================\noutput all history data of the close and high htypes\n')
self.assertTrue(np.allclose(hp['close,high'], data[:, :, [0, 2]], equal_nan=True))
print('==========================\noutput all history data of the three htypes from close to high\n')
self.assertTrue(np.allclose(hp['close:high'], data[:, :, 0:3], equal_nan=True))
print('==========================\noutput all history data of shares 0, 1 and 3\n')
self.assertTrue(np.allclose(hp[:, [0, 1, 3]], data[[0, 1, 3], :, :], equal_nan=True))
print('==========================\noutput all history data of shares 000100 and 000102\n')
self.assertTrue(np.allclose(hp[:, ['000100', '000102']], data[[0, 2], :, :], equal_nan=True))
print('==========================\noutput history data of shares 0, 1 and 2\n', hp[:, 0: 3])
self.assertTrue(np.allclose(hp[:, 0: 3], data[0:3, :, :], equal_nan=True))
print('==========================\noutput all history data of shares 000100 and 000102\n')
self.assertTrue(np.allclose(hp[:, '000100, 000102'], data[[0, 2], :, :], equal_nan=True))
print('==========================\noutput history data of days 0-7 for all shares\n')
self.assertTrue(np.allclose(hp[:, :, 0:8], data[:, 0:8, :], equal_nan=True))
print('==========================\noutput history data of days 0-7 for share 000100\n')
self.assertTrue(np.allclose(hp[:, '000100', 0:8], data[0, 0:8, :], equal_nan=True))
print('==========================\nstart testing multi-axis slicing of HistoryPanel object')
print('==========================\noutput close and open history data of shares 000100 and 000102\n',
hp['close,open', ['000100', '000102']])
print('==========================\noutput close and open history data of shares 000100 and 000102, selected with string keys\n',
hp['close,open', '000100, 000102'])
print(f'historyPanel: hp:\n{hp}')
print(f'data is:\n{data}')
hp.htypes = 'open,high,low,close'
hp.info()
hp.shares = ['000300', '600227', '600222', '000123', '000129']
hp.info()
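# Note: the slicing tests above indicate that HistoryPanel indexing takes
# htypes as the first key, shares as the second and rows (dates) as the third,
# and that each key may be a label, a comma-separated label string, a
# colon-separated label range, a list of labels, or plain integers/slices.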
def test_segment(self):
"""测试历史数据片段的获取"""
test_hp = qt.HistoryPanel(self.data,
levels=self.shares,
columns=self.htypes,
rows=self.index2)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
print(f'Test segment with None parameters')
seg1 = test_hp.segment()
seg2 = test_hp.segment('20150202')
seg3 = test_hp.segment(end_date='20201010')
self.assertIsInstance(seg1, qt.HistoryPanel)
self.assertIsInstance(seg2, qt.HistoryPanel)
self.assertIsInstance(seg3, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp.values
))
self.assertTrue(np.allclose(
seg2.values, test_hp.values
))
self.assertTrue(np.allclose(
seg3.values, test_hp.values
))
# check that htypes and shares should be same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
self.assertEqual(seg2.htypes, test_hp.htypes)
self.assertEqual(seg2.shares, test_hp.shares)
self.assertEqual(seg3.htypes, test_hp.htypes)
self.assertEqual(seg3.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
self.assertEqual(seg2.hdates, test_hp.hdates)
self.assertEqual(seg3.hdates, test_hp.hdates)
print(f'Test segment with proper dates')
seg1 = test_hp.segment()
seg2 = test_hp.segment('20160704')
seg3 = test_hp.segment(start_date='2016-07-05',
end_date='20160708')
self.assertIsInstance(seg1, qt.HistoryPanel)
self.assertIsInstance(seg2, qt.HistoryPanel)
self.assertIsInstance(seg3, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp[:, :, :]
))
self.assertTrue(np.allclose(
seg2.values, test_hp[:, :, 1:10]
))
self.assertTrue(np.allclose(
seg3.values, test_hp[:, :, 2:6]
))
# check that htypes and shares should be same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
self.assertEqual(seg2.htypes, test_hp.htypes)
self.assertEqual(seg2.shares, test_hp.shares)
self.assertEqual(seg3.htypes, test_hp.htypes)
self.assertEqual(seg3.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
self.assertEqual(seg2.hdates, test_hp.hdates[1:10])
self.assertEqual(seg3.hdates, test_hp.hdates[2:6])
print(f'Test segment with non-existing but in range dates')
seg1 = test_hp.segment()
seg2 = test_hp.segment('20160703')
seg3 = test_hp.segment(start_date='2016-07-03',
end_date='20160710')
self.assertIsInstance(seg1, qt.HistoryPanel)
self.assertIsInstance(seg2, qt.HistoryPanel)
self.assertIsInstance(seg3, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp[:, :, :]
))
self.assertTrue(np.allclose(
seg2.values, test_hp[:, :, 1:10]
))
self.assertTrue(np.allclose(
seg3.values, test_hp[:, :, 1:6]
))
# check that htypes and shares should be same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
self.assertEqual(seg2.htypes, test_hp.htypes)
self.assertEqual(seg2.shares, test_hp.shares)
self.assertEqual(seg3.htypes, test_hp.htypes)
self.assertEqual(seg3.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
self.assertEqual(seg2.hdates, test_hp.hdates[1:10])
self.assertEqual(seg3.hdates, test_hp.hdates[1:6])
print(f'Test segment with out-of-range dates')
seg1 = test_hp.segment(start_date='2016-05-03',
end_date='20160910')
self.assertIsInstance(seg1, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp[:, :, :]
))
# check that htypes and shares are the same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
def test_slice(self):
"""测试历史数据切片的获取"""
test_hp = qt.HistoryPanel(self.data,
levels=self.shares,
columns=self.htypes,
rows=self.index2)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
print(f'Test slice with shares')
share = '000101'
slc = test_hp.slice(shares=share)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, ['000101'])
self.assertEqual(slc.htypes, test_hp.htypes)
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp[:, '000101']))
share = '000101, 000103'
slc = test_hp.slice(shares=share)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, ['000101', '000103'])
self.assertEqual(slc.htypes, test_hp.htypes)
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp[:, '000101, 000103']))
print(f'Test slice with htypes')
htype = 'open'
slc = test_hp.slice(htypes=htype)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, test_hp.shares)
self.assertEqual(slc.htypes, ['open'])
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp['open']))
htype = 'open, close'
slc = test_hp.slice(htypes=htype)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, test_hp.shares)
self.assertEqual(slc.htypes, ['open', 'close'])
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp['open, close']))
# test that slicing "open, close" is NOT equal to slicing "close, open"
self.assertFalse(np.allclose(slc.values, test_hp['close, open']))
print(f'Test slicing with both htypes and shares')
share = '000103, 000101'
htype = 'high, low, close'
slc = test_hp.slice(shares=share, htypes=htype)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, ['000103', '000101'])
self.assertEqual(slc.htypes, ['high', 'low', 'close'])
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp['high, low, close', '000103, 000101']))
print(f'Test Error cases')
# duplicated input
htype = 'open, close, open'
self.assertRaises(AssertionError, test_hp.slice, htypes=htype)
def test_relabel(self):
new_shares_list = ['000001', '000002', '000003', '000004', '000005']
new_shares_str = '000001, 000002, 000003, 000004, 000005'
new_htypes_list = ['close', 'volume', 'value', 'exchange']
new_htypes_str = 'close, volume, value, exchange'
temp_hp = self.hp.copy()
temp_hp.re_label(shares=new_shares_list)
print(temp_hp.info())
print(temp_hp.htypes)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.htypes, temp_hp.htypes)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.shares, new_shares_list)
temp_hp = self.hp.copy()
temp_hp.re_label(shares=new_shares_str)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.htypes, temp_hp.htypes)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.shares, new_shares_list)
temp_hp = self.hp.copy()
temp_hp.re_label(htypes=new_htypes_list)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.shares, temp_hp.shares)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.htypes, new_htypes_list)
temp_hp = self.hp.copy()
temp_hp.re_label(htypes=new_htypes_str)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.shares, temp_hp.shares)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.htypes, new_htypes_list)
print(f'test errors raising')
temp_hp = self.hp.copy()
self.assertRaises(AssertionError, temp_hp.re_label, htypes=new_shares_str)
self.assertRaises(TypeError, temp_hp.re_label, htypes=123)
self.assertRaises(AssertionError, temp_hp.re_label, htypes='wrong input!')
def test_csv_to_hp(self):
pass
def test_hdf_to_hp(self):
pass
def test_hp_join(self):
# TODO: this test needs to be strengthened: confirm with concrete examples that
# TODO: hp_join produces correct results, especially for different shares, htypes and
# TODO: hdates, and whether they are combined correctly when their orders differ
print(f'join two simple HistoryPanels with same shares')
temp_hp = self.hp.join(self.hp2, same_shares=True)
self.assertIsInstance(temp_hp, qt.HistoryPanel)
def test_df_to_hp(self):
print(f'test converting DataFrame to HistoryPanel')
data = np.random.randint(10, size=(10, 5))
df1 = pd.DataFrame(data)
df2 = pd.DataFrame(data, columns=str_to_list(self.shares))
df3 = pd.DataFrame(data[:, 0:4])
df4 = pd.DataFrame(data[:, 0:4], columns=str_to_list(self.htypes))
hp = qt.dataframe_to_hp(df1, htypes='close')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, [0, 1, 2, 3, 4])
self.assertEqual(hp.htypes, ['close'])
self.assertEqual(hp.hdates, [pd.Timestamp('1970-01-01 00:00:00'),
pd.Timestamp('1970-01-01 00:00:00.000000001'),
pd.Timestamp('1970-01-01 00:00:00.000000002'),
pd.Timestamp('1970-01-01 00:00:00.000000003'),
pd.Timestamp('1970-01-01 00:00:00.000000004'),
pd.Timestamp('1970-01-01 00:00:00.000000005'),
pd.Timestamp('1970-01-01 00:00:00.000000006'),
pd.Timestamp('1970-01-01 00:00:00.000000007'),
pd.Timestamp('1970-01-01 00:00:00.000000008'),
pd.Timestamp('1970-01-01 00:00:00.000000009')])
hp = qt.dataframe_to_hp(df2, shares=self.shares, htypes='close')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, str_to_list(self.shares))
self.assertEqual(hp.htypes, ['close'])
hp = qt.dataframe_to_hp(df3, shares='000100', column_type='htypes')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, ['000100'])
self.assertEqual(hp.htypes, [0, 1, 2, 3])
hp = qt.dataframe_to_hp(df4, shares='000100', htypes=self.htypes, column_type='htypes')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, ['000100'])
self.assertEqual(hp.htypes, str_to_list(self.htypes))
hp.info()
self.assertRaises(KeyError, qt.dataframe_to_hp, df1)
def test_to_dataframe(self):
""" 测试HistoryPanel对象的to_dataframe方法
"""
print(f'START TEST == test_to_dataframe')
print(f'test converting test hp to dataframe with share == "000102":')
df_test = self.hp.to_dataframe(share='000102')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.htypes), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp[:, '000102'], values))
print(f'test DataFrame conversion with share == "000100"')
df_test = self.hp.to_dataframe(share='000100')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.htypes), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp[:, '000100'], values))
print(f'test DataFrame conversion error: type incorrect')
self.assertRaises(AssertionError, self.hp.to_dataframe, share=3.0)
print(f'test DataFrame error raising with share not found error')
self.assertRaises(KeyError, self.hp.to_dataframe, share='000300')
print(f'test DataFrame conversion with htype == "close"')
df_test = self.hp.to_dataframe(htype='close')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp['close'].T, values))
print(f'test DataFrame conversion with htype == "high"')
df_test = self.hp.to_dataframe(htype='high')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp['high'].T, values))
print(f'test DataFrame conversion with htype == "high" and dropna')
v = self.hp.values.astype('float')
v[:, 3, :] = np.nan
v[:, 4, :] = np.inf
test_hp = qt.HistoryPanel(v, levels=self.shares, columns=self.htypes, rows=self.index)
df_test = test_hp.to_dataframe(htype='high', dropna=True)
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates[:3]) + list(self.hp.hdates[4:]), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
target_values = test_hp['high'].T
target_values = target_values[np.where(~np.isnan(target_values))].reshape(9, 5)
self.assertTrue(np.allclose(target_values, values))
print(f'test DataFrame conversion with htype == "high", dropna and treat infs as na')
v = self.hp.values.astype('float')
v[:, 3, :] = np.nan
v[:, 4, :] = np.inf
test_hp = qt.HistoryPanel(v, levels=self.shares, columns=self.htypes, rows=self.index)
df_test = test_hp.to_dataframe(htype='high', dropna=True, inf_as_na=True)
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates[:3]) + list(self.hp.hdates[5:]), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
target_values = test_hp['high'].T
target_values = target_values[np.where(~np.isnan(target_values) & ~np.isinf(target_values))].reshape(8, 5)
self.assertTrue(np.allclose(target_values, values))
print(f'test DataFrame conversion error: type incorrect')
self.assertRaises(AssertionError, self.hp.to_dataframe, htype=pd.DataFrame())
print(f'test DataFrame error raising with share not found error')
self.assertRaises(KeyError, self.hp.to_dataframe, htype='non_type')
print(f'Raises ValueError when both or none parameter is given')
self.assertRaises(KeyError, self.hp.to_dataframe)
self.assertRaises(KeyError, self.hp.to_dataframe, share='000100', htype='close')
def test_to_df_dict(self):
"""测试HistoryPanel公有方法to_df_dict"""
print('test convert history panel slice by share')
df_dict = self.hp.to_df_dict('share')
self.assertEqual(self.hp.shares, list(df_dict.keys()))
df_dict = self.hp.to_df_dict()
self.assertEqual(self.hp.shares, list(df_dict.keys()))
print('test convert historypanel slice by htype ')
df_dict = self.hp.to_df_dict('htype')
self.assertEqual(self.hp.htypes, list(df_dict.keys()))
print('test raise assertion error')
self.assertRaises(AssertionError, self.hp.to_df_dict, by='random text')
self.assertRaises(AssertionError, self.hp.to_df_dict, by=3)
print('test empty hp')
df_dict = qt.HistoryPanel().to_df_dict('share')
self.assertEqual(df_dict, {})
def test_stack_dataframes(self):
print('test stack dataframes in a list')
df1 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [2, 3, 4, 5], 'c': [3, 4, 5, 6]})
df1.index = ['20200101', '20200102', '20200103', '20200104']
df2 = pd.DataFrame({'b': [4, 3, 2, 1], 'd': [1, 1, 1, 1], 'c': [6, 5, 4, 3]})
df2.index = ['20200101', '20200102', '20200104', '20200105']
df3 = pd.DataFrame({'a': [6, 6, 6, 6], 'd': [4, 4, 4, 4], 'b': [2, 4, 6, 8]})
df3.index = ['20200101', '20200102', '20200103', '20200106']
values1 = np.array([[[1., 2., 3., np.nan],
[2., 3., 4., np.nan],
[3., 4., 5., np.nan],
[4., 5., 6., np.nan],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan]],
[[np.nan, 4., 6., 1.],
[np.nan, 3., 5., 1.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 2., 4., 1.],
[np.nan, 1., 3., 1.],
[np.nan, np.nan, np.nan, np.nan]],
[[6., 2., np.nan, 4.],
[6., 4., np.nan, 4.],
[6., 6., np.nan, 4.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[6., 8., np.nan, 4.]]])
values2 = np.array([[[1., np.nan, 6.],
[2., np.nan, 6.],
[3., np.nan, 6.],
[4., np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 6.]],
[[2., 4., 2.],
[3., 3., 4.],
[4., np.nan, 6.],
[5., 2., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 8.]],
[[3., 6., np.nan],
[4., 5., np.nan],
[5., np.nan, np.nan],
[6., 4., np.nan],
[np.nan, 3., np.nan],
[np.nan, np.nan, np.nan]],
[[np.nan, 1., 4.],
[np.nan, 1., 4.],
[np.nan, np.nan, 4.],
[np.nan, 1., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 4.]]])
print(df1.rename(index=pd.to_datetime))
print(df2.rename(index=pd.to_datetime))
print(df3.rename(index=pd.to_datetime))
hp1 = stack_dataframes([df1, df2, df3], stack_along='shares',
shares=['000100', '000200', '000300'])
hp2 = stack_dataframes([df1, df2, df3], stack_along='shares',
shares='000100, 000300, 000200')
print('hp1 is:\n', hp1)
print('hp2 is:\n', hp2)
self.assertEqual(hp1.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp1.shares, ['000100', '000200', '000300'])
self.assertTrue(np.allclose(hp1.values, values1, equal_nan=True))
self.assertEqual(hp2.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp2.shares, ['000100', '000300', '000200'])
self.assertTrue(np.allclose(hp2.values, values1, equal_nan=True))
hp3 = stack_dataframes([df1, df2, df3], stack_along='htypes',
htypes=['close', 'high', 'low'])
hp4 = stack_dataframes([df1, df2, df3], stack_along='htypes',
htypes='open, close, high')
print('hp3 is:\n', hp3.values)
print('hp4 is:\n', hp4.values)
self.assertEqual(hp3.htypes, ['close', 'high', 'low'])
self.assertEqual(hp3.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp3.values, values2, equal_nan=True))
self.assertEqual(hp4.htypes, ['open', 'close', 'high'])
self.assertEqual(hp4.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp4.values, values2, equal_nan=True))
print('test stack dataframes in a dict')
df1 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [2, 3, 4, 5], 'c': [3, 4, 5, 6]})
df1.index = ['20200101', '20200102', '20200103', '20200104']
df2 = pd.DataFrame({'b': [4, 3, 2, 1], 'd': [1, 1, 1, 1], 'c': [6, 5, 4, 3]})
df2.index = ['20200101', '20200102', '20200104', '20200105']
df3 = pd.DataFrame({'a': [6, 6, 6, 6], 'd': [4, 4, 4, 4], 'b': [2, 4, 6, 8]})
df3.index = ['20200101', '20200102', '20200103', '20200106']
values1 = np.array([[[1., 2., 3., np.nan],
[2., 3., 4., np.nan],
[3., 4., 5., np.nan],
[4., 5., 6., np.nan],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan]],
[[np.nan, 4., 6., 1.],
[np.nan, 3., 5., 1.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 2., 4., 1.],
[np.nan, 1., 3., 1.],
[np.nan, np.nan, np.nan, np.nan]],
[[6., 2., np.nan, 4.],
[6., 4., np.nan, 4.],
[6., 6., np.nan, 4.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[6., 8., np.nan, 4.]]])
values2 = np.array([[[1., np.nan, 6.],
[2., np.nan, 6.],
[3., np.nan, 6.],
[4., np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 6.]],
[[2., 4., 2.],
[3., 3., 4.],
[4., np.nan, 6.],
[5., 2., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 8.]],
[[3., 6., np.nan],
[4., 5., np.nan],
[5., np.nan, np.nan],
[6., 4., np.nan],
[np.nan, 3., np.nan],
[np.nan, np.nan, np.nan]],
[[np.nan, 1., 4.],
[np.nan, 1., 4.],
[np.nan, np.nan, 4.],
[np.nan, 1., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 4.]]])
print(df1.rename(index=pd.to_datetime))
print(df2.rename(index=pd.to_datetime))
print(df3.rename(index=pd.to_datetime))
hp1 = stack_dataframes(dfs={'000001.SZ': df1, '000002.SZ': df2, '000003.SZ': df3},
stack_along='shares')
hp2 = stack_dataframes(dfs={'000001.SZ': df1, '000002.SZ': df2, '000003.SZ': df3},
stack_along='shares',
shares='000100, 000300, 000200')
print('hp1 is:\n', hp1)
print('hp2 is:\n', hp2)
self.assertEqual(hp1.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp1.shares, ['000001.SZ', '000002.SZ', '000003.SZ'])
self.assertTrue(np.allclose(hp1.values, values1, equal_nan=True))
self.assertEqual(hp2.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp2.shares, ['000100', '000300', '000200'])
self.assertTrue(np.allclose(hp2.values, values1, equal_nan=True))
hp3 = stack_dataframes(dfs={'close': df1, 'high': df2, 'low': df3},
stack_along='htypes')
hp4 = stack_dataframes(dfs={'close': df1, 'low': df2, 'high': df3},
stack_along='htypes',
htypes='open, close, high')
print('hp3 is:\n', hp3.values)
print('hp4 is:\n', hp4.values)
self.assertEqual(hp3.htypes, ['close', 'high', 'low'])
self.assertEqual(hp3.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp3.values, values2, equal_nan=True))
self.assertEqual(hp4.htypes, ['open', 'close', 'high'])
self.assertEqual(hp4.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp4.values, values2, equal_nan=True))
def test_to_csv(self):
pass
def test_to_hdf(self):
pass
def test_fill_na(self):
"""测试填充无效值"""
print(self.hp)
new_values = self.hp.values.astype(float)
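# the fancy indexing below marks four scattered (level, row, column) cells as NaN,
# so fillna can be checked on isolated gaps rather than whole rows or columns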
new_values[[0, 1, 3, 2], [1, 3, 0, 2], [1, 3, 2, 2]] = np.nan
print(new_values)
temp_hp = qt.HistoryPanel(values=new_values, levels=self.hp.levels, rows=self.hp.rows, columns=self.hp.columns)
self.assertTrue(np.allclose(temp_hp.values[[0, 1, 3, 2], [1, 3, 0, 2], [1, 3, 2, 2]], np.nan, equal_nan=True))
temp_hp.fillna(2.3)
filled_values = new_values.copy()
filled_values[[0, 1, 3, 2], [1, 3, 0, 2], [1, 3, 2, 2]] = 2.3
self.assertTrue(np.allclose(temp_hp.values,
filled_values, equal_nan=True))
def test_fill_inf(self):
"""测试填充无限值"""
def test_get_history_panel(self):
# TODO: implement this test case
# test get only one line of data
pass
def test_get_price_type_raw_data(self):
shares = '000039.SZ, 600748.SH, 000040.SZ'
start = '20200101'
end = '20200131'
htypes = 'open, high, low, close'
target_price_000039 = [[9.45, 9.49, 9.12, 9.17],
[9.46, 9.56, 9.4, 9.5],
[9.7, 9.76, 9.5, 9.51],
[9.7, 9.75, 9.7, 9.72],
[9.73, 9.77, 9.7, 9.73],
[9.83, 9.85, 9.71, 9.72],
[9.85, 9.85, 9.75, 9.79],
[9.96, 9.96, 9.83, 9.86],
[9.87, 9.94, 9.77, 9.93],
[9.82, 9.9, 9.76, 9.87],
[9.8, 9.85, 9.77, 9.82],
[9.84, 9.86, 9.71, 9.72],
[9.83, 9.93, 9.81, 9.86],
[9.7, 9.87, 9.7, 9.82],
[9.83, 9.86, 9.69, 9.79],
[9.8, 9.94, 9.8, 9.86]]
target_price_600748 = [[5.68, 5.68, 5.32, 5.37],
[5.62, 5.68, 5.46, 5.65],
[5.72, 5.72, 5.61, 5.62],
[5.76, 5.77, 5.6, 5.73],
[5.78, 5.84, 5.73, 5.75],
[5.89, 5.91, 5.76, 5.77],
[6.03, 6.04, 5.87, 5.89],
[5.94, 6.07, 5.94, 6.02],
[5.96, 5.98, 5.88, 5.97],
[6.04, 6.06, 5.95, 5.96],
[5.98, 6.04, 5.96, 6.03],
[6.1, 6.11, 5.89, 5.94],
[6.02, 6.12, 6., 6.1],
[5.96, 6.05, 5.88, 6.01],
[6.03, 6.03, 5.95, 5.99],
[6.02, 6.12, 5.99, 5.99]]
target_price_000040 = [[3.63, 3.83, 3.63, 3.65],
[3.99, 4.07, 3.97, 4.03],
[4.1, 4.11, 3.93, 3.95],
[4.12, 4.13, 4.06, 4.11],
[4.13, 4.19, 4.07, 4.13],
[4.27, 4.28, 4.11, 4.12],
[4.37, 4.38, 4.25, 4.29],
[4.34, 4.5, 4.32, 4.41],
[4.28, 4.35, 4.2, 4.34],
[4.41, 4.43, 4.29, 4.31],
[4.42, 4.45, 4.36, 4.41],
[4.51, 4.56, 4.33, 4.35],
[4.35, 4.55, 4.31, 4.55],
[4.3, 4.41, 4.22, 4.36],
[4.27, 4.44, 4.23, 4.34],
[4.23, 4.27, 4.18, 4.25]]
print(f'test get price type raw data with single thread')
df_list = get_price_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, freq='d')
self.assertIsInstance(df_list, dict)
self.assertEqual(len(df_list), 3)
self.assertTrue(np.allclose(df_list['000039.SZ'].values, np.array(target_price_000039)))
self.assertTrue(np.allclose(df_list['600748.SH'].values, np.array(target_price_600748)))
self.assertTrue(np.allclose(df_list['000040.SZ'].values, np.array(target_price_000040)))
print(f'in get financial report type raw data, got DataFrames: \n"000039.SZ":\n'
f'{df_list["000039.SZ"]}\n"600748.SH":\n'
f'{df_list["600748.SH"]}\n"000040.SZ":\n{df_list["000040.SZ"]}')
print(f'test get price type raw data with multi threads')
df_list = get_price_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, freq='d', parallel=10)
self.assertIsInstance(df_list, dict)
self.assertEqual(len(df_list), 3)
self.assertTrue(np.allclose(df_list['000039.SZ'].values, np.array(target_price_000039)))
self.assertTrue(np.allclose(df_list['600748.SH'].values, np.array(target_price_600748)))
self.assertTrue(np.allclose(df_list['000040.SZ'].values, np.array(target_price_000040)))
print(f'in get financial report type raw data, got DataFrames: \n"000039.SZ":\n'
f'{df_list["000039.SZ"]}\n"600748.SH":\n'
f'{df_list["600748.SH"]}\n"000040.SZ":\n{df_list["000040.SZ"]}')
def test_get_financial_report_type_raw_data(self):
shares = '000039.SZ, 600748.SH, 000040.SZ'
start = '20160101'
end = '20201231'
htypes = 'eps,basic_eps,diluted_eps,total_revenue,revenue,total_share,' \
'cap_rese,undistr_porfit,surplus_rese,net_profit'
target_eps_000039 = [[1.41],
[0.1398],
[-0.0841],
[-0.1929],
[0.37],
[0.1357],
[0.1618],
[0.1191],
[1.11],
[0.759],
[0.3061],
[0.1409],
[0.81],
[0.4187],
[0.2554],
[0.1624],
[0.14],
[-0.0898],
[-0.1444],
[0.1291]]
target_eps_600748 = [[0.41],
[0.22],
[0.22],
[0.09],
[0.42],
[0.23],
[0.22],
[0.09],
[0.36],
[0.16],
[0.15],
[0.07],
[0.47],
[0.19],
[0.12],
[0.07],
[0.32],
[0.22],
[0.14],
[0.07]]
target_eps_000040 = [[-0.6866],
[-0.134],
[-0.189],
[-0.036],
[-0.6435],
[0.05],
[0.062],
[0.0125],
[0.8282],
[1.05],
[0.985],
[0.811],
[0.41],
[0.242],
[0.113],
[0.027],
[0.19],
[0.17],
[0.17],
[0.064]]
target_basic_eps_000039 = [[1.3980000e-01, 1.3980000e-01, 6.3591954e+10, 6.3591954e+10],
[-8.4100000e-02, -8.4100000e-02, 3.9431807e+10, 3.9431807e+10],
[-1.9290000e-01, -1.9290000e-01, 1.5852177e+10, 1.5852177e+10],
[3.7000000e-01, 3.7000000e-01, 8.5815341e+10, 8.5815341e+10],
[1.3570000e-01, 1.3430000e-01, 6.1660271e+10, 6.1660271e+10],
[1.6180000e-01, 1.6040000e-01, 4.2717729e+10, 4.2717729e+10],
[1.1910000e-01, 1.1900000e-01, 1.9099547e+10, 1.9099547e+10],
[1.1100000e+00, 1.1000000e+00, 9.3497622e+10, 9.3497622e+10],
[7.5900000e-01, 7.5610000e-01, 6.6906147e+10, 6.6906147e+10],
[3.0610000e-01, 3.0380000e-01, 4.3560398e+10, 4.3560398e+10],
[1.4090000e-01, 1.4050000e-01, 1.9253639e+10, 1.9253639e+10],
[8.1000000e-01, 8.1000000e-01, 7.6299930e+10, 7.6299930e+10],
[4.1870000e-01, 4.1710000e-01, 5.3962706e+10, 5.3962706e+10],
[2.5540000e-01, 2.5440000e-01, 3.3387152e+10, 3.3387152e+10],
[1.6240000e-01, 1.6200000e-01, 1.4675987e+10, 1.4675987e+10],
[1.4000000e-01, 1.4000000e-01, 5.1111652e+10, 5.1111652e+10],
[-8.9800000e-02, -8.9800000e-02, 3.4982614e+10, 3.4982614e+10],
[-1.4440000e-01, -1.4440000e-01, 2.3542843e+10, 2.3542843e+10],
[1.2910000e-01, 1.2860000e-01, 1.0412416e+10, 1.0412416e+10],
[7.2000000e-01, 7.1000000e-01, 5.8685804e+10, 5.8685804e+10]]
target_basic_eps_600748 = [[2.20000000e-01, 2.20000000e-01, 5.29423397e+09, 5.29423397e+09],
[2.20000000e-01, 2.20000000e-01, 4.49275653e+09, 4.49275653e+09],
[9.00000000e-02, 9.00000000e-02, 1.59067065e+09, 1.59067065e+09],
[4.20000000e-01, 4.20000000e-01, 8.86555586e+09, 8.86555586e+09],
[2.30000000e-01, 2.30000000e-01, 5.44850143e+09, 5.44850143e+09],
[2.20000000e-01, 2.20000000e-01, 4.34978927e+09, 4.34978927e+09],
[9.00000000e-02, 9.00000000e-02, 1.73793793e+09, 1.73793793e+09],
[3.60000000e-01, 3.60000000e-01, 8.66375241e+09, 8.66375241e+09],
[1.60000000e-01, 1.60000000e-01, 4.72875116e+09, 4.72875116e+09],
[1.50000000e-01, 1.50000000e-01, 3.76879016e+09, 3.76879016e+09],
[7.00000000e-02, 7.00000000e-02, 1.31785454e+09, 1.31785454e+09],
[4.70000000e-01, 4.70000000e-01, 7.23391685e+09, 7.23391685e+09],
[1.90000000e-01, 1.90000000e-01, 3.76072215e+09, 3.76072215e+09],
[1.20000000e-01, 1.20000000e-01, 2.35845364e+09, 2.35845364e+09],
[7.00000000e-02, 7.00000000e-02, 1.03831865e+09, 1.03831865e+09],
[3.20000000e-01, 3.20000000e-01, 6.48880919e+09, 6.48880919e+09],
[2.20000000e-01, 2.20000000e-01, 3.72209142e+09, 3.72209142e+09],
[1.40000000e-01, 1.40000000e-01, 2.22563924e+09, 2.22563924e+09],
[7.00000000e-02, 7.00000000e-02, 8.96647052e+08, 8.96647052e+08],
[4.80000000e-01, 4.80000000e-01, 6.61917508e+09, 6.61917508e+09]]
target_basic_eps_000040 = [[-1.34000000e-01, -1.34000000e-01, 2.50438755e+09, 2.50438755e+09],
[-1.89000000e-01, -1.89000000e-01, 1.32692347e+09, 1.32692347e+09],
[-3.60000000e-02, -3.60000000e-02, 5.59073338e+08, 5.59073338e+08],
[-6.43700000e-01, -6.43700000e-01, 6.80576162e+09, 6.80576162e+09],
[5.00000000e-02, 5.00000000e-02, 6.38891620e+09, 6.38891620e+09],
[6.20000000e-02, 6.20000000e-02, 5.23267082e+09, 5.23267082e+09],
[1.25000000e-02, 1.25000000e-02, 2.22420874e+09, 2.22420874e+09],
[8.30000000e-01, 8.30000000e-01, 8.67628947e+09, 8.67628947e+09],
[1.05000000e+00, 1.05000000e+00, 5.29431716e+09, 5.29431716e+09],
[9.85000000e-01, 9.85000000e-01, 3.56822382e+09, 3.56822382e+09],
[8.11000000e-01, 8.11000000e-01, 1.06613439e+09, 1.06613439e+09],
[4.10000000e-01, 4.10000000e-01, 8.13102532e+09, 8.13102532e+09],
[2.42000000e-01, 2.42000000e-01, 5.17971521e+09, 5.17971521e+09],
[1.13000000e-01, 1.13000000e-01, 3.21704120e+09, 3.21704120e+09],
[2.70000000e-02, 2.70000000e-02, 8.41966738e+08, 8.24272235e+08],
[1.90000000e-01, 1.90000000e-01, 3.77350171e+09, 3.77350171e+09],
[1.70000000e-01, 1.70000000e-01, 2.38643892e+09, 2.38643892e+09],
[1.70000000e-01, 1.70000000e-01, 1.29127117e+09, 1.29127117e+09],
[6.40000000e-02, 6.40000000e-02, 6.03256858e+08, 6.03256858e+08],
[1.30000000e-01, 1.30000000e-01, 1.66572918e+09, 1.66572918e+09]]
target_total_share_000039 = [[3.5950140e+09, 4.8005360e+09, 2.1573660e+10, 3.5823430e+09],
[3.5860750e+09, 4.8402300e+09, 2.0750827e+10, 3.5823430e+09],
[3.5860750e+09, 4.9053550e+09, 2.0791307e+10, 3.5823430e+09],
[3.5845040e+09, 4.8813110e+09, 2.1482857e+10, 3.5823430e+09],
[3.5831490e+09, 4.9764250e+09, 2.0926816e+10, 3.2825850e+09],
[3.5825310e+09, 4.8501270e+09, 2.1020418e+10, 3.2825850e+09],
[2.9851110e+09, 5.4241420e+09, 2.2438350e+10, 3.2825850e+09],
[2.9849890e+09, 4.1284000e+09, 2.2082769e+10, 3.2825850e+09],
[2.9849610e+09, 4.0838010e+09, 2.1045994e+10, 3.2815350e+09],
[2.9849560e+09, 4.2491510e+09, 1.9694345e+10, 3.2815350e+09],
[2.9846970e+09, 4.2351600e+09, 2.0016361e+10, 3.2815350e+09],
[2.9828890e+09, 4.2096630e+09, 1.9734494e+10, 3.2815350e+09],
[2.9813960e+09, 3.4564240e+09, 1.8562738e+10, 3.2793790e+09],
[2.9803530e+09, 3.0759650e+09, 1.8076208e+10, 3.2793790e+09],
[2.9792680e+09, 3.1376690e+09, 1.7994776e+10, 3.2793790e+09],
[2.9785770e+09, 3.1265850e+09, 1.7495053e+10, 3.2793790e+09],
[2.9783640e+09, 3.1343850e+09, 1.6740840e+10, 3.2035780e+09],
[2.9783590e+09, 3.1273880e+09, 1.6578389e+10, 3.2035780e+09],
[2.9782780e+09, 3.1169280e+09, 1.8047639e+10, 3.2035780e+09],
[2.9778200e+09, 3.1818630e+09, 1.7663145e+10, 3.2035780e+09]]
target_total_share_600748 = [[1.84456289e+09, 2.60058426e+09, 5.72443733e+09, 4.58026529e+08],
[1.84456289e+09, 2.60058426e+09, 5.72096899e+09, 4.58026529e+08],
[1.84456289e+09, 2.60058426e+09, 5.65738237e+09, 4.58026529e+08],
[1.84456289e+09, 2.60058426e+09, 5.50257806e+09, 4.58026529e+08],
[1.84456289e+09, 2.59868164e+09, 5.16741523e+09, 4.44998882e+08],
[1.84456289e+09, 2.59684471e+09, 5.14677280e+09, 4.44998882e+08],
[1.84456289e+09, 2.59684471e+09, 4.94955591e+09, 4.44998882e+08],
[1.84456289e+09, 2.59684471e+09, 4.79001451e+09, 4.44998882e+08],
[1.84456289e+09, 3.11401684e+09, 4.46326988e+09, 4.01064256e+08],
[1.84456289e+09, 3.11596723e+09, 4.45419136e+09, 4.01064256e+08],
[1.84456289e+09, 3.11596723e+09, 4.39652948e+09, 4.01064256e+08],
[1.84456289e+09, 3.18007783e+09, 4.26608403e+09, 4.01064256e+08],
[1.84456289e+09, 3.10935622e+09, 3.78417688e+09, 3.65651701e+08],
[1.84456289e+09, 3.10935622e+09, 3.65806574e+09, 3.65651701e+08],
[1.84456289e+09, 3.10935622e+09, 3.62063090e+09, 3.65651701e+08],
[1.84456289e+09, 3.10935622e+09, 3.50063915e+09, 3.65651701e+08],
[1.41889453e+09, 3.55940850e+09, 3.22272993e+09, 3.62124939e+08],
[1.41889453e+09, 3.56129650e+09, 3.11477476e+09, 3.62124939e+08],
[1.41889453e+09, 3.59632888e+09, 3.06836903e+09, 3.62124939e+08],
[1.08337087e+09, 3.37400726e+07, 3.00918704e+09, 3.62124939e+08]]
target_total_share_000040 = [[1.48687387e+09, 1.06757900e+10, 8.31900755e+08, 2.16091994e+08],
[1.48687387e+09, 1.06757900e+10, 7.50177302e+08, 2.16091994e+08],
[1.48687387e+09, 1.06757899e+10, 9.90255974e+08, 2.16123282e+08],
[1.48687387e+09, 1.06757899e+10, 1.03109866e+09, 2.16091994e+08],
[1.48687387e+09, 1.06757910e+10, 2.07704745e+09, 2.16123282e+08],
[1.48687387e+09, 1.06757910e+10, 2.09608665e+09, 2.16123282e+08],
[1.48687387e+09, 1.06803833e+10, 2.13354083e+09, 2.16123282e+08],
[1.48687387e+09, 1.06804090e+10, 2.11489364e+09, 2.16123282e+08],
[1.33717327e+09, 8.87361727e+09, 2.42939924e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 2.34220254e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 2.16390368e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 1.07961915e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 8.58866066e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 6.87024393e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 5.71554565e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 5.54241222e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361726e+09, 5.10059576e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361726e+09, 4.59351639e+08, 1.88489589e+08],
[4.69593364e+08, 2.78355875e+08, 4.13430814e+08, 1.88489589e+08],
[4.69593364e+08, 2.74235459e+08, 3.83557678e+08, 1.88489589e+08]]
target_net_profit_000039 = [[np.nan],
[2.422180e+08],
[np.nan],
[2.510113e+09],
[np.nan],
[1.102220e+09],
[np.nan],
[4.068455e+09],
[np.nan],
[1.315957e+09],
[np.nan],
[3.158415e+09],
[np.nan],
[1.066509e+09],
[np.nan],
[7.349830e+08],
[np.nan],
[-5.411600e+08],
[np.nan],
[2.271961e+09]]
target_net_profit_600748 = [[np.nan],
[4.54341757e+08],
[np.nan],
[9.14476670e+08],
[np.nan],
[5.25360283e+08],
[np.nan],
[9.24502415e+08],
[np.nan],
[4.66560302e+08],
[np.nan],
[9.15265285e+08],
[np.nan],
[2.14639674e+08],
[np.nan],
[7.45093049e+08],
[np.nan],
[2.10967312e+08],
[np.nan],
[6.04572711e+08]]
target_net_profit_000040 = [[np.nan],
[-2.82458846e+08],
[np.nan],
[-9.57130872e+08],
[np.nan],
[9.22114527e+07],
[np.nan],
[1.12643819e+09],
[np.nan],
[1.31715269e+09],
[np.nan],
[5.39940093e+08],
[np.nan],
[1.51440838e+08],
[np.nan],
[1.75339071e+08],
[np.nan],
[8.04740415e+07],
[np.nan],
[6.20445815e+07]]
print('test get financial data, in multi thread mode')
df_list = get_financial_report_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, parallel=4)
self.assertIsInstance(df_list, tuple)
self.assertEqual(len(df_list), 4)
self.assertEqual(len(df_list[0]), 3)
self.assertEqual(len(df_list[1]), 3)
self.assertEqual(len(df_list[2]), 3)
self.assertEqual(len(df_list[3]), 3)
# check that all returned items are DataFrames
self.assertTrue(all(isinstance(item, pd.DataFrame) for subdict in df_list for item in subdict.values()))
# check whether any of the returned data is empty
print(all(item.empty for subdict in df_list for item in subdict.values()))
# check that each group of data is correct and consistently ordered; groups that came back empty are ignored
if df_list[0]['000039.SZ'].empty:
print(f'income data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000039.SZ'].values, target_basic_eps_000039))
if df_list[0]['600748.SH'].empty:
print(f'income data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['600748.SH'].values, target_basic_eps_600748))
if df_list[0]['000040.SZ'].empty:
print(f'income data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000040.SZ'].values, target_basic_eps_000040))
if df_list[1]['000039.SZ'].empty:
print(f'indicator data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000039.SZ'].values, target_eps_000039))
if df_list[1]['600748.SH'].empty:
print(f'indicator data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['600748.SH'].values, target_eps_600748))
if df_list[1]['000040.SZ'].empty:
print(f'indicator data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000040.SZ'].values, target_eps_000040))
if df_list[2]['000039.SZ'].empty:
print(f'balance data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000039.SZ'].values, target_total_share_000039))
if df_list[2]['600748.SH'].empty:
print(f'balance data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['600748.SH'].values, target_total_share_600748))
if df_list[2]['000040.SZ'].empty:
print(f'balance data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000040.SZ'].values, target_total_share_000040))
if df_list[3]['000039.SZ'].empty:
print(f'cash flow data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000039.SZ'].values, target_net_profit_000039, equal_nan=True))
if df_list[3]['600748.SH'].empty:
print(f'cash flow data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['600748.SH'].values, target_net_profit_600748, equal_nan=True))
if df_list[3]['000040.SZ'].empty:
print(f'cash flow data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000040.SZ'].values, target_net_profit_000040, equal_nan=True))
print('test get financial data, in single thread mode')
df_list = get_financial_report_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, parallel=0)
self.assertIsInstance(df_list, tuple)
self.assertEqual(len(df_list), 4)
self.assertEqual(len(df_list[0]), 3)
self.assertEqual(len(df_list[1]), 3)
self.assertEqual(len(df_list[2]), 3)
self.assertEqual(len(df_list[3]), 3)
# check that all returned items are DataFrames
self.assertTrue(all(isinstance(item, pd.DataFrame) for subdict in df_list for item in subdict.values()))
# check whether any data is empty; empty data may be returned due to network issues
self.assertFalse(all(item.empty for subdict in df_list for item in subdict.values()))
# check that each group of data is correct and consistently ordered; groups that came back empty are ignored
if df_list[0]['000039.SZ'].empty:
print(f'income data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000039.SZ'].values, target_basic_eps_000039))
if df_list[0]['600748.SH'].empty:
print(f'income data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['600748.SH'].values, target_basic_eps_600748))
if df_list[0]['000040.SZ'].empty:
print(f'income data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000040.SZ'].values, target_basic_eps_000040))
if df_list[1]['000039.SZ'].empty:
print(f'indicator data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000039.SZ'].values, target_eps_000039))
if df_list[1]['600748.SH'].empty:
print(f'indicator data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['600748.SH'].values, target_eps_600748))
if df_list[1]['000040.SZ'].empty:
print(f'indicator data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000040.SZ'].values, target_eps_000040))
if df_list[2]['000039.SZ'].empty:
print(f'balance data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000039.SZ'].values, target_total_share_000039))
if df_list[2]['600748.SH'].empty:
print(f'balance data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['600748.SH'].values, target_total_share_600748))
if df_list[2]['000040.SZ'].empty:
print(f'balance data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000040.SZ'].values, target_total_share_000040))
if df_list[3]['000039.SZ'].empty:
print(f'cash flow data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000039.SZ'].values, target_net_profit_000039, equal_nan=True))
if df_list[3]['600748.SH'].empty:
print(f'cash flow data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['600748.SH'].values, target_net_profit_600748, equal_nan=True))
if df_list[3]['000040.SZ'].empty:
print(f'cash flow data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000040.SZ'].values, target_net_profit_000040, equal_nan=True))
def test_get_composite_type_raw_data(self):
pass
class TestUtilityFuncs(unittest.TestCase):
def setUp(self):
pass
def test_time_string_format(self):
print('Testing qt.time_string_format() function:')
t = 3.14
self.assertEqual(time_str_format(t), '3s 140.0ms')
self.assertEqual(time_str_format(t, estimation=True), '3s ')
self.assertEqual(time_str_format(t, short_form=True), '3"140')
self.assertEqual(time_str_format(t, estimation=True, short_form=True), '3"')
t = 300.14
self.assertEqual(time_str_format(t), '5min 140.0ms')
self.assertEqual(time_str_format(t, estimation=True), '5min ')
self.assertEqual(time_str_format(t, short_form=True), "5'140")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "5'")
t = 7435.0014
self.assertEqual(time_str_format(t), '2hrs 3min 55s 1.4ms')
self.assertEqual(time_str_format(t, estimation=True), '2hrs ')
self.assertEqual(time_str_format(t, short_form=True), "2H3'55\"001")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "2H")
t = 88425.0509
self.assertEqual(time_str_format(t), '1days 33min 45s 50.9ms')
self.assertEqual(time_str_format(t, estimation=True), '1days ')
self.assertEqual(time_str_format(t, short_form=True), "1D33'45\"051")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "1D")
def test_str_to_list(self):
self.assertEqual(str_to_list('a,b,c,d,e'), ['a', 'b', 'c', 'd', 'e'])
self.assertEqual(str_to_list('a, b, c '), ['a', 'b', 'c'])
self.assertEqual(str_to_list('a, b: c', sep_char=':'), ['a,b', 'c'])
self.assertEqual(str_to_list('abc'), ['abc'])
self.assertEqual(str_to_list(''), [])
self.assertRaises(AssertionError, str_to_list, 123)
def test_list_or_slice(self):
str_dict = {'close': 0, 'open': 1, 'high': 2, 'low': 3}
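# list_or_slice resolves slices, labels, comma-separated label strings, 'a:b' label
# ranges, ints, int lists and boolean masks into integer positions (slices pass through)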
self.assertEqual(list_or_slice(slice(1, 2, 1), str_dict), slice(1, 2, 1))
self.assertEqual(list_or_slice('open', str_dict), [1])
self.assertEqual(list(list_or_slice('close, high, low', str_dict)), [0, 2, 3])
self.assertEqual(list(list_or_slice('close:high', str_dict)), [0, 1, 2])
self.assertEqual(list(list_or_slice(['open'], str_dict)), [1])
self.assertEqual(list(list_or_slice(['open', 'high'], str_dict)), [1, 2])
self.assertEqual(list(list_or_slice(0, str_dict)), [0])
self.assertEqual(list(list_or_slice([0, 2], str_dict)), [0, 2])
self.assertEqual(list(list_or_slice([True, False, True, False], str_dict)), [0, 2])
def test_labels_to_dict(self):
target_list = [0, 1, 10, 100]
target_dict = {'close': 0, 'open': 1, 'high': 2, 'low': 3}
target_dict2 = {'close': 0, 'open': 2, 'high': 1, 'low': 3}
self.assertEqual(labels_to_dict('close, open, high, low', target_list), target_dict)
self.assertEqual(labels_to_dict(['close', 'open', 'high', 'low'], target_list), target_dict)
self.assertEqual(labels_to_dict('close, high, open, low', target_list), target_dict2)
self.assertEqual(labels_to_dict(['close', 'high', 'open', 'low'], target_list), target_dict2)
def test_input_to_list(self):
""" test util function input_to_list()"""
self.assertEqual(input_to_list(5, 3), [5, 5, 5])
self.assertEqual(input_to_list(5, 3, 0), [5, 5, 5])
self.assertEqual(input_to_list([5], 3, 0), [5, 0, 0])
self.assertEqual(input_to_list([5, 4], 3, 0), [5, 4, 0])
def test_regulate_date_format(self):
self.assertEqual(regulate_date_format('2019/11/06'), '20191106')
self.assertEqual(regulate_date_format('2019-11-06'), '20191106')
self.assertEqual(regulate_date_format('20191106'), '20191106')
self.assertEqual(regulate_date_format('191106'), '20061119')
self.assertEqual(regulate_date_format('830522'), '19830522')
self.assertEqual(regulate_date_format(datetime.datetime(2010, 3, 15)), '20100315')
self.assertEqual(regulate_date_format(pd.Timestamp('2010.03.15')), '20100315')
self.assertRaises(ValueError, regulate_date_format, 'abc')
self.assertRaises(ValueError, regulate_date_format, '2019/13/43')
def test_list_to_str_format(self):
self.assertEqual(list_to_str_format(['close', 'open', 'high', 'low']),
'close,open,high,low')
self.assertEqual(list_to_str_format(['letters', ' ', '123 4', 123, ' kk l']),
'letters,,1234,kkl')
self.assertEqual(list_to_str_format('a string input'),
'a,string,input')
self.assertEqual(list_to_str_format('already,a,good,string'),
'already,a,good,string')
self.assertRaises(AssertionError, list_to_str_format, 123)
def test_is_trade_day(self):
"""test if the funcion maybe_trade_day() and is_market_trade_day() works properly
"""
date_trade = '20210401'
date_holiday = '20210102'
date_weekend = '20210424'
date_seems_trade_day = '20210217'
date_too_early = '19890601'
date_too_late = '20230105'
date_christmas = '20201225'
self.assertTrue(maybe_trade_day(date_trade))
self.assertFalse(maybe_trade_day(date_holiday))
self.assertFalse(maybe_trade_day(date_weekend))
self.assertTrue(maybe_trade_day(date_seems_trade_day))
self.assertTrue(maybe_trade_day(date_too_early))
self.assertTrue(maybe_trade_day(date_too_late))
self.assertTrue(maybe_trade_day(date_christmas))
self.assertTrue(is_market_trade_day(date_trade))
self.assertFalse(is_market_trade_day(date_holiday))
self.assertFalse(is_market_trade_day(date_weekend))
self.assertFalse(is_market_trade_day(date_seems_trade_day))
self.assertFalse(is_market_trade_day(date_too_early))
self.assertFalse(is_market_trade_day(date_too_late))
self.assertTrue(is_market_trade_day(date_christmas))
self.assertFalse(is_market_trade_day(date_christmas, exchange='XHKG'))
date_trade = pd.to_datetime('20210401')
date_holiday = pd.to_datetime('20210102')
date_weekend = pd.to_datetime('20210424')
self.assertTrue(maybe_trade_day(date_trade))
self.assertFalse(maybe_trade_day(date_holiday))
self.assertFalse(maybe_trade_day(date_weekend))
def test_weekday_name(self):
""" test util func weekday_name()"""
self.assertEqual(weekday_name(0), 'Monday')
self.assertEqual(weekday_name(1), 'Tuesday')
self.assertEqual(weekday_name(2), 'Wednesday')
self.assertEqual(weekday_name(3), 'Thursday')
self.assertEqual(weekday_name(4), 'Friday')
self.assertEqual(weekday_name(5), 'Saturday')
self.assertEqual(weekday_name(6), 'Sunday')
def test_list_truncate(self):
""" test util func list_truncate()"""
l = [1, 2, 3, 4, 5]
ls = list_truncate(l, 2)
self.assertEqual(ls[0], [1, 2])
self.assertEqual(ls[1], [3, 4])
self.assertEqual(ls[2], [5])
self.assertRaises(AssertionError, list_truncate, l, 0)
self.assertRaises(AssertionError, list_truncate, 12, 0)
self.assertRaises(AssertionError, list_truncate, 0, l)
def test_maybe_trade_day(self):
""" test util function maybe_trade_day()"""
self.assertTrue(maybe_trade_day('20220104'))
self.assertTrue(maybe_trade_day('2021-12-31'))
self.assertTrue(maybe_trade_day(pd.to_datetime('2020/03/06')))
self.assertFalse(maybe_trade_day('2020-01-01'))
self.assertFalse(maybe_trade_day('2020/10/06'))
self.assertRaises(TypeError, maybe_trade_day, 'aaa')
def test_prev_trade_day(self):
"""test the function prev_trade_day()
"""
date_trade = '20210401'
date_holiday = '20210102'
prev_holiday = pd.to_datetime(date_holiday) - pd.Timedelta(2, 'd')
date_weekend = '20210424'
prev_weekend = pd.to_datetime(date_weekend) - pd.Timedelta(1, 'd')
date_seems_trade_day = '20210217'
prev_seems_trade_day = '20210217'
date_too_early = '19890601'
date_too_late = '20230105'
date_christmas = '20201225'
self.assertEqual(pd.to_datetime(prev_trade_day(date_trade)),
pd.to_datetime(date_trade))
self.assertEqual(pd.to_datetime(prev_trade_day(date_holiday)),
pd.to_datetime(prev_holiday))
self.assertEqual(pd.to_datetime(prev_trade_day(date_weekend)),
pd.to_datetime(prev_weekend))
self.assertEqual(pd.to_datetime(prev_trade_day(date_seems_trade_day)),
pd.to_datetime(prev_seems_trade_day))
self.assertEqual(pd.to_datetime(prev_trade_day(date_too_early)),
pd.to_datetime(date_too_early))
self.assertEqual(pd.to_datetime(prev_trade_day(date_too_late)),
pd.to_datetime(date_too_late))
self.assertEqual(pd.to_datetime(prev_trade_day(date_christmas)),
pd.to_datetime(date_christmas))
def test_next_trade_day(self):
""" test the function next_trade_day()
"""
date_trade = '20210401'
date_holiday = '20210102'
next_holiday = pd.to_datetime(date_holiday) + pd.Timedelta(2, 'd')
date_weekend = '20210424'
next_weekend = pd.to_datetime(date_weekend) + pd.Timedelta(2, 'd')
date_seems_trade_day = '20210217'
next_seems_trade_day = '20210217'
date_too_early = '19890601'
date_too_late = '20230105'
date_christmas = '20201225'
self.assertEqual(pd.to_datetime(next_trade_day(date_trade)),
pd.to_datetime(date_trade))
self.assertEqual(pd.to_datetime(next_trade_day(date_holiday)),
pd.to_datetime(next_holiday))
self.assertEqual(pd.to_datetime(next_trade_day(date_weekend)),
pd.to_datetime(next_weekend))
self.assertEqual(pd.to_datetime(next_trade_day(date_seems_trade_day)),
pd.to_datetime(next_seems_trade_day))
self.assertEqual(pd.to_datetime(next_trade_day(date_too_early)),
pd.to_datetime(date_too_early))
self.assertEqual(pd.to_datetime(next_trade_day(date_too_late)),
pd.to_datetime(date_too_late))
self.assertEqual(pd.to_datetime(next_trade_day(date_christmas)),
pd.to_datetime(date_christmas))
def test_prev_market_trade_day(self):
""" test the function prev_market_trade_day()
"""
date_trade = '20210401'
date_holiday = '20210102'
prev_holiday = pd.to_datetime(date_holiday) - pd.Timedelta(2, 'd')
date_weekend = '20210424'
prev_weekend = pd.to_datetime(date_weekend) - pd.Timedelta(1, 'd')
date_seems_trade_day = '20210217'
prev_seems_trade_day = pd.to_datetime(date_seems_trade_day) - pd.Timedelta(7, 'd')
date_too_early = '19890601'
date_too_late = '20230105'
date_christmas = '20201225'
prev_christmas_xhkg = '20201224'
self.assertEqual(pd.to_datetime(prev_market_trade_day(date_trade)),
pd.to_datetime(date_trade))
self.assertEqual(pd.to_datetime(prev_market_trade_day(date_holiday)),
pd.to_datetime(prev_holiday))
self.assertEqual(pd.to_datetime(prev_market_trade_day(date_weekend)),
pd.to_datetime(prev_weekend))
self.assertEqual(pd.to_datetime(prev_market_trade_day(date_seems_trade_day)),
pd.to_datetime(prev_seems_trade_day))
self.assertEqual(pd.to_datetime(prev_market_trade_day(date_too_early)),
None)
self.assertEqual(pd.to_datetime(prev_market_trade_day(date_too_late)),
None)
self.assertEqual(pd.to_datetime(prev_market_trade_day(date_christmas, 'SSE')),
pd.to_datetime(date_christmas))
self.assertEqual(pd.to_datetime(prev_market_trade_day(date_christmas, 'XHKG')),
pd.to_datetime(prev_christmas_xhkg))
def test_next_market_trade_day(self):
""" test the function next_market_trade_day()
"""
date_trade = '20210401'
date_holiday = '20210102'
next_holiday = pd.to_datetime(date_holiday) + pd.Timedelta(2, 'd')
date_weekend = '20210424'
next_weekend = pd.to_datetime(date_weekend) + pd.Timedelta(2, 'd')
date_seems_trade_day = '20210217'
next_seems_trade_day =
|
pd.to_datetime(date_seems_trade_day)
|
pandas.to_datetime
|
from datetime import datetime, timedelta
import time
import requests
import json
from matplotlib.pylab import date2num
from matplotlib import pyplot as plt
import mpl_finance as mpf
from pandas import DataFrame
import talib as ta
import numpy as np
import sys
sys.path.append('..')
import DictCode as dc
plt.rcParams['font.family'] = 'sans-serif'  # use a sans-serif font family
plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei so that Chinese labels display correctly
def get_candles_data(url,contractSize):
print(url)
response = requests.get(url)
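# the endpoint is assumed to return a nested JSON array of candles; the raw text is
# flattened by string replacement and split into one comma-separated string per candle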
data_arr = response.text.replace("[[",'').replace("]]",'').replace("\"","").split("],[")
close = []
high = []
low = []
tradeTime = []
for item_str in reversed(data_arr):
item = item_str.split(",")
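# each candle item is assumed to be [timestamp, open, high, low, close, ...];
# prices are scaled by contractSize before plotting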
sdatetime_num = date2num(datetime.strptime(item[0].replace("T",' ').replace('.000Z',''),'%Y-%m-%d'))
# datas = (sdatetime_num,float(item[1]),float(item[2]),float(item[3]),float(item[4])) # prepare the data in the structure required by candlestick_ohlc
# quotes.append(datas)
tradeTime.append(sdatetime_num)
high.append(float(item[2])*contractSize)
low.append(float(item[3])*contractSize)
close.append(float(item[4])*contractSize)
dt_dict = {'tradeTime':tradeTime,
'high':high,
'low':low,
'close':close}
data_df =
|
DataFrame(dt_dict)
|
pandas.DataFrame
|
from unittest import TestCase
import pandas as pd
import numpy as np
import pandas_validator as pv
class DataFrameValidatorFixture(pv.DataFrameValidator):
"""Fixture for testing the validation of column type."""
integer_field = pv.IntegerColumnValidator('i')
float_field = pv.FloatColumnValidator('f')
class DataFrameValidatorTest(TestCase):
"""Testing the validation of column type."""
def setUp(self):
self.validator = DataFrameValidatorFixture()
def test_valid(self):
df = pd.DataFrame({'i': [0, 1], 'f': [0., 1.]})
self.assertTrue(self.validator.is_valid(df))
def test_invalid_when_given_integer_series_to_float_column_validator(self):
df = pd.DataFrame({'i': [0, 1], 'f': [0, 1]})
self.assertFalse(self.validator.is_valid(df))
class DataFrameValidatorFixtureWithSize(pv.DataFrameValidator):
"""Fixture for testing the validation of column and row number."""
row_num = 3
column_num = 2
class DataFrameValidatorSizeTest(TestCase):
"""Testing the validation of column and row number."""
def setUp(self):
self.validator = DataFrameValidatorFixtureWithSize()
def test_valid_when_matches_row_numbers(self):
df = pd.DataFrame({'x': [0, 1, 2], 'y': [1., 2., 3.]})
self.assertTrue(self.validator.is_valid(df))
def test_invalid_when_not_matches_row_numbers(self):
df = pd.DataFrame({'x': [0, 1], 'y': [1., 2.]})
self.assertFalse(self.validator.is_valid(df))
def test_invalid_when_not_matches_column_numbers(self):
df = pd.DataFrame({'x': [0, 1, 2], 'y': [1., 2., 3.], 'z': [1, 2, 3]})
self.assertFalse(self.validator.is_valid(df))
class DataFrameValidatorFixtureWithIndex(pv.DataFrameValidator):
"""Fixture for testing the validation of index validator."""
index = pv.IndexValidator(size=3, type=np.int64)
class DataFrameValidatorIndexTest(TestCase):
"""Testing the validation of index size and type."""
def setUp(self):
self.validator = DataFrameValidatorFixtureWithIndex()
def test_valid_when_matches_index_size_and_type(self):
df = pd.DataFrame([0, 1, 2])
self.assertTrue(self.validator.is_valid(df))
def test_invalid_when_not_matches_index_size(self):
df = pd.DataFrame([0, 1, 2, 3])
self.assertFalse(self.validator.is_valid(df))
def test_invalid_when_not_matches_index_type(self):
df = pd.DataFrame([0, 1, 2], index=['a', 'b', 'c'])
self.assertFalse(self.validator.is_valid(df))
class DataFrameValidatorFixtureWithColumns(pv.DataFrameValidator):
"""Fixture for testing the validation of columns validator."""
columns = pv.ColumnsValidator(size=2, type=np.object_)
class DataFrameValidatorColumnsIndexTest(TestCase):
"""Testing the validation of columns size and type"""
def setUp(self):
self.validator = DataFrameValidatorFixtureWithColumns()
def test_valid_when_matches_columns_size_and_type(self):
df = pd.DataFrame({'x': [0, 1, 2], 'y': [1., 2., 3.]})
self.assertTrue(self.validator.is_valid(df))
def test_invalid_when_not_matches_columns_size(self):
df = pd.DataFrame({'x': [0, 1, 2], 'y': [1., 2., 3.], 'z': [1, 2, 3]})
self.assertFalse(self.validator.is_valid(df))
def test_invalid_when_not_matches_columns_type(self):
df =
|
pd.DataFrame([[0, 1, 2], [1., 2., 3.]])
|
pandas.DataFrame
|
import pandas as pd
import json
import bids
import matplotlib.pyplot as plt
import plotje
# Download data from here: <NAME>. et al. Crowdsourced MRI quality metrics
# and expert quality annotations for training of humans and machines. Sci Data 6, 30 (2019).
# Then run make_distributions.py to summarize the data from this snapshot
summary_path = './data/summary/bold_curated'
dataset = '/home/william/datasets/es-fmri_v2/'
dfd = pd.read_csv(summary_path + qc + '_summary.csv', index_col=[0])
layout = bids.BIDSLayout(dataset)
layout.add_derivatives(dataset + '/derivatives/')
layout = layout.to_df()
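# keep only the rows of the layout DataFrame whose path points to an MRIQC JSON output file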
keeprow = []
for i, n in layout.iterrows():
if 'mriqc_output' in n['path'] and n['path'].endswith('.json'):
keeprow.append(i)
layout = layout.loc[keeprow]
layout_bold = layout[layout['suffix'] == 'bold']
params = [('pre', 'rest', 'preop'),
('es', 'es', 'postop')]
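# each params tuple above is (label, BIDS task, BIDS session); presumably the pre-operative
# resting-state run and the post-operative electrical-stimulation ('es') run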
qcmet = {}
qcdesc = {}
for p in params:
qcmet[p[0]] = {}
qcdesc[p[0]] = {}
for n,_ in dfd.iteritems():
qcmet[p[0]][n] = []
layout_tmp = layout[layout['task'] == p[1]]
layout_tmp = layout_tmp[layout_tmp['session'] == p[2]]
for _, f in layout_tmp.iterrows():
with open(f['path']) as json_data:
d = json.load(json_data)
for n,_ in dfd.iteritems():
qcmet[p[0]][n].append(d[n])
for n,_ in dfd.iteritems():
qcdesc[p[0]][n] =
|
pd.Series(qcmet[p[0]][n])
|
pandas.Series
|
"""
Momentum module containing methods to generate momentum features
including RSI and rolling price return rank (monthly and yearly)
"""
import pandas as pd
from talib import RSI
import numpy as np
import matplotlib.pyplot as plt
import math
START_DATE = '2011-01-03'
END_DATE = '2019-04-03'
RANK_RECALCULATE = 1
YEARLY_TRADING_DAYS = 252
MONTHLY_TRADING_DAYS = 21
def get_stock_rsi_daily(time_series_df, ticker):
"""
compute rolling RSI of stock prices using talib
"""
close = get_time_series_adjusted_close(time_series_df, ticker)
rsi = RSI(close, timeperiod=20)
rsi_series = pd.Series(rsi)
tmpDf = pd.DataFrame(data=rsi_series, columns=['RSI'])
time_series_df.loc[ticker, 'RSI'] = tmpDf['RSI'].values
return time_series_df
def get_stock_percent_off_52_week_high():
pass
def update_rank_dataframe(df, stock_returns, period, date):
"""
For each stock trading day in the data range, update the
rolling return rank based on the new monthly & yearly
percent change value
"""
stock_period = str(period) + "_Return"
rank_period = str(period) + "_Return_Rank"
df_tmp = df.reset_index(level=0)
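# build a per-period return table from the stock_returns dict, sort it in descending
# order so the row index becomes the rank, then merge the rank back in by symbol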
returns_df = pd.DataFrame.from_dict(stock_returns, orient='index', columns=[stock_period])
returns_df.sort_values(by=[stock_period], ascending=False, inplace=True)
returns_df.reset_index(level=0, inplace=True)
returns_df[period] = returns_df.index
returns_df.columns = ['Symbol', stock_period, rank_period]
daily_adjusted_rank_df = pd.DataFrame()
daily_adjusted_rank_df = pd.merge(df_tmp.loc[date], returns_df, on='Symbol', how='left')
daily_adjusted_rank_df['Date'] = date
daily_adjusted_rank_df.set_index(['Symbol', 'Date'], inplace=True)
return daily_adjusted_rank_df
def update_with_null_return_rankings(df, stocks_dict, period, date):
"""
Since the early dates do not have enough data to compute monthly/yearly
percent changes, copy the data frame values
"""
stock_period = str(period) + "_Return"
rank_period = str(period) + "_Return_Rank"
df_tmp = df.reset_index(level=0)
daily_rank_df = pd.DataFrame.from_dict(stocks_dict, orient='index', columns=[stock_period])
daily_rank_df.reset_index(level=0, inplace=True)
daily_rank_df[rank_period] = np.nan
daily_rank_df.columns = ['Symbol', stock_period, rank_period]
updated_daily_subset_df = pd.DataFrame()
updated_daily_subset_df = pd.merge(df_tmp.loc[date], daily_rank_df, on='Symbol', how='left')
updated_daily_subset_df['Date'] = date
updated_daily_subset_df.set_index(['Symbol', 'Date'], inplace=True)
return updated_daily_subset_df
def get_daily_adjusted_stock_return_rankings(df, ticker_list, date_list):
"""
The input df dataframe must contain monthly & yearly stock percent changes
to compute a rolling return rank updated daily
"""
global yearly_rank_df, monthly_rank_df
yearly_rank_df = pd.DataFrame()
monthly_rank_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.generic import ABCIndexClass
import pandas as pd
import pandas._testing as tm
from pandas.api.types import is_float, is_float_dtype, is_integer, is_scalar
from pandas.core.arrays import IntegerArray, integer_array
from pandas.core.arrays.integer import (
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
)
from pandas.tests.extension.base import BaseOpsUtil
def make_data():
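# roughly 100 integer values with two NaNs interspersed, to exercise missing-value handling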
return list(range(8)) + [np.nan] + list(range(10, 98)) + [np.nan] + [99, 100]
@pytest.fixture(
params=[
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
]
)
def dtype(request):
return request.param()
@pytest.fixture
def data(dtype):
return integer_array(make_data(), dtype=dtype)
@pytest.fixture
def data_missing(dtype):
return integer_array([np.nan, 1], dtype=dtype)
@pytest.fixture(params=["data", "data_missing"])
def all_data(request, data, data_missing):
"""Parametrized fixture giving 'data' and 'data_missing'"""
if request.param == "data":
return data
elif request.param == "data_missing":
return data_missing
def test_dtypes(dtype):
# smoke tests on auto dtype construction
if dtype.is_signed_integer:
assert np.dtype(dtype.type).kind == "i"
else:
assert np.dtype(dtype.type).kind == "u"
assert dtype.name is not None
@pytest.mark.parametrize(
"dtype, expected",
[
(Int8Dtype(), "Int8Dtype()"),
(Int16Dtype(), "Int16Dtype()"),
(Int32Dtype(), "Int32Dtype()"),
(Int64Dtype(), "Int64Dtype()"),
(UInt8Dtype(), "UInt8Dtype()"),
(UInt16Dtype(), "UInt16Dtype()"),
(UInt32Dtype(), "UInt32Dtype()"),
(UInt64Dtype(), "UInt64Dtype()"),
],
)
def test_repr_dtype(dtype, expected):
assert repr(dtype) == expected
def test_repr_array():
result = repr(integer_array([1, None, 3]))
expected = "<IntegerArray>\n[1, <NA>, 3]\nLength: 3, dtype: Int64"
assert result == expected
def test_repr_array_long():
data = integer_array([1, 2, None] * 1000)
expected = (
"<IntegerArray>\n"
"[ 1, 2, <NA>, 1, 2, <NA>, 1, 2, <NA>, 1,\n"
" ...\n"
" <NA>, 1, 2, <NA>, 1, 2, <NA>, 1, 2, <NA>]\n"
"Length: 3000, dtype: Int64"
)
result = repr(data)
assert result == expected
class TestConstructors:
def test_uses_pandas_na(self):
a = pd.array([1, None], dtype=pd.Int64Dtype())
assert a[1] is pd.NA
def test_from_dtype_from_float(self, data):
# construct from our dtype & string dtype
dtype = data.dtype
# from float
expected = pd.Series(data)
result = pd.Series(
data.to_numpy(na_value=np.nan, dtype="float"), dtype=str(dtype)
)
tm.assert_series_equal(result, expected)
# from int / list
expected = pd.Series(data)
result = pd.Series(np.array(data).tolist(), dtype=str(dtype))
tm.assert_series_equal(result, expected)
# from int / array
expected = pd.Series(data).dropna().reset_index(drop=True)
dropped = np.array(data.dropna()).astype(np.dtype((dtype.type)))
result = pd.Series(dropped, dtype=str(dtype))
tm.assert_series_equal(result, expected)
class TestArithmeticOps(BaseOpsUtil):
def _check_divmod_op(self, s, op, other, exc=None):
super()._check_divmod_op(s, op, other, None)
def _check_op(self, s, op_name, other, exc=None):
op = self.get_op_from_name(op_name)
result = op(s, other)
# compute expected
mask = s.isna()
# if s is a DataFrame, squeeze to a Series
# for comparison
if isinstance(s, pd.DataFrame):
result = result.squeeze()
s = s.squeeze()
mask = mask.squeeze()
# other array is an Integer
if isinstance(other, IntegerArray):
omask = getattr(other, "mask", None)
mask = getattr(other, "data", other)
if omask is not None:
mask |= omask
# 1 ** na is na, so need to unmask those
if op_name == "__pow__":
mask = np.where(~s.isna() & (s == 1), False, mask)
elif op_name == "__rpow__":
other_is_one = other == 1
if isinstance(other_is_one, pd.Series):
other_is_one = other_is_one.fillna(False)
mask = np.where(other_is_one, False, mask)
# float result type or float op
if (
is_float_dtype(other)
or is_float(other)
or op_name in ["__rtruediv__", "__truediv__", "__rdiv__", "__div__"]
):
rs = s.astype("float")
expected = op(rs, other)
self._check_op_float(result, expected, mask, s, op_name, other)
# integer result type
else:
rs = pd.Series(s.values._data, name=s.name)
expected = op(rs, other)
self._check_op_integer(result, expected, mask, s, op_name, other)
def _check_op_float(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in float dtypes
expected[mask] = np.nan
if "floordiv" in op_name:
# Series op sets 1//0 to np.inf, which IntegerArray does not do (yet)
mask2 = np.isinf(expected) & np.isnan(result)
expected[mask2] = np.nan
tm.assert_series_equal(result, expected)
def _check_op_integer(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in integer dtypes
# to compare properly, we convert the expected
# to float, mask to nans and convert infs
# if we have uints then we process as uints
# then convert to float
# and we ultimately want to create a IntArray
# for comparisons
fill_value = 0
# mod/rmod turn floating 0 into NaN while
# integer works as expected (no nan)
if op_name in ["__mod__", "__rmod__"]:
if is_scalar(other):
if other == 0:
expected[s.values == 0] = 0
else:
expected = expected.fillna(0)
else:
expected[
(s.values == 0).fillna(False)
& ((expected == 0).fillna(False) | expected.isna())
] = 0
try:
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
except ValueError:
expected = expected.astype(float)
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
expected[mask] = pd.NA
# assert that the expected astype is ok
# (skip for unsigned as they have wrap around)
if not s.dtype.is_unsigned_integer:
original = pd.Series(original)
# we need to fill with 0's to emulate what an astype('int') does
# (truncation) for certain ops
if op_name in ["__rtruediv__", "__rdiv__"]:
mask |= original.isna()
original = original.fillna(0).astype("int")
original = original.astype("float")
original[mask] = np.nan
tm.assert_series_equal(original, expected.astype("float"))
# assert our expected result
tm.assert_series_equal(result, expected)
def test_arith_integer_array(self, data, all_arithmetic_operators):
# we operate with a rhs of an integer array
op = all_arithmetic_operators
s = pd.Series(data)
rhs = pd.Series([1] * len(data), dtype=data.dtype)
rhs.iloc[-1] = np.nan
self._check_op(s, op, rhs)
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
# scalar
op = all_arithmetic_operators
s = pd.Series(data)
self._check_op(s, op, 1, exc=TypeError)
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
# frame & scalar
op = all_arithmetic_operators
df = pd.DataFrame({"A": data})
self._check_op(df, op, 1, exc=TypeError)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
# ndarray & other series
op = all_arithmetic_operators
s = pd.Series(data)
other = np.ones(len(s), dtype=s.dtype.type)
self._check_op(s, op, other, exc=TypeError)
def test_arith_coerce_scalar(self, data, all_arithmetic_operators):
op = all_arithmetic_operators
s = pd.Series(data)
other = 0.01
self._check_op(s, op, other)
@pytest.mark.parametrize("other", [1.0, np.array(1.0)])
def test_arithmetic_conversion(self, all_arithmetic_operators, other):
# if we have a float operand we should have a float result
# if that is equal to an integer
op = self.get_op_from_name(all_arithmetic_operators)
s = pd.Series([1, 2, 3], dtype="Int64")
result = op(s, other)
assert result.dtype is np.dtype("float")
def test_arith_len_mismatch(self, all_arithmetic_operators):
# operating with a list-like with non-matching length raises
op = self.get_op_from_name(all_arithmetic_operators)
other = np.array([1.0])
s = pd.Series([1, 2, 3], dtype="Int64")
with pytest.raises(ValueError, match="Lengths must match"):
op(s, other)
@pytest.mark.parametrize("other", [0, 0.5])
def test_arith_zero_dim_ndarray(self, other):
arr = integer_array([1, None, 2])
result = arr + np.array(other)
expected = arr + other
tm.assert_equal(result, expected)
def test_error(self, data, all_arithmetic_operators):
# invalid ops
op = all_arithmetic_operators
s = pd.Series(data)
ops = getattr(s, op)
opa = getattr(data, op)
# invalid scalars
msg = (
r"(:?can only perform ops with numeric values)"
r"|(:?IntegerArray cannot perform the operation mod)"
)
with pytest.raises(TypeError, match=msg):
ops("foo")
with pytest.raises(TypeError, match=msg):
ops(pd.Timestamp("20180101"))
# invalid array-likes
with pytest.raises(TypeError, match=msg):
ops(pd.Series("foo", index=s.index))
if op != "__rpow__":
# TODO(extension)
# rpow with a datetimelike coerces the integer array incorrectly
msg = (
"can only perform ops with numeric values|"
"cannot perform .* with this index type: DatetimeArray|"
"Addition/subtraction of integers and integer-arrays "
"with DatetimeArray is no longer supported. *"
)
with pytest.raises(TypeError, match=msg):
ops(pd.Series(pd.date_range("20180101", periods=len(s))))
# 2d
result = opa(pd.DataFrame({"A": s}))
assert result is NotImplemented
msg = r"can only perform ops with 1-d structures"
with pytest.raises(NotImplementedError, match=msg):
opa(np.arange(len(s)).reshape(-1, len(s)))
@pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)])
def test_divide_by_zero(self, zero, negative):
# https://github.com/pandas-dev/pandas/issues/27398
a = pd.array([0, 1, -1, None], dtype="Int64")
result = a / zero
expected = np.array([np.nan, np.inf, -np.inf, np.nan])
if negative:
expected *= -1
tm.assert_numpy_array_equal(result, expected)
def test_pow_scalar(self):
a = pd.array([-1, 0, 1, None, 2], dtype="Int64")
result = a ** 0
expected = pd.array([1, 1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** 1
expected = pd.array([-1, 0, 1, None, 2], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** pd.NA
expected = pd.array([None, None, 1, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** np.nan
expected = np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
# reversed
a = a[1:] # Can't raise integers to negative powers.
result = 0 ** a
expected = pd.array([1, 0, None, 0], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = 1 ** a
expected = pd.array([1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = pd.NA ** a
expected = pd.array([1, None, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = np.nan ** a
expected = np.array([1, np.nan, np.nan, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
def test_pow_array(self):
a = integer_array([0, 0, 0, 1, 1, 1, None, None, None])
b = integer_array([0, 1, None, 0, 1, None, 0, 1, None])
result = a ** b
expected = integer_array([1, 0, None, 1, 1, 1, 1, None, None])
tm.assert_extension_array_equal(result, expected)
def test_rpow_one_to_na(self):
# https://github.com/pandas-dev/pandas/issues/22022
# https://github.com/pandas-dev/pandas/issues/29997
arr = integer_array([np.nan, np.nan])
result = np.array([1.0, 2.0]) ** arr
expected = np.array([1.0, np.nan])
tm.assert_numpy_array_equal(result, expected)
class TestComparisonOps(BaseOpsUtil):
def _compare_other(self, data, op_name, other):
op = self.get_op_from_name(op_name)
# array
result = pd.Series(op(data, other))
expected = pd.Series(op(data._data, other), dtype="boolean")
# fill the nan locations
expected[data._mask] = pd.NA
tm.assert_series_equal(result, expected)
# series
s = pd.Series(data)
result = op(s, other)
expected = op(pd.Series(data._data), other)
# fill the nan locations
expected[data._mask] = pd.NA
expected = expected.astype("boolean")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("other", [True, False, pd.NA, -1, 0, 1])
def test_scalar(self, other, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([1, 0, None], dtype="Int64")
result = op(a, other)
if other is pd.NA:
expected = pd.array([None, None, None], dtype="boolean")
else:
values = op(a._data, other)
expected = pd.arrays.BooleanArray(values, a._mask, copy=True)
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
result[0] = pd.NA
tm.assert_extension_array_equal(a, pd.array([1, 0, None], dtype="Int64"))
def test_array(self, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([0, 1, 2, None, None, None], dtype="Int64")
b = pd.array([0, 1, None, 0, 1, None], dtype="Int64")
result = op(a, b)
values = op(a._data, b._data)
mask = a._mask | b._mask
expected = pd.arrays.BooleanArray(values, mask)
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
result[0] = pd.NA
tm.assert_extension_array_equal(
a, pd.array([0, 1, 2, None, None, None], dtype="Int64")
)
tm.assert_extension_array_equal(
b, pd.array([0, 1, None, 0, 1, None], dtype="Int64")
)
def test_compare_with_booleanarray(self, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([True, False, None] * 3, dtype="boolean")
b = pd.array([0] * 3 + [1] * 3 + [None] * 3, dtype="Int64")
other = pd.array([False] * 3 + [True] * 3 + [None] * 3, dtype="boolean")
expected = op(a, other)
result = op(a, b)
tm.assert_extension_array_equal(result, expected)
def test_no_shared_mask(self, data):
result = data + 1
assert np.shares_memory(result._mask, data._mask) is False
def test_compare_to_string(self, any_nullable_int_dtype):
# GH 28930
s = pd.Series([1, None], dtype=any_nullable_int_dtype)
result = s == "a"
expected = pd.Series([False, pd.NA], dtype="boolean")
self.assert_series_equal(result, expected)
def test_compare_to_int(self, any_nullable_int_dtype, all_compare_operators):
# GH 28930
s1 = pd.Series([1, None, 3], dtype=any_nullable_int_dtype)
s2 = pd.Series([1, None, 3], dtype="float")
method = getattr(s1, all_compare_operators)
result = method(2)
method = getattr(s2, all_compare_operators)
expected = method(2).astype("boolean")
expected[s2.isna()] = pd.NA
self.assert_series_equal(result, expected)
class TestCasting:
@pytest.mark.parametrize("dropna", [True, False])
def test_construct_index(self, all_data, dropna):
# ensure that we do not coerce to Float64Index, rather
# keep as Index
all_data = all_data[:10]
if dropna:
other = np.array(all_data[~all_data.isna()])
else:
other = all_data
result = pd.Index(integer_array(other, dtype=all_data.dtype))
expected = pd.Index(other, dtype=object)
|
tm.assert_index_equal(result, expected)
|
pandas._testing.assert_index_equal
|
import pandas as pd
import glob
data_path = 'E:/GenderClassification/PycharmProjects/GenderClassification/home/abeer/Dropbox/Dataset_HAR project/*'
addrs = glob.glob(data_path)
for i in addrs:
folders = glob.glob(i + '/Walk/Esphalt/Alone/*')
for j in folders:
csv_files = glob.glob(j + '/*')
LUA = pd.read_csv('initAcc.csv')
RC = pd.read_csv('initAcc.csv')
LC = pd.read_csv('initAcc.csv')
back = pd.read_csv('initAcc.csv')
waist = pd.read_csv('initAcc.csv')
RUA = pd.read_csv('initAcc.csv')
LeftWatch = pd.read_csv('initAcc.csv')
RightWatch = pd.read_csv('initAcc.csv')
for k in csv_files:
if '(1)' in k or '(2)' in k or '(3)' in k or '(4)' in k or '(5)' in k:
continue
elif 'Accelerometer' in k and 'F5-RC' in k:
file = pd.read_csv(k)
RC = RC.append(file.iloc[:, 3:])
RC = RC.reset_index(drop=True)
print(RC.columns)
elif 'Accelerometer' in k and "DE-Waist" in k:
file = pd.read_csv(k)
waist = waist.append(file.iloc[:, 3:])
waist = waist.reset_index(drop=True)
elif 'Accelerometer' in k and "D5-LC" in k:
file = pd.read_csv(k)
LC = LC.append(file.iloc[:, 3:])
LC = LC.reset_index(drop=True)
elif 'Accelerometer' in k and "D2-RUA" in k:
file = pd.read_csv(k)
RUA = RUA.append(file.iloc[:, 3:])
RUA = RUA.reset_index(drop=True)
elif 'Accelerometer' in k and "C6-back" in k:
file = pd.read_csv(k)
back = back.append(file.iloc[:, 3:])
back = back.reset_index(drop=True)
elif 'Accelerometer' in k and "C5-LUA" in k:
file = pd.read_csv(k)
LUA = LUA.append(file.iloc[:, 3:])
LUA = LUA.reset_index(drop=True)
for k in csv_files:
if '(1)' in k or '(2)' in k or '(3)' in k or '(4)' in k or '(5)' in k:
continue
elif 'Gyroscope' in k and 'F5-RC' in k:
file = pd.read_csv(k)
file = file.iloc[:, 3:]
RC = pd.concat([RC, file], axis=1)
print(RC.columns)
print(RC.info())
elif 'Gyroscope' in k and "DE-Waist" in k:
file = pd.read_csv(k)
file = file.iloc[:, 3:]
waist = pd.concat([waist, file], axis=1)
elif 'Gyroscope' in k and "D5-LC" in k:
file = pd.read_csv(k)
file = file.iloc[:, 3:]
LC =
|
pd.concat([LC, file], axis=1)
|
pandas.concat
|
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import acf, pacf
import sklearn
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_log_error, r2_score
from statsmodels.tsa.stattools import acf, pacf
from sklearn.model_selection import GridSearchCV
from sklearn.neural_network import MLPRegressor
from scipy import integrate, optimize
from scipy.signal import savgol_filter
from dane import population as popu
dias_restar = 5 #The most recent days of data that are not taken into account
dias_pred = 31 #Days over which the short-term forecast is made
media_movil = 4 #Days averaged over in each series to mitigate errors in the data
Ciudades_dicc={'Bog': 'Bogotá D.C.', 'Mde': 'Medellín', 'Cal':'Cali', 'Brr':'Barranquilla', 'Ctg':'Cartagena de Indias'}
Ciudades=['Bog', 'Mde', 'Cal', 'Brr', 'Ctg']
# Cleans and prepares the input data for the models. This function
# delivers the DataFrames for each of the 5 target variables for each city.
def limpieza_datos():
Covid_Col=pd.read_csv("https://www.datos.gov.co/api/views/gt2j-8ykr/rows.csv?accessType=DOWNLOAD", sep=',', encoding='utf-8', low_memory=False)
#Covid_Col=pd.read_csv("C:\Users\danie\DS\vagrant4docker-master\laboratorios\covid-19-guaya-kilera\Casos_positivos_de_COVID-19_en_Colombia.csv", sep=',', encoding='utf-8', low_memory=False)
Covid_Col.drop(['ID de caso', 'Código DIVIPOLA', 'Departamento o Distrito ', 'País de procedencia', 'Tipo', 'Codigo departamento',
'Codigo pais', 'Tipo recuperación', 'Pertenencia etnica', 'Nombre grupo etnico', 'atención'], axis=1, inplace=True)
Covid_Col['FIS']=Covid_Col['FIS'].replace('Asintomático', np.nan)
Covid_Col['FIS']=pd.to_datetime(Covid_Col['FIS'].str[:10])
Covid_Col['fecha reporte web']=pd.to_datetime(Covid_Col['fecha reporte web'].str[:10])
Covid_Col['Fecha de notificación']=pd.to_datetime(Covid_Col['Fecha de notificación'].str[:10])
Covid_Col['Fecha de muerte']=pd.to_datetime(Covid_Col['Fecha de muerte'].str[:10])
Covid_Col['Fecha diagnostico']=pd.to_datetime(Covid_Col['Fecha diagnostico'].str[:10])
Covid_Col['Fecha recuperado']=pd.to_datetime(Covid_Col['Fecha recuperado'].str[:10])
#Covid_Col[(Covid_Col['Fecha diagnostico']<Covid_Col['Fecha de notificación']) & Covid_Col['FIS'].isnull()]
Covid_Col['Fecha contagio']=Covid_Col['FIS']
Covid_Col.loc[Covid_Col['Fecha contagio'].isnull(), 'Fecha contagio'] = Covid_Col['Fecha de notificación']
Covid_Col.drop(['Fecha de notificación', 'FIS', 'Fecha diagnostico', 'fecha reporte web'], axis=1, inplace=True)
Covid_Col['Cantidad de personas']=1
Fecha_Inicio = Covid_Col['Fecha contagio'][0]
Fecha_Fin = max(Covid_Col['Fecha contagio']) - pd.to_timedelta(dias_restar, unit='d')
Fecha_Fin_pred = Fecha_Fin + pd.to_timedelta(dias_pred - 1, unit='d')
Fecha_Fin_SIR = Fecha_Fin_pred + pd.to_timedelta(50, unit='d')
globals()['Fechas_pred_i'] = pd.date_range(start=Fecha_Inicio, end=Fecha_Fin_pred)
globals()['Fechas_SIR'] = pd.date_range(start=Fecha_Inicio, end=Fecha_Fin_SIR)
Fechas_evaluar_i = pd.date_range(start=Fecha_Inicio, end=Fecha_Fin)
Fechas_evaluar = pd.DataFrame(index=Fechas_evaluar_i)
for ciudad in Ciudades:
globals()["Covid_" + str(ciudad)]=Covid_Col[Covid_Col['Ciudad de ubicación']==Ciudades_dicc[ciudad]]
globals()["nuevos_" + str(ciudad)] = globals()["Covid_" + str(ciudad)].groupby('Fecha contagio').sum()
globals()["nuevos_" + str(ciudad)].drop(['Edad'], axis=1, inplace=True)
globals()["nuevos_" + str(ciudad)]=pd.merge(Fechas_evaluar, globals()["nuevos_" + str(ciudad)], \
how='left', left_index=True, right_index=True)
globals()["nuevos_" + str(ciudad)]=globals()["nuevos_" + str(ciudad)].replace(np.nan, 0)
globals()["confirmados_" + str(ciudad)]=globals()["nuevos_" + str(ciudad)].cumsum()
globals()["nuevos_" + str(ciudad)].rename(columns={'Cantidad de personas': "Casos_nuevos_" }, inplace=True)
globals()["confirmados_" + str(ciudad)].rename(columns={'Cantidad de personas': "Casos_confirmados_" }, inplace=True)
globals()["recuperados_" + str(ciudad)]=globals()["Covid_" + str(ciudad)].groupby('Fecha recuperado').sum()
globals()["recuperados_" + str(ciudad)].drop(['Edad'], axis=1, inplace=True)
globals()["recuperados_" + str(ciudad)]=pd.merge(Fechas_evaluar, globals()["recuperados_" + str(ciudad)], \
how='left', left_index=True, right_index=True)
globals()["recuperados_" + str(ciudad)]=globals()["recuperados_" + str(ciudad)].replace(np.nan, 0)
#globals()["recuperados_" + str(ciudad)]=globals()["recuperados_" + str(ciudad)].cumsum()
globals()["recuperados_" + str(ciudad)].rename(columns={'Cantidad de personas': "Casos_recuperados_" }, inplace=True)
globals()["muertes_" + str(ciudad)]=globals()["Covid_" + str(ciudad)].groupby('Fecha de muerte').sum()
globals()["muertes_" + str(ciudad)].drop(['Edad'], axis=1, inplace=True)
globals()["muertes_" + str(ciudad)]=pd.merge(Fechas_evaluar,globals()["muertes_" + str(ciudad)], how='left', \
left_index=True, right_index=True)
globals()["muertes_" + str(ciudad)]=globals()["muertes_" + str(ciudad)].replace(np.nan, 0)
#globals()["muertes_" + str(ciudad)]=globals()["muertes_" + str(ciudad)].cumsum()
globals()["muertes_" + str(ciudad)].rename(columns={'Cantidad de personas': "muertes_" }, inplace=True)
globals()["activos_" + str(ciudad)]=pd.concat([globals()["confirmados_" + str(ciudad)], \
globals()["recuperados_" + str(ciudad)], globals()["muertes_" + str(ciudad)], globals()["nuevos_" + str(ciudad)]], axis=1)
globals()["activos_" + str(ciudad)]['Casos_activos_']=globals()["activos_" + str(ciudad)]["Casos_confirmados_"]- \
globals()["activos_" + str(ciudad)]["Casos_recuperados_"].cumsum()-globals()["activos_" + str(ciudad)]["muertes_"].cumsum()
globals()["Casos_" + str(ciudad)]=globals()["activos_" + str(ciudad)].copy()
globals()["activos_" + str(ciudad)].drop(["Casos_confirmados_", "Casos_recuperados_", "muertes_", "Casos_nuevos_"], axis=1, inplace=True)
globals()["Casos_" + str(ciudad)]["Total_recuperados_"]=globals()["Casos_" + str(ciudad)]["Casos_recuperados_"].cumsum()
globals()["Casos_" + str(ciudad)]["Total_muertes_"]=globals()["Casos_" + str(ciudad)]["muertes_"].cumsum()
# Forecasts the time series for the unsmoothed data using multilayer perceptron
# regression; the parameter optimization is done in a separate file.
# -- Since the next function turns out to give better results, the output files of
# -- this one are not shown in the dashboard
def redes_neuronales():
limpieza_datos()
for ciudad in Ciudades:
for estado in ['nuevos_', 'recuperados_', 'muertes_']:
globals()['lag_pacf_'+ str(estado) + str(ciudad)] = pacf(globals()[str(estado) +str(ciudad)], nlags=30, method='ols')
globals()['umbral_' + str(estado) + str(ciudad)] = 1.96/np.sqrt(len(globals()[str(estado) +str(ciudad)]))
#P=1
#for lag_pacf in list(globals()['lag_pacf_'+ str(estado) + str(ciudad)]):
# if lag_pacf < globals()['umbral_' + str(estado) + str(ciudad)]:
# break
# P=P+1
P=4
scaler = MinMaxScaler()
globals()[str(estado) + str(ciudad)+'_scaled'] = scaler.fit_transform(globals()[str(estado) + str(ciudad)]).reshape(-1, 1)
globals()[str(estado) + str(ciudad)+'_scaled'] = pd.DataFrame(globals()[str(estado) + str(ciudad)+'_scaled'])
globals()['X_'+ str(estado) + str(ciudad)] = []
for t in range(P-1, len(globals()[str(estado) + str(ciudad)+'_scaled'])-1):
globals()['X_'+ str(estado) + str(ciudad)].append([globals()[str(estado) + str(ciudad)+'_scaled'].iloc[t-n][0] \
for n in range(P)])
if ciudad=='Bog':
if estado=='nuevos_':
lrning_rate_in=0.043
elif estado=='recuperados_':
lrning_rate_in=0.097
elif estado=='muertes_':
lrning_rate_in=0.062
elif ciudad=='Mde':
if estado=='nuevos_':
lrning_rate_in=0.0315
elif estado=='recuperados_':
lrning_rate_in=0.095
elif estado=='muertes_':
lrning_rate_in=0.065
elif ciudad=='Cal':
if estado=='nuevos_':
lrning_rate_in=0.075
elif estado=='recuperados_':
lrning_rate_in=0.085
elif estado=='muertes_':
lrning_rate_in=0.1
elif ciudad=='Brr':
if estado=='nuevos_':
lrning_rate_in=0.01
elif estado=='recuperados_':
lrning_rate_in=0.043
elif estado=='muertes_':
lrning_rate_in=0.022
elif ciudad=='Ctg':
if estado=='nuevos_':
lrning_rate_in=0.025
elif estado=='recuperados_':
lrning_rate_in=0.085
elif estado=='muertes_':
lrning_rate_in=0.082
H = 4
np.random.seed(12345)
mlp = MLPRegressor(
hidden_layer_sizes=(H, ),
activation = 'relu',
learning_rate = 'adaptive',
alpha=0.0001,
learning_rate_init = lrning_rate_in,
max_iter = 100000,
early_stopping=True)
train_size= int(len(globals()[str(estado) + str(ciudad)+'_scaled'])*0.9)
mlp.fit(globals()['X_'+ str(estado) + str(ciudad)][0:train_size], globals()[str(estado) + str(ciudad)+'_scaled'][P:train_size+P][0])
globals()[str(estado) + str(ciudad)+'_scaled_predict']= mlp.predict(globals()['X_'+ str(estado) + str(ciudad)])
globals()[str(estado) + str(ciudad)+'_scaled_predict']= np.asarray([ i if i>=0 else 0 for i \
in globals()[str(estado) + str(ciudad)+'_scaled_predict']])
y_pred=[]
for dias_predict in range(dias_pred):
if dias_predict ==0:
X_pred=[j for j in globals()[str(estado) + str(ciudad)+'_scaled'][:-1-P:-1][0]]
x_pred=[X_pred]
else:
X_pred =y_pred [-1] + X_pred[:P-1]
x_pred=[X_pred]
y_pred.append((mlp.predict(x_pred)).tolist())
y_pred=[i[0] if i[0]>=0 else 0 for i in y_pred]
globals()[str(estado) + str(ciudad)+'_predict'] = [ n[0] for n in scaler.inverse_transform([[u] for u in list(globals()[str(estado) \
+ str(ciudad)+'_scaled'][0])[0:P-1] + globals()[str(estado) + str(ciudad)+'_scaled_predict'].tolist()+ y_pred])]
globals()[str(estado) + str(ciudad)+'_predict'] = pd.DataFrame(globals()[str(estado) + str(ciudad)+'_predict'],\
index = globals()['Fechas_pred_i'])
globals()[str(estado) + str(ciudad)+'_MSLE_train']=mean_squared_log_error(globals()[str(estado) + str(ciudad)][:train_size], \
globals()[str(estado) + str(ciudad)+'_predict'][:train_size])
globals()[str(estado) + str(ciudad)+'_r2_train']=r2_score(globals()[str(estado) + str(ciudad)][:train_size], \
globals()[str(estado) + str(ciudad)+'_predict'][:train_size])
test_end=len(globals()[str(estado) + str(ciudad)])
globals()[str(estado) + str(ciudad)+'_MSLE_test']=mean_squared_log_error(globals()[str(estado) + str(ciudad)][train_size:test_end],\
globals()[str(estado) + str(ciudad)+'_predict'][train_size:test_end])
globals()[str(estado) + str(ciudad)+'_r2_test']=r2_score(globals()[str(estado) + str(ciudad)][train_size:test_end],\
globals()[str(estado) + str(ciudad)+'_predict'][train_size:test_end])
globals()[str(estado) + str(ciudad)+'_real_vs_pred']=globals()[str(estado) + str(ciudad)+'_predict'].copy()
globals()[str(estado) + str(ciudad)+'_real_vs_pred']['reales']=globals()[str(estado) + str(ciudad)]
globals()[str(estado) + str(ciudad)+'_real_vs_pred']=globals()[str(estado) + str(ciudad)+'_real_vs_pred'].rename(columns={0:'predicción'})
globals()[str(estado) + str(ciudad)+'_real_vs_pred'].plot(figsize=(17,10), linewidth=1.5, style=['-r', '.-k'], fontsize=15)
plt.legend(fontsize='x-large')
plt.title('{} en {} por día'.format(str(estado)[:-1].capitalize(), Ciudades_dicc[str(ciudad)]), fontsize=20)
plt.xlabel('Month', fontsize=20)
max_=(globals()[str(estado) + str(ciudad)+'_real_vs_pred'].max()).max()
plt.vlines(globals()['Fechas_pred_i'][train_size],0, max_, colors='b', linestyles ='dashdot' )
plt.grid()
plt.savefig("images/MLP"+ str(estado) + str(ciudad) + ".png")
plt.close()
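# A minimal, self-contained sketch of the lag-feature MLP forecasting pattern used above
# (illustrative only: synthetic data and default parameters; the real script tunes the
# learning rate per city and per series, and these helper names are not part of it).
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.neural_network import MLPRegressor

def mlp_forecast_sketch(series, lags=4, horizon=7):
    """Fit an MLP on lagged values of a 1-D series and forecast `horizon` steps ahead."""
    values = np.asarray(series, dtype=float).reshape(-1, 1)
    scaler = MinMaxScaler()
    scaled = scaler.fit_transform(values).ravel()
    # lag matrix: X[t] = (y[t-1], ..., y[t-lags]), target y[t]
    X = np.array([scaled[t - lags:t][::-1] for t in range(lags, len(scaled))])
    y = scaled[lags:]
    mlp = MLPRegressor(hidden_layer_sizes=(4,), max_iter=10000, random_state=0)
    mlp.fit(X, y)
    # recursive multi-step forecast: feed each prediction back in as the newest lag
    window = list(scaled[-lags:][::-1])
    preds = []
    for _ in range(horizon):
        nxt = float(mlp.predict([window])[0])
        preds.append(max(nxt, 0.0))  # clip negatives, as the script above does
        window = [nxt] + window[:-1]
    return scaler.inverse_transform(np.array(preds).reshape(-1, 1)).ravel()

# example usage with synthetic daily counts:
# mlp_forecast_sketch(np.random.poisson(50, size=120), lags=4, horizon=7)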
# Performs the same process as the previous function, but with smoothed data. This function produces
# the plots of the real and forecast values for 3 of the variables of interest (new,
# recovered and death cases per day). Each plot reports the training and test MSLE errors
# and the training and test R2 coefficients.
def redes_neuronales_suavizadas():
limpieza_datos()
for ciudad in Ciudades:
for estado in ['nuevos_', 'recuperados_', 'muertes_']:
globals()['lag_pacf_'+ str(estado) + str(ciudad)] = pacf(globals()[str(estado) +str(ciudad)], nlags=30, method='ols')
globals()['umbral_' + str(estado) + str(ciudad)] = 1.96/np.sqrt(len(globals()[str(estado) +str(ciudad)]))
#P=1
#for lag_pacf in list(globals()['lag_pacf_'+ str(estado) + str(ciudad)]):
# if lag_pacf < globals()['umbral_' + str(estado) + str(ciudad)]:
# break
# P=P+1
P=4
scaler = MinMaxScaler()
globals()[str(estado) + str(ciudad)+ '_smoothed']=globals()[str(estado) + str(ciudad)].rolling(media_movil).mean()
globals()[str(estado) + str(ciudad)+ '_smoothed'][globals()[str(estado) + str(ciudad)+ '_smoothed'].columns[0]][0:media_movil] = \
globals()[str(estado) + str(ciudad)+ '_smoothed'][globals()[str(estado) + str(ciudad)+ '_smoothed'].columns[0]][media_movil]
globals()[str(estado) + str(ciudad)+'_scaled_smoothed'] = scaler.fit_transform(globals()[str(estado) + str(ciudad)+ '_smoothed']).reshape(-1, 1)
globals()[str(estado) + str(ciudad)+'_scaled_smoothed'] = pd.DataFrame(globals()[str(estado) + str(ciudad)+'_scaled_smoothed'])
globals()['X_smoothed'+ str(estado) + str(ciudad)] = []
for t in range(P-1, len(globals()[str(estado) + str(ciudad)+'_scaled_smoothed'])-1):
globals()['X_smoothed'+ str(estado) + str(ciudad)].append([globals()[str(estado) + str(ciudad)+'_scaled_smoothed'].iloc[t-n][0] \
for n in range(P)])
if ciudad=='Bog':
if estado=='nuevos_':
lrning_rate_in=0.04
elif estado=='recuperados_':
lrning_rate_in=0.004
elif estado=='muertes_':
lrning_rate_in=0.071
elif ciudad=='Mde':
if estado=='nuevos_':
lrning_rate_in=0.0354
elif estado=='recuperados_':
lrning_rate_in=0.09
elif estado=='muertes_':
lrning_rate_in=0.065
elif ciudad=='Cal':
if estado=='nuevos_':
lrning_rate_in=0.02
elif estado=='recuperados_':
lrning_rate_in=0.101
elif estado=='muertes_':
lrning_rate_in=0.085
elif ciudad=='Brr':
if estado=='nuevos_':
lrning_rate_in=0.02
elif estado=='recuperados_':
lrning_rate_in=0.01
elif estado=='muertes_':
lrning_rate_in=0.1
elif ciudad=='Ctg':
if estado=='nuevos_':
lrning_rate_in=0.02
elif estado=='recuperados_':
lrning_rate_in=0.1
elif estado=='muertes_':
lrning_rate_in=0.075
H = 4
np.random.seed(12345)
mlp = MLPRegressor(
hidden_layer_sizes=(H, ),
activation = 'relu',
learning_rate = 'adaptive',
alpha=0.0001,
learning_rate_init = lrning_rate_in,
max_iter = 100000,
early_stopping=True)
globals()['train_size']= int(len(globals()[str(estado) + str(ciudad)+'_scaled_smoothed'])*0.9)
mlp.fit(globals()['X_smoothed'+ str(estado) + str(ciudad)][0:train_size], globals()[str(estado) + \
str(ciudad)+'_scaled_smoothed'][P:train_size+P][0])
globals()[str(estado) + str(ciudad)+'_scaled_predict_smoothed']= mlp.predict(globals()['X_smoothed'+ str(estado) + str(ciudad)])
globals()[str(estado) + str(ciudad)+'_scaled_predict_smoothed']= np.asarray([ i if i>=0 else 0 for i \
in globals()[str(estado) + str(ciudad)+'_scaled_predict_smoothed']])
y_pred=[]
for dias_predict in range(dias_pred):
if dias_predict ==0:
X_pred=[j for j in globals()[str(estado) + str(ciudad)+'_scaled_smoothed'][:-1-P:-1][0]]
x_pred=[X_pred]
else:
X_pred =y_pred [-1] + X_pred[:P-1]
x_pred=[X_pred]
y_pred.append((mlp.predict(x_pred)).tolist())
y_pred=[i[0] if i[0]>=0 else 0 for i in y_pred]
globals()[str(estado) + str(ciudad)+'_predict_smoothed'] = [ n[0] for n in scaler.inverse_transform([[u] for u in list(globals()[str(estado) \
+ str(ciudad)+'_scaled_smoothed'][0])[0:P-1] + globals()[str(estado) + str(ciudad)+'_scaled_predict_smoothed'].tolist()+ y_pred])]
globals()[str(estado) + str(ciudad)+'_predict_smoothed'] = pd.DataFrame(globals()[str(estado) + str(ciudad)+'_predict_smoothed'],\
index = globals()['Fechas_pred_i'])
globals()[str(estado) + str(ciudad)+'_MSLE_train_smoothed']=mean_squared_log_error(globals()[str(estado) + str(ciudad)+ '_smoothed']\
[:train_size], globals()[str(estado) + str(ciudad)+'_predict_smoothed'][:train_size])
globals()[str(estado) + str(ciudad)+'_r2_train_smoothed']=r2_score(globals()[str(estado) + str(ciudad)+ '_smoothed'][:train_size], \
globals()[str(estado) + str(ciudad)+'_predict_smoothed'][:train_size])
test_end=len(globals()[str(estado) + str(ciudad)+ '_smoothed'])
globals()[str(estado) + str(ciudad)+'_MSLE_test_smoothed']=mean_squared_log_error(globals()[str(estado) + str(ciudad)+ '_smoothed']\
[train_size:test_end], globals()[str(estado) + str(ciudad)+'_predict_smoothed'][train_size:test_end])
globals()[str(estado) + str(ciudad)+'_r2_test_smoothed']=r2_score(globals()[str(estado) + str(ciudad)+ '_smoothed']\
[train_size:test_end], globals()[str(estado) + str(ciudad)+'_predict_smoothed'][train_size:test_end])
globals()[str(estado) + str(ciudad)+'_real_vs_pred_smoothed']=globals()[str(estado) + str(ciudad)+'_predict_smoothed'].copy()
globals()[str(estado) + str(ciudad)+'_real_vs_pred_smoothed']['reales']=globals()[str(estado) + str(ciudad)+ '_smoothed']
globals()[str(estado) + str(ciudad)+'_real_vs_pred_smoothed']=globals()[str(estado) + str(ciudad)+'_real_vs_pred_smoothed'].rename(columns={0:'predicción'})
globals()[str(estado) + str(ciudad)+'_real_vs_pred_smoothed'].plot(figsize=(17,10), linewidth=1.5, style=['-r', '.-k'], fontsize=15)
plt.legend(fontsize='x-large')
plt.title('{} en {} por día (curva suavizada)'.format(str(estado)[:-1].capitalize(), Ciudades_dicc[str(ciudad)]), fontsize=20)
plt.xlabel('Month', fontsize=20)
max_=(globals()[str(estado) + str(ciudad)+'_real_vs_pred_smoothed'].max()).max()
plt.vlines(globals()['Fechas_pred_i'][train_size],0, max_, colors='b', linestyles ='dashdot' )
plt.figtext(0.14,0.7,' MSLE train: {} \n MSLE test: {} \n R2 train: {} \n R2 test: {}'.\
format(round(globals()[str(estado) + str(ciudad)+'_MSLE_train_smoothed'],4),\
round(globals()[str(estado) + str(ciudad)+'_MSLE_test_smoothed'],4),\
round(globals()[str(estado) + str(ciudad)+'_r2_train_smoothed'],4),\
round(globals()[str(estado) + str(ciudad)+'_r2_test_smoothed'],4)), fontsize=13, color='k', \
bbox={'facecolor': 'blue', 'alpha': 0.25, 'pad': 1})
plt.grid()
plt.savefig("images/images/im/MLP"+ str(estado) + str(ciudad) + "_suavizada.png")
plt.close()
# Produces the plots of confirmed cases and active cases for each city.
def variables_derivadas():
redes_neuronales_suavizadas()
for ciudad in Ciudades:
globals()['confirmados_'+ str(ciudad)+'_real_vs_pred_smoothed']=globals()['nuevos_'+ str(ciudad)+'_real_vs_pred_smoothed'].cumsum()
globals()['confirmados_'+ str(ciudad)+'_real_vs_pred_smoothed'].plot(figsize=(17,10), linewidth=1.5, style=['-r', '.-k'], fontsize=15)
plt.legend(fontsize='x-large')
plt.title('Casos confirmados en {} (curva suavizada)'.format(Ciudades_dicc[str(ciudad)]), fontsize=20)
plt.xlabel('Month', fontsize=20)
max_=(globals()['confirmados_'+ str(ciudad)+'_real_vs_pred_smoothed'].max()).max()
plt.vlines(globals()['Fechas_pred_i'][train_size],0, max_, colors='b', linestyles ='dashdot' )
plt.grid()
plt.savefig("images/images/im/MLP_confirmados" + str(ciudad) + "_suavizada.png")
plt.close()
globals()['activos_'+ str(ciudad)+'_real_vs_pred_smoothed']=(globals()['nuevos_'+ str(ciudad)+'_real_vs_pred_smoothed']- \
globals()['recuperados_'+ str(ciudad)+'_real_vs_pred_smoothed']-globals()['muertes_'+ str(ciudad)+'_real_vs_pred_smoothed']).cumsum()
globals()['activos_'+ str(ciudad)+'_real_vs_pred_smoothed'][globals()['activos_'+ str(ciudad)+'_real_vs_pred_smoothed']<0]=0
globals()['activos_'+ str(ciudad)+'_real_vs_pred_smoothed'].plot(figsize=(17,10), linewidth=1.5, style=['-r', '.-k'], fontsize=15)
plt.legend(fontsize='x-large')
plt.title('Casos activos en {} (curva suavizada)'.format(Ciudades_dicc[str(ciudad)]), fontsize=20)
plt.xlabel('Month', fontsize=20)
max_=(globals()['activos_'+ str(ciudad)+'_real_vs_pred_smoothed'].max()).max()
plt.vlines(globals()['Fechas_pred_i'][train_size],0, max_, colors='b', linestyles ='dashdot' )
plt.grid()
plt.savefig("images/images/MLP_activos" + str(ciudad) + "_suavizada.png")
plt.close()
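# A minimal sketch of the derivation used above: cumulative confirmed and active cases
# from daily new/recovered/death series (illustrative data, not the real feed).
import pandas as pd

_new = pd.Series([5, 8, 12, 7], index=pd.date_range("2020-06-01", periods=4))
_recovered = pd.Series([0, 2, 4, 6], index=_new.index)
_deaths = pd.Series([0, 1, 1, 0], index=_new.index)
_confirmed = _new.cumsum()                                      # running total of cases
_active = (_new - _recovered - _deaths).cumsum().clip(lower=0)  # cases still active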
# This function produces the long-term forecast using the SIR model,
# fitted with optimization routines
def casos():
for ciudad in Ciudades:
globals()['N'+str(ciudad)] = popu(ciudad)
globals()['real_'+str(ciudad)] = [i for i in globals()["activos_" + str(ciudad)]['Casos_activos_']]
globals()['poly_pred_'+str(ciudad)] = savgol_filter(globals()['real_'+str(ciudad)], 51,3) # window size 51, polynomial order 3
globals()['df_pred_'+str(ciudad)] = pd.DataFrame(globals()['poly_pred_'+str(ciudad)])
globals()['df_real_'+str(ciudad)] = pd.DataFrame(globals()['real_'+str(ciudad)]) #Active cases per day since case 0
# return N,df_poly,df_vec_real,poly,vec_real_140,ciudad
# plt.figure(figsize=(12,6))
# plt.plot(globals()['poly_pred_'+str(ciudad)])
# plt.plot(globals()['real_'+str(ciudad)])
# plt.legend(["Predicción","Real"], loc='upper left')
# plt.title("Infecciones por COVID-19 desde el primer caso"+" "+ str(Ciudades_dicc.get(ciudad)), size=15)
# plt.xlabel("Days", size=13)
# plt.ylabel("Infecciones", size=13)
# plt.ylim(0, max(globals()['real_'+str(ciudad)])+1000)
# plt.show()
N = globals()['N'+str(ciudad)]
depart_df = pd.DataFrame()
depart_df['ConfirmedCases'] = globals()['real_'+str(ciudad)]
depart_df = depart_df[10:]
depart_df['day_count'] = list(range(1,len(depart_df)+1))
ydata = [i for i in depart_df.ConfirmedCases]
xdata = depart_df.day_count
ydata = np.array(ydata, dtype=float)
xdata = np.array(xdata, dtype=float)
inf0 = ydata[0]
sus0 = N - inf0
rec0 = 0.0
def sir_model(y, x, beta, gamma):
sus = -beta * y[0] * y[1] / N
rec = gamma * y[1]
inf = -(sus + rec)
return sus, inf, rec
def fit_odeint(x, beta, gamma):
return integrate.odeint(sir_model, (sus0, inf0, rec0), x, args=(beta, gamma))[:,1]
if ciudad == 'Bogg':
popt = np.array([0.2783922953043075, 0.2165019796859231])
else:
popt, pcov = optimize.curve_fit(fit_odeint, xdata, ydata, maxfev=5000)
xdata2=range(len(globals()['Fechas_SIR']))
xdata2 = np.array(xdata2, dtype=float)
fitted = fit_odeint(xdata2, *popt)
globals()['SIR_activos'+str(ciudad)]=pd.DataFrame(fitted, index=globals()['Fechas_SIR'])
globals()['SIR_activos'+str(ciudad)]['reales']=globals()["activos_" + str(ciudad)]['Casos_activos_']
globals()['SIR_activos'+str(ciudad)]=globals()['SIR_activos'+str(ciudad)].rename(columns={0:'predicción', 'Casos_activos_':'reales'})
globals()['SIR_activos'+str(ciudad)].plot(figsize=(17,10), linewidth=1.5, style=['-r', '.-k'], fontsize=15)
plt.legend(fontsize='x-large')
plt.title("Modelo SIR, Casos activos en "+ str(Ciudades_dicc.get(ciudad)), fontsize=20)
plt.xlabel('Month', fontsize=20)
plt.grid()
print("Optimal parameters: beta =", popt[0], " and gamma = ", popt[1])
plt.savefig("images/SIR_activos" + str(ciudad) + ".png")
plt.close()
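# A standalone sketch of the SIR fitting pattern used in casos() above, run on synthetic
# data (the population size, initial conditions and "observed" curve below are made up).
import numpy as np
from scipy import integrate, optimize

N_pop = 1_000_000           # assumed population size
i0, r0 = 10.0, 0.0          # initial infected / recovered
s0 = N_pop - i0

def _sir(y, t, beta, gamma):
    s, i, r = y
    ds = -beta * s * i / N_pop
    dr = gamma * i
    di = -(ds + dr)
    return ds, di, dr

def _infected(t, beta, gamma):
    # only the infected compartment is compared against the observed series
    return integrate.odeint(_sir, (s0, i0, r0), t, args=(beta, gamma))[:, 1]

_t = np.arange(60, dtype=float)
_observed = _infected(_t, 0.3, 0.1) * (1 + 0.05 * np.random.randn(60))  # noisy synthetic curve
_popt, _pcov = optimize.curve_fit(_infected, _t, _observed, p0=(0.2, 0.1), maxfev=5000)
# _popt ~ (beta, gamma); evaluating _infected on a longer time axis gives the long-term forecast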
# Combines the short- and long-term models, assigning weights according
# to their observed degradation
def Unir_modelos():
variables_derivadas()
casos()
for ciudad in Ciudades:
globals()['Union_activos_'+str(ciudad)]=globals()['SIR_activos'+str(ciudad)].copy()
globals()['Union_activos_'+str(ciudad)]['corto_plazo']=globals()['activos_'+ str(ciudad)+'_real_vs_pred_smoothed']['predicción']
globals()['Union_activos_'+str(ciudad)]['pred']=np.where(globals()['Union_activos_'+str(ciudad)].index<pd.to_datetime('09/08/2020'),100,0)
globals()['Union_activos_'+str(ciudad)]['pred'] = np.where((globals()['Union_activos_'+str(ciudad)].index >=
|
pd.to_datetime('09/08/2020')
|
pandas.to_datetime
|
#!/usr/bin/env python
#
# run phmmer against comma separated list of Uniprot IDs.
# produce csv of pairwise match alignment.
#
#
#
import argparse
import os
import sys
import logging
import traceback
import pandas as pd
gitpath=os.path.expanduser("~/git/cshlwork")
sys.path.append(gitpath)
from protlib import uniprot
from protlib import phmmer
def indexbypacc(lod):
logging.debug(f"indexing uniprot list of dicts len: {len(lod)}")
upbypacc = {}
for p in lod:
pacc = p['proteinacc']
#if pacc == "A0A0J9YTW6":
# logging.debug("Indexing later missing pacc! A0A0R4J0X7")
seq = p['sequence']
upbypacc[pacc] = p
logging.debug(f"produced indexed dict len: {len(upbypacc)}")
return upbypacc
def parse_pairfile(filename):
f = open(filename)
lines = f.readlines()
dupelist = []
lnum = 0
knum = 0
for line in lines:
(p1, p2) = line.split(',')
p1 = p1.strip()
p2 = p2.strip()
if p2 != "NA":
dupelist.append( (p1, p2) )
else:
knum += 1
#logging.debug("skipping NA target. ")
lnum += 1
logging.debug(f" processed {lnum} lines. skipped {knum} NAs. produced {len(dupelist)} items in dupelist[0] = {dupelist[0]}")
#logging.debug(f"dupelist: {dupelist}")
return dupelist
def add_altcodes(upbypacc, infile):
'''
upbypacc { <pacc> : { 'proteinacc' : <pacc>,
'sequence' : <seq> }
,
,
,
}
altcodes:
cat <uniprot>.dat | grep "^AC" > <altcodes>.txt
AC Q9CQV8; O70455; Q3TY33; Q3UAN6;
AC P35213;
AC P62259; P29360; P42655; Q63631;
'''
logging.debug(f"len upbypacc before: {len(upbypacc)}")
nadded = 0
nmissing = 0
try:
f = open(infile)
lines = f.readlines()
for line in lines:
# remove leading AC
fields = line.split()[1:]
#logging.debug(f"fields: {fields}")
if len(fields) > 1:
#logging.debug("more than one field.")
ecode = fields[0].replace(';','')
try:
entry = upbypacc[ecode]
for alt in fields[1:]:
alt = alt.replace(';','')
upbypacc[alt] = entry
#logging.debug(f"added alt {alt} for entry code {ecode}")
nadded += 1
except KeyError:
#logging.warn(f"entry {ecode} not found in upbypacc.")
nmissing += 1
except IOError:
logging.error(f"could not read file {infile}")
traceback.print_exc(file=sys.stdout)
finally:
f.close()
logging.debug(f"len ubypacc after: {len(upbypacc)} {nadded} alts added. {nmissing} missing.")
def parse_filebase(filepath):
'''
gives back filepath minus the last dot extension, or the
same filepath if there is no extension.
'''
return os.path.splitext(filepath)[0]
def run_phmmer(pairlist, uniprot_fasta, uniprot_altcodes, pairtfa, targettfa):
config = get_default_config()
up = parse_uniprot_fasta(uniprot_fasta)
logging.debug(f"up len: {len(up)}")
upbypacc = indexbypacc(up)
add_altcodes(upbypacc, uniprot_altcodes)
logging.debug(f"upbypacc len: {len(upbypacc)}")
write_sequences( pairlist, upbypacc, pairtfa, targettfa )
outfile, exclude_list, cidgidmap = execute_phmmer(config, pairtfa, version='current')
logging.info(f"wrote phmmer output to {outfile}")
df = get_phmmer_df(config, pairtfa)
logging.debug(f"df: {df}")
return df
def get_match(query, target, df):
logging.debug(f"query={query} target={target}")
qdf = df[df['query'] == query]
row = qdf[qdf['target'] == target]
if len(row) > 1 :
logging.warning(f'multiple matches for query={query} target={target} ')
return None
elif len(row) == 1:
r = row.iloc[0]
eval = r['eval']
score =r['score']
bias = r['bias']
return (eval, score, bias)
else:
logging.warning(f'no matches for query={query} target={target} ')
return None
def make_evaltable(pdf, pairlist, evalfile ):
#config = get_default_config()
#pdf = pd.read_csv(phmmerdf, index_col=0)
pdf.drop_duplicates(inplace=True,ignore_index=True)
#dupelist = parse_dupepairs()
lod = []
for tup in pairlist:
(p1, p2) = tup
logging.debug(f"looking for {p1} -> {p2}")
rv = get_match(p1, p2, pdf)
if rv is not None:
(eval, score, bias ) = rv
lod.append( { 'query' : p1,
'target' : p2,
'eval' : eval,
'score' : score,
'bias' : bias,
}
)
logging.debug(f"dupelist length: {len(pairlist)}")
logging.debug(f"matchlist length: {len(lod)}")
edf =
|
pd.DataFrame(lod)
|
pandas.DataFrame
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, isnull, date_range,
MultiIndex, Index)
from pandas.tseries.index import Timestamp
from pandas.compat import range
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
def _skip_if_no_akima():
try:
from scipy.interpolate import Akima1DInterpolator # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.Akima1DInterpolator missing')
class TestSeriesMissingData(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_timedelta_fillna(self):
# GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1), timedelta(
days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
# interpreted as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(
0), timedelta(1), timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
from pandas import tslib
result = td.fillna(tslib.NaT)
expected = Series([tslib.NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)],
dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130104'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
from pandas import tslib
result = s.fillna(tslib.NaT)
expected = s
assert_series_equal(result, expected)
# ffill
result = s.ffill()
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130101'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# bfill
result = s.bfill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130103 9:01:01'), Timestamp(
'20130103 9:01:01')])
assert_series_equal(result, expected)
# GH 6587
# make sure that we are treating as integer when filling
# this also tests inference of a datetime-like with NaT's
s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])
expected = Series(
['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001',
'2013-08-05 15:30:00.000001'], dtype='M8[ns]')
result = s.fillna(method='backfill')
assert_series_equal(result, expected)
def test_datetime64_tz_fillna(self):
for tz in ['US/Eastern', 'Asia/Tokyo']:
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
'2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00', tz=tz)])
self.assert_series_equal(expected, result)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00'), 'AAA',
Timestamp('2011-01-03 10:00'), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00'),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
'2011-01-04 10:00')])
self.assert_series_equal(expected, result)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT], tz=tz)
s = pd.Series(idx)
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp(
'2011-01-02 10:00', tz=tz).to_pydatetime())
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), 'AAA',
Timestamp('2011-01-03 10:00', tz=tz), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp(
'2011-01-03 10:00', tz=tz), Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00', tz=tz)})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp(
'2011-01-03 10:00', tz=tz), Timestamp('2011-01-04 10:00',
tz=tz)])
self.assert_series_equal(expected, result)
# filling with a naive/other zone, coerce to object
result = s.fillna(Timestamp('20130101'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2013-01-01'), Timestamp('2011-01-03 10:00', tz=tz), Timestamp(
'2013-01-01')])
self.assert_series_equal(expected, result)
result = s.fillna(Timestamp('20130101', tz='US/Pacific'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific')])
self.assert_series_equal(expected, result)
def test_fillna_int(self):
s = Series(np.random.randint(-100, 100, 50))
s.fillna(method='ffill', inplace=True)
assert_series_equal(s.fillna(method='ffill', inplace=False), s)
def test_fillna_raise(self):
s = Series(np.random.randint(-100, 100, 50))
self.assertRaises(TypeError, s.fillna, [1, 2])
self.assertRaises(TypeError, s.fillna, (1, 2))
def test_isnull_for_inf(self):
s = Series(['a', np.inf, np.nan, 1.0])
with pd.option_context('mode.use_inf_as_null', True):
r = s.isnull()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(['a', 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
def test_fillna(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
self.assert_series_equal(ts, ts.fillna(method='ffill'))
ts[2] = np.NaN
exp = Series([0., 1., 1., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(method='ffill'), exp)
exp = Series([0., 1., 3., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(method='backfill'), exp)
exp = Series([0., 1., 5., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(value=5), exp)
self.assertRaises(ValueError, ts.fillna)
self.assertRaises(ValueError, self.ts.fillna, value=0, method='ffill')
# GH 5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.])
assert_series_equal(result, expected)
result = s1.fillna({})
assert_series_equal(result, s1)
result = s1.fillna(Series(()))
assert_series_equal(result, s1)
result = s2.fillna(s1)
assert_series_equal(result, s2)
result = s1.fillna({0: 1})
assert_series_equal(result, expected)
result = s1.fillna({1: 1})
assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
assert_series_equal(result, s1)
s1 = Series([0, 1, 2], list('abc'))
s2 = Series([0, np.nan, 2], list('bac'))
result = s2.fillna(s1)
expected = Series([0, 0, 2.], list('bac'))
assert_series_equal(result, expected)
# limit
s = Series(np.nan, index=[0, 1, 2])
result = s.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
result = s.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
# GH 9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ['0', '1.5', '-0.3']
for val in vals:
s = Series([0, 1, np.nan, np.nan, 4], dtype='float64')
result = s.fillna(val)
expected = Series([0, 1, val, val, 4], dtype='object')
assert_series_equal(result, expected)
def test_fillna_bug(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
filled = x.fillna(method='ffill')
expected = Series([nan, 1., 1., 3., 3.], x.index)
assert_series_equal(filled, expected)
filled = x.fillna(method='bfill')
expected = Series([1., 1., 3., 3., nan], x.index)
assert_series_equal(filled, expected)
def test_fillna_inplace(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
y = x.copy()
y.fillna(value=0, inplace=True)
expected = x.fillna(value=0)
assert_series_equal(y, expected)
def test_fillna_invalid_method(self):
try:
self.ts.fillna(method='ffil')
except ValueError as inst:
self.assertIn('ffil', str(inst))
def test_ffill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.ffill(), ts.fillna(method='ffill'))
def test_bfill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.bfill(), ts.fillna(method='bfill'))
def test_timedelta64_nan(self):
from pandas import tslib
td = Series([timedelta(days=i) for i in range(10)])
# nan ops on timedeltas
td1 = td.copy()
td1[0] = np.nan
self.assertTrue(isnull(td1[0]))
self.assertEqual(td1[0].value, tslib.iNaT)
td1[0] = td[0]
self.assertFalse(isnull(td1[0]))
td1[1] = tslib.iNaT
self.assertTrue(isnull(td1[1]))
self.assertEqual(td1[1].value, tslib.iNaT)
td1[1] = td[1]
self.assertFalse(isnull(td1[1]))
td1[2] = tslib.NaT
self.assertTrue(isnull(td1[2]))
self.assertEqual(td1[2].value, tslib.iNaT)
td1[2] = td[2]
self.assertFalse(isnull(td1[2]))
# boolean setting
# this doesn't work, not sure numpy even supports it
# result = td[(td>np.timedelta64(timedelta(days=3))) &
# td<np.timedelta64(timedelta(days=7)))] = np.nan
# self.assertEqual(isnull(result).sum(), 7)
# NumPy limitation =(
# def test_logical_range_select(self):
# np.random.seed(12345)
# selector = -0.5 <= self.ts <= 0.5
# expected = (self.ts >= -0.5) & (self.ts <= 0.5)
# assert_series_equal(selector, expected)
def test_dropna_empty(self):
s = Series([])
self.assertEqual(len(s.dropna()), 0)
s.dropna(inplace=True)
self.assertEqual(len(s), 0)
# invalid axis
self.assertRaises(ValueError, s.dropna, axis=1)
def test_datetime64_tz_dropna(self):
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-03 10:00')], index=[0, 2])
self.assert_series_equal(result, expected)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT],
tz='Asia/Tokyo')
s = pd.Series(idx)
self.assertEqual(s.dtype, 'datetime64[ns, Asia/Tokyo]')
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-03 10:00', tz='Asia/Tokyo')],
index=[0, 2])
self.assertEqual(result.dtype, 'datetime64[ns, Asia/Tokyo]')
self.assert_series_equal(result, expected)
def test_dropna_no_nan(self):
for s in [Series([1, 2, 3], name='x'), Series(
[False, True, False], name='x')]:
result = s.dropna()
self.assert_series_equal(result, s)
self.assertFalse(result is s)
s2 = s.copy()
s2.dropna(inplace=True)
self.assert_series_equal(s2, s)
def test_valid(self):
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.valid()
self.assertEqual(len(result), ts.count())
tm.assert_series_equal(result, ts[1::2])
tm.assert_series_equal(result, ts[pd.notnull(ts)])
def test_isnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
np.array_equal(ser.isnull(),
Series([False, False, False, True, False]).values)
ser = Series(["hi", "", nan])
np.array_equal(ser.isnull(), Series([False, False, True]).values)
def test_notnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
np.array_equal(ser.notnull(),
Series([True, True, True, False, True]).values)
ser = Series(["hi", "", nan])
np.array_equal(ser.notnull(), Series([True, True, False]).values)
def test_pad_nan(self):
x = Series([np.nan, 1., np.nan, 3., np.nan], ['z', 'a', 'b', 'c', 'd'],
dtype=float)
x.fillna(method='pad', inplace=True)
expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0],
['z', 'a', 'b', 'c', 'd'], dtype=float)
assert_series_equal(x[1:], expected[1:])
self.assertTrue(np.isnan(x[0]), np.isnan(expected[0]))
def test_dropna_preserve_name(self):
self.ts[:5] = np.nan
result = self.ts.dropna()
self.assertEqual(result.name, self.ts.name)
name = self.ts.name
ts = self.ts.copy()
ts.dropna(inplace=True)
self.assertEqual(ts.name, name)
def test_fill_value_when_combine_const(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
|
assert_series_equal(res, exp)
|
pandas.util.testing.assert_series_equal
|
from itertools import product
import numpy as np
import pandas as pd
import pytest
from cudf.core.dataframe import DataFrame, Series
from cudf.tests.utils import INTEGER_TYPES, NUMERIC_TYPES, assert_eq, gen_rand
params_sizes = [0, 1, 2, 5]
def _gen_params():
for t, n in product(NUMERIC_TYPES, params_sizes):
if (t == np.int8 or t == np.int16) and n > 20:
# to keep data in range
continue
yield t, n
@pytest.mark.parametrize("dtype,nelem", list(_gen_params()))
def test_cumsum(dtype, nelem):
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, nelem, low=-2, high=2)
else:
data = gen_rand(dtype, nelem)
decimal = 4 if dtype == np.float32 else 6
# series
gs = Series(data)
ps = pd.Series(data)
np.testing.assert_array_almost_equal(
gs.cumsum().to_array(), ps.cumsum(), decimal=decimal
)
# dataframe series (named series)
gdf = DataFrame()
gdf["a"] = Series(data)
pdf = pd.DataFrame()
pdf["a"] = pd.Series(data)
np.testing.assert_array_almost_equal(
gdf.a.cumsum().to_array(), pdf.a.cumsum(), decimal=decimal
)
def test_cumsum_masked():
data = [1, 2, None, 4, 5]
float_types = ["float32", "float64"]
for type_ in float_types:
gs = Series(data).astype(type_)
ps = pd.Series(data).astype(type_)
assert_eq(gs.cumsum(), ps.cumsum())
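# Descriptive note: with integer dtypes the null forces the result to float64; the
# masked position stays NaN while the running sum continues over the later values.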
for type_ in INTEGER_TYPES:
gs = Series(data).astype(type_)
got = gs.cumsum()
expected = pd.Series([1, 3, np.nan, 7, 12], dtype="float64")
assert_eq(got, expected)
@pytest.mark.parametrize("dtype,nelem", list(_gen_params()))
def test_cummin(dtype, nelem):
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, nelem, low=-2, high=2)
else:
data = gen_rand(dtype, nelem)
decimal = 4 if dtype == np.float32 else 6
# series
gs = Series(data)
ps = pd.Series(data)
np.testing.assert_array_almost_equal(
gs.cummin().to_array(), ps.cummin(), decimal=decimal
)
# dataframe series (named series)
gdf = DataFrame()
gdf["a"] = Series(data)
pdf = pd.DataFrame()
pdf["a"] = pd.Series(data)
np.testing.assert_array_almost_equal(
gdf.a.cummin().to_array(), pdf.a.cummin(), decimal=decimal
)
def test_cummin_masked():
data = [1, 2, None, 4, 5]
float_types = ["float32", "float64"]
for type_ in float_types:
gs = Series(data).astype(type_)
ps = pd.Series(data).astype(type_)
assert_eq(gs.cummin(), ps.cummin())
for type_ in INTEGER_TYPES:
gs = Series(data).astype(type_)
expected = pd.Series([1, 1, np.nan, 1, 1]).astype("float64")
assert_eq(gs.cummin(), expected)
@pytest.mark.parametrize("dtype,nelem", list(_gen_params()))
def test_cummax(dtype, nelem):
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, nelem, low=-2, high=2)
else:
data = gen_rand(dtype, nelem)
decimal = 4 if dtype == np.float32 else 6
# series
gs = Series(data)
ps =
|
pd.Series(data)
|
pandas.Series
|
import numpy as np
import pandas as pd
# List unique values in a DataFrame column
# h/t @makmanalp for the updated syntax!
df = pd.DataFrame() # TODO
df['Column Name'].unique()
# Convert Series datatype to numeric (will error if column has non-numeric values)
# h/t @makmanalp
pd.to_numeric(df['Column Name'])
# Convert Series datatype to numeric, changing non-numeric values to NaN
# h/t @makmanalp for the updated syntax!
pd.to_numeric(df['Column Name'], errors='coerce')
# Grab DataFrame rows where column has certain values
valuelist = ['value1', 'value2', 'value3']
df = df[df.column.isin(valuelist)]
# Grab DataFrame rows where column doesn't have certain values
valuelist = ['value1', 'value2', 'value3']
df = df[~df.column.isin(valuelist)]
# Delete column from DataFrame
del df['column']
# Select from DataFrame using criteria from multiple columns
# (use `|` instead of `&` to do an OR)
newdf = df[(df['column_one']>2004) & (df['column_two']==9)]
# Rename several DataFrame columns
df = df.rename(columns = {
'col1 old name':'col1 new name',
'col2 old name':'col2 new name',
'col3 old name':'col3 new name',
})
# Lower-case all DataFrame column names
df.columns = map(str.lower, df.columns)
# Even more fancy DataFrame column re-naming
# lower-case all DataFrame column names (for example)
df.rename(columns=lambda x: x.split('.')[-1], inplace=True)
# Loop through rows in a DataFrame
# (if you must)
for index, row in df.iterrows():
print(index, row['some column'])
# Much faster way to loop through DataFrame rows
# if you can work with tuples
# (h/t hughamacmullaniv)
for row in df.itertuples():
print(row)
# Next few examples show how to work with text data in Pandas.
# Full list of .str functions: http://pandas.pydata.org/pandas-docs/stable/text.html
# Slice values in a DataFrame column (aka Series)
df.column.str[0:2]
# Lower-case everything in a DataFrame column
df.column_name = df.column_name.str.lower()
# Get length of data in a DataFrame column
df.column_name.str.len()
# Sort dataframe by multiple columns
df = df.sort_values(['col1','col2','col3'], ascending=[True, True, False])
# Get top n for each group of columns in a sorted dataframe
# (make sure dataframe is sorted first)
top5 = df.groupby(['groupingcol1', 'groupingcol2']).head(5)
# Grab DataFrame rows where specific column is null/notnull
newdf = df[df['column'].isnull()]
# Select from DataFrame using multiple keys of a hierarchical index
df.xs(('index level 1 value','index level 2 value'), level=('level 1','level 2'))
# Change all NaNs to None (useful before
# loading to a db)
df = df.where((pd.notnull(df)), None)
# More pre-db insert cleanup...make a pass through the dataframe, stripping whitespace
# from strings and changing any empty values to None
# (not especially recommended but including here b/c I had to do this in real life one time)
df = df.applymap(lambda x: str(x).strip() if len(str(x).strip()) else None)
# Get quick count of rows in a DataFrame
len(df.index)
# Pivot data (with flexibility about what
# becomes a column and what stays a row).
# Syntax works on Pandas >= 0.14
pd.pivot_table(
df,values='cell_value',
index=['col1', 'col2', 'col3'], #these stay as columns; will fail silently if any of these cols have null values
columns=['col4']) #data values in this column become their own column
# Change data type of DataFrame column
df.column_name = df.column_name.astype(np.int64)
# Get rid of non-numeric values throughout a DataFrame:
refunds = df
for col in refunds.columns.values:
refunds[col] = refunds[col].replace('[^0-9]+.-', '', regex=True)
# Set DataFrame column values based on other column values (h/t: @mlevkov)
some_value, some_other_value, new_value = 0, 99, 999
df.loc[(df['column1'] == some_value) & (df['column2'] == some_other_value), ['column_to_change']] = new_value
# Clean up missing values in multiple DataFrame columns
df = df.fillna({
'col1': 'missing',
'col2': '99.999',
'col3': '999',
'col4': 'missing',
'col5': 'missing',
'col6': '99'
})
# Concatenate two DataFrame columns into a new, single column
# (useful when dealing with composite keys, for example)
# (h/t @makmanalp for improving this one!)
df['newcol'] = df['col1'].astype(str) + df['col2'].astype(str)
# Doing calculations with DataFrame columns that have missing values
# In example below, swap in 0 for df['col1'] cells that contain null
df['new_col'] = np.where(pd.isnull(df['col1']),0,df['col1']) + df['col2']
# Split delimited values in a DataFrame column into two new columns
df['new_col1'], df['new_col2'] = zip(*df['original_col'].apply(lambda x: x.split(': ', 1)))
# Collapse hierarchical column indexes
df.columns = df.columns.get_level_values(0)
# Convert Django queryset to DataFrame
DjangoModelName = None # TODO
qs = DjangoModelName.objects.all()
q = qs.values()
df =
|
pd.DataFrame.from_records(q)
|
pandas.DataFrame.from_records
|
import re
import unicodedata
from collections import Counter
from itertools import product
import pickle
import numpy as np
import pandas as pd
from sklearn.decomposition import TruncatedSVD
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import LabelEncoder
import umap
import pickle
from src import sentence_splitter
def get_umap(train, test, size=2):
um = umap.UMAP(transform_seed=1, random_state=1, n_neighbors=size)
um.fit(train.values)
tr_em = um.transform(train.values)
te_em = um.transform(test.values)
return tr_em, te_em
def LE(train, test):
for col in train.columns:
if train[col].dtypes == object:
train[col] = train[col].fillna("null")
test[col] = test[col].fillna("null")
lbl = LabelEncoder()
lbl.fit(list(train[col].values) + list(test[col].values))
train[col] = lbl.transform(list(train[col].values))
test[col] = lbl.transform(list(test[col].values))
# Count encoding
def CE(train, test, cols, all_df):
for col in cols:
# all_df = pd.concat([train.drop(["y"], axis=1), test], ignore_index=True).reset_index()
train[col + "_count"] = train[col].map(all_df[col].value_counts())
test[col + "_count"] = test[col].map(all_df[col].value_counts())
# Target encoding
def TE(train, test, func, target, cols):
funcs = ["max", "min", "mean", "std"]
for col in cols:
data_tmp = pd.DataFrame({col: train[col], "target": target})
target_dic = data_tmp.groupby(col)["target"].aggregate(func)
test[col + "_TE_" + func] = test[col].map(target_dic)
tmp = np.repeat(np.nan, train.shape[0])
# Split the training data into folds
kf = StratifiedKFold(n_splits=5, shuffle=True, random_state=22)
for idx_1, idx_2 in kf.split(train, train[col]):
target_dic = data_tmp.iloc[idx_1].groupby(col)["target"].aggregate(func)
tmp[idx_2] = train[col].iloc[idx_2].map(target_dic)
train[col + "_TE_" + func] = tmp
def group(train, test, col, target, all_df):
mean_map = all_df.groupby(col)[target].mean()
train["group_" + col + "_mean_" + target] = train[col].map(mean_map)
test["group_" + col + "_mean_" + target] = test[col].map(mean_map)
std_map = all_df.groupby(col)[target].std()
train["group_" + col + "_std_" + target] = train[col].map(std_map)
test["group_" + col + "_std_" + target] = test[col].map(std_map)
sum_map = all_df.groupby(col)[target].sum()
train["group_" + col + "_sum_" + target] = train[col].map(sum_map)
test["group_" + col + "_sum_" + target] = test[col].map(sum_map)
min_map = all_df.groupby(col)[target].min()
train["group_" + col + "_min_" + target] = train[col].map(min_map)
test["group_" + col + "_min_" + target] = test[col].map(min_map)
max_map = all_df.groupby(col)[target].max()
train["group_" + col + "_max_" + target] = train[col].map(max_map)
test["group_" + col + "_max_" + target] = test[col].map(max_map)
train["group_" + col + "_range_" + target] = \
train["group_" + col + "_max_" + target] - train["group_" + col + "_min_" + target]
test["group_" + col + "_range_" + target] = \
test["group_" + col + "_max_" + target] - test["group_" + col + "_min_" + target]
def calculate(df: pd.DataFrame):
df["eval_count"] = df.likes + df.dislikes
df["likes_ratio"] = df.likes / df.eval_count
df["likes_ratio"].fillna(-1)
df["dislikes_ratio"] = df.dislikes / df.eval_count
df["dislikes_ratio"].fillna(-1)
df["score"] = df["comment_count"] * df["eval_count"]
df["score_2"] = df["comment_count"] / df["eval_count"]
df["title_div_description"] = df["title_len"] / df["description_len"]
df["title_mul_description"] = df["title_len"] * df["description_len"]
def is_japanese(string):
count = 0
for ch in str(string):
try:
name = unicodedata.name(ch)
except:
continue
if "CJK UNIFIED" in name \
or "HIRAGANA" in name \
or "KATAKANA" in name:
count += 1
return count
def count_alphabet(string):
r = re.compile(r"[a-z|A-Z]+")
return len("".join(r.findall(str(string))))
def count_number(string):
r = re.compile(r"[0-9]+")
return len("".join(r.findall(str(string))))
def change_to_Date(train, test, input_column_name, output_column_name):
train[output_column_name] = train[input_column_name].map(lambda x: x.split('.'))
test[output_column_name] = test[input_column_name].map(lambda x: x.split('.'))
train[output_column_name] = train[output_column_name].map(
lambda x: '20' + x[0] + '-' + x[2] + '-' + x[1] + 'T00:00:00.000Z')
test[output_column_name] = test[output_column_name].map(
lambda x: '20' + x[0] + '-' + x[2] + '-' + x[1] + 'T00:00:00.000Z')
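# Note (assumed input format): "collection_date" strings look like "yy.dd.mm", so e.g.
# "20.10.03" becomes "2020-03-10T00:00:00.000Z", an ISO-8601 string that
# pd.to_datetime can parse later on.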
def tag_counter(train, test, n=500, pca_size=None, drop=False, create=True):
cols = [f"tags_{i}" for i in range(n)]
if create:
# Count tag occurrences
tags = []
for tag in train["tags"]:
tags.extend(str(tag).split("|"))
tmp = Counter(tags)
tmp = sorted(tmp.items(), key=lambda x: x[1], reverse=True)[:n]
for i, item in enumerate(tmp):
train[f"tags_{i}"] = train["tags"].apply(lambda x: 1 if item[0] in str(x).split("|") else 0)
test[f"tags_{i}"] = test["tags"].apply(lambda x: 1 if item[0] in str(x).split("|") else 0)
train[cols].to_csv("./data/input/train_tags.csv", index=False)
test[cols].to_csv("./data/input/test_tags.csv", index=False)
else:
train_tags = pd.read_csv("./data/input/train_tags.csv")
test_tags = pd.read_csv("./data/input/test_tags.csv")
train = pd.concat([train, train_tags[cols]], axis=1)
test = pd.concat([test, test_tags[cols]], axis=1)
if pca_size:
# pca = TruncatedSVD(n_components=pca_size, random_state=2)
# pca.fit(train[cols])
# train_pca = pca.transform(train[cols])
# test_pca = pca.transform(test[cols])
train_pca, test_pca = get_umap(train[cols], test[cols], size=pca_size)
pca_cols = [f"tangs_pca_{i}" for i in range(pca_size)]
train = pd.concat([train, pd.DataFrame(train_pca, columns=pca_cols)], axis=1)
test = pd.concat([test, pd.DataFrame(test_pca, columns=pca_cols)], axis=1)
if drop:
train = train.drop(cols, axis=1)
test = test.drop(cols, axis=1)
return train, test
def title_counter(train, test, n=100, pca_size=None, drop=False, create=True):
train["title_words"] = train.title.apply(lambda x: sentence_splitter.splitter(str(x)))
test["title_words"] = test.title.apply(lambda x: sentence_splitter.splitter(str(x)))
cols = [f"title_word_{i}" for i in range(n)]
if create:
# Count word occurrences in titles
word_list = []
for words in train["title_words"]:
word_list.extend(words)
tmp = Counter(word_list)
tmp = sorted(tmp.items(), key=lambda x: x[1], reverse=True)[:n]
for i, item in enumerate(tmp):
train[f"title_word_{i}"] = train["title_words"].apply(lambda x: x.count(item[0]))
test[f"title_word_{i}"] = test["title_words"].apply(lambda x: x.count(item[0]))
train[cols].to_csv("./data/input/train_title_words.csv", index=False)
test[cols].to_csv("./data/input/test_title_words.csv", index=False)
else:
train_tags = pd.read_csv("./data/input/train_title_words.csv")
test_tags = pd.read_csv("./data/input/test_title_words.csv")
train = pd.concat([train, train_tags[cols]], axis=1)
test = pd.concat([test, test_tags[cols]], axis=1)
if pca_size:
# pca = TruncatedSVD(n_components=pca_size, random_state=2)
# pca.fit(train[cols])
# train_pca = pca.transform(train[cols])
# test_pca = pca.transform(test[cols])
train_pca, test_pca = get_umap(train[cols], test[cols], size=pca_size)
pca_cols = [f"title_pca_{i}" for i in range(pca_size)]
train = pd.concat([train, pd.DataFrame(train_pca, columns=pca_cols)], axis=1)
test = pd.concat([test, pd.DataFrame(test_pca, columns=pca_cols)], axis=1)
if drop:
train = train.drop(cols, axis=1)
test = test.drop(cols, axis=1)
train = train.drop(["title_words"], axis=1)
test = test.drop(["title_words"], axis=1)
return train, test
def count_tag_in_title(tags, title):
tag_list = str(tags).split("|")
count = 0
for tag in tag_list:
if tag in str(title):
count += 1
return count
def category_unstack(train, test, all_df, group, category, normalize=True, pca_size=2):
use_columns = set(train[category].unique()) & set(test[category].unique())
unstack_df = all_df.groupby(group)[category].value_counts(normalize=normalize).unstack().fillna(0)
for col in use_columns:
train[f"{category}_{col}_ratio_in_{group}_group"] = train[group].map(unstack_df[col])
test[f"{category}_{col}_ratio_in_{group}_group"] = test[group].map(unstack_df[col])
cols = [f"{category}_{col}_ratio_in_{group}_group" for col in use_columns]
pca_cols = [f"{category}_pca_{i}_in_{group}_group" for i in range(pca_size)]
pca = TruncatedSVD(n_components=pca_size, random_state=2)
pca.fit(train[cols])
train_pca = pca.transform(train[cols])
test_pca = pca.transform(test[cols])
train = pd.concat([train, pd.DataFrame(train_pca, columns=pca_cols)], axis=1)
test = pd.concat([test, pd.DataFrame(test_pca, columns=pca_cols)], axis=1)
return train, test
def make_dataset(complement=True):
train = pd.read_csv("./data/input/train_data.csv")
test = pd.read_csv("./data/input/test_data.csv")
if complement:
complement_likes = pd.read_csv("./data/input/complement_likes.csv")
complement_dislikes = pd.read_csv("./data/input/complement_dislikes.csv")
complement_comment = pd.read_csv("./data/input/complement_comment.csv")
likes_dict = dict(zip(complement_likes.video_id, complement_likes.y))
dislikes_dict = dict(zip(complement_dislikes.video_id, complement_dislikes.y))
comment_dict = dict(zip(complement_comment.video_id, complement_comment.y))
train["likes"] = train.apply(
lambda x: likes_dict[x["video_id"]] if x["video_id"] in likes_dict.keys() else x["likes"], axis=1)
train["dislikes"] = train.apply(
lambda x: dislikes_dict[x["video_id"]] if x["video_id"] in dislikes_dict.keys() else x["dislikes"], axis=1)
train["comment_count"] = train.apply(
lambda x: comment_dict[x["video_id"]] if x["video_id"] in comment_dict.keys() else x["comment_count"],
axis=1)
test["likes"] = test.apply(
lambda x: likes_dict[x["video_id"]] if x["video_id"] in likes_dict.keys() else x["likes"], axis=1)
test["dislikes"] = test.apply(
lambda x: dislikes_dict[x["video_id"]] if x["video_id"] in dislikes_dict.keys() else x["dislikes"], axis=1)
test["comment_count"] = test.apply(
lambda x: comment_dict[x["video_id"]] if x["video_id"] in comment_dict.keys() else x["comment_count"],
axis=1)
# Average thumbnail color
# train_thumbnail = pd.read_csv("./data/input/train_thumbnail.csv")
# test_thumbnail = pd.read_csv("./data/input/test_thumbnail.csv")
# train = train.merge(train_thumbnail, on="video_id")
# test = test.merge(test_thumbnail, on="video_id")
# Thumbnail features
# train_image_features = pd.read_csv("./data/input/train_image_features.csv")
# test_image_features = pd.read_csv("./data/input/test_image_features.csv")
# train_umap, test_umap = get_umap(train_image_features, test_image_features, size=2)
# pca_cols = [f"image_features_umap_{i}" for i in range(2)]
# train = pd.concat([train, pd.DataFrame(train_umap, columns=pca_cols)], axis=1)
# test = pd.concat([test, pd.DataFrame(test_umap, columns=pca_cols)], axis=1)
train.likes = train.likes.apply(np.log1p)
test.likes = test.likes.apply(np.log1p)
train.dislikes = train.dislikes.apply(np.log1p)
test.dislikes = test.dislikes.apply(np.log1p)
train.comment_count = train.comment_count.apply(np.log1p)
test.comment_count = test.comment_count.apply(np.log1p)
train["title_len"] = train.title.apply(lambda x: len(str(x)))
test["title_len"] = test.title.apply(lambda x: len(str(x)))
train["channelTitle_len"] = train.channelTitle.apply(lambda x: len(str(x)))
test["channelTitle_len"] = test.channelTitle.apply(lambda x: len(str(x)))
train["description_len"] = train.description.apply(lambda x: len(str(x)))
test["description_len"] = test.description.apply(lambda x: len(str(x)))
train["tags_count"] = train.tags.apply(lambda x: str(x).count("|"))
test["tags_count"] = test.tags.apply(lambda x: str(x).count("|"))
# Time-related features
train["year"] = pd.to_datetime(train.publishedAt).apply(lambda x: x.year)
test["year"] = pd.to_datetime(test.publishedAt).apply(lambda x: x.year)
train["month"] = pd.to_datetime(train.publishedAt).apply(lambda x: x.month)
test["month"] = pd.to_datetime(test.publishedAt).apply(lambda x: x.month)
train["hour"] = pd.to_datetime(train.publishedAt).apply(lambda x: x.hour)
test["hour"] = pd.to_datetime(test.publishedAt).apply(lambda x: x.hour)
change_to_Date(train, test, "collection_date", "collectionAt")
train["period"] = (
|
pd.to_datetime(train.collectionAt)
|
pandas.to_datetime
|
import requests, time
from tqdm import tqdm
import pandas as pd
import numpy as np
import spotipy
import re
import yaml
import logging
from spotipy import oauth2
from spotipy import SpotifyException
cid = secret = lfkey = logPath = None # vars for config.yaml
logger = None # global logger
class lfmxtractplus:
def __init__(self,cfgPath):
self.load_cfg(cfgPath)
self.init_logger()
self.authenticate()
def load_cfg(self, yaml_filepath):
"""
Load config vars from yaml
:param yaml_filepath: path to config.yaml
"""
global cid, secret, lfkey, logPath
with open(yaml_filepath, 'r') as stream:
config = yaml.safe_load(stream)
cid = config['sp_cid']
secret = config['sp_secret']
lfkey = config['lf_key']
logPath = config['log_path']
def init_logger(self):
'''
Initialize logger globally
'''
global logPath, logger
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(logPath, 'w', 'utf-8')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
def get_spotify_token(self):
'''
Get OAuth token from spotify.
:return token_info dict
:return sp_oauth object
'''
sp_oauth = oauth2.SpotifyOAuth(client_id=cid, client_secret=secret,
redirect_uri='https://example.com/callback/')
token_info = sp_oauth.get_cached_token()
if not token_info:
auth_url = sp_oauth.get_authorize_url()
print(auth_url)
response = input('Paste the above link into your browser, then paste the redirect url here: ')
code = sp_oauth.parse_response_code(response)
token_info = sp_oauth.get_access_token(code)
return token_info, sp_oauth
def token_refresh(self,token_info, sp_oauth):
'''
Used to refresh OAuth token if token expired
:param token_info dict
:param sp_oauth object
'''
global sp
if sp_oauth._is_token_expired(token_info):
token_info_ref = sp_oauth.refresh_access_token(token_info['refresh_token'])
token_ref = token_info_ref['access_token']
sp = spotipy.Spotify(auth=token_ref)
logger.info("________token refreshed________")
def authenticate(self):
'''
authenticate with spotify
'''
global token_info, sp, sp_oauth
token_info, sp_oauth = self.get_spotify_token() # authenticate with spotify
sp = spotipy.Spotify(auth=token_info['access_token']) # create spotify object globally
def clean_query(self,q):
'''
Optimizes queries for Spotify to improve the chance of mapping a Spotify ID
:param q: query string
:return: optimized query string
'''
def collapse_brackets(text, brackets="()[]"):
count = [0] * (len(brackets) // 2) # count open/close brackets
saved_chars = []
for character in text:
for i, b in enumerate(brackets):
if character == b: # found bracket
kind, is_close = divmod(i, 2)
count[kind] += (-1) ** is_close # `+1`: open, `-1`: close
if count[kind] < 0: # unbalanced bracket
count[kind] = 0 # keep it
else: # found bracket to remove
break
else: # character is not a [balanced] bracket
if not any(count): # outside brackets
saved_chars.append(character)
return ''.join(saved_chars)
s = collapse_brackets(q)
s = re.sub("'", '', s)
return s
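# Illustrative example (hypothetical track title):
# clean_query("Don't Stop Me Now (Remastered 2011)") -> "Dont Stop Me Now "
# bracketed text is collapsed and apostrophes dropped, which tends to match
# Spotify's search index better.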
#thanks to <NAME> : https://github.com/gboeing/data-visualization/blob/master/lastfm-listening-history/lastfm_downloader.ipynb
def get_scrobbles(self,username, method='recenttracks', timezone='Asia/Kolkata', limit=200, page=1, pages=0):
'''
Retrieves scrobbles from lastfm for a user
:param method: api method
:param username: last.fm username for retrieval
:param timezone: timezone of the user (must correspond with the timezone in user's settings)
:param limit: api lets you retrieve up to 200 records per call
:param page: page of results to start retrieving at
:param pages: how many pages of results to retrieve. if 0, get as many as api can return.
:return dataframe with lastfm scrobbles
'''
# initialize url and lists to contain response fields
print("\nFetching data from last.fm for user " + username)
url = 'https://ws.audioscrobbler.com/2.0/?method=user.get{}&user={}&api_key={}&limit={}&page={}&format=json'
responses = []
artist_names = []
artist_mbids = []
album_names = []
album_mbids = []
track_names = []
track_mbids = []
timestamps = []
# read from loadCFG()
key = lfkey
# make first request, just to get the total number of pages
request_url = url.format(method, username, key, limit, page)
response = requests.get(request_url).json()
# error handling
if 'error' in response:
print("Error code : " + str(response['error']))
logging.critical("Error code : " + str(response['error']))
print("Error message : " + response['message'])
logging.critical("Error message : " + response['message'])
return None
total_pages = int(response[method]['@attr']['totalPages'])
total_scrobbles = int(response[method]['@attr']['total'])
if pages > 0:
total_pages = min([total_pages, pages])
print('\n{} total tracks scrobbled by the user'.format(total_scrobbles))
print('\n{} total pages to retrieve'.format(total_pages))
# request each page of data one at a time
for page in tqdm(range(1, int(total_pages) + 1, 1)):
time.sleep(0.20)
request_url = url.format(method, username, key, limit, page)
responses.append(requests.get(request_url))
# parse the fields out of each scrobble in each page (aka response) of scrobbles
for response in responses:
scrobbles = response.json()
if method in scrobbles.keys():
for scrobble in scrobbles[method]['track']:
# only retain completed scrobbles (aka, with timestamp and not 'now playing')
if 'date' in scrobble.keys():
artist_names.append(scrobble['artist']['#text'])
artist_mbids.append(scrobble['artist']['mbid'])
album_names.append(scrobble['album']['#text'])
album_mbids.append(scrobble['album']['mbid'])
track_names.append(scrobble['name'])
track_mbids.append(scrobble['mbid'])
timestamps.append(scrobble['date']['uts'])
else:
print("Error occured, rerun")
logging.warning("Error occurred")
# create and populate a dataframe to contain the data
df = pd.DataFrame()
df['timestamp'] = timestamps
df['datetime'] = pd.to_datetime(df['timestamp'].astype(int), unit='s')
df['datetime'] = df['datetime'].dt.tz_localize('UTC').dt.tz_convert(timezone)
df['artist_name'] = artist_names
df['artist_mbid'] = artist_mbids
df['album_name'] = album_names
df['album_mbid'] = album_mbids
df['track_name'] = track_names
df['track_mbid'] = track_mbids
return df
def map_to_spotify(self,scrobblesDF):
"""
Maps track names to Spotify IDs and adds track length, popularity, and genre to the dataframe.
:param scrobblesDF : lastfm scrobbles dataframe
:return scrobblesDF : dataframe with spotifyID, track length, popularity, genre
"""
track_ids = []
length = []
pop = []
genre = []
print("\n\nFetching SpotifyID for tracks")
for index, row in tqdm(scrobblesDF.iterrows(), total=scrobblesDF.shape[0]):
#time.sleep(2.5)
try:
artist = self.clean_query(row['artist_name'])
track = self.clean_query(row['track_name'])
searchDict = sp.search(q='artist:' + artist + ' track:' + track, type='track', limit=1,
market='US') # api call
logging.debug("Mapping spotifyID for " + track)
# logging.debug("Mapping spotifyID for " + str(index) + " out of " + str(len(scrobblesDF.index)-1))
if len(searchDict['tracks']['items']) != 0:
track_ids.append(searchDict['tracks']['items'][0]['id'])
length.append(searchDict['tracks']['items'][0]['duration_ms'])
pop.append(searchDict['tracks']['items'][0]['popularity'])
artist_id = searchDict['tracks']['items'][0]['artists'][0]['id']
artist = sp.artist(artist_id) # get genre from artist
try:
genreA = artist['genres'][0] # gets only the first genre from list of genres (may be inaccurate)
genre.append(genreA)
except IndexError:
genre.append(np.nan)
else:
track_ids.append(np.nan)
length.append(np.nan)
pop.append(np.nan)
genre.append(np.nan)
logging.warning("failed to map " + track)
except SpotifyException:
if sp_oauth._is_token_expired(token_info):
self.token_refresh(token_info, sp_oauth) # refresh OAuth token
else:
logging.critical("SpotifyException")
scrobblesDF['trackID'] = pd.Series(track_ids)
scrobblesDF['lengthMS'] = pd.Series(length)
scrobblesDF['popularity'] = pd.Series(pop)
scrobblesDF['genre_name'] = pd.Series(genre)
unmapped_cnt = scrobblesDF['trackID'].isna().sum()
print("\ntracks without spotifyID : " + str(unmapped_cnt))
return scrobblesDF
# TODO (v2): pass 50 IDs at once in chunks to sp.audio_features to speed things up
def map_audio_features(self, scrobblesDF):
'''
Adds track features to dataframe with SpotifyID.
:param scrobblesDF: dataframe with SpotifyID
:return enriched dataframe with audio features
'''
danceabilitySeries = []
energySeries = []
keySeries = []
loudnessSeries = []
modeSeries = []
speechinessSeries = []
acousticnessSeries = []
instrumentalnessSeries = []
livenessSeries = []
valenceSeries = []
tempoSeries = []
print("\nFetching audio features for tracks")
for index, row in tqdm(scrobblesDF.iterrows(), total=scrobblesDF.shape[0]):
try:
logging.debug("Fetching features for " + str(index) + " out of " + str(len(scrobblesDF.index) - 1))
if row['trackID'] is not np.nan:
search_id = [str(row['trackID'])]
feature = sp.audio_features(search_id) # api call
try:
danceabilitySeries.append(feature[0]["danceability"])
energySeries.append(feature[0]["energy"])
keySeries.append(feature[0]["key"])
loudnessSeries.append(feature[0]["loudness"])
modeSeries.append(feature[0]["mode"])
speechinessSeries.append(feature[0]["speechiness"])
acousticnessSeries.append(feature[0]["acousticness"])
livenessSeries.append(feature[0]["liveness"])
valenceSeries.append(feature[0]["valence"])
tempoSeries.append(feature[0]["tempo"])
instrumentalnessSeries.append(feature[0]["instrumentalness"])
except (TypeError, AttributeError, IndexError):
logging.warning("\nTrack feature fetch failed for " + row['track_name'])
danceabilitySeries.append(np.nan)
energySeries.append(np.nan)
keySeries.append(np.nan)
loudnessSeries.append(np.nan)
modeSeries.append(np.nan)
speechinessSeries.append(np.nan)
acousticnessSeries.append(np.nan)
livenessSeries.append(np.nan)
valenceSeries.append(np.nan)
tempoSeries.append(np.nan)
instrumentalnessSeries.append(np.nan)
else:
logging.warning("\nTrack ID not available for " + row['track_name'])
danceabilitySeries.append(np.nan)
energySeries.append(np.nan)
keySeries.append(np.nan)
loudnessSeries.append(np.nan)
modeSeries.append(np.nan)
speechinessSeries.append(np.nan)
acousticnessSeries.append(np.nan)
livenessSeries.append(np.nan)
valenceSeries.append(np.nan)
tempoSeries.append(np.nan)
instrumentalnessSeries.append(np.nan)
continue
except SpotifyException:
if sp_oauth._is_token_expired(token_info):
self.token_refresh(token_info, sp_oauth) # refresh OAuth token
else:
logging.critical("SpotifyException")
scrobblesDF['danceability'] = danceabilitySeries
scrobblesDF['energy'] = energySeries
scrobblesDF['key'] = keySeries
scrobblesDF['loudness'] = loudnessSeries
scrobblesDF['mode'] = modeSeries
scrobblesDF['speechiness'] = speechinessSeries
scrobblesDF['acousticness'] = acousticnessSeries
scrobblesDF['liveness'] = livenessSeries
scrobblesDF['instrumentalness'] = instrumentalnessSeries
scrobblesDF['valence'] = valenceSeries
scrobblesDF['tempo'] = tempoSeries
unmapped_cnt = scrobblesDF['trackID'].isna().sum()
print("tracks without audio features : " + str(unmapped_cnt))
return scrobblesDF
def get_playlist(self, user='billboard.com', playlist_id='6UeSakyzhiEt4NB3UAd6NQ'):
'''
retrieves audio features of a playlist (Billboard Hot 100 is the default playlist)
:param user: username of the playlist owner
:param playlist_id: playlist id (found at the end of a playlist url)
:return: a dataframe with audio features of a playlist
'''
trackID = []
track = []
artist = []
artistID = []
genre = []
lengthMS = []
popularity = []
try:
playlist = sp.user_playlist(user=user, playlist_id=playlist_id)
count = playlist['tracks']['total']
print("\n\nFetching playlist")
for i in tqdm(range(count)):
# print('fetching ' + str(i) + ' out of ' + str(count) + ' ' + playlist['tracks']['items'][i]['track']['id'])
trackID.append(playlist['tracks']['items'][i]['track']['id'])
track.append(playlist['tracks']['items'][i]['track']['name'])
lengthMS.append(playlist['tracks']['items'][i]['track']['duration_ms'])
popularity.append(playlist['tracks']['items'][i]['track']['popularity'])
artist.append(playlist['tracks']['items'][i]['track']['artists'][0]['name'])
artistID.append(playlist['tracks']['items'][i]['track']['artists'][0]['id'])
artistOb = sp.artist(artistID[i])
try:
genreA = artistOb['genres'][0]
genre.append(genreA)
except IndexError:
genre.append(None)
except SpotifyException:
if sp_oauth._is_token_expired(token_info):
self.token_refresh(token_info, sp_oauth) # refresh OAuth token
else:
logging.critical("SpotifyException")
playlistDF = pd.DataFrame()
playlistDF['track'] = pd.Series(track)
playlistDF['trackID'] = pd.Series(trackID)
playlistDF['artist'] =
|
pd.Series(artist)
|
pandas.Series
|
import matplotlib
from pcpca import PCPCA
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import sys
from sklearn.decomposition import PCA
from numpy.linalg import slogdet
from scipy import stats
font = {"size": 20}
matplotlib.rc("font", **font)
matplotlib.rcParams["text.usetex"] = True
inv = np.linalg.inv
DATA_PATH = "../../../data/mouse_protein_expression/clean/Data_Cortex_Nuclear.csv"
N_COMPONENTS = 10
N_GD_ITER = 10
LEARNING_RATE = 1e-2
n_repeats = 3
missing_p_range = np.arange(0.1, 0.8, 0.1)
def mean_confidence_interval(data, confidence=0.95):
n = data.shape[0]
m, se = np.mean(data, axis=0), stats.sem(data, axis=0)
width = se * stats.t.ppf((1 + confidence) / 2.0, n - 1)
return width
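# Descriptive note: this returns the half-width (margin of error) of a t-based
# confidence interval per column, computed from the standard error across repeats.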
# Read in data
data = pd.read_csv(DATA_PATH)
# Separate into background and foreground data
# In this case,
# background data is data from mice who did not receive shock therapy
# foreground data is from mice who did receive shock therapy
# Get names of proteins
protein_names = data.columns.values[1:78]
# Fill NAs
data = data.fillna(0)
# Background
Y_df = data[
(data.Behavior == "C/S")
& (data.Genotype == "Control")
& (data.Treatment == "Saline")
]
Y = Y_df[protein_names].values
Y -= np.nanmean(Y, axis=0)
Y /= np.nanstd(Y, axis=0)
Y_full = Y.T
# Foreground
X_df = data[(data.Behavior == "S/C") & (data.Treatment == "Saline")]
X = X_df[protein_names].values
X -= np.nanmean(X, axis=0)
X /= np.nanstd(X, axis=0)
X_full = X.T
p, n = X_full.shape
_, m = Y_full.shape
# import ipdb; ipdb.set_trace()
# n_subsample = 80
# X_full = X_full[:, np.random.choice(np.arange(n), size=n_subsample, replace=False)]
# m_subsample = 80
# Y_full = Y_full[:, np.random.choice(np.arange(m), size=m_subsample, replace=False)]
# rand_idx = np.random.choice(np.arange(p), size=10)
# X_full = X_full[rand_idx, :]
# Y_full = Y_full[rand_idx, :]
p, n = X_full.shape
_, m = Y_full.shape
def abline(slope, intercept):
"""Plot a line from slope and intercept"""
axes = plt.gca()
x_vals = np.array(axes.get_xlim())
y_vals = intercept + slope * x_vals
plt.plot(x_vals, y_vals, "--")
def log_likelihood_fg(X, W, sigma2, gamma):
p, n = X.shape
Ls = [make_L(X[:, ii]) for ii in range(n)]
As = [Ls[ii] @ (W @ W.T + sigma2 * np.eye(p)) @ Ls[ii].T for ii in range(n)]
running_sum_X = 0
for ii in range(n):
L = Ls[ii]
A = As[ii]
x = L @ np.nan_to_num(X[:, ii], nan=0)
Di = L.shape[0]
A_inv = inv(A)
curr_summand = (
Di * np.log(2 * np.pi) + slogdet(A)[1] + np.trace(A_inv @ np.outer(x, x))
)
running_sum_X += curr_summand
LL = -0.5 * running_sum_X
return LL
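# Sketch of the quantity computed above (assuming make_L builds the selection matrix
# L_i that keeps only the observed entries of sample i):
#   A_i = L_i (W W^T + sigma^2 I) L_i^T
#   LL  = -1/2 * sum_i [ D_i * log(2*pi) + log|A_i| + x_i^T A_i^{-1} x_i ]
# i.e. the observed-data log-likelihood of a PPCA-style model; note that
# trace(A_i^{-1} x_i x_i^T) equals x_i^T A_i^{-1} x_i.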
gamma = 0.9
# missing_p_range = np.arange(0.1, 0.3, 0.1)
imputation_errors_pcpca = np.empty((n_repeats, len(missing_p_range)))
imputation_errors_ppca = np.empty((n_repeats, len(missing_p_range)))
imputation_errors_sample_means = np.empty((n_repeats, len(missing_p_range)))
imputation_errors_feature_means = np.empty((n_repeats, len(missing_p_range)))
# plt.figure(figsize=(7*len(missing_p_range), 6))
for repeat_ii in range(n_repeats):
for ii, missing_p in enumerate(missing_p_range):
# Mask out missing data
X = X_full.copy()
Y = Y_full.copy()
missing_mask_X = np.random.choice(
[0, 1], p=[1 - missing_p, missing_p], size=(p, n)
).astype(bool)
missing_mask_Y = np.random.choice(
[0, 1], p=[1 - missing_p, missing_p], size=(p, m)
).astype(bool)
X[missing_mask_X] = np.nan
Y[missing_mask_Y] = np.nan
X_mean = np.nanmean(X, axis=1)
Y_mean = np.nanmean(Y, axis=1)
# X = (X.T - X_mean).T
# X = (X.T / np.nanstd(X, axis=1)).T
# Y = (Y.T - Y_mean).T
# Y = (Y.T / np.nanstd(Y, axis=1)).T
### ----- Row and column means ------
sample_means = np.nanmean(X, axis=0)
X_imputed_sample_means = X.copy()
X_imputed_sample_means = pd.DataFrame(X_imputed_sample_means).fillna(pd.Series(sample_means)).values
imputation_mse = np.mean(
(X_full[missing_mask_X] - X_imputed_sample_means[missing_mask_X]) ** 2
)
imputation_errors_sample_means[repeat_ii, ii] = imputation_mse
feature_means = np.nanmean(X, axis=1)
X_imputed_feature_means = X.copy()
X_imputed_feature_means = pd.DataFrame(X_imputed_feature_means.T).fillna(pd.Series(feature_means)).values.T
imputation_mse = np.mean(
(X_full[missing_mask_X] - X_imputed_feature_means[missing_mask_X]) ** 2
)
print("Feature means {} missing, error: {}".format(missing_p, imputation_mse))
imputation_errors_feature_means[repeat_ii, ii] = imputation_mse
### ----- PCPCA ------
pcpca = PCPCA(gamma=gamma, n_components=N_COMPONENTS)
W, sigma2 = pcpca.gradient_descent_missing_data(X, Y, n_iter=N_GD_ITER) #, learning_rate=LEARNING_RATE)
X_imputed = pcpca.impute_missing_data(X)
# X_imputed = (X_imputed.T + X_mean).T
imputation_mse = np.mean(
(X_full[missing_mask_X] - X_imputed[missing_mask_X]) ** 2
)
print("PCPCA {} missing, error: {}".format(missing_p, imputation_mse))
imputation_errors_pcpca[repeat_ii, ii] = imputation_mse
### ----- PPCA ------
X = X_full.copy()
Y = Y_full.copy()
X[missing_mask_X] = np.nan
Y[missing_mask_Y] = np.nan
fg = np.concatenate([X, Y], axis=1)
fg_mean = np.nanmean(fg, axis=1)
# fg = (fg.T - fg_mean).T
# fg = (fg.T / np.nanstd(fg, axis=1)).T
pcpca = PCPCA(gamma=0, n_components=N_COMPONENTS)
W, sigma2 = pcpca.gradient_descent_missing_data(fg, Y, n_iter=N_GD_ITER) #, learning_rate=LEARNING_RATE)
X_imputed = pcpca.impute_missing_data(X)
# X_imputed = (X_imputed.T + fg_mean).T
imputation_mse = np.mean(
(X_full[missing_mask_X] - X_imputed[missing_mask_X]) ** 2
)
print("PPCA {} missing, error: {}".format(missing_p, imputation_mse))
imputation_errors_ppca[repeat_ii, ii] = imputation_mse
pcpca_results_df = pd.DataFrame(imputation_errors_pcpca, columns=missing_p_range - 0.015)
pcpca_results_df["method"] = ["PCPCA"] * pcpca_results_df.shape[0]
ppca_results_df =
|
pd.DataFrame(imputation_errors_ppca, columns=missing_p_range - 0.005)
|
pandas.DataFrame
|
# Extract the identified Liver proteins from the Tissue Atlas paper.
# Use the Uniprot webservice to convert from Ensembl protein ID into uniprot accession
import argparse
import urllib.parse
import urllib.request
import pandas as pd
import tqdm
URL = "https://www.uniprot.org/uploadlists/"
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--tissuedb", dest="tissuedb")
parser.add_argument("dst")
args = parser.parse_args()
db =
|
pd.read_excel(args.tissuedb, sheet_name="A. Protein copies")
|
pandas.read_excel
|
import sys
import os
from flask import Flask, request
from pprint import pprint
import json
import nltk
import spacy
import gensim
import sklearn
import keras
import pandas as pd
import numpy as np
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer, SnowballStemmer
from nltk.stem.porter import *
nltk.download('wordnet') # run once
nltk.download('stopwords') # run once
from gensim.parsing.preprocessing import STOPWORDS
from gensim.utils import simple_preprocess
from gensim import corpora, models
from keras.preprocessing.text import text_to_word_sequence
from sklearn.feature_extraction import stop_words
from scipy.spatial import distance
from random import randint
import calendar, datetime
"""
SEARCH_APP: Launch search engine.
Set host and port prior to running.
Requires a corpus or sub-corpus with inferred topic vectors, i.e.:
title raw tokens topics
0 https://en.wikipedia.org/wiki/Graphic_design graphic design is the process of visual commun... [graphic, design, process, visual, commun, pro... [(0, 0.63671833), (1, 0.0), (2, 0.0), (3, 0.29...
1 https://en.wikipedia.org/wiki/Design_fiction design fiction is a design practice aiming at ... [design, fiction, design, practic, aim, explor... [(0, 0.9217787), (1, 0.0), (2, 0.0), (3, 0.076...
2 https://en.wikipedia.org/wiki/Creativity_techn... creativity techniques are methods that encoura... [creativ, techniqu, method, encourag, creativ,... [(0, 0.9970473), (1, 0.0), (2, 0.0), (3, 0.0),...
3 https://en.wikipedia.org/wiki/Jewelry_design jewellery design is the art or profession of d... [jewelleri, design, art, profess, design, crea... [(0, 0.80666345), (1, 0.0), (2, 0.18880607), (...
4 https://en.wikipedia.org/wiki/Benjamin_Franklin <NAME> frs frsa frse january 17 170... [benjamin, franklin, fr, frsa, frse, januari, ... [(0, 0.9998033), (1, 0.0), (2, 0.0), (3, 0.0),...
5 https://en.wikipedia.org/wiki/Strategic_design strategic design is the application of future ... [strateg, design, applic, futur, orient, desig... [(0, 0.45011556), (1, 0.0), (2, 0.0), (3, 0.54...
6 https://en.wikipedia.org/wiki/Activity-centere... activity centered design acd is an extension o... [activ, center, design, acd, extens, human, ce... [(0, 0.6329251), (1, 0.0), (2, 0.0), (3, 0.344...
7 https://en.wikipedia.org/wiki/Architecture architecture latin architectura from the greek... [architectur, latin, architectura, greek, ἀρχι... [(0, 0.9993874), (1, 0.0), (2, 0.0), (3, 0.0),...
8 https://en.wikipedia.org/wiki/Web_developer a web developer is a programmer who specialize... [web, develop, programm, special, specif, enga... [(0, 0.0), (1, 0.0), (2, 0.0), (3, 0.8699879),...
9 https://en.wikipedia.org/wiki/Sonic_interactio... sonic interaction design is the study and expl... [sonic, interact, design, studi, exploit, soun... [(0, 0.8485447), (1, 0.0), (2, 0.0), (3, 0.0),...
10 https://en.wikipedia.org/wiki/Costume_design costume design is the investing of clothing an... [costum, design, invest, cloth, overal, appear... [(0, 0.9970691), (1, 0.0), (2, 0.0), (3, 0.0),...
11 https://en.wikipedia.org/wiki/Software_applica... application software app for short is software... [applic, softwar, app, short, softwar, design,... [(0, 0.0), (1, 0.0), (2, 0.0), (3, 0.9974447),...
12 https://en.wikipedia.org/wiki/Art_Nouveau art nouveau ˌɑːrt nuːˈvoʊ ˌɑːr french aʁ nuv... [art, nouveau, ˌɑːrt, nuːˈvoʊ, ˌɑːr, french, n... [(0, 0.9998343), (1, 0.0), (2, 0.0), (3, 0.0),...
13 https://en.wikipedia.org/wiki/Philosophy_of_de... philosophy of design is the study of definitio... [philosophi, design, studi, definit, design, a... [(0, 0.9634965), (1, 0.0), (2, 0.0), (3, 0.0),...
14 https://en.wikipedia.org/wiki/Environmental_im... environmental impact design eid is the design ... [environment, impact, design, eid, design, dev... [(0, 0.67384595), (1, 0.3187163), (2, 0.0), (3...
This serves as the pool of candidate results.
Query text topics are derived from the pre-loaded model.
The distance between the query's topic probability distribution and that of each of the candidate documents is measured using Jensen-Shannon Distance.
The nearest 1% of candidate documents are returned as results to the user.
These are returned in rank order from closest to furthest in terms of JSD, where the closest is 1.0 and the furthest is 0.0.
This returns to the user the document with the highest relevance in terms of 'topic profile' from among the available candidate documents.
"""
# generate random query id
def rand_id(n):
n_digit_str = ''.join(["{}".format(randint(0, 9)) for num in range(0, n)])
return int(n_digit_str)
# timestamp predictions
def get_ts():
d = datetime.datetime.utcnow()
ts = calendar.timegm(d.timetuple())
return ts
# define stopwords
def default_stop():
# intersection of gensim, nltk, spacy, and sklearn stopword lists
default = ['me', 'inc', 'shan', "needn't", 'she', '‘s', 'therefore', 'find', 'down',
'thereupon', 'without', 'up', 'yourselves', 'many', 'eleven', 'full', 'de', 're',
'wherever', 'on', 'her', 'already', 'through', 'side', 'having', 'together', 't',
'take', "'m", 'therein', 'everyone', 'himself', 'whenever', 'them', "'s", 'once',
'forty', 'only', 'must', 'hereupon', 'moreover', 'my', 'very', 'say', 'whom', 'get',
'eg', 'does', 'll', 'indeed', 'everything', 'couldnt', '’m', 'not', 'each', 'using',
'do', 've', 'cant', 'if', 'various', 'throughout', 'otherwise', 'serious', 'd',
'regarding', 'mustn', 'yourself', 'noone', 'somewhere', 'twenty', 'most', 'thick',
'describe', 'however', 'fire', 'see', 'eight', 'while', 'besides', 'neither', 'well',
'us', 'below', 'is', "won't", 'might', 'mine', 'anywhere', 'weren', "'re", "n't",
'whereupon', 'becomes', 'should', 'hereafter', 'ours', 'during', 'a', 'ltd', 'con',
'isn', 'else', 'whither', 'shouldn', 'why', 'will', 'seems', 'ie', 'every', 'someone',
'bottom', 'ain', 'needn', 'then', 'thin', 'being', 'whereafter', 'via', 'never',
'same', "haven't", 'y', 'behind', 'name', 'give', 'move', 'some', 'six', 'we',
'whole', 'than', 'myself', 'our', "wasn't", 'now', 'whether', "mustn't", 'were',
'still', 'along', 'enough', 'for', 'yours', 'whereby', 'per', 'had', 'next', 'twelve',
"doesn't", 'onto', 'cry', 'seeming', 'are', 'between', 'almost', 'third', 'latter',
'by', 'nevertheless', 'in', 'across', 'though', 'kg', 'somehow', 'out', 'show', 'no',
'either', 'didn', 'computer', '’ve', 'such', 'all', 'both', 'few', "weren't", 'from',
'’d', 'doing', 'alone', 'nan', 'latterly', 's', 'although', 'fifteen', 'hasn', 'own',
'due', 'whereas', 'beyond', "you'd", "shouldn't", 'whose', 'who', 'n’t', 'unless',
'something', "shan't", 'other', 'also', 'they', 'make', 'three', 'been', 'found',
'whoever', 'doesn', 'first', 'made', 'ten', 'seem', '‘ll', 'of', 'your', 'at', 'the',
'where', 'further', 'has', 'former', 'their', 'or', 'four', 'so', 'wherein', 'empty',
'among', 'mill', 'be', 'hasnt', 'used', 'go', 'amongst', 'everywhere', 'fifty',
"hadn't", '’ll', 'you', 'km', 'others', 'this', 'thru', 'may', 'wouldn', 'itself',
"'d", 'please', 'could', 'done', 'several', 'afterwards', 'two', 'becoming', 'those',
'‘ve', 'part', 'hundred', 'system', 'upon', "wouldn't", 'meanwhile', 'thus', '’s',
'herein', 'hadn', 'put', 'toward', 'hers', 'these', 'sometime', 'don', 'nine', 'have',
'won', 'least', 'thereafter', 'often', 'nobody', 'except', 'always', '’re', "you've",
'since', 'elsewhere', 'here', 'wasn', 'as', 'less', 'there', 'one', 'anyone', 'when',
'sometimes', 'its', 'formerly', 'ca', 'thence', 'm', "don't", 'rather', 'but', 'above',
'themselves', 'his', 'haven', 'what', 'too', 'aren', 'keep', "mightn't", 'top', 'he',
'anyhow', 'co', 'around', 'etc', 'about', 'nor', 'anyway', 'hence', '‘d', 'sixty',
'mostly', 'detail', 'anything', 'bill', 'much', "she's", 'ourselves', 'fify', 'that',
'last', 'theirs', 'really', 'back', 'un', 'yet', 'just', 'was', 'an', 'ma', "isn't",
"you'll", "should've", 'until', 'off', 'perhaps', 'beside', 'nowhere', 'mightn',
'sincere', "'ll", "didn't", "it's", 'am', 'again', 'even', 'which', 'front', 'can',
'within', "aren't", 'him', "you're", 'and', 'namely', 'against', '‘re', "that'll",
'with', 'whence', 'five', 'amount', 'o', 'quite', 'call', 'interest', 'none', 'before',
'fill', 'how', 'it', 'ever', 'seemed', 'i', 'because', 'thereby', 'would', '‘m',
'couldn', "couldn't", 'did', "'ve", 'under', 'after', 'more', 'become', 'nothing',
'herself', 'to', 'any', 'over', 'into', "hasn't", 'hereby', 'towards', 'amoungst',
'whatever', 'became', 'n‘t', 'beforehand', 'another', 'cannot']
return default
my_stopwords = default_stop()
# preprocessing functions
stemmer = PorterStemmer()
def lemmatize_stemming(text):
return stemmer.stem(WordNetLemmatizer().lemmatize(text, pos='v'))
def preprocess(text):
result = []
for token in gensim.utils.simple_preprocess(text):
if token not in my_stopwords and len(token) > 2:
result.append(lemmatize_stemming(token))
return result
def word_split(doc):
words = []
for word in doc.split(' '):
words.append(word)
return words
def flatten_text(doc):
output = ' '.join([w for w in text_to_word_sequence(str(doc))])
return output
def gen_dict_vector(doc):
query_text = doc
tokens = preprocess(doc)
vector = interp_topics(infer_topic(tokens))
source = "Search Bar"
query_id = rand_id(10)
query_ts = get_ts()
q_dict = ({'source': f'{source}', 'query_id': f'{query_id}', 'query_ts':
f'{query_ts}', 'query_text': f'{query_text}', 'tokens': f'{tokens}', 'topics': f'{vector}'})
return q_dict, vector
def gen_json(q_dict):
return json.dumps(q_dict)
def infer_topic(tokens):
dict_new = dictionary.doc2bow(tokens)
vector = model[dict_new]
return vector
def interp_topics(vector):
present = []
for i in vector:
t = i[0]
present.append(t)
all_t = [x for x in range(num_topics)]
missing = [x for x in all_t if x not in present]
if len(missing) > 0:
for i in missing:
missing_i = (i, 0.0)
vector.append(missing_i)
fixed = sorted(vector)
return fixed
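# Descriptive note: the LDA model only reports topics with non-zero probability, so
# interp_topics pads the missing topic ids with 0.0 and sorts, giving every document a
# fixed-length (topic_id, probability) vector that can be compared with JSD below.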
def jsdist(p, q):
return distance.jensenshannon(p, q, base=None)
def all_jsd(vector, tp):
aj = []
for i in tp:
j = jsdist(vector, i)
aj.append(j[1])
return aj
def pickle_df(df, pname):
df.to_pickle(pname)
def unpickle_df(pname, df):
new_df = pd.read_pickle(pname)
return new_df
def load_model():
filepath = os.getcwd()
filename_model = filepath + '/' + 'tf-lda.model'
filename_dict = filepath + '/' + 'tf-lda.dict'
model = gensim.models.LdaModel.load(filename_model)
dictionary = corpora.Dictionary.load(filename_dict)
return model, dictionary
def load_compare_docs(pkl_filename):
compare_docs = pkl_filename
tdf = unpickle_df(compare_docs, 'tdf')
tt = tdf['title']
rw = tdf['raw']
tp = tdf['topics']
return tt, rw, tp
def gen_json_results(vector, compare_docs, thresh):
r_titles = compare_docs[0]
r_raw = compare_docs[1]
r_topics = compare_docs[2]
r_distances = all_jsd(vector, r_topics) # measure JSD between vector and all compare_docs
rdf = pd.DataFrame({'title': [x for x in r_titles], 'raw': [x for x in r_raw],
'topics': [x for x in r_topics], 'distances': [x for x in r_distances]})
tt = rdf['title']
rw = rdf['raw']
tp = rdf['topics']
aj = rdf['distances']
pct_val = thresh
pct_thresh = np.percentile(aj, pct_val)
filtered = rdf[rdf['distances'] <= pct_thresh]
filtered = filtered.sort_values(by=['distances'])
tt = filtered['title']
rw = filtered['raw']
tp = filtered['topics']
aj = filtered['distances']
def confidence(n):
pct = abs(n-1)*100
return pct
ajc = aj.map(confidence)
rwf = rw.map(flatten_text)
# sort and jsonify results
results_df =
|
pd.DataFrame({'title': [x for x in tt], 'score': [f'{x:.0f}' for x in ajc], 'text': [x[0:500] for x in rwf], 'topics': [x for x in tp]})
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import pickle
from tqdm import tqdm_notebook
import matplotlib.pyplot as plt
import os
from ml_utils.plot_utils import plot_scatter, get_subplot_rows_cols
def covariate_shift(train, test, categorical_columns, n_samples, iterations = 200, weights_coef = 1, AUC_threshold = 0.8, importance_threshold = 0.9, max_loops = 20, test_size = 0.1, trys_all_influencer=5, calc_sample_weights=True, task_type="CPU", data_dir='', load_cov=False, save_cov=False, plot=True):
""" Select features without Covariate Shift between training and test set using iteratively CatBoostClassifier to identify relation between train and test """
import seaborn as sns
import catboost as cb
from sklearn.model_selection import train_test_split
if not os.path.exists(data_dir + 'cov_shift_features.pkl') or not load_cov:
train_sample = train.sample(n_samples)
train_sample.loc[:,'origin'] = 0
test_sample = test.sample(n_samples)
test_sample.loc[:,'origin'] = 1
combined_train, combined_test = train_test_split(
pd.concat([train_sample.reset_index(drop=True), test_sample.reset_index(drop=True)]),
test_size = test_size,
shuffle = True)
try:
influence_columns = []
count_all_influencer = 0
i = 0
AUC_score = 1
while i < max_loops and AUC_score > AUC_threshold:
x_columns = combined_train.columns.drop(['origin',] + influence_columns)
# Get the indexes for the categorical columns which CatBoost requires to out-perform other algorithms
cat_features_index = [list(x_columns).index(col) for col in categorical_columns if col in list(x_columns)]
# Do the feature selection once and only try again if no feature is selected
cov_shift_feature_selection = []
while len(cov_shift_feature_selection) == 0 and count_all_influencer < trys_all_influencer:
if count_all_influencer > 0:
print("Try again because model has set any feature as influencer")
cov_shift_model = cb.CatBoostClassifier(iterations = iterations,
eval_metric = "AUC",
cat_features = cat_features_index,
task_type = task_type,
verbose = False
)
cov_shift_feature_selection, df_cov_shift_feature_selection = shadow_feature_selection(
cov_shift_model,
combined_train['origin'], combined_train[x_columns],
need_cat_features_index=True, categorical_columns=categorical_columns,
collinear_threshold = 1,
n_iterations_mean = 1, times_no_change_features = 1
)
count_all_influencer += 1
if count_all_influencer == trys_all_influencer:
cov_shift_feature_selection = list(x_columns)
# Get the indexes for the categorical columns which CatBoost requires to out-perform other algorithms
cat_features_index = [cov_shift_feature_selection.index(col) for col in categorical_columns if col in cov_shift_feature_selection]
params = {'iterations' : 2*iterations, 'learning_rate' : 0.05, 'depth' : 6}
cov_shift_model = cb.CatBoostClassifier(iterations = iterations,
eval_metric = "AUC",
cat_features = cat_features_index,
scale_pos_weight = combined_train['origin'].value_counts()[0] / combined_train['origin'].value_counts()[1],
task_type = task_type,
verbose = False
)
cov_shift_model.set_params(**params)
cov_shift_model.fit(combined_train.drop('origin', axis = 1)[cov_shift_feature_selection],
combined_train['origin'],
eval_set = (combined_test.drop('origin', axis = 1)[cov_shift_feature_selection], combined_test['origin']),
use_best_model = True,
#sample_weight = sample_weight,
#early_stopping_rounds = True,
plot = False,
verbose = False)
AUC_score = cov_shift_model.get_best_score()['validation']['AUC']
print(f"Model score AUC of {AUC_score} on test")
# Remove the features which cumulative importance is relevant to predict origin of data (train or test)
if count_all_influencer != trys_all_influencer:
df_cov_shift_importance = pd.DataFrame(cov_shift_model.feature_importances_, columns = ['importance'], index = cov_shift_feature_selection)
df_cov_shift_importance['cumulative_importance'] = df_cov_shift_importance['importance'].cumsum() / df_cov_shift_importance['importance'].sum()
new_influence_columns = list(df_cov_shift_importance[df_cov_shift_importance['cumulative_importance'] < importance_threshold].index)
influence_columns = influence_columns + new_influence_columns
print(f"New {len(new_influence_columns)} columns will be removed from model: ", new_influence_columns)
print()
count_all_influencer = 0
i = i + 1
finally:
print()
print(f"Due to difference of influence of features to distinguish between data and submission, {len(influence_columns)} columns are removed:")
print(influence_columns)
if calc_sample_weights:
print("Calculating weights for each training sample")
probs = cov_shift_model.predict_proba(train[cov_shift_model.feature_names_])[:, 1] #calculating the probability
#print("Plot Train AUC")
#plot_roc_auc(pd.Serie(1,index = train.index), probs)
sample_weight = -np.log(probs)
sample_weight /= max(sample_weight) # Normalizing the weights
sample_weight = 1 + weights_coef * sample_weight
if plot:
plt.xlabel('Computed sample weight')
plt.ylabel('# Samples')
sns.distplot(sample_weight, kde=False)
if save_cov:
with open(data_dir + 'cov_shift_features.pkl', 'wb') as file:
print("Saving data in ", data_dir + 'cov_shift_features.pkl')
pickle.dump(influence_columns, file)
else:
print("Loading influence columns from ",data_dir)
with open(data_dir + 'cov_shift_features.pkl', 'rb') as file:
influence_columns = pickle.load(file)
cov_shift_model = None
sample_weight = [1,] * len(train)
return influence_columns, cov_shift_model, sample_weight
def stadistic_difference_distributions(data, submission, time_column, test_percentage=0.2, p_value_threshold=None,
verbose=False):
""" Calculate relation between initial and end part of the dataset for each column using Kolmogorov-Smirnov statistic on 2 samples """
from scipy import stats
from sklearn.model_selection import train_test_split
train, test = train_test_split(data.sort_values(time_column), test_size=test_percentage, shuffle=False)
time_analysis_df = pd.DataFrame(False, columns=['train_test', 'train_submission', 'test_submission'],
index=submission.columns.values)
for col in tqdm_notebook(submission.columns.values):
try:
KS_stat_test, p_value_test = stats.ks_2samp(train[col], test[col])
KS_stat_submission, p_value_submission = stats.ks_2samp(train[col], submission[col])
KS_stat_test_submission, p_value_test_submission = stats.ks_2samp(test[col], submission[col])
time_analysis_df.loc[col] = [p_value_test, p_value_submission, p_value_test_submission]
if verbose:
if p_value_test <= p_value_threshold or p_value_submission <= p_value_threshold or p_value_test_submission <= p_value_threshold:
print_s = f'Column {col} has different distribution'
if p_value_test <= p_value_threshold:
print_s = print_s + ' // train <--> test'
if p_value_submission <= p_value_threshold:
print_s = print_s + ' // train <--> submission'
if p_value_test_submission <= p_value_threshold:
print_s = print_s + ' // test <--> submission'
print(print_s)
except TypeError:
time_analysis_df.loc[col] = [np.nan, np.nan, np.nan]
if p_value_threshold is None:
cond1 = time_analysis_df['train_test'] == 0
cond2 = time_analysis_df['train_submission'] == 0
cond3 = time_analysis_df['test_submission'] == 0
else:
cond1 = time_analysis_df['train_test'] <= p_value_threshold
cond2 = time_analysis_df['train_submission'] <= p_value_threshold
cond3 = time_analysis_df['test_submission'] <= p_value_threshold
cols_to_remove = list(time_analysis_df[cond1 | cond2 | cond3].index)
return time_analysis_df, cols_to_remove
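# A minimal usage sketch (illustrative; `df` and `submission` are hypothetical DataFrames that
# share the submission's columns, and 'timestamp' is an assumed time column in `df`):
#
#   ks_table, shifted_cols = stadistic_difference_distributions(
#       df, submission, time_column='timestamp', test_percentage=0.2, p_value_threshold=0.05)
#   print(f"{len(shifted_cols)} columns look distribution-shifted:", shifted_cols)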
def outliers_analysis(full_data, features_names=None, x_column=None, subplot_rows=None, subplot_cols=None, starting_index=0,
index_offset=0, z_score_threshold=3.5, use_mean=False, plot=True, num_bins=50):
""" Calculate and visualize outliers analysis from Modified Z-score with MAD """
# Compatibility with numpy arrays
if isinstance(full_data, np.ndarray):
assert len(full_data.shape) <= 2
if len(full_data.shape) == 1:
columns = ['feature']
else:
columns = ['feature_'+str(i) for i in range(full_data.shape[-1])]
full_data = pd.DataFrame(full_data, columns=columns)
# Features not provided, use all the columns
if features_names is None:
features_names = list(full_data.columns)
if plot:
# Set a good relation rows/cols for the plot if not specified
if subplot_rows is None or subplot_cols is None:
subplot_rows, subplot_cols = get_subplot_rows_cols(len(features_names), [3,4,5])
# Resize for better visualization of subplots
plt.rcParams['figure.figsize'] = [subplot_cols * 5, subplot_rows * 4]
fig, axes = plt.subplots(subplot_rows, subplot_cols, sharex=False, sharey=False)
outliers_pd = full_data.copy()
outliers_summary = {}
i = starting_index
while i < len(features_names):
feature_name = features_names[i]
data = outliers_pd.loc[outliers_pd[feature_name].notnull(), feature_name]
# Modified Z-score with MAD (Median Absolute Deviation)
if use_mean:
outliers_pd.loc[outliers_pd[feature_name].notnull(), feature_name + '_zscore'] = 0.6745 * (data - data.mean()).abs() / (
data - data.mean()).abs().mean()
else:
outliers_pd.loc[outliers_pd[feature_name].notnull(), feature_name + '_zscore'] = 0.6745 * (data - data.median()).abs() / (
data - data.median()).abs().median()
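# Note: 0.6745 is the usual scaling constant for the modified Z-score (Iglewicz & Hoaglin), which
# makes the MAD-based score comparable to an ordinary Z-score under normality; values above the
# conventional 3.5 threshold are flagged as outliers just below.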
outliers_pd[feature_name + '_zscore_outliers'] = outliers_pd[feature_name + '_zscore'] > z_score_threshold
if plot:
# Take into account the case of only one plot
if subplot_rows * subplot_cols == 1:
ax = axes
elif subplot_rows == 1:
ax = axes[(i + index_offset) % subplot_cols]
else:
ax = axes[(i + index_offset) // subplot_cols, (i + index_offset) % subplot_cols]
# If X_column provided plot scatter, otherwise histogram
if x_column is None:
bins = np.linspace(data.min(), data.max(), num_bins)
ax.hist(data[~outliers_pd[feature_name + '_zscore_outliers']], bins=bins, density=False)
ax.hist(data[outliers_pd[feature_name + '_zscore_outliers']], bins=bins, density=False)
ax.set_title(feature_name)
else:
plot_scatter(outliers_pd[outliers_pd[feature_name].notnull()], x_column=x_column, y_column=feature_name,
axes=ax, highlight_column=feature_name + '_zscore_outliers')
outliers_percentage = 100 * outliers_pd[feature_name + '_zscore_outliers'].sum() / outliers_pd[
feature_name + '_zscore_outliers'].count()
outliers_summary[feature_name] = outliers_percentage
print("Feature: ", feature_name, " - Percentage of outliers using modified Z-score approach is: ",
np.round(outliers_percentage, 2), "%")
i = i + 1
if plot:
fig.tight_layout()
# Resize to original settings
plt.rcParams['figure.figsize'] = [10, 6]
outliers_summary = pd.DataFrame.from_dict(outliers_summary, orient='index', columns=['Percentage'])
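# A minimal usage sketch (illustrative only): the ndarray branch above wraps a 1-D array into a
# single 'feature' column, so a call such as
#
#   rng = np.random.RandomState(0)
#   sample = np.concatenate([rng.normal(size=1000), np.array([15.0, -12.0])])
#   outliers_analysis(sample, plot=False)
#
# would flag the two injected extreme points under the default 3.5 threshold.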
#!/usr/bin/env python
import os
import glob
import logging
import collections
import numpy as np
import pandas as pd
import yaml
import _utils as ikea_utils
from mathtools import utils, pose
from kinemparse.assembly import Assembly
CAMERA_NAMES = ('upper', 'lower')
POSE_VAR_NAMES = ['p_x', 'p_y', 'p_z', 'q_x', 'q_y', 'q_z', 'q_w']
logger = logging.getLogger(__name__)
def collectMarkerPoses(marker_sample_seqs, marker_keys):
marker_sample_seqs = [marker_sample_seqs[k] for k in marker_keys]
min_len = min(x.shape[0] for x in marker_sample_seqs)
for i in range(len(marker_sample_seqs)):
num_samples = marker_sample_seqs[i].shape[0]
if min_len < num_samples:
marker_sample_seqs[i] = marker_sample_seqs[i][:min_len, :]
logger.info(
f"Truncated seq of len {num_samples} "
f"to {marker_sample_seqs[i].shape[0]}"
)
marker_index_seqs = tuple(seq[:, :1] for seq in marker_sample_seqs)
marker_pose_seqs = tuple(seq[:, 1:] for seq in marker_sample_seqs)
return marker_index_seqs, marker_pose_seqs
def cameraIndices(frame_idx_seqs, marker_keys, camera_name):
frame_indices = np.hstack(tuple(
seq for seq, k in zip(frame_idx_seqs, marker_keys)
if k[0] == camera_name
))
if not np.all(frame_indices == frame_indices[:, 0:1]):
# import pdb; pdb.set_trace()
raise AssertionError()
return frame_indices[:, 0]
def readFrameFns(fn, names_as_int=False):
frame_fns = pd.read_csv(fn, header=None).iloc[:, 0].tolist()
if names_as_int:
frame_fns = np.array([int(fn.strip('.png').strip('frame')) for fn in frame_fns])
return frame_fns
def partLabelsToHoleLabels(labels, ignore_sibling_holes=True):
correct_connections = (
(('left', 1), ('frontbeam', 1)),
(('left', 2), ('backbeam', 1)),
(('left', 3), ('backrest', 1)),
(('right', 1), ('frontbeam', 2)),
(('right', 2), ('backbeam', 2)),
(('right', 3), ('backrest', 2)),
(('cushion', 1), ('frontbeam', 3)),
(('cushion', 2), ('backbeam', 3)),
)
correct_connections += tuple((rhs, lhs) for lhs, rhs in correct_connections)
if ignore_sibling_holes:
part_pairs_to_hole_pairs = {
(f'{part1}', f'{part2}'): ((f'{part1}_hole_{hole1}', f'{part2}_hole_{hole2}'),)
for (part1, hole1), (part2, hole2) in correct_connections
}
else:
part_pairs_to_hole_pairs = {
(f'{part1}', f'{part2}'): tuple(
(f'{part1}_hole_{hole1}_{i}', f'{part2}_hole_{hole2}_{i}')
for i in (1, 2)
)
for (part1, hole1), (part2, hole2) in correct_connections
}
rows = []
for i, (start_idx, end_idx, action, part1, part2) in labels.iterrows():
part_pairs = part_pairs_to_hole_pairs[part1, part2]
for part1, part2 in part_pairs:
rows.append([start_idx, end_idx, action, part1, part2])
labels = pd.DataFrame(data=rows, columns=labels.columns)
# Data Collection and Updating CSV files
import ssl
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
import yfinance as yf
from pytrends.request import TrendReq
import requests
import json
import csv
from bs4 import BeautifulSoup
file = 'ITK Cases.csv'
# ignore ssl errors when retrieving HTML or JSON files
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
# create list of all dates up that day
current = datetime.now()
df = pd.read_csv(file)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 12 16:53:59 2018
@author: xavier.qiu
"""
import pandas as pd
import datetime
import gc
import numpy as np
import featuretools as ft
import os
from util.util import compress_int, send_msg
class DataSet(object):
def __init__(self, data_dir='/content/EloMerchantKaggle/data/'):
self.data_dir = data_dir
self.train_x_path = os.path.join(self.data_dir, 'x_train_agg')
self.test_x_path = os.path.join(self.data_dir, 'x_test_agg')
self.train_y_path = os.path.join(self.data_dir, 'y_train')
pass
def get_train_dataset(self, reset=False, load=True):
if load and os.path.isfile(self.train_x_path) and os.path.isfile(self.train_y_path):
return pd.read_csv(self.train_x_path), pd.read_csv(self.train_y_path)
train_df, hist_df_train, new_trans_df_train = split_trans_into_train_test(data_dir=self.data_dir,
reset=reset)
return agg(train_df, hist_df_train, new_trans_df_train, True, self.train_x_path, self.train_y_path)
def get_test_dataset(self, load=True):
if load and os.path.isfile(self.test_x_path):
return pd.read_csv(self.test_x_path), None
print("loading test.csv ...")
d = {'feature_1': np.uint8, 'feature_2': np.uint8, 'feature_3': np.bool_}
test_df = pd.read_csv(os.path.join(self.data_dir, "test.csv"), parse_dates=["first_active_month"], dtype=d)
test_df.info(memory_usage='deep')
hist_df_test = pd.read_csv(os.path.join(self.data_dir, "historical_transactions_test.csv"),
parse_dates=["purchase_date"])
hist_df_test = compress_int(hist_df_test)
new_trans_df_test = pd.read_csv(os.path.join(self.data_dir, "new_merchant_transactions_test.csv"),
parse_dates=["purchase_date"])
new_trans_df_test = compress_int(new_trans_df_test)
send_msg("load done")
return agg(test_df, hist_df_test, new_trans_df_test, False, self.test_x_path, None)
def agg(train_df, hist_df, new_trans_df, isTrain, x_save_path, y_save_path):
train_df = train_df.copy(deep=True)
if isTrain:
target = train_df['target']
del train_df['target']
else:
target = None
es_train = ft.EntitySet(id='es_train')
es_train = es_train.entity_from_dataframe(entity_id='train', dataframe=train_df,
index='', time_index='first_active_month')
es_train = es_train.entity_from_dataframe(entity_id='history', dataframe=hist_df,
index='', time_index='purchase_date')
es_train = es_train.entity_from_dataframe(entity_id='new_trans', dataframe=new_trans_df,
index='', time_index='purchase_date')
# Relationship between clients and previous loans
r_client_previous = ft.Relationship(es_train['train']['card_id'],
es_train['history']['card_id'])
# Add the relationship to the entity set
es_train = es_train.add_relationship(r_client_previous)
r_client_previous = ft.Relationship(es_train['train']['card_id'],
es_train['new_trans']['card_id'])
# Add the relationship to the entity set
es_train = es_train.add_relationship(r_client_previous)
print(" dfs ing ... ")
x_train, _ = ft.dfs(entityset=es_train,
target_entity='train',
max_depth=2)
send_msg("dfs done! ")
print("saving...")
if target is not None:
target.to_csv(y_save_path)
x_train['index'] = target.index
x_train = x_train.set_index('index')
x_train.to_csv(x_save_path)
return x_train, target
def split_trans_into_train_test(data_dir='/content/EloMerchantKaggle/data/', reset=False):
d = {'feature_1': np.uint8, 'feature_2': np.uint8, 'feature_3': np.bool_}
print("loading train.csv ...")
train_df = pd.read_csv(os.path.join(data_dir, "train.csv"), parse_dates=["first_active_month"], dtype=d)
train_df.info(memory_usage='deep')
if not reset and os.path.isfile(os.path.join(data_dir, "historical_transactions_train.csv")) and os.path.isfile(
os.path.join(data_dir, "new_merchant_transactions_train.csv")):
hist_df_train = pd.read_csv(os.path.join(data_dir, "historical_transactions_train.csv"),
parse_dates=["purchase_date"])
hist_df_train = compress_int(hist_df_train)
new_trans_df_train = pd.read_csv(os.path.join(data_dir, "new_merchant_transactions_train.csv"),
parse_dates=["purchase_date"])
new_trans_df_train = compress_int(new_trans_df_train)
send_msg("load done")
return train_df, hist_df_train, new_trans_df_train
pass
print("loading test.csv ...")
test_df = pd.read_csv(os.path.join(data_dir, "test.csv"), parse_dates=["first_active_month"], dtype=d)
test_df.info(memory_usage='deep')
print("loading historical_transactions.csv ...")
hist_df = pd.read_csv(os.path.join(data_dir, "historical_transactions.csv"), parse_dates=["purchase_date"])
print(' compressing ...')
hist_df = compressByDType(hist_df)
print(' split to get train hist ...')
hist_df_train = hist_df[hist_df.card_id.isin(set(train_df['card_id'].unique()))]
print(' saving ... ')
hist_df_train.to_csv(os.path.join(data_dir, "historical_transactions_train.csv"))
print(' split to get test hist ...')
hist_df_test = hist_df[hist_df.card_id.isin(set(test_df['card_id'].unique()))]
print(' saving ... ')
hist_df_test.to_csv(os.path.join(data_dir, "historical_transactions_test.csv"))
del hist_df_test
del hist_df
gc.collect()
print("loading new_merchant_transactions.csv ...")
new_trans_df = pd.read_csv(os.path.join(data_dir, "new_merchant_transactions.csv"),
parse_dates=["purchase_date"])
print(' compressing ...')
new_trans_df = compressByDType(new_trans_df)
print(' split to get train new trans ...')
new_trans_df_train = new_trans_df[new_trans_df.card_id.isin(set(train_df['card_id'].unique()))]
print(' saving ... ')
new_trans_df_train.to_csv(os.path.join(data_dir, "new_merchant_transactions_train.csv"))
print(' split to get test new trans ...')
new_trans_df_test = new_trans_df[new_trans_df.card_id.isin(set(test_df['card_id'].unique()))]
print(' saving ... ')
new_trans_df_test.to_csv(os.path.join(data_dir, "new_merchant_transactions_test.csv"))
del new_trans_df_test
del new_trans_df
gc.collect()
send_msg("split and save done")
return train_df, hist_df_train, new_trans_df_train
def agg2(df_train, df_test, df_hist_trans):
aggs = {}
for col in ['month', 'hour', 'weekofyear', 'dayofweek', 'year', 'subsector_id', 'merchant_category_id']:
aggs[col] = ['nunique']
aggs['purchase_amount'] = ['sum', 'max', 'min', 'mean', 'var']
aggs['installments'] = ['sum', 'max', 'min', 'mean', 'var']
aggs['purchase_date'] = ['max', 'min']
aggs['month_lag'] = ['max', 'min', 'mean', 'var']
aggs['month_diff'] = ['mean']
aggs['authorized_flag'] = ['sum', 'mean']
aggs['weekend'] = ['sum', 'mean']
aggs['category_1'] = ['sum', 'mean']
aggs['card_id'] = ['size']
for col in ['category_2', 'category_3']:
df_hist_trans[col + '_mean'] = df_hist_trans.groupby([col])['purchase_amount'].transform('mean')
aggs[col + '_mean'] = ['mean']
new_columns = get_new_columns('hist', aggs)
df_hist_trans_group = df_hist_trans.groupby('card_id').agg(aggs)
df_hist_trans_group.columns = new_columns
df_hist_trans_group.reset_index(drop=False, inplace=True)
df_hist_trans_group['hist_purchase_date_diff'] = (
df_hist_trans_group['hist_purchase_date_max'] - df_hist_trans_group['hist_purchase_date_min']).dt.days
df_hist_trans_group['hist_purchase_date_average'] = df_hist_trans_group['hist_purchase_date_diff'] / \
df_hist_trans_group['hist_card_id_size']
df_hist_trans_group['hist_purchase_date_uptonow'] = (
datetime.datetime.today() - df_hist_trans_group['hist_purchase_date_max']).dt.days
df_train = df_train.merge(df_hist_trans_group, on='card_id', how='left')
df_test = df_test.merge(df_hist_trans_group, on='card_id', how='left')
del df_hist_trans_group
gc.collect()
return df_train, df_test
def get_new_columns(name, aggs):
return [name + '_' + k + '_' + agg for k in aggs.keys() for agg in aggs[k]]
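# For example, get_new_columns('hist', {'purchase_amount': ['sum', 'mean']}) returns
# ['hist_purchase_amount_sum', 'hist_purchase_amount_mean'], matching the flattened column
# names assigned to the groupby().agg(aggs) result above.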
def compressByDType(df_new_merchant_trans):
"""
:param df_new_merchant_trans:
:return:
"""
df_new_merchant_trans = df_new_merchant_trans.drop(columns=['merchant_id'])
df_new_merchant_trans['category_2'].fillna(1.0, inplace=True)
df_new_merchant_trans['category_3'].fillna('D', inplace=True)
df_new_merchant_trans['authorized_flag'].fillna('Y', inplace=True)
df_new_merchant_trans['authorized_flag'] = df_new_merchant_trans['authorized_flag'].map({'Y': 1, 'N': 0})
df_new_merchant_trans['category_1'] = df_new_merchant_trans['category_1'].map({'Y': 1, 'N': 0})
df_new_merchant_trans['category_3'] = df_new_merchant_trans['category_3'].map({'A': 0, 'B': 1, 'C': 2, 'D': 3})
df_new_merchant_trans['category_1'] = pd.to_numeric(df_new_merchant_trans['category_1'], downcast='integer')
df_new_merchant_trans['category_2'] = pd.to_numeric(df_new_merchant_trans['category_2'], downcast='integer')
df_new_merchant_trans['category_3'] = pd.to_numeric(df_new_merchant_trans['category_3'], downcast='integer')
df_new_merchant_trans['merchant_category_id'] = pd.to_numeric(df_new_merchant_trans['merchant_category_id'],
downcast='integer')
df_new_merchant_trans['authorized_flag'] = pd.to_numeric(df_new_merchant_trans['authorized_flag'],
downcast='integer')
df_new_merchant_trans['city_id'] = pd.to_numeric(df_new_merchant_trans['city_id'], downcast='integer')
df_new_merchant_trans['installments'] = pd.to_numeric(df_new_merchant_trans['installments'], downcast='integer')
df_new_merchant_trans['state_id'] = pd.to_numeric(df_new_merchant_trans['state_id'], downcast='integer')
df_new_merchant_trans['subsector_id'] = pd.to_numeric(df_new_merchant_trans['subsector_id'], downcast='integer')
df_new_merchant_trans['month_lag'] = pd.to_numeric(df_new_merchant_trans['month_lag'], downcast='integer')
import requests
from typing import List
import re
# from nciRetriever.updateFC import updateFC
# from nciRetriever.csvToArcgisPro import csvToArcgisPro
# from nciRetriever.geocode import geocodeSites
# from nciRetriever.createRelationships import createRelationships
# from nciRetriever.zipGdb import zipGdb
# from nciRetriever.updateItem import update
# from nciRetriever.removeTables import removeTables
from datetime import date
import pandas as pd
import logging
from urllib.parse import urljoin
import json
import time
import sys
import os
from pprint import pprint
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
today = date.today()
# nciThesaurus = pd.read_csv('thesaurus.csv')
# uniqueMainDiseasesDf = pd.read_csv('nciUniqueMainDiseasesReference.csv')
# uniqueSubTypeDiseasesDf = pd.read_csv('nciUniqueSubTypeDiseasesReference.csv')
# uniqueDiseasesWithoutSynonymsDf = pd.read_csv('nciUniqueDiseasesWithoutSynonymsReference.csv')
def createTrialDict(trial: dict) -> dict:
trialDict = {'nciId': trial['nci_id'],
'protocolId': trial['protocol_id'],
'nctId': trial['nct_id'],
'detailDesc': trial['detail_description'],
'officialTitle': trial['official_title'],
'briefTitle': trial['brief_title'],
'briefDesc': trial['brief_summary'],
'phase': trial['phase'],
'leadOrg': trial['lead_org'],
'amendmentDate': trial['amendment_date'],
'primaryPurpose': trial['primary_purpose'],
'currentTrialStatus': trial['current_trial_status'],
'startDate': trial['start_date']}
if 'completion_date' in trial.keys():
trialDict.update({'completionDate': trial['completion_date']})
if 'active_sites_count' in trial.keys():
trialDict.update({'activeSitesCount': trial['active_sites_count']})
if 'max_age_in_years' in trial['eligibility']['structured'].keys():
trialDict.update({'maxAgeInYears': int(trial['eligibility']['structured']['max_age_in_years'])})
if 'min_age_in_years' in trial['eligibility']['structured'].keys():
trialDict.update({'minAgeInYears': int(trial['eligibility']['structured']['min_age_in_years']) if trial['eligibility']['structured']['min_age_in_years'] is not None else None})
if 'gender' in trial['eligibility']['structured'].keys():
trialDict.update({'gender': trial['eligibility']['structured']['gender']})
if 'accepts_healthy_volunteers' in trial['eligibility']['structured'].keys():
trialDict.update({'acceptsHealthyVolunteers': trial['eligibility']['structured']['accepts_healthy_volunteers']})
if 'study_source' in trial.keys():
trialDict.update({'studySource': trial['study_source']})
if 'study_protocol_type' in trial.keys():
trialDict.update({'studyProtocolType': trial['study_protocol_type']})
if 'record_verification_date' in trial.keys():
trialDict.update({'recordVerificationDate': trial['record_verification_date']})
return trialDict
def createSiteDict(trial:dict, site:dict) -> dict:
siteDict = {'nciId': trial['nci_id'],
'orgStateOrProvince': site['org_state_or_province'],
'contactName': site['contact_name'],
'contactPhone': site['contact_phone'],
'recruitmentStatusDate': site['recruitment_status_date'],
'orgAddressLine1': site['org_address_line_1'],
'orgAddressLine2': site['org_address_line_2'],
'orgVa': site['org_va'],
'orgTty': site['org_tty'],
'orgFamily': site['org_family'],
'orgPostalCode': site['org_postal_code'],
'contactEmail': site['contact_email'],
'recruitmentStatus': site['recruitment_status'],
'orgCity': site['org_city'],
'orgEmail': site['org_email'],
'orgCountry': site['org_country'],
'orgFax': site['org_fax'],
'orgPhone': site['org_phone'],
'orgName': site['org_name']
}
# if 'org_coordinates' in site.keys():
# siteDict['lat'] = site['org_coordinates']['lat']
# siteDict['long'] = site['org_coordinates']['lon']
return siteDict
def createBiomarkersDicts(trial:dict, marker:dict) -> List[dict]:
parsedBiomarkers = []
for name in [*marker['synonyms'], marker['name']]:
biomarkerDict = {
'nciId': trial['nci_id'],
'nciThesaurusConceptId': marker['nci_thesaurus_concept_id'],
'name': name,
'assayPurpose': marker['assay_purpose']
}
if 'eligibility_criterion' in marker.keys():
biomarkerDict.update({'eligibilityCriterion': marker['eligibility_criterion']})
if 'inclusion_indicator' in marker.keys():
biomarkerDict.update({'inclusionIndicator': marker['inclusion_indicator']})
parsedBiomarkers.append(biomarkerDict)
return parsedBiomarkers
def createMainBiomarkersDict(trial:dict, marker:dict) -> dict:
parsedBiomarker = {
'nciId': trial['nci_id'],
'nciThesaurusConceptId': marker['nci_thesaurus_concept_id'],
'name': marker['name'],
'assayPurpose': marker['assay_purpose'],
}
if 'eligibility_criterion' in marker.keys():
parsedBiomarker.update({'eligibilityCriterion': marker['eligibility_criterion']})
if 'inclusion_indicator' in marker.keys():
parsedBiomarker.update({'inclusionIndicator': marker['inclusion_indicator']})
return parsedBiomarker
def createDiseasesDicts(trial:dict, disease:dict) -> List[dict]:
parsedDiseases = []
try:
names = [disease['name']]
if 'synonyms' in disease.keys():
names.extend(disease['synonyms'])
except KeyError:
logger.error(f'Invalid key for diseases. Possible keys: {disease.keys()}')
return parsedDiseases
for name in names:
diseaseDict = {
'inclusionIndicator': disease['inclusion_indicator'],
'isLeadDisease': disease['is_lead_disease'],
'name': name,
'nciThesaurusConceptId': disease['nci_thesaurus_concept_id'],
'nciId': trial['nci_id']
}
parsedDiseases.append(diseaseDict)
return parsedDiseases
def createMainToSubTypeRelDicts(trial:dict, disease:dict) -> List[dict]:
if 'subtype' not in disease['type']:
return []
relDicts = []
for parent in disease['parents']:
relDicts.append({
'maintype': parent,
'subtype': disease['nci_thesaurus_concept_id']
})
return relDicts
def createDiseasesWithoutSynonymsDict(trial:dict, disease:dict) -> dict:
# diseaseDict = {
# 'nciId': trial['nci_id'],
# 'inclusionIndicator': disease['inclusion_indicator'],
# 'isLeadDisease': disease['is_lead_disease'],
# 'nciThesaurusConceptId': disease['nci_thesaurus_concept_id']
# }
# correctDisease = uniqueDiseasesWithoutSynonymsDf.loc[uniqueDiseasesWithoutSynonymsDf['nciThesaurusConceptId'] == disease['nci_thesaurus_concept_id']]
# if correctDisease.empty:
# logger.error('Disease not found in full reference. Aborting insertion...')
# return {}
# # logger.debug(correctDisease['name'].values[0])
# # time.sleep(2)
# diseaseDict.update({
# 'name': correctDisease['name'].values[0]
# })
# return diseaseDict
try:
return {
'nciId': trial['nci_id'],
'name': disease['name'],
'isLeadDisease': disease['is_lead_disease'],
'nciThesaurusConceptId': disease['nci_thesaurus_concept_id'],
'inclusionIndicator': disease['inclusion_indicator']
}
except KeyError:
logger.error('Invalid key for main diseases. Not adding to list...')
return {}
def createMainDiseasesDict(trial:dict, disease:dict) -> dict:
# diseaseDict = {
# 'nciId': trial['nci_id'],
# 'inclusionIndicator': disease['inclusion_indicator'],
# 'isLeadDisease': disease['is_lead_disease'],
# 'nciThesaurusConceptId': disease['nci_thesaurus_concept_id']
# }
# correctDisease = uniqueMainDiseasesDf.loc[uniqueMainDiseasesDf['nciThesaurusConceptId'] == disease['nci_thesaurus_concept_id']]
# if correctDisease.empty:
# return {}
# diseaseDict.update({
# 'name': correctDisease['name'].values[0]
# })
# return diseaseDict
# if 'type' not in disease.keys():
# return {}
if 'maintype' not in disease['type']:
return {}
try:
return {
'nciId': trial['nci_id'],
'name': disease['name'],
'isLeadDisease': disease['is_lead_disease'],
'nciThesaurusConceptId': disease['nci_thesaurus_concept_id'],
'inclusionIndicator': disease['inclusion_indicator']
}
except KeyError:
logger.error('Invalid key for main diseases. Not adding to list...')
return {}
def createSubTypeDiseasesDict(trial:dict, disease:dict) -> dict:
# diseaseDict = {
# 'nciId': trial['nci_id'],
# 'inclusionIndicator': disease['inclusion_indicator'],
# 'isLeadDisease': disease['is_lead_disease'],
# 'nciThesaurusConceptId': disease['nci_thesaurus_concept_id']
# }
# correctDisease = uniqueSubTypeDiseasesDf.loc[uniqueSubTypeDiseasesDf['nciThesaurusConceptId'] == disease['nci_thesaurus_concept_id']]
# if correctDisease.empty:
# return {}
# diseaseDict.update({
# 'name': correctDisease['name'].values[0]
# })
# return diseaseDict
# if 'type' not in disease.keys():
# return {}
if 'subtype' not in disease['type']:
return {}
try:
return {
'nciId': trial['nci_id'],
'name': disease['name'],
'isLeadDisease': disease['is_lead_disease'],
'nciThesaurusConceptId': disease['nci_thesaurus_concept_id'],
'inclusionIndicator': disease['inclusion_indicator']
}
except KeyError:
logger.error('Invalid key for subtype diseases. Not adding to list...')
return {}
def createArmsDict(trial:dict, arm:dict) -> dict:
parsedArm = re.sub(r'\(.+\)', '', arm['name'])
parsedArm = re.sub(r'\s+', '_', parsedArm.strip())
return {
'nciId': trial['nci_id'],
'name': arm['name'],
'nciIdWithName': f'{trial["nci_id"]}_{parsedArm}',
'description': arm['description'],
'type': arm['type']
}
def createInterventionsDicts(trial:dict, arm:dict) -> List[dict]:
parsedInterventions = []
parsedArm = re.sub(r'\(.+\)', '', arm['name'])
parsedArm = re.sub(r'\s+', '_', parsedArm.strip())
for intervention in arm['interventions']:
names = intervention['synonyms']
if 'name' in intervention.keys():
names.append(intervention['name'])
elif 'intervention_name' in intervention.keys():
names.append(intervention['intervention_name'])
for name in names:
try:
interventionDict = {
'nciId': trial['nci_id'],
'arm': arm['name'],
'nciIdWithArm': f'{trial["nci_id"]}_{parsedArm}',
'type': intervention['intervention_type'],
'inclusionIndicator': intervention['inclusion_indicator'],
'name': name,
'category': intervention['category'],
'nciThesaurusConceptId': intervention['intervention_code'],
'description': intervention['intervention_description']
}
except KeyError:
try:
interventionDict = {
'nciId': trial['nci_id'],
'arm': arm['name'],
'nciIdWithArm': f'{trial["nci_id"]}_{parsedArm}',
'type': intervention['type'],
'inclusionIndicator': intervention['inclusion_indicator'],
'name': name,
'category': intervention['category'],
'nciThesaurusConceptId': intervention['nci_thesaurus_concept_id'],
'description': intervention['description']
}
except KeyError as e:
logger.exception(e)
logger.error(f'Invalid intervention keys. Possible keys are: {intervention.keys()}')
continue
parsedInterventions.append(interventionDict)
return parsedInterventions
def createMainInterventionDicts(trial:dict, arm:dict) -> List[dict]:
parsedArm = re.sub(r'\(.+\)', '', arm['name'])
parsedArm = re.sub(r'\s+', '_', parsedArm.strip())
parsedMainInterventions = []
for intervention in arm['interventions']:
try:
mainInterventionDict = {
'nciId': trial['nci_id'],
'arm': arm['name'],
'nciIdWithArm': f'{trial["nci_id"]}_{parsedArm}',
'type': intervention['intervention_type'],
'inclusionIndicator': intervention['inclusion_indicator'],
'name': intervention['intervention_name'],
'category': intervention['category'],
'nciThesaurusConceptId': intervention['intervention_code'],
'description': intervention['intervention_description']
}
except KeyError:
try:
mainInterventionDict = {
'nciId': trial['nci_id'],
'arm': arm['name'],
'nciIdWithArm': f'{trial["nci_id"]}_{parsedArm}',
'type': intervention['type'],
'inclusionIndicator': intervention['inclusion_indicator'],
'name': intervention['name'],
'category': intervention['category'],
'nciThesaurusConceptId': intervention['nci_thesaurus_concept_id'],
'description': intervention['description']
}
except KeyError:
logger.error(f'Unexpected intervention keys: {intervention.keys()}. Not inserting...')
continue
parsedMainInterventions.append(mainInterventionDict)
return parsedMainInterventions
def deDuplicateTable(csvName:str, deduplicationList:List[str]):
df = pd.read_csv(csvName)
df.drop_duplicates(subset=deduplicationList, inplace=True)
df.to_csv(csvName, index=False)
def correctMainToSubTypeTable(today):
mainDf = pd.read_csv(f'nciUniqueMainDiseases{today}.csv')
subTypeDf = pd.read_csv(f'nciUniqueSubTypeDiseases{today}.csv')
relDf = pd.read_csv(f'MainToSubTypeRelTable{today}.csv')
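# The loop below repairs 'maintype' references: when a row's maintype concept is itself a subtype,
# it walks up the subtype -> maintype relation until it reaches a concept present in the
# main-diseases table, and blanks the reference if no such parent can be resolved.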
for idx, row in relDf.iterrows():
parentId = row['maintype']
if parentId in mainDf['nciThesaurusConceptId'].values:
continue
elif parentId in subTypeDf['nciThesaurusConceptId'].values:
while True:
possibleMainTypesDf = relDf[relDf['subtype'] == parentId]
if possibleMainTypesDf.empty:
logger.error(f'Parent {parentId} not found in main diseases or subtype diseases')
parentId = ''
break
#setting the parentId value with the parent of the subtype found
for value in possibleMainTypesDf['maintype'].values:
if parentId == value:
continue
parentId = value
break
else:
logger.error(f'Parent {parentId} not found in main diseases or subtype diseases')
parentId = ''
break
# parentId = possibleMainTypesDf['maintype'].values[0]
if parentId in mainDf['nciThesaurusConceptId'].values:
break
if parentId == '':
continue
relDf.loc[idx, 'maintype'] = parentId  # assign via .loc; chained iloc[...][...] assignment has no effect
else:
pass
relDf.to_csv(f'MainToSubTypeRelTable{today}.csv', index=False)
# logger.error(f'maintype id {parentId} is not found in main diseases or subtype diseases')
def createUniqueSitesCsv(today):
logger.debug('Reading sites...')
sitesDf = pd.read_csv(f'nciSites{today}.csv')
logger.debug('Dropping duplicates and trial-dependent information...')
sitesDf.drop_duplicates(subset='orgName', inplace=True)
sitesDf.drop(['recruitmentStatusDate', 'recruitmentStatus', 'nciId'], axis=1, inplace=True)
logger.debug('Saving unique sites table...')
sitesDf.to_csv(f'nciUniqueSites{today}.csv', index=False)
def createUniqueDiseasesWithoutSynonymsCsv(today):
logger.debug('Reading diseases without synonyms...')
diseasesWithoutSynonymsDf = pd.read_csv(f'nciDiseasesWithoutSynonyms{today}.csv')
logger.debug('Dropping duplicates and trial-dependent information...')
diseasesWithoutSynonymsDf.drop_duplicates(subset='nciThesaurusConceptId', inplace=True)
diseasesWithoutSynonymsDf.drop(['isLeadDisease', 'inclusionIndicator', 'nciId'], axis=1, inplace=True)
diseasesWithoutSynonymsDf.dropna(inplace=True)
logger.debug('Saving unique diseases table...')
diseasesWithoutSynonymsDf.to_csv(f'nciUniqueDiseasesWithoutSynonyms{today}.csv', index=False)
def createUniqueMainDiseasesCsv(today):
logger.debug('Reading main diseases...')
mainDiseasesDf = pd.read_csv(f'nciMainDiseases{today}.csv')
logger.debug('Dropping duplicates and trial-dependent information...')
mainDiseasesDf.drop_duplicates(subset='nciThesaurusConceptId', inplace=True)
mainDiseasesDf.drop(['isLeadDisease', 'inclusionIndicator', 'nciId'], axis=1, inplace=True)
mainDiseasesDf.dropna(inplace=True)
logger.debug('Saving unique diseases table...')
mainDiseasesDf.to_csv(f'nciUniqueMainDiseases{today}.csv', index=False)
def createUniqueSubTypeDiseasesCsv(today):
logger.debug('Reading main diseases...')
subTypeDiseasesDf = pd.read_csv(f'nciSubTypeDiseases{today}.csv')
logger.debug('Dropping duplicates and trial-dependent information...')
subTypeDiseasesDf.drop_duplicates(subset='nciThesaurusConceptId', inplace=True)
subTypeDiseasesDf.drop(['isLeadDisease', 'inclusionIndicator', 'nciId'], axis=1, inplace=True)
subTypeDiseasesDf.dropna(inplace=True)
logger.debug('Saving unique diseases table...')
subTypeDiseasesDf.to_csv(f'nciUniqueSubTypeDiseases{today}.csv', index=False)
def createUniqueBiomarkersCsv(today):
logger.debug('Reading main biomarkers...')
mainBiomarkersDf = pd.read_csv(f'nciMainBiomarkers{today}.csv')
logger.debug('Dropping duplicates and trial-dependent information...')
mainBiomarkersDf.drop_duplicates(subset='nciThesaurusConceptId', inplace=True)
mainBiomarkersDf.drop(['eligibilityCriterion', 'inclusionIndicator', 'assayPurpose', 'nciId'], axis=1, inplace=True)
mainBiomarkersDf.dropna(inplace=True)
logger.debug('Saving unique biomarkers table...')
mainBiomarkersDf.to_csv(f'nciUniqueMainBiomarkers{today}.csv', index=False)
def createUniqueInterventionsCsv(today):
logger.debug('Reading main interventions...')
mainInterventionsDf = pd.read_csv(f'nciMainInterventions{today}.csv')
logger.debug('Dropping duplicates and trial-dependent information...')
mainInterventionsDf.drop_duplicates(subset='nciThesaurusConceptId', inplace=True)
mainInterventionsDf.drop(['nciId', 'inclusionIndicator', 'arm', 'nciIdWithArm'], axis=1, inplace=True)
mainInterventionsDf.dropna(inplace=True)
logger.debug('Saving unique interventions table...')
mainInterventionsDf.to_csv(f'nciUniqueMainInterventions{today}.csv', index=False)
def retrieveToCsv():
baseUrl = r'https://clinicaltrialsapi.cancer.gov/api/v2/'
with open('./nciRetriever/secrets/key.txt', 'r') as f:
apiKey = f.read()
headers = {
'X-API-KEY': apiKey,
'Content-Type': 'application/json'
}
trialEndpoint = urljoin(baseUrl, 'trials')
logger.debug(trialEndpoint)
#sending initial request to get the total number of trials
trialsResponse = requests.get(trialEndpoint, headers=headers, params={'trial_status': 'OPEN'})
trialsResponse.raise_for_status()
trialJson = trialsResponse.json()
totalNumTrials = trialJson['total']
logger.debug(f'Total number of trials: {totalNumTrials}')
start = time.perf_counter()
createdTrialCsv = False
createdSiteCsv = False
createdEligibilityCsv = False
createdBiomarkerCsv = False
createdMainBiomarkerCsv = False
createdDiseaseCsv = False
createdMainToSubTypeRelTableCsv = False
createdDiseaseWithoutSynonymsCsv = False
createdMainDiseaseCsv = False
createdSubTypeDiseaseCsv = False
createdArmsCsv = False
createdInterventionCsv = False
createdMainInterventionCsv = False
for trialNumFrom in range(0, totalNumTrials, 50):
sectionStart = time.perf_counter()
#creating the dataframes again after every 50 trials to avoid using too much memory
trialsDf = pd.DataFrame(columns=['protocolId',
'nciId',
'nctId',
'detailDesc',
'officialTitle',
'briefTitle',
'briefDesc',
'phase',
'leadOrg',
'amendmentDate',
'primaryPurpose',
'activeSitesCount',
'currentTrialStatus',
'startDate',
'completionDate',
'maxAgeInYears',
'minAgeInYears',
'gender',
'acceptsHealthyVolunteers',
'studySource',
'studyProtocolType',
'recordVerificationDate'])
sitesDf = pd.DataFrame(columns=['nciId',
'orgStateOrProvince',
'contactName',
'contactPhone',
'recruitmentStatusDate',
'orgAddressLine1',
'orgAddressLine2',
'orgVa',
'orgTty',
'orgFamily',
'orgPostalCode',
'contactEmail',
'recruitmentStatus',
'orgCity',
'orgEmail',
'orgCountry',
'orgFax',
'orgPhone',
'orgName'])
eligibilityDf = pd.DataFrame(columns=['nciId',
'inclusionIndicator',
'description'])
biomarkersDf = pd.DataFrame(columns=[
'nciId',
'eligibilityCriterion',
'inclusionIndicator',
'nciThesaurusConceptId',
'name',
'assayPurpose'
])
mainBiomarkersDf = pd.DataFrame(columns=[
'nciId',
'eligibilityCriterion',
'inclusionIndicator',
'nciThesaurusConceptId',
'name',
'assayPurpose'
])
diseasesDf = pd.DataFrame(columns=[
'nciId',
'inclusionIndicator',
'isLeadDisease',
'nciThesaurusConceptId',
'name'
])
mainToSubTypeRelsDf = pd.DataFrame(columns=[
'maintype',
'subtype'
])
mainDiseasesDf = pd.DataFrame(columns=[
'nciId',
'inclusionIndicator',
'isLeadDisease',
'nciThesaurusConceptId',
'name'
])
diseasesWithoutSynonymsDf = pd.DataFrame(columns=[
'nciId',
'inclusionIndicator',
'isLeadDisease',
'nciThesaurusConceptId',
'name'
])
subTypeDiseasesDf = pd.DataFrame(columns=[
'nciId',
'inclusionIndicator',
'isLeadDisease',
'nciThesaurusConceptId',
'name'
])
armsDf = pd.DataFrame(columns=[
'nciId',
'name',
'nciIdWithName',
'description',
'type'
])
interventionsDf = pd.DataFrame(columns=[
'nciId',
'arm',
'nciIdWithArm',
'type',
'inclusionIndicator',
'name',
'category',
'nciThesaurusConceptId',
'description'
])
mainInterventionsDf = pd.DataFrame(columns=[
'nciId',
'arm',
'nciIdWithArm',
'type',
'inclusionIndicator',
'name',
'category',
'nciThesaurusConceptId',
'description'
])
payload = {
'size': 50,
'trial_status': 'OPEN',
'from': trialNumFrom
}
response = requests.get(trialEndpoint, headers=headers, params=payload)
response.raise_for_status()
sectionJson = response.json()
trials = []
for trial in sectionJson['data']:
trials.append(createTrialDict(trial))
if trial['eligibility']['unstructured'] is not None:
#parsing the unstructured eligibility information from the trial
eligibilityInfo = []
for condition in trial['eligibility']['unstructured']:
eligibilityInfo.append({
'nciId': trial['nci_id'],
'inclusionIndicator': condition['inclusion_indicator'],
'description': condition['description']
})
conditionDf = pd.DataFrame.from_records(eligibilityInfo)
eligibilityDf = pd.concat([eligibilityDf, conditionDf], verify_integrity=True, ignore_index=True)
if trial['sites'] is not None:
#parsing the sites associated with the trial
sites = []
for site in trial['sites']:
sites.append(createSiteDict(trial, site))
siteDf = pd.DataFrame.from_records(sites)
sitesDf = pd.concat([sitesDf, siteDf], ignore_index=True, verify_integrity=True)
if trial['biomarkers'] is not None:
#parsing the biomarkers associated with the trial
biomarkers = []
mainBiomarkers = []
for biomarker in trial['biomarkers']:
# biomarkers.extend(createBiomarkersDicts(trial, biomarker))
mainBiomarkersDict = createMainBiomarkersDict(trial, biomarker)
if mainBiomarkersDict != {}:
mainBiomarkers.append(mainBiomarkersDict)
# biomarkerDf = pd.DataFrame.from_records(biomarkers)
# biomarkersDf = pd.concat([biomarkersDf, biomarkerDf], ignore_index=True, verify_integrity=True)
mainBiomarkerDf = pd.DataFrame.from_records(mainBiomarkers)
mainBiomarkersDf = pd.concat([mainBiomarkersDf, mainBiomarkerDf], ignore_index=True, verify_integrity=True)
if trial['diseases'] is not None:
# diseases = []
mainToSubTypeRel = []
mainDiseases = []
subTypeDiseases = []
diseasesWithoutSynonyms = []
for disease in trial['diseases']:
# diseasesDicts = createDiseasesDicts(trial, disease)
# diseases.extend(diseasesDicts)
mainDiseasesDict = createMainDiseasesDict(trial, disease)
if mainDiseasesDict != {}:
mainDiseases.append(mainDiseasesDict)
subTypeDiseasesDict = createSubTypeDiseasesDict(trial, disease)
if subTypeDiseasesDict != {}:
subTypeDiseases.append(subTypeDiseasesDict)
diseasesWithoutSynonymsDict = createDiseasesWithoutSynonymsDict(trial, disease)
if diseasesWithoutSynonymsDict != {}:
diseasesWithoutSynonyms.append(diseasesWithoutSynonymsDict)
mainToSubTypeRel.extend(createMainToSubTypeRelDicts(trial, disease))
# diseaseDf = pd.DataFrame.from_records(diseases)
# diseasesDf = pd.concat([diseasesDf, diseaseDf], ignore_index=True, verify_integrity=True)
mainToSubTypeRelDf = pd.DataFrame.from_records(mainToSubTypeRel)
mainToSubTypeRelsDf = pd.concat([mainToSubTypeRelsDf, mainToSubTypeRelDf], ignore_index=True, verify_integrity=True)
mainDiseaseDf = pd.DataFrame.from_records(mainDiseases)
mainDiseasesDf = pd.concat([mainDiseasesDf, mainDiseaseDf], ignore_index=True, verify_integrity=True)
subTypeDiseaseDf = pd.DataFrame.from_records(subTypeDiseases)
import pandas as pd
import random
from sklearn.neighbors import NearestNeighbors
import math
answer_list = pd.read_csv("templates/meta_answers.csv")
import itertools
import string
import numpy as np
from numpy import random
import pytest
import pandas.util._test_decorators as td
from pandas import DataFrame, MultiIndex, Series, date_range, timedelta_range
import pandas._testing as tm
from pandas.tests.plotting.common import TestPlotBase, _check_plot_works
import pandas.plotting as plotting
""" Test cases for .boxplot method """
@td.skip_if_no_mpl
class TestDataFramePlots(TestPlotBase):
@pytest.mark.slow
def test_boxplot_legacy1(self):
df = DataFrame(
np.random.randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=["one", "two", "three", "four"],
)
df["indic"] = ["foo", "bar"] * 3
df["indic2"] = ["foo", "bar", "foo"] * 2
_check_plot_works(df.boxplot, return_type="dict")
_check_plot_works(df.boxplot, column=["one", "two"], return_type="dict")
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, column=["one", "two"], by="indic")
_check_plot_works(df.boxplot, column="one", by=["indic", "indic2"])
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by="indic")
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by=["indic", "indic2"])
_check_plot_works(plotting._core.boxplot, data=df["one"], return_type="dict")
_check_plot_works(df.boxplot, notch=1, return_type="dict")
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by="indic", notch=1)
@pytest.mark.slow
def test_boxplot_legacy2(self):
df = DataFrame(np.random.rand(10, 2), columns=["Col1", "Col2"])
df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"])
df["Y"] = Series(["A"] * 10)
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by="X")
# When ax is supplied and required number of axes is 1,
# passed ax should be used:
fig, ax = self.plt.subplots()
axes = df.boxplot("Col1", by="X", ax=ax)
ax_axes = ax.axes
assert ax_axes is axes
fig, ax = self.plt.subplots()
axes = df.groupby("Y").boxplot(ax=ax, return_type="axes")
ax_axes = ax.axes
assert ax_axes is axes["A"]
# Multiple columns with an ax argument should use same figure
fig, ax = self.plt.subplots()
with tm.assert_produces_warning(UserWarning):
axes = df.boxplot(
column=["Col1", "Col2"], by="X", ax=ax, return_type="axes"
)
assert axes["Col1"].get_figure() is fig
# When by is None, check that all relevant lines are present in the
# dict
fig, ax = self.plt.subplots()
d = df.boxplot(ax=ax, return_type="dict")
lines = list(itertools.chain.from_iterable(d.values()))
assert len(ax.get_lines()) == len(lines)
@pytest.mark.slow
def test_boxplot_return_type_none(self):
# GH 12216; return_type=None & by=None -> axes
result = self.hist_df.boxplot()
assert isinstance(result, self.plt.Axes)
@pytest.mark.slow
def test_boxplot_return_type_legacy(self):
# API change in https://github.com/pandas-dev/pandas/pull/7096
import matplotlib as mpl # noqa
df = DataFrame(
np.random.randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=["one", "two", "three", "four"],
)
with pytest.raises(ValueError):
df.boxplot(return_type="NOTATYPE")
result = df.boxplot()
self._check_box_return_type(result, "axes")
with tm.assert_produces_warning(False):
result = df.boxplot(return_type="dict")
self._check_box_return_type(result, "dict")
with tm.assert_produces_warning(False):
result = df.boxplot(return_type="axes")
self._check_box_return_type(result, "axes")
with tm.assert_produces_warning(False):
result = df.boxplot(return_type="both")
self._check_box_return_type(result, "both")
@pytest.mark.slow
def test_boxplot_axis_limits(self):
def _check_ax_limits(col, ax):
y_min, y_max = ax.get_ylim()
assert y_min <= col.min()
assert y_max >= col.max()
df = self.hist_df.copy()
df["age"] = np.random.randint(1, 20, df.shape[0])
# One full row
height_ax, weight_ax = df.boxplot(["height", "weight"], by="category")
_check_ax_limits(df["height"], height_ax)
_check_ax_limits(df["weight"], weight_ax)
assert weight_ax._sharey == height_ax
# Two rows, one partial
p = df.boxplot(["height", "weight", "age"], by="category")
height_ax, weight_ax, age_ax = p[0, 0], p[0, 1], p[1, 0]
dummy_ax = p[1, 1]
_check_ax_limits(df["height"], height_ax)
_check_ax_limits(df["weight"], weight_ax)
_check_ax_limits(df["age"], age_ax)
assert weight_ax._sharey == height_ax
assert age_ax._sharey == height_ax
assert dummy_ax._sharey is None
@pytest.mark.slow
def test_boxplot_empty_column(self):
df = DataFrame(np.random.randn(20, 4))
df.loc[:, 0] = np.nan
_check_plot_works(df.boxplot, return_type="axes")
@pytest.mark.slow
def test_figsize(self):
df = DataFrame(np.random.rand(10, 5), columns=["A", "B", "C", "D", "E"])
result = df.boxplot(return_type="axes", figsize=(12, 8))
assert result.figure.bbox_inches.width == 12
assert result.figure.bbox_inches.height == 8
def test_fontsize(self):
df = DataFrame({"a": [1, 2, 3, 4, 5, 6]})
self._check_ticks_props(
df.boxplot("a", fontsize=16), xlabelsize=16, ylabelsize=16
)
def test_boxplot_numeric_data(self):
# GH 22799
df = DataFrame(
{
"a": date_range("2012-01-01", periods=100),
"b": np.random.randn(100),
"c": np.random.randn(100) + 2,
"d": date_range("2012-01-01", periods=100).astype(str),
"e": date_range("2012-01-01", periods=100, tz="UTC"),
"f": timedelta_range("1 days", periods=100),
}
)
ax = df.plot(kind="box")
assert [x.get_text() for x in ax.get_xticklabels()] == ["b", "c"]
@pytest.mark.parametrize(
"colors_kwd, expected",
[
(
dict(boxes="r", whiskers="b", medians="g", caps="c"),
dict(boxes="r", whiskers="b", medians="g", caps="c"),
),
(dict(boxes="r"), dict(boxes="r")),
("r", dict(boxes="r", whiskers="r", medians="r", caps="r")),
],
)
def test_color_kwd(self, colors_kwd, expected):
# GH: 26214
df = DataFrame(random.rand(10, 2))
result = df.boxplot(color=colors_kwd, return_type="dict")
for k, v in expected.items():
assert result[k][0].get_color() == v
@pytest.mark.parametrize(
"dict_colors, msg",
[(dict(boxes="r", invalid_key="r"), "invalid key 'invalid_key'")],
)
def test_color_kwd_errors(self, dict_colors, msg):
# GH: 26214
df = DataFrame(random.rand(10, 2))
with pytest.raises(ValueError, match=msg):
df.boxplot(color=dict_colors, return_type="dict")
@pytest.mark.parametrize(
"props, expected",
[
("boxprops", "boxes"),
("whiskerprops", "whiskers"),
("capprops", "caps"),
("medianprops", "medians"),
],
)
def test_specified_props_kwd(self, props, expected):
# GH 30346
df = DataFrame({k: np.random.random(100) for k in "ABC"})
kwd = {props: dict(color="C1")}
result = df.boxplot(return_type="dict", **kwd)
assert result[expected][0].get_color() == "C1"
@td.skip_if_no_mpl
class TestDataFrameGroupByPlots(TestPlotBase):
@pytest.mark.slow
def test_boxplot_legacy1(self):
grouped = self.hist_df.groupby(by="gender")
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(grouped.boxplot, return_type="axes")
self._check_axes_shape(list(axes.values), axes_num=2, layout=(1, 2))
axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
#!/usr/bin/env python
# coding: utf-8
# In[42]:
import pandas as pd
from sklearn.linear_model import LinearRegression
import pickle
import matplotlib.pyplot as plt
import pandas_datareader as pdr
STOCK_NAME = "FB"
# In[43]:
yahoo_data_final = pdr.data.get_data_yahoo(STOCK_NAME, start='2020-01-01')
yahoo_data_final.to_csv("df.csv")
del yahoo_data_final
df = pd.read_csv("df.csv")
# In[44]:
df.Open = df.Open.fillna(df.Open.median())
df.High = df.High.fillna(df.High.median())
df.Low = df.Low.fillna(df.Low.median())
df.Close = df.Close.fillna(df.Close.median())
# In[45]:
regression = LinearRegression()
regression.fit(df[["Open", "High", "Low"]], df.Close)
# In[46]:
PRE_CLOSE = []
for i in range(len(df["Close"])):
temp = (df["Date"][i], (regression.predict([[df["Open"][i], df["High"][i], df["Low"][i]]])))
PRE_CLOSE.append(temp)
perdicted = pd.DataFrame(PRE_CLOSE, columns=["Date", "Close"])
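# A vectorized alternative (a sketch under the same assumptions as the loop above): predicting all
# rows at once avoids per-row predict() calls and keeps the "Close" column scalar-valued.
predicted_vectorized = pd.DataFrame({
    "Date": df["Date"],
    "Close": regression.predict(df[["Open", "High", "Low"]]),
})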
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 25 10:41:54 2018
@author: priyansu
"""
import pandas as pd
import numpy as np
train=pd.read_csv("Train.csv")
test=pd.read_csv("Test.csv")
""" test the scalar Timestamp """
import pytz
import pytest
import dateutil
import calendar
import locale
import numpy as np
from dateutil.tz import tzutc
from pytz import timezone, utc
from datetime import datetime, timedelta
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.tseries import offsets
from pandas._libs.tslibs import conversion
from pandas._libs.tslibs.timezones import get_timezone, dateutil_gettz as gettz
from pandas.errors import OutOfBoundsDatetime
from pandas.compat import long, PY3
from pandas.compat.numpy import np_datetime64_compat
from pandas import Timestamp, Period, Timedelta, NaT
class TestTimestampProperties(object):
def test_properties_business(self):
ts = Timestamp('2017-10-01', freq='B')
control = Timestamp('2017-10-01')
assert ts.dayofweek == 6
assert not ts.is_month_start # not a weekday
assert not ts.is_quarter_start # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_start
assert control.is_quarter_start
ts = Timestamp('2017-09-30', freq='B')
control = Timestamp('2017-09-30')
assert ts.dayofweek == 5
assert not ts.is_month_end # not a weekday
assert not ts.is_quarter_end # not a weekday
# Control case: non-business is month/qtr end
assert control.is_month_end
assert control.is_quarter_end
def test_fields(self):
def check(value, equal):
# that we are int/long like
assert isinstance(value, (int, long))
assert value == equal
# GH 10050
ts = Timestamp('2015-05-10 09:06:03.000100001')
check(ts.year, 2015)
check(ts.month, 5)
check(ts.day, 10)
check(ts.hour, 9)
check(ts.minute, 6)
check(ts.second, 3)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 100)
check(ts.nanosecond, 1)
check(ts.dayofweek, 6)
check(ts.quarter, 2)
check(ts.dayofyear, 130)
check(ts.week, 19)
check(ts.daysinmonth, 31)
check(ts.daysinmonth, 31)
# GH 13303
ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern')
check(ts.year, 2014)
check(ts.month, 12)
check(ts.day, 31)
check(ts.hour, 23)
check(ts.minute, 59)
check(ts.second, 0)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 0)
check(ts.nanosecond, 0)
check(ts.dayofweek, 2)
check(ts.quarter, 4)
check(ts.dayofyear, 365)
check(ts.week, 1)
check(ts.daysinmonth, 31)
ts = Timestamp('2014-01-01 00:00:00+01:00')
starts = ['is_month_start', 'is_quarter_start', 'is_year_start']
for start in starts:
assert getattr(ts, start)
ts = Timestamp('2014-12-31 23:59:59+01:00')
ends = ['is_month_end', 'is_year_end', 'is_quarter_end']
for end in ends:
assert getattr(ts, end)
# GH 12806
@pytest.mark.parametrize('data',
[Timestamp('2017-08-28 23:00:00'),
Timestamp('2017-08-28 23:00:00', tz='EST')])
@pytest.mark.parametrize('time_locale', [
None] if tm.get_locales() is None else [None] + tm.get_locales())
def test_names(self, data, time_locale):
# GH 17354
# Test .weekday_name, .day_name(), .month_name
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
assert data.weekday_name == 'Monday'
if time_locale is None:
expected_day = 'Monday'
expected_month = 'August'
else:
with tm.set_locale(time_locale, locale.LC_TIME):
expected_day = calendar.day_name[0].capitalize()
expected_month = calendar.month_name[8].capitalize()
assert data.day_name(time_locale) == expected_day
assert data.month_name(time_locale) == expected_month
# Test NaT
nan_ts = Timestamp(NaT)
assert np.isnan(nan_ts.day_name(time_locale))
assert np.isnan(nan_ts.month_name(time_locale))
@pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern', 'Asia/Tokyo'])
def test_is_leap_year(self, tz):
# GH 13727
dt = Timestamp('2000-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
assert isinstance(dt.is_leap_year, bool)
dt = Timestamp('1999-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
dt = Timestamp('2004-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
dt = Timestamp('2100-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013, 12, 31)
result = Timestamp(d).week
expected = 1 # ISO standard
assert result == expected
d = datetime(2008, 12, 28)
result = Timestamp(d).week
expected = 52 # ISO standard
assert result == expected
d = datetime(2009, 12, 31)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 1)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 3)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
result = np.array([Timestamp(datetime(*args)).week
for args in [(2000, 1, 1), (2000, 1, 2), (
2005, 1, 1), (2005, 1, 2)]])
assert (result == [52, 52, 53, 53]).all()
class TestTimestampConstructors(object):
def test_constructor(self):
base_str = '2014-07-01 09:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
assert (calendar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_dt, base_expected),
('2014-07-01 10:00', datetime(2014, 7, 1, 10),
base_expected + 3600 * 1000000000),
('2014-07-01 09:00:00.000008000',
datetime(2014, 7, 1, 9, 0, 0, 8),
base_expected + 8000),
('2014-07-01 09:00:00.000000005',
Timestamp('2014-07-01 09:00:00.000000005'),
base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, date, expected in tests:
for result in [Timestamp(date_str), Timestamp(date)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
for result in [Timestamp(date_str, tz=tz), Timestamp(date,
tz=tz)]:
expected_tz = expected - offset * 3600 * 1000000000
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected - offset * 3600 * 1000000000
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
def test_constructor_with_stringoffset(self):
# GH 7833
base_str = '2014-07-01 11:00:00+02:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
assert (calendar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_expected),
('2014-07-01 12:00:00+02:00',
base_expected + 3600 * 1000000000),
('2014-07-01 11:00:00.000008000+02:00', base_expected + 8000),
('2014-07-01 11:00:00.000000005+02:00', base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, expected in tests:
for result in [Timestamp(date_str)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
result = Timestamp(date_str, tz=tz)
expected_tz = expected
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
# This should be 2013-11-01 05:00 in UTC
# converted to Chicago tz
result = Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')
assert result.value == Timestamp('2013-11-01 05:00').value
expected = "Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')" # noqa
assert repr(result) == expected
assert result == eval(repr(result))
# This should be 2013-11-01 05:00 in UTC
# converted to Tokyo tz (+09:00)
result = Timestamp('2013-11-01 00:00:00-0500', tz='Asia/Tokyo')
assert result.value == Timestamp('2013-11-01 05:00').value
expected = "Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')"
assert repr(result) == expected
assert result == eval(repr(result))
# GH11708
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Katmandu
result = Timestamp("2015-11-18 15:45:00+05:45", tz="Asia/Katmandu")
assert result.value == Timestamp("2015-11-18 10:00").value
expected = "Timestamp('2015-11-18 15:45:00+0545', tz='Asia/Katmandu')"
assert repr(result) == expected
assert result == eval(repr(result))
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Kolkata
result = Timestamp("2015-11-18 15:30:00+05:30", tz="Asia/Kolkata")
assert result.value == Timestamp("2015-11-18 10:00").value
expected = "Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')"
assert repr(result) == expected
assert result == eval(repr(result))
def test_constructor_invalid(self):
with tm.assert_raises_regex(TypeError, 'Cannot convert input'):
Timestamp(slice(2))
with tm.assert_raises_regex(ValueError, 'Cannot convert Period'):
Timestamp(Period('1000-01-01'))
def test_constructor_invalid_tz(self):
# GH#17690
with tm.assert_raises_regex(TypeError, 'must be a datetime.tzinfo'):
Timestamp('2017-10-22', tzinfo='US/Eastern')
with tm.assert_raises_regex(ValueError, 'at most one of'):
Timestamp('2017-10-22', tzinfo=utc, tz='UTC')
with tm.assert_raises_regex(ValueError, "Invalid frequency:"):
# GH#5168
# case where user tries to pass tz as an arg, not kwarg, gets
# interpreted as a `freq`
Timestamp('2012-01-01', 'US/Pacific')
def test_constructor_tz_or_tzinfo(self):
# GH#17943, GH#17690, GH#5168
stamps = [Timestamp(year=2017, month=10, day=22, tz='UTC'),
Timestamp(year=2017, month=10, day=22, tzinfo=utc),
Timestamp(year=2017, month=10, day=22, tz=utc),
Timestamp(datetime(2017, 10, 22), tzinfo=utc),
Timestamp(datetime(2017, 10, 22), tz='UTC'),
Timestamp(datetime(2017, 10, 22), tz=utc)]
assert all(ts == stamps[0] for ts in stamps)
def test_constructor_positional(self):
# see gh-10758
with pytest.raises(TypeError):
Timestamp(2000, 1)
with pytest.raises(ValueError):
Timestamp(2000, 0, 1)
with pytest.raises(ValueError):
Timestamp(2000, 13, 1)
with pytest.raises(ValueError):
Timestamp(2000, 1, 0)
with pytest.raises(ValueError):
Timestamp(2000, 1, 32)
# see gh-11630
assert (repr(Timestamp(2015, 11, 12)) ==
repr(Timestamp('20151112')))
assert (repr(Timestamp(2015, 11, 12, 1, 2, 3, 999999)) ==
repr(Timestamp('2015-11-12 01:02:03.999999')))
def test_constructor_keyword(self):
# GH 10758
with pytest.raises(TypeError):
Timestamp(year=2000, month=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=0, day=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=13, day=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=1, day=0)
with pytest.raises(ValueError):
Timestamp(year=2000, month=1, day=32)
assert (repr(Timestamp(year=2015, month=11, day=12)) ==
repr(Timestamp('20151112')))
assert (repr(Timestamp(year=2015, month=11, day=12, hour=1, minute=2,
second=3, microsecond=999999)) ==
repr(Timestamp('2015-11-12 01:02:03.999999')))
def test_constructor_fromordinal(self):
base = datetime(2000, 1, 1)
ts = Timestamp.fromordinal(base.toordinal(), freq='D')
assert base == ts
assert ts.freq == 'D'
assert base.toordinal() == ts.toordinal()
ts = Timestamp.fromordinal(base.toordinal(), tz='US/Eastern')
assert Timestamp('2000-01-01', tz='US/Eastern') == ts
assert base.toordinal() == ts.toordinal()
# GH#3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
assert ts.to_pydatetime() == dt
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(), tz='US/Eastern')
assert ts.to_pydatetime() == dt_tz
@pytest.mark.parametrize('result', [
Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), nanosecond=1),
Timestamp(year=2000, month=1, day=2, hour=3, minute=4, second=5,
microsecond=6, nanosecond=1),
Timestamp(year=2000, month=1, day=2, hour=3, minute=4, second=5,
microsecond=6, nanosecond=1, tz='UTC'),
Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, None),
Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, pytz.UTC)])
def test_constructor_nanosecond(self, result):
# GH 18898
expected = Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), tz=result.tz)
expected = expected + Timedelta(nanoseconds=1)
assert result == expected
@pytest.mark.parametrize('arg', ['year', 'month', 'day', 'hour', 'minute',
'second', 'microsecond', 'nanosecond'])
def test_invalid_date_kwarg_with_string_input(self, arg):
kwarg = {arg: 1}
with pytest.raises(ValueError):
Timestamp('2010-10-10 12:59:59.999999999', **kwarg)
def test_out_of_bounds_value(self):
one_us = np.timedelta64(1).astype('timedelta64[us]')
# By definition we can't go out of bounds in [ns], so we
# convert the datetime64s to [us] so we can go out of bounds
min_ts_us = np.datetime64(Timestamp.min).astype('M8[us]')
max_ts_us = np.datetime64(Timestamp.max).astype('M8[us]')
# No error for the min/max datetimes
Timestamp(min_ts_us)
Timestamp(max_ts_us)
# One us less than the minimum is an error
with pytest.raises(ValueError):
Timestamp(min_ts_us - one_us)
# One us more than the maximum is an error
with pytest.raises(ValueError):
Timestamp(max_ts_us + one_us)
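    # A minimal sketch of the unit trick used above (assuming numpy semantics):
    # np.datetime64(Timestamp.max).astype('M8[us]') + np.timedelta64(1, 'us')
    # stays representable as datetime64[us], but it overflows the [ns] range
    # that Timestamp requires, so the constructor is expected to raise.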
def test_out_of_bounds_string(self):
with pytest.raises(ValueError):
Timestamp('1676-01-01')
with pytest.raises(ValueError):
Timestamp('2263-01-01')
def test_barely_out_of_bounds(self):
# GH#19529
# GH#19382 close enough to bounds that dropping nanos would result
# in an in-bounds datetime
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2262-04-11 23:47:16.854775808')
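    # For reference (a sketch of the arithmetic): Timestamp.max is
    # 2262-04-11 23:47:16.854775807, so the string above is exactly 1 ns past
    # the upper bound and must not be silently truncated into range.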
def test_bounds_with_different_units(self):
out_of_bounds_dates = ('1677-09-21', '2262-04-12')
time_units = ('D', 'h', 'm', 's', 'ms', 'us')
for date_string in out_of_bounds_dates:
for unit in time_units:
                dt64 = np.datetime64(date_string, unit)
with pytest.raises(ValueError):
Timestamp(dt64)
in_bounds_dates = ('1677-09-23', '2262-04-11')
for date_string in in_bounds_dates:
for unit in time_units:
                dt64 = np.datetime64(date_string, unit)
Timestamp(dt64)
def test_min_valid(self):
# Ensure that Timestamp.min is a valid Timestamp
Timestamp(Timestamp.min)
def test_max_valid(self):
# Ensure that Timestamp.max is a valid Timestamp
Timestamp(Timestamp.max)
def test_now(self):
# GH#9000
ts_from_string = Timestamp('now')
ts_from_method = Timestamp.now()
ts_datetime = datetime.now()
ts_from_string_tz = Timestamp('now', tz='US/Eastern')
ts_from_method_tz = Timestamp.now(tz='US/Eastern')
# Check that the delta between the times is less than 1s (arbitrarily
# small)
delta = Timedelta(seconds=1)
assert abs(ts_from_method - ts_from_string) < delta
assert abs(ts_datetime - ts_from_method) < delta
assert abs(ts_from_method_tz - ts_from_string_tz) < delta
assert (abs(ts_from_string_tz.tz_localize(None) -
ts_from_method_tz.tz_localize(None)) < delta)
def test_today(self):
ts_from_string = Timestamp('today')
ts_from_method = Timestamp.today()
ts_datetime = datetime.today()
ts_from_string_tz = Timestamp('today', tz='US/Eastern')
ts_from_method_tz = Timestamp.today(tz='US/Eastern')
# Check that the delta between the times is less than 1s (arbitrarily
# small)
delta = Timedelta(seconds=1)
assert abs(ts_from_method - ts_from_string) < delta
assert abs(ts_datetime - ts_from_method) < delta
assert abs(ts_from_method_tz - ts_from_string_tz) < delta
assert (abs(ts_from_string_tz.tz_localize(None) -
ts_from_method_tz.tz_localize(None)) < delta)
class TestTimestamp(object):
def test_tz(self):
tstr = '2014-02-01 09:00'
ts = Timestamp(tstr)
local = ts.tz_localize('Asia/Tokyo')
assert local.hour == 9
assert local == Timestamp(tstr, tz='Asia/Tokyo')
conv = local.tz_convert('US/Eastern')
assert conv == Timestamp('2014-01-31 19:00', tz='US/Eastern')
assert conv.hour == 19
# preserves nanosecond
ts = Timestamp(tstr) + offsets.Nano(5)
local = ts.tz_localize('Asia/Tokyo')
assert local.hour == 9
assert local.nanosecond == 5
conv = local.tz_convert('US/Eastern')
assert conv.nanosecond == 5
assert conv.hour == 19
def test_utc_z_designator(self):
assert get_timezone(Timestamp('2014-11-02 01:00Z').tzinfo) == 'UTC'
def test_asm8(self):
np.random.seed(7960929)
ns = [Timestamp.min.value, Timestamp.max.value, 1000]
for n in ns:
assert (Timestamp(n).asm8.view('i8') ==
np.datetime64(n, 'ns').view('i8') == n)
assert (Timestamp('nat').asm8.view('i8') ==
np.datetime64('nat', 'ns').view('i8'))
def test_class_ops_pytz(self):
def compare(x, y):
assert (int(Timestamp(x).value / 1e9) ==
int(Timestamp(y).value / 1e9))
compare(Timestamp.now(), datetime.now())
compare(Timestamp.now('UTC'), datetime.now(timezone('UTC')))
compare(Timestamp.utcnow(), datetime.utcnow())
compare(Timestamp.today(), datetime.today())
current_time = calendar.timegm(datetime.now().utctimetuple())
compare(Timestamp.utcfromtimestamp(current_time),
datetime.utcfromtimestamp(current_time))
compare(Timestamp.fromtimestamp(current_time),
datetime.fromtimestamp(current_time))
date_component = datetime.utcnow()
time_component = (date_component + timedelta(minutes=10)).time()
compare(Timestamp.combine(date_component, time_component),
datetime.combine(date_component, time_component))
def test_class_ops_dateutil(self):
def compare(x, y):
assert (int(np.round(Timestamp(x).value / 1e9)) ==
int(np.round(Timestamp(y).value / 1e9)))
compare(Timestamp.now(), datetime.now())
compare(Timestamp.now('UTC'), datetime.now(tzutc()))
compare(Timestamp.utcnow(), datetime.utcnow())
compare(Timestamp.today(), datetime.today())
current_time = calendar.timegm(datetime.now().utctimetuple())
compare(Timestamp.utcfromtimestamp(current_time),
datetime.utcfromtimestamp(current_time))
compare(Timestamp.fromtimestamp(current_time),
datetime.fromtimestamp(current_time))
date_component = datetime.utcnow()
time_component = (date_component + timedelta(minutes=10)).time()
compare(Timestamp.combine(date_component, time_component),
datetime.combine(date_component, time_component))
def test_basics_nanos(self):
val = np.int64(946684800000000000).view('M8[ns]')
stamp = Timestamp(val.view('i8') + 500)
assert stamp.year == 2000
assert stamp.month == 1
assert stamp.microsecond == 0
assert stamp.nanosecond == 500
# GH 14415
val = np.iinfo(np.int64).min + 80000000000000
stamp = Timestamp(val)
assert stamp.year == 1677
assert stamp.month == 9
assert stamp.day == 21
assert stamp.microsecond == 145224
assert stamp.nanosecond == 192
def test_unit(self):
def check(val, unit=None, h=1, s=1, us=0):
stamp = Timestamp(val, unit=unit)
assert stamp.year == 2000
assert stamp.month == 1
assert stamp.day == 1
assert stamp.hour == h
if unit != 'D':
assert stamp.minute == 1
assert stamp.second == s
assert stamp.microsecond == us
else:
assert stamp.minute == 0
assert stamp.second == 0
assert stamp.microsecond == 0
assert stamp.nanosecond == 0
ts =
|
Timestamp('20000101 01:01:01')
|
pandas.Timestamp
|
# Read the bitcoin blockchain data and extract its topological properties
# Modified on 12.04.2021
from collections import defaultdict
from multiprocessing import Pool, Queue
import multiprocessing
import datetime
import math
import time
import pandas as pd
import requests
import numpy as np
import json
import os
import torch
begin_date = '2020-01-01'
end_date = '2020-12-31'
AMODATA_DIR = 'amoData/'
AMOMAT_DIR = 'amoMat/'
OCCMAT_DIR = 'occMat/'
PERMAT_DIR = 'perMat/'
BETTI_DIR = 'betti/'
BETTI_0_DIR = 'betti/betti_0/'
BETTI_1_DIR = 'betti/betti_1/'
def getBetweenDay(begin_date, end_date):
date_list = []
date_arr = []
date_unix_list = []
begin_date = datetime.datetime.strptime(begin_date, "%Y-%m-%d")
print(type(begin_date))
print("begin_date:",begin_date)
# end_date = datetime.datetime.strptime(time.strftime('%Y-%m-%d', time.localtime(time.time())), "%Y-%m-%d")
end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d")
print("end_date:",end_date)
while begin_date <= end_date:
date_unix = math.trunc(begin_date.replace(tzinfo=datetime.timezone.utc).timestamp()*1000)
date_unix_list.append(date_unix)
date_str = begin_date.strftime("%Y-%m-%d")
date_list.append(date_str)
date_arr.append([date_str, date_unix])
begin_date += datetime.timedelta(days=1)
return np.asarray(date_arr)
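# A small sketch of the expected output (values are UTC-midnight epochs in ms;
# note that np.asarray stores both columns as strings):
#   getBetweenDay('2020-01-01', '2020-01-02')
#   -> [['2020-01-01', '1577836800000'], ['2020-01-02', '1577923200000']]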
def new_file(dir):
list = os.listdir(dir)
list.sort(key=lambda fn:os.path.getmtime(dir+fn))
filetime = datetime.datetime.fromtimestamp(os.path.getmtime(dir+list[-1]))
filepath = os.path.join(dir, list[-1])
print("latest file is: ", list[-1])
print("time: ", filetime .strftime("%Y-%m-%d %H:%M:%S"))
return filepath
# Read block
def read_block(block):
tx_btc_total = []
print("block:", block['hash'])
    # Fetch the full transaction data for this block
tx_api = 'https://blockchain.info/rawblock/'+block['hash']
tx_data = requests.get(tx_api)
None_count = 0
for tx in tx_data.json()['tx']:
# Extract its input size and output size
# chain_data.append([tx['vin_sz'], tx['vout_sz']])
vin = tx['vin_sz']
vout = tx['vout_sz']
if vin > 20:
vin = 20
if vout > 20:
vout = 20
IOName = f'{vin:02}' + f'{vout:02}'
tx_value = 0
for value in tx['inputs']:
            if ('prev_out' in value) and (value['prev_out'] is not None):
#print("value:", value)
tx_value = tx_value + value['prev_out']['value']
else:
None_count = None_count + 1
tx_btc_total.append([IOName, tx_value])
tx_btc_total = pd.DataFrame(tx_btc_total)
#print("None_count: ", None_count)
return tx_btc_total
# print(tx_btc_total)
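# Sketch of read_block's return value: one row per transaction, column 0 holds
# the 4-character IO code (input/output sizes clipped at 20) and column 1 the
# summed input value in satoshi, e.g.
#         0        1
#   0  0102  5000000
#   1  2001   730000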
# create an IO-Name list
def create_IONameList():
IONameList = []
for i in range(20):
for j in range(20):
IOName = f'{i:02}' + f'{j:02}'
IONameList.append(IOName)
return IONameList
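# Example (sketch): 400 codes covering every clipped input/output size pair,
#   create_IONameList()[:3] -> ['0000', '0001', '0002'], ..., last entry '1919'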
# Merge two dictionaries
def mergeDict(dict1, dict2):
''' Merge dictionaries and keep values of common keys in list'''
dict3 = {**dict1, **dict2}
for key, value in dict3.items():
if key in dict1 and key in dict2:
dict3[key] = [value , dict1[key]]
return dict3
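# Example (sketch): values of keys present in both inputs are collected as
# [value_from_dict2, value_from_dict1]:
#   mergeDict({'a': 1, 'b': 2}, {'b': 20, 'c': 30})
#   -> {'a': 1, 'b': [20, 2], 'c': 30}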
# calculate the quantile distance between the amount distributions of two nodes,
# using 499 quantile points (multiples of 1/500)
def calculate_quantile(amount_1, amount_2):
# for quantile_ in range(quantile_value,1,quantile_value)
# print("quantile;",amount_1.quantile([0.25, 0.5, 0.75])[0][0.25])
quantile_percentage = [ i/500 for i in range(1, 500, 1)]
quantile_squar = (amount_1.quantile(quantile_percentage)[0]-amount_2.quantile(quantile_percentage)[0])**2
quantile_sum = quantile_squar.sum()
# print(quantile_1, quantile_2, quantile_3)
return (quantile_sum)**0.5
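# Example (sketch): identical distributions have zero quantile distance,
# so the diagonal of the resulting amount matrix is 0:
#   a = pd.DataFrame([1.0, 2.0, 3.0])
#   calculate_quantile(a, a)  # -> 0.0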
def getYearFromDate(date):
Year = date.split("-")[0]
return Year
def getYearsFromDate(begin_date, end_date):
begin_Year = begin_date.split("-")[0]
end_Year = end_date.split("-")[0]
YEARS = [str(i) for i in range(int(begin_Year), int(end_Year) + 1)]
return YEARS
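# Example (sketch): getYearsFromDate('2019-06-01', '2021-02-01') -> ['2019', '2020', '2021']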
# If any error is encountered, the day's processing automatically restarts.
def genOccMat(date_unix):
time_now = time.time()
    YEAR = getYearFromDate(date_unix[0])  # year of the day being processed; used in all output paths below
while True:
try:
print("check file {}...".format(date_unix[0]+".json"))
if 'occ' + date_unix[0] + '.csv' in os.listdir(OCCMAT_DIR + YEAR + "/"):
print(date_unix[0]+".csv already exists ...")
# continue
else:
'''
# Get the daily block
datum = "https://blockchain.info/blocks/"+date_unix[1]+"?format=json"
amo_data_total = pd.DataFrame([])
res = requests.get(datum)
for block in res.json()["blocks"]:
block_data = read_block(block)
amo_data_total = pd.concat([amo_data_total, block_data])
'''
# For 2018 data columns are [index,"0", "1"]
# amo_data_total = pd.read_csv(AMODATA_DIR + YEAR + "/" + date_unix[0] + ".csv", index_col=0, converters={"0":str})
# amo_data_total.to_csv(AMODATA_DIR + YEAR + "/" + date_unix[0] + ".csv")
# amo_data_total.columns = ['IOSize', 'tx_value']
# For 2020 data columns are [index,"IOSize", "tx_value", "tx_value_log"]
### amo_data_total = pd.read_csv(AMODATA_DIR + YEAR + "/" + date_unix[0] + ".csv", index_col=0, names=["IOSize","tx_value"], converters={"IOSize":str, "tx_value": float})
#amo_data_total_convert = amo_data_total[["IOSize", "tx_value"]]
#amo_data_total_convert.columns = ["0","1"]
#amo_data_total_convert.to_csv(AMODATA_DIR + YEAR + "/" + date_unix[0] + ".csv")
#amo_data_total = amo_data_total_convert
###############
# group the same address
amo_data_total = pd.read_csv(AMODATA_DIR + YEAR + "/" + date_unix[0] + ".csv", names=["addr", "in_sz", "out_sz", "tx_value"], converters={"addr":str, "tx_value": float})
amo_data_total = pd.concat([amo_data_total.groupby(amo_data_total["addr"]).sum()], axis=1).reset_index(drop=True)
eth_tx_total = []
for tx in amo_data_total.values:
# Extract its input size and output size
vin = int(tx[0])
vout = int(tx[1])
if math.floor(vin/2) > 19:
vin = 19
else:
vin = math.floor(vin/2)
if math.floor(vout/2) > 19:
vout = 19
else:
vout = math.floor(vout/2)
IOName = f'{vin:02}' + f'{vout:02}'
tx_value = tx[2]
eth_tx_total.append([IOName, tx_value])
amo_data_total = pd.DataFrame(eth_tx_total, columns=["IOSize", "tx_value"])
###############
### amo_data_total.columns = ['IOSize', 'tx_value']
amo_data_total["tx_value_log"] = amo_data_total["tx_value"].map(lambda x: round(math.log(1 + x/(10**8)),5))
amo_data_total.reset_index(drop=True)
amo_data_total_dict = amo_data_total.groupby('IOSize').tx_value_log.apply(list).to_dict()
IONameList = create_IONameList()
print("amo_data_total:", amo_data_total)
MATRIX_SIZE = len(IONameList)
                amoMat = [[0] * MATRIX_SIZE for _ in range(MATRIX_SIZE)]  # avoid aliasing a single row object
amoMat_df = pd.DataFrame(amoMat, columns = IONameList, index = IONameList)
for IO_1 in IONameList:
if IO_1 in amo_data_total_dict:
amount_1 = pd.DataFrame(amo_data_total_dict[IO_1])
else:
amount_1 = pd.DataFrame([0])
for IO_2 in IONameList:
if IO_2 in amo_data_total_dict:
amount_2 = pd.DataFrame(amo_data_total_dict[IO_2])
else:
amount_2 = pd.DataFrame([0])
amoMat_df.loc[IO_1, IO_2] = calculate_quantile(amount_1, amount_2)
#print("amoMat_df:", amoMat_df)
print("amoMat_df:", amoMat_df)
# Calculate betti nummber
# add parameter for perseus computing
                amoMat_df = amoMat_df.astype(str)  # cast to strings before prepending the perseus parameter rows
param_1 = pd.DataFrame([["400"]], columns=["0101"])
param_2 = pd.DataFrame([["1","1","101","1"]], columns=["0101", "0102", "0103", "0104"])
param_amoMat_df = pd.concat([param_1,param_2, amoMat_df], axis=0, sort=False)
perMat_path = PERMAT_DIR + YEAR + "/" + date_unix[0] + ".csv"
param_amoMat_df.to_csv(perMat_path, index=False, sep='\t', header=False)
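                # Sketch of the layout written above (an assumption based on the values
                # used): the file fed to perseus's distmat mode starts with the point
                # count (400), then a parameter row (1 1 101 1, presumably
                # start/step/number-of-steps/dimension-cap), then the 400x400 matrix.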
# use perseus to compute betti number
betti_path = "betti/" + YEAR + "/" + date_unix[0]
betti_0_path = "betti/betti_0/" + YEAR + "/" + date_unix[0] + "_betti_0.csv"
betti_1_path = "betti/betti_1/" + YEAR + "/" + date_unix[0] + "_betti_1.csv"
perseus_command = "perseus/perseus distmat " + perMat_path + " " + betti_path
if(os.system(perseus_command) == 0):
betti_number = pd.read_csv(betti_path +"_betti.txt", sep='\s+', index_col=0, names=["betti_0", "betti_1"])
init_betti_0 = pd.DataFrame([[0]]*101, columns=["betti_0"])
init_betti_1 = pd.DataFrame([[0]]*101, columns=["betti_1"])
betti_0 = (betti_number["betti_0"] + init_betti_0["betti_0"]).fillna(axis=0, method='ffill').fillna(0).astype(int)
betti_1 = (betti_number["betti_1"] + init_betti_1["betti_1"]).fillna(axis=0, method='ffill').fillna(0).astype(int)
betti_0.to_csv(betti_0_path)
betti_1.to_csv(betti_1_path)
print("Successfully calculated Betti number!")
else:
print("Failed to calculate Betti number!")
# Calculate OccMat and AmoMat
io_data_amo = amo_data_total['tx_value'].groupby(amo_data_total['IOSize']).sum()
io_data_occ = amo_data_total.groupby(amo_data_total['IOSize']).count()
io_data_occ = io_data_occ.iloc[:,1]
occMat = torch.zeros(20,20)
amoMat = torch.zeros(20,20)
for i in range(20):
for j in range(20):
io_name = str(i).zfill(2) + str(j).zfill(2)
if(io_name in io_data_amo.index):
amoMat[i][j] = io_data_amo[io_name]
if(io_name in io_data_occ.index):
occMat[i][j] = io_data_occ[io_name]
amoMat_np = amoMat.numpy()
amoMat_df = pd.DataFrame(amoMat_np)
#amoMat_df.to_csv(AMOMAT_DIR + 'amo2020' + str(day).zfill(3) + '.csv', float_format='%.0f', header=False, index=False)
amoMat_df.to_csv(AMOMAT_DIR + YEAR + "/" + 'amo' + date_unix[0] + '.csv', float_format='%.0f', header=False, index=False)
occMat_np = occMat.numpy()
occMat_df = pd.DataFrame(occMat_np)
#occMat_df.to_csv(OCCMAT_DIR + 'occ2020' + str(day).zfill(3) + '.csv', float_format='%.0f', header=False, index=False)
occMat_df.to_csv(OCCMAT_DIR + YEAR + "/" + 'occ' + date_unix[0] + '.csv', float_format='%.0f', header=False, index=False)
except Exception as ex:
template = "An exception of type {0} occurred. Arguments: \n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(date_unix[0] + "\n" + message)
continue
break
# count the time
total_days = len(getBetweenDay(begin_date, end_date))
finished_days = len(os.listdir(BETTI_0_DIR + YEAR + "/")) - 1
left_days = total_days - finished_days
finished_percentage = math.floor(finished_days / total_days * 100)
single_file_time = time.time()-time_now
left_time = single_file_time * left_days
print('\tcost: {:.4f}s/file; left time: {:.4f}s; {} {}%'.format(single_file_time, left_time, "#"*finished_percentage+"."*(100-finished_percentage), finished_percentage))
# Create the per-year output directories if they do not exist
YEARS = getYearsFromDate(begin_date, end_date)
for YEAR in YEARS:
amoData_Year_dir = AMODATA_DIR + YEAR + "/"
amoMat_Year_dir = AMOMAT_DIR + YEAR + "/"
occMat_Year_dir = OCCMAT_DIR + YEAR + "/"
perMat_Year_dir = PERMAT_DIR + YEAR + "/"
betti_Year_dir = BETTI_DIR + YEAR + "/"
betti_0_Year_dir = BETTI_0_DIR + YEAR + "/"
betti_1_Year_dir = BETTI_1_DIR + YEAR + "/"
check_dir_list = [amoData_Year_dir, amoMat_Year_dir, occMat_Year_dir, perMat_Year_dir,betti_Year_dir, betti_0_Year_dir, betti_1_Year_dir]
for dir_name in check_dir_list:
if not os.path.exists(dir_name):
print("Create "+dir_name)
os.makedirs(dir_name)
p = Pool(10)
for date_unix in getBetweenDay(begin_date, end_date):
p.apply_async(genOccMat, args=(date_unix,))
#genOccMat(date_unix)
p.close()
p.join()
'''
for date_unix in getBetweenDay(begin_date, end_date):
genOccMat(date_unix)
'''
betti_0_total = pd.DataFrame([])
betti_1_total = pd.DataFrame([])
for date in getBetweenDay(begin_date,end_date):
YEAR = getYearFromDate(date[0])
betti_0 =
|
pd.read_csv(BETTI_0_DIR + YEAR + "/" +date[0]+"_betti_0.csv", index_col=False, names=["id",date[0]])
|
pandas.read_csv
|
#!python
# join hole intervals using the support of one
# check manual for usage and important details
# v1.0 05/2021 paulo.ernesto
'''
usage: $0 target_db*csv,xlsx target_hid:target_db target_from:target_db target_to:target_db source_db*csv,xlsx source_hid:source_db source_from:source_db source_to:source_db variables#variable:source_db#ponderation=mean,major,sum,list output*csv,xlsx
'''
import sys, os.path
import numpy as np
import pandas as pd
# import modules from a pyz (zip) file with same name as scripts
sys.path.append(os.path.splitext(sys.argv[0])[0] + '.pyz')
from _gui import usage_gui, commalist, pd_load_dataframe, pd_save_dataframe
from db_join_interval import pd_join_interval
from bm_breakdown import pd_breakdown
def db_join_support(target_db, target_hid, target_from, target_to, source_db, source_hid, source_from, source_to, variables, output):
v_lut = [{},{}]
v_lut[0]['hid'] = target_hid or 'hid'
v_lut[1]['hid'] = source_hid or 'hid'
v_lut[0]['from'] = target_from or 'from'
v_lut[1]['from'] = source_from or 'from'
v_lut[0]['to'] = target_to or 'to'
v_lut[1]['to'] = source_to or 'to'
dfs = [pd_load_dataframe(target_db), pd_load_dataframe(source_db)]
dfs[0]['tmp_target_from'] = dfs[0][v_lut[0]['from']]
odf = pd_join_interval(dfs, v_lut)
odf.reset_index(drop=1, inplace=True)
# pd_join_interval modifies the input array which is bad behavior
  # but datasets may be huge so it's best to just clean up after
dfs[0].reset_index(drop=1, inplace=True)
variables = commalist().parse(variables)
ttf = 'tmp_target_from'
vl_a = [[ttf], [v_lut[0]['hid']]] + [[_[0] + '=' + _[0], _[1]] for _ in variables]
odf = pd_breakdown(odf, vl_a)
odf =
|
pd.merge(dfs[0], odf, 'outer', [v_lut[0]['hid'], ttf])
|
pandas.merge
|
import sys, os
import scipy.sparse
import pandas as pd
from load_public_data import anes_opinion_data, anes_codebook
from find_public_opinion_question import find_anes_question
PATH = ".\\data\\tf-idf\\public_opinion\\"
#load tfidf matrix
TFIDF_MATRIX_FILENAME = "tfidf_matrix.npz"
TFIDF_MATRIX_PATH = os.path.join(PATH,TFIDF_MATRIX_FILENAME)
tfidf_matrix = scipy.sparse.load_npz(TFIDF_MATRIX_PATH)
#load {row index : opinion id} mapping
OPINION_ID_FILENAME = "tfidf_rows.csv"
OPINION_ID_PATH = os.path.join(PATH,OPINION_ID_FILENAME)
opin_id_df =
|
pd.read_csv(OPINION_ID_PATH)
|
pandas.read_csv
|
import databricks.koalas as ks
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from gators.data_cleaning.drop_datatype_columns import DropDatatypeColumns
@pytest.fixture
def data():
X = pd.DataFrame({"A": [1, 2], "B": [1.0, 2.0], "C": ["q", "w"]})
obj = DropDatatypeColumns(dtype=float).fit(X)
X_expected = pd.DataFrame({"A": [1, 2], "C": ["q", "w"]})
return obj, X, X_expected
@pytest.fixture
def data_ks():
X = ks.DataFrame({"A": [1, 2], "B": [1.0, 2.0], "C": ["q", "w"]})
obj = DropDatatypeColumns(dtype=float).fit(X)
X_expected = pd.DataFrame({"A": [1, 2], "C": ["q", "w"]})
return obj, X, X_expected
def test_pd(data):
obj, X, X_expected = data
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_ks(data_ks):
obj, X, X_expected = data_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_pd_np(data):
obj, X, X_expected = data
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new, columns=X_expected.columns)
assert_frame_equal(X_new, X_expected.astype(object))
@pytest.mark.koalas
def test_ks_np(data_ks):
obj, X, X_expected = data_ks
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new =
|
pd.DataFrame(X_numpy_new, columns=X_expected.columns)
|
pandas.DataFrame
|
# sw: script used to scrape travel time data from Google API.
# No need to run the script for replication.
# TBD: need to set up the global environment variables.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import geopandas as gpd
import geoplot
from pysal.lib import weights
import networkx as nx
from scipy.spatial import distance
import googlemaps
# system path
import sys
import os
# util path
utility_path = os.path.join(os.getcwd(),'src/d00_utils/')
sys.path.append(utility_path)
import utilities as util
# data path
raw_data_path = os.path.join(os.getcwd(),'data/01_raw/')
intermediate_data_path = os.path.join(os.getcwd(),'data/02_intermediate/')
# read files
sa2_adelaide = gpd.read_file(intermediate_data_path + 'shapefiles/sa2_adelaide.shp')
sa2_adelaide['centroids'] = sa2_adelaide.centroid
sa2_adelaide['Lat'] = sa2_adelaide.centroids.y
sa2_adelaide['Long'] = sa2_adelaide.centroids.x
#
# create a new dataframe
OD = {}
OD['o_idx'] = []
OD['d_idx'] = []
OD['o_sa2_idx'] = []
OD['d_sa2_idx'] = []
OD['o_lat'] = []
OD['o_long'] = []
OD['d_lat'] = []
OD['d_long'] = []
for i in range(sa2_adelaide.shape[0]):
print("Origin Index is: ", i)
o_idx = i
o_sa2_idx = sa2_adelaide.loc[i, 'SA2_MAIN16']
o_lat = sa2_adelaide.loc[i, 'Lat']
o_long = sa2_adelaide.loc[i, 'Long']
for j in range(sa2_adelaide.shape[0]):
d_idx = j
d_sa2_idx = sa2_adelaide.loc[j, 'SA2_MAIN16']
d_lat = sa2_adelaide.loc[j, 'Lat']
d_long = sa2_adelaide.loc[j, 'Long']
# append
OD['o_idx'].append(o_idx)
OD['d_idx'].append(d_idx)
OD['o_sa2_idx'].append(o_sa2_idx)
OD['d_sa2_idx'].append(d_sa2_idx)
OD['o_lat'].append(o_lat)
OD['o_long'].append(o_long)
OD['d_lat'].append(d_lat)
OD['d_long'].append(d_long)
# create the data frame
OD_df = pd.DataFrame(OD)
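# A vectorized alternative (sketch, assuming pandas >= 1.2 for how='cross'):
# the same origin-destination cross product can be built with a cross merge.
#   nodes = sa2_adelaide[['SA2_MAIN16', 'Lat', 'Long']].reset_index()
#   OD_df_alt = nodes.merge(nodes, how='cross', suffixes=('_o', '_d'))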
# Need to specify your API_key
gmaps = googlemaps.Client(key=API_key)
OD_time_dic = {}
for idx in range(OD_df.shape[0]):
    # query the API one O-D pair at a time (plain scraping is not allowed by Google).
if idx%100 == 0:
print(idx)
o_lat,o_long,d_lat,d_long = OD_df.loc[idx, ['o_lat','o_long','d_lat','d_long']]
origin = (o_lat,o_long)
destination = (d_lat,d_long)
result = gmaps.distance_matrix(origin, destination, mode = 'driving')
OD_time_dic[idx] = result
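# Rough shape of each Distance Matrix response stored above (a sketch, not the
# full schema): the duration/distance values parsed below live at
#   result['rows'][0]['elements'][0]['duration' | 'distance']['value'],
# with human-readable strings under the matching 'text' keys and addresses in
# result['origin_addresses'] / result['destination_addresses'].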
# Augment Google data
OD_from_google_api = {}
OD_from_google_api['idx'] = [] # Important for combining two dfs
OD_from_google_api['d_address'] = []
OD_from_google_api['o_address'] = []
OD_from_google_api['od_duration_text'] = []
OD_from_google_api['od_duration_value'] = []
OD_from_google_api['od_distance_text'] = []
OD_from_google_api['od_distance_value'] = []
for key in OD_time_dic.keys():
if key%100 == 0:
print(key)
OD_from_google_api['idx'].append(key)
OD_from_google_api['d_address'].append(OD_time_dic[key]['destination_addresses'][0])
OD_from_google_api['o_address'].append(OD_time_dic[key]['origin_addresses'][0])
OD_from_google_api['od_duration_text'].append(OD_time_dic[key]['rows'][0]['elements'][0]['duration']['text'])
OD_from_google_api['od_duration_value'].append(OD_time_dic[key]['rows'][0]['elements'][0]['duration']['value'])
OD_from_google_api['od_distance_text'].append(OD_time_dic[key]['rows'][0]['elements'][0]['distance']['text'])
OD_from_google_api['od_distance_value'].append(OD_time_dic[key]['rows'][0]['elements'][0]['distance']['value'])
OD_from_google_api_df =
|
pd.DataFrame(OD_from_google_api)
|
pandas.DataFrame
|
"""Tests suite for Period handling.
Parts derived from scikits.timeseries code, original authors:
- <NAME> & <NAME>
- pierregm_at_uga_dot_edu - mattknow_ca_at_hotmail_dot_com
"""
from unittest import TestCase
from datetime import datetime, timedelta
from numpy.ma.testutils import assert_equal
from pandas.tseries.period import Period, PeriodIndex
from pandas.tseries.index import DatetimeIndex, date_range
from pandas.tseries.tools import to_datetime
import pandas.core.datetools as datetools
import numpy as np
from pandas import Series, TimeSeries
from pandas.util.testing import assert_series_equal
class TestPeriodProperties(TestCase):
"Test properties such as year, month, weekday, etc...."
#
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_interval_constructor(self):
i1 = Period('1/1/2005', freq='M')
i2 = Period('Jan 2005')
self.assertEquals(i1, i2)
i1 = Period('2005', freq='A')
i2 = Period('2005')
i3 = Period('2005', freq='a')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
self.assert_(i1 != i4)
self.assertEquals(i4, i5)
i1 = Period.now('Q')
i2 = Period(datetime.now(), freq='Q')
i3 = Period.now('q')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
# Biz day construction, roll forward if non-weekday
i1 = Period('3/10/12', freq='B')
i2 = Period('3/12/12', freq='D')
self.assertEquals(i1, i2.asfreq('B'))
i3 = Period('3/10/12', freq='b')
self.assertEquals(i1, i3)
i1 = Period(year=2005, quarter=1, freq='Q')
i2 = Period('1/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, quarter=3, freq='Q')
i2 = Period('9/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, month=3, day=1, freq='D')
i2 = Period('3/1/2005', freq='D')
self.assertEquals(i1, i2)
i3 = Period(year=2005, month=3, day=1, freq='d')
self.assertEquals(i1, i3)
i1 = Period(year=2012, month=3, day=10, freq='B')
i2 = Period('3/12/12', freq='B')
self.assertEquals(i1, i2)
i1 = Period('2005Q1')
i2 = Period(year=2005, quarter=1, freq='Q')
i3 = Period('2005q1')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i1 = Period('05Q1')
self.assertEquals(i1, i2)
lower = Period('05q1')
self.assertEquals(i1, lower)
i1 = Period('1Q2005')
self.assertEquals(i1, i2)
lower = Period('1q2005')
self.assertEquals(i1, lower)
i1 = Period('1Q05')
self.assertEquals(i1, i2)
lower = Period('1q05')
self.assertEquals(i1, lower)
i1 = Period('4Q1984')
self.assertEquals(i1.year, 1984)
lower = Period('4q1984')
self.assertEquals(i1, lower)
i1 = Period('1982', freq='min')
i2 = Period('1982', freq='MIN')
self.assertEquals(i1, i2)
i2 = Period('1982', freq=('Min', 1))
self.assertEquals(i1, i2)
def test_freq_str(self):
i1 = Period('1982', freq='Min')
self.assert_(i1.freq[0] != '1')
i2 = Period('11/30/2005', freq='2Q')
self.assertEquals(i2.freq[0], '2')
def test_to_timestamp(self):
intv = Period('1982', freq='A')
start_ts = intv.to_timestamp(which_end='S')
aliases = ['s', 'StarT', 'BEGIn']
for a in aliases:
self.assertEquals(start_ts, intv.to_timestamp(which_end=a))
end_ts = intv.to_timestamp(which_end='E')
aliases = ['e', 'end', 'FINIsH']
for a in aliases:
self.assertEquals(end_ts, intv.to_timestamp(which_end=a))
from_lst = ['A', 'Q', 'M', 'W', 'B',
'D', 'H', 'Min', 'S']
for i, fcode in enumerate(from_lst):
intv = Period('1982', freq=fcode)
result = intv.to_timestamp().to_period(fcode)
self.assertEquals(result, intv)
self.assertEquals(intv.start_time(), intv.to_timestamp('S'))
self.assertEquals(intv.end_time(), intv.to_timestamp('E'))
def test_properties_annually(self):
# Test properties on Periods with annually frequency.
a_date = Period(freq='A', year=2007)
assert_equal(a_date.year, 2007)
def test_properties_quarterly(self):
        # Test properties on Periods with quarterly frequency.
qedec_date = Period(freq="Q-DEC", year=2007, quarter=1)
qejan_date = Period(freq="Q-JAN", year=2007, quarter=1)
qejun_date = Period(freq="Q-JUN", year=2007, quarter=1)
#
for x in range(3):
for qd in (qedec_date, qejan_date, qejun_date):
assert_equal((qd + x).qyear, 2007)
assert_equal((qd + x).quarter, x + 1)
def test_properties_monthly(self):
        # Test properties on Periods with monthly frequency.
m_date = Period(freq='M', year=2007, month=1)
for x in range(11):
m_ival_x = m_date + x
assert_equal(m_ival_x.year, 2007)
if 1 <= x + 1 <= 3:
assert_equal(m_ival_x.quarter, 1)
elif 4 <= x + 1 <= 6:
assert_equal(m_ival_x.quarter, 2)
elif 7 <= x + 1 <= 9:
assert_equal(m_ival_x.quarter, 3)
elif 10 <= x + 1 <= 12:
assert_equal(m_ival_x.quarter, 4)
assert_equal(m_ival_x.month, x + 1)
def test_properties_weekly(self):
        # Test properties on Periods with weekly frequency.
w_date = Period(freq='WK', year=2007, month=1, day=7)
#
assert_equal(w_date.year, 2007)
assert_equal(w_date.quarter, 1)
assert_equal(w_date.month, 1)
assert_equal(w_date.week, 1)
assert_equal((w_date - 1).week, 52)
def test_properties_daily(self):
# Test properties on Periods with daily frequency.
b_date = Period(freq='B', year=2007, month=1, day=1)
#
assert_equal(b_date.year, 2007)
assert_equal(b_date.quarter, 1)
assert_equal(b_date.month, 1)
assert_equal(b_date.day, 1)
assert_equal(b_date.weekday, 0)
assert_equal(b_date.day_of_year, 1)
#
d_date = Period(freq='D', year=2007, month=1, day=1)
#
assert_equal(d_date.year, 2007)
assert_equal(d_date.quarter, 1)
assert_equal(d_date.month, 1)
assert_equal(d_date.day, 1)
assert_equal(d_date.weekday, 0)
assert_equal(d_date.day_of_year, 1)
def test_properties_hourly(self):
# Test properties on Periods with hourly frequency.
h_date = Period(freq='H', year=2007, month=1, day=1, hour=0)
#
assert_equal(h_date.year, 2007)
assert_equal(h_date.quarter, 1)
assert_equal(h_date.month, 1)
assert_equal(h_date.day, 1)
assert_equal(h_date.weekday, 0)
assert_equal(h_date.day_of_year, 1)
assert_equal(h_date.hour, 0)
#
def test_properties_minutely(self):
# Test properties on Periods with minutely frequency.
t_date = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0)
#
assert_equal(t_date.quarter, 1)
assert_equal(t_date.month, 1)
assert_equal(t_date.day, 1)
assert_equal(t_date.weekday, 0)
assert_equal(t_date.day_of_year, 1)
assert_equal(t_date.hour, 0)
assert_equal(t_date.minute, 0)
def test_properties_secondly(self):
# Test properties on Periods with secondly frequency.
        s_date = Period(freq='S', year=2007, month=1, day=1,
                        hour=0, minute=0, second=0)
#
assert_equal(s_date.year, 2007)
assert_equal(s_date.quarter, 1)
assert_equal(s_date.month, 1)
assert_equal(s_date.day, 1)
assert_equal(s_date.weekday, 0)
assert_equal(s_date.day_of_year, 1)
assert_equal(s_date.hour, 0)
assert_equal(s_date.minute, 0)
assert_equal(s_date.second, 0)
def noWrap(item):
return item
class TestFreqConversion(TestCase):
"Test frequency conversion of date objects"
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='WK', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
assert_equal(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
assert_equal(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
assert_equal(ival_A.asfreq('M', 's'), ival_A_to_M_start)
assert_equal(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
assert_equal(ival_A.asfreq('WK', 'S'), ival_A_to_W_start)
assert_equal(ival_A.asfreq('WK', 'E'), ival_A_to_W_end)
assert_equal(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
assert_equal(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
assert_equal(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
assert_equal(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
assert_equal(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
assert_equal(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
assert_equal(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
assert_equal(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
assert_equal(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
assert_equal(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
assert_equal(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
assert_equal(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
assert_equal(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
assert_equal(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
assert_equal(ival_A.asfreq('A'), ival_A)
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='WK', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
assert_equal(ival_Q.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q_end_of_year.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start)
assert_equal(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end)
assert_equal(ival_Q.asfreq('WK', 'S'), ival_Q_to_W_start)
assert_equal(ival_Q.asfreq('WK', 'E'), ival_Q_to_W_end)
assert_equal(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start)
assert_equal(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end)
assert_equal(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start)
assert_equal(ival_Q.asfreq('D', 'E'), ival_Q_to_D_end)
assert_equal(ival_Q.asfreq('H', 'S'), ival_Q_to_H_start)
assert_equal(ival_Q.asfreq('H', 'E'), ival_Q_to_H_end)
assert_equal(ival_Q.asfreq('Min', 'S'), ival_Q_to_T_start)
assert_equal(ival_Q.asfreq('Min', 'E'), ival_Q_to_T_end)
assert_equal(ival_Q.asfreq('S', 'S'), ival_Q_to_S_start)
assert_equal(ival_Q.asfreq('S', 'E'), ival_Q_to_S_end)
assert_equal(ival_QEJAN.asfreq('D', 'S'), ival_QEJAN_to_D_start)
assert_equal(ival_QEJAN.asfreq('D', 'E'), ival_QEJAN_to_D_end)
assert_equal(ival_QEJUN.asfreq('D', 'S'), ival_QEJUN_to_D_start)
assert_equal(ival_QEJUN.asfreq('D', 'E'), ival_QEJUN_to_D_end)
assert_equal(ival_Q.asfreq('Q'), ival_Q)
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq='M', year=2007, month=1)
ival_M_end_of_year = Period(freq='M', year=2007, month=12)
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
ival_M_to_A = Period(freq='A', year=2007)
ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_M_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq='WK', year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
assert_equal(ival_M.asfreq('A'), ival_M_to_A)
assert_equal(ival_M_end_of_year.asfreq('A'), ival_M_to_A)
assert_equal(ival_M.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M.asfreq('WK', 'S'), ival_M_to_W_start)
assert_equal(ival_M.asfreq('WK', 'E'), ival_M_to_W_end)
assert_equal(ival_M.asfreq('B', 'S'), ival_M_to_B_start)
assert_equal(ival_M.asfreq('B', 'E'), ival_M_to_B_end)
assert_equal(ival_M.asfreq('D', 'S'), ival_M_to_D_start)
assert_equal(ival_M.asfreq('D', 'E'), ival_M_to_D_end)
assert_equal(ival_M.asfreq('H', 'S'), ival_M_to_H_start)
assert_equal(ival_M.asfreq('H', 'E'), ival_M_to_H_end)
assert_equal(ival_M.asfreq('Min', 'S'), ival_M_to_T_start)
assert_equal(ival_M.asfreq('Min', 'E'), ival_M_to_T_end)
assert_equal(ival_M.asfreq('S', 'S'), ival_M_to_S_start)
assert_equal(ival_M.asfreq('S', 'E'), ival_M_to_S_end)
assert_equal(ival_M.asfreq('M'), ival_M)
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq='WK', year=2007, month=1, day=1)
ival_WSUN = Period(freq='WK', year=2007, month=1, day=7)
ival_WSAT = Period(freq='WK-SAT', year=2007, month=1, day=6)
ival_WFRI = Period(freq='WK-FRI', year=2007, month=1, day=5)
ival_WTHU = Period(freq='WK-THU', year=2007, month=1, day=4)
ival_WWED = Period(freq='WK-WED', year=2007, month=1, day=3)
ival_WTUE = Period(freq='WK-TUE', year=2007, month=1, day=2)
ival_WMON = Period(freq='WK-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq='D', year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq='D', year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq='D', year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq='D', year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq='D', year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq='D', year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq='D', year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq='WK', year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq='WK', year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq='WK', year=2007, month=1, day=31)
ival_W_to_A = Period(freq='A', year=2007)
ival_W_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_W_to_M = Period(freq='M', year=2007, month=1)
if Period(freq='D', year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq='A', year=2007)
else:
ival_W_to_A_end_of_year = Period(freq='A', year=2008)
if Period(freq='D', year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=2)
if Period(freq='D', year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=2)
ival_W_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq='B', year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_W_to_H_end = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_W_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_W_to_T_end = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_W_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_W_to_S_end = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
assert_equal(ival_W.asfreq('A'), ival_W_to_A)
assert_equal(ival_W_end_of_year.asfreq('A'),
ival_W_to_A_end_of_year)
assert_equal(ival_W.asfreq('Q'), ival_W_to_Q)
assert_equal(ival_W_end_of_quarter.asfreq('Q'),
ival_W_to_Q_end_of_quarter)
assert_equal(ival_W.asfreq('M'), ival_W_to_M)
assert_equal(ival_W_end_of_month.asfreq('M'),
ival_W_to_M_end_of_month)
assert_equal(ival_W.asfreq('B', 'S'), ival_W_to_B_start)
assert_equal(ival_W.asfreq('B', 'E'), ival_W_to_B_end)
assert_equal(ival_W.asfreq('D', 'S'), ival_W_to_D_start)
assert_equal(ival_W.asfreq('D', 'E'), ival_W_to_D_end)
assert_equal(ival_WSUN.asfreq('D', 'S'), ival_WSUN_to_D_start)
assert_equal(ival_WSUN.asfreq('D', 'E'), ival_WSUN_to_D_end)
assert_equal(ival_WSAT.asfreq('D', 'S'), ival_WSAT_to_D_start)
assert_equal(ival_WSAT.asfreq('D', 'E'), ival_WSAT_to_D_end)
assert_equal(ival_WFRI.asfreq('D', 'S'), ival_WFRI_to_D_start)
assert_equal(ival_WFRI.asfreq('D', 'E'), ival_WFRI_to_D_end)
assert_equal(ival_WTHU.asfreq('D', 'S'), ival_WTHU_to_D_start)
assert_equal(ival_WTHU.asfreq('D', 'E'), ival_WTHU_to_D_end)
assert_equal(ival_WWED.asfreq('D', 'S'), ival_WWED_to_D_start)
assert_equal(ival_WWED.asfreq('D', 'E'), ival_WWED_to_D_end)
assert_equal(ival_WTUE.asfreq('D', 'S'), ival_WTUE_to_D_start)
assert_equal(ival_WTUE.asfreq('D', 'E'), ival_WTUE_to_D_end)
assert_equal(ival_WMON.asfreq('D', 'S'), ival_WMON_to_D_start)
assert_equal(ival_WMON.asfreq('D', 'E'), ival_WMON_to_D_end)
assert_equal(ival_W.asfreq('H', 'S'), ival_W_to_H_start)
assert_equal(ival_W.asfreq('H', 'E'), ival_W_to_H_end)
assert_equal(ival_W.asfreq('Min', 'S'), ival_W_to_T_start)
assert_equal(ival_W.asfreq('Min', 'E'), ival_W_to_T_end)
assert_equal(ival_W.asfreq('S', 'S'), ival_W_to_S_start)
assert_equal(ival_W.asfreq('S', 'E'), ival_W_to_S_end)
assert_equal(ival_W.asfreq('WK'), ival_W)
def test_conv_business(self):
# frequency conversion tests: from Business Frequency"
ival_B = Period(freq='B', year=2007, month=1, day=1)
ival_B_end_of_year = Period(freq='B', year=2007, month=12, day=31)
ival_B_end_of_quarter = Period(freq='B', year=2007, month=3, day=30)
ival_B_end_of_month = Period(freq='B', year=2007, month=1, day=31)
ival_B_end_of_week = Period(freq='B', year=2007, month=1, day=5)
ival_B_to_A = Period(freq='A', year=2007)
ival_B_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_B_to_M = Period(freq='M', year=2007, month=1)
ival_B_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_B_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_B_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_B_to_H_end = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_B_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_B_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_B_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_B_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
assert_equal(ival_B.asfreq('A'), ival_B_to_A)
assert_equal(ival_B_end_of_year.asfreq('A'), ival_B_to_A)
assert_equal(ival_B.asfreq('Q'), ival_B_to_Q)
assert_equal(ival_B_end_of_quarter.asfreq('Q'), ival_B_to_Q)
assert_equal(ival_B.asfreq('M'), ival_B_to_M)
assert_equal(ival_B_end_of_month.asfreq('M'), ival_B_to_M)
assert_equal(ival_B.asfreq('WK'), ival_B_to_W)
assert_equal(ival_B_end_of_week.asfreq('WK'), ival_B_to_W)
assert_equal(ival_B.asfreq('D'), ival_B_to_D)
assert_equal(ival_B.asfreq('H', 'S'), ival_B_to_H_start)
assert_equal(ival_B.asfreq('H', 'E'), ival_B_to_H_end)
assert_equal(ival_B.asfreq('Min', 'S'), ival_B_to_T_start)
assert_equal(ival_B.asfreq('Min', 'E'), ival_B_to_T_end)
assert_equal(ival_B.asfreq('S', 'S'), ival_B_to_S_start)
assert_equal(ival_B.asfreq('S', 'E'), ival_B_to_S_end)
assert_equal(ival_B.asfreq('B'), ival_B)
def test_conv_daily(self):
# frequency conversion tests: from Business Frequency"
ival_D = Period(freq='D', year=2007, month=1, day=1)
ival_D_end_of_year = Period(freq='D', year=2007, month=12, day=31)
ival_D_end_of_quarter = Period(freq='D', year=2007, month=3, day=31)
ival_D_end_of_month = Period(freq='D', year=2007, month=1, day=31)
ival_D_end_of_week = Period(freq='D', year=2007, month=1, day=7)
ival_D_friday = Period(freq='D', year=2007, month=1, day=5)
ival_D_saturday = Period(freq='D', year=2007, month=1, day=6)
ival_D_sunday = Period(freq='D', year=2007, month=1, day=7)
ival_D_monday = Period(freq='D', year=2007, month=1, day=8)
ival_B_friday = Period(freq='B', year=2007, month=1, day=5)
ival_B_monday = Period(freq='B', year=2007, month=1, day=8)
ival_D_to_A = Period(freq='A', year=2007)
ival_Deoq_to_AJAN = Period(freq='A-JAN', year=2008)
ival_Deoq_to_AJUN = Period(freq='A-JUN', year=2007)
ival_Deoq_to_ADEC = Period(freq='A-DEC', year=2007)
ival_D_to_QEJAN = Period(freq="Q-JAN", year=2007, quarter=4)
ival_D_to_QEJUN = Period(freq="Q-JUN", year=2007, quarter=3)
ival_D_to_QEDEC = Period(freq="Q-DEC", year=2007, quarter=1)
ival_D_to_M = Period(freq='M', year=2007, month=1)
ival_D_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_D_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_D_to_H_end = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_D_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_D_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_D_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_D_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
assert_equal(ival_D.asfreq('A'), ival_D_to_A)
assert_equal(ival_D_end_of_quarter.asfreq('A-JAN'),
ival_Deoq_to_AJAN)
assert_equal(ival_D_end_of_quarter.asfreq('A-JUN'),
ival_Deoq_to_AJUN)
assert_equal(ival_D_end_of_quarter.asfreq('A-DEC'),
ival_Deoq_to_ADEC)
assert_equal(ival_D_end_of_year.asfreq('A'), ival_D_to_A)
assert_equal(ival_D_end_of_quarter.asfreq('Q'), ival_D_to_QEDEC)
assert_equal(ival_D.asfreq("Q-JAN"), ival_D_to_QEJAN)
assert_equal(ival_D.asfreq("Q-JUN"), ival_D_to_QEJUN)
assert_equal(ival_D.asfreq("Q-DEC"), ival_D_to_QEDEC)
assert_equal(ival_D.asfreq('M'), ival_D_to_M)
assert_equal(ival_D_end_of_month.asfreq('M'), ival_D_to_M)
assert_equal(ival_D.asfreq('WK'), ival_D_to_W)
assert_equal(ival_D_end_of_week.asfreq('WK'), ival_D_to_W)
assert_equal(ival_D_friday.asfreq('B'), ival_B_friday)
assert_equal(ival_D_saturday.asfreq('B', 'S'), ival_B_friday)
assert_equal(ival_D_saturday.asfreq('B', 'E'), ival_B_monday)
assert_equal(ival_D_sunday.asfreq('B', 'S'), ival_B_friday)
assert_equal(ival_D_sunday.asfreq('B', 'E'), ival_B_monday)
assert_equal(ival_D.asfreq('H', 'S'), ival_D_to_H_start)
assert_equal(ival_D.asfreq('H', 'E'), ival_D_to_H_end)
assert_equal(ival_D.asfreq('Min', 'S'), ival_D_to_T_start)
assert_equal(ival_D.asfreq('Min', 'E'), ival_D_to_T_end)
assert_equal(ival_D.asfreq('S', 'S'), ival_D_to_S_start)
assert_equal(ival_D.asfreq('S', 'E'), ival_D_to_S_end)
assert_equal(ival_D.asfreq('D'), ival_D)
def test_conv_hourly(self):
# frequency conversion tests: from Hourly Frequency"
ival_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_H_end_of_year = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_H_end_of_quarter = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_H_end_of_month = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_H_end_of_week = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_H_end_of_day = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_end_of_bus = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_to_A = Period(freq='A', year=2007)
ival_H_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_H_to_M = Period(freq='M', year=2007, month=1)
ival_H_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_H_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_H_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_H_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_H_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=59)
ival_H_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_H_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=59, second=59)
assert_equal(ival_H.asfreq('A'), ival_H_to_A)
assert_equal(ival_H_end_of_year.asfreq('A'), ival_H_to_A)
assert_equal(ival_H.asfreq('Q'), ival_H_to_Q)
assert_equal(ival_H_end_of_quarter.asfreq('Q'), ival_H_to_Q)
assert_equal(ival_H.asfreq('M'), ival_H_to_M)
assert_equal(ival_H_end_of_month.asfreq('M'), ival_H_to_M)
assert_equal(ival_H.asfreq('WK'), ival_H_to_W)
assert_equal(ival_H_end_of_week.asfreq('WK'), ival_H_to_W)
assert_equal(ival_H.asfreq('D'), ival_H_to_D)
assert_equal(ival_H_end_of_day.asfreq('D'), ival_H_to_D)
assert_equal(ival_H.asfreq('B'), ival_H_to_B)
assert_equal(ival_H_end_of_bus.asfreq('B'), ival_H_to_B)
assert_equal(ival_H.asfreq('Min', 'S'), ival_H_to_T_start)
assert_equal(ival_H.asfreq('Min', 'E'), ival_H_to_T_end)
assert_equal(ival_H.asfreq('S', 'S'), ival_H_to_S_start)
assert_equal(ival_H.asfreq('S', 'E'), ival_H_to_S_end)
assert_equal(ival_H.asfreq('H'), ival_H)
def test_conv_minutely(self):
# frequency conversion tests: from Minutely Frequency"
ival_T = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_T_end_of_year = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_T_end_of_quarter = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_T_end_of_month = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_T_end_of_week = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_T_end_of_day = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_bus = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_hour = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=59)
ival_T_to_A = Period(freq='A', year=2007)
ival_T_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_T_to_M = Period(freq='M', year=2007, month=1)
ival_T_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_T_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_T_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_T_to_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_T_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_T_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=59)
assert_equal(ival_T.asfreq('A'), ival_T_to_A)
assert_equal(ival_T_end_of_year.asfreq('A'), ival_T_to_A)
assert_equal(ival_T.asfreq('Q'), ival_T_to_Q)
assert_equal(ival_T_end_of_quarter.asfreq('Q'), ival_T_to_Q)
assert_equal(ival_T.asfreq('M'), ival_T_to_M)
assert_equal(ival_T_end_of_month.asfreq('M'), ival_T_to_M)
assert_equal(ival_T.asfreq('WK'), ival_T_to_W)
assert_equal(ival_T_end_of_week.asfreq('WK'), ival_T_to_W)
assert_equal(ival_T.asfreq('D'), ival_T_to_D)
assert_equal(ival_T_end_of_day.asfreq('D'), ival_T_to_D)
assert_equal(ival_T.asfreq('B'), ival_T_to_B)
assert_equal(ival_T_end_of_bus.asfreq('B'), ival_T_to_B)
assert_equal(ival_T.asfreq('H'), ival_T_to_H)
assert_equal(ival_T_end_of_hour.asfreq('H'), ival_T_to_H)
assert_equal(ival_T.asfreq('S', 'S'), ival_T_to_S_start)
assert_equal(ival_T.asfreq('S', 'E'), ival_T_to_S_end)
assert_equal(ival_T.asfreq('Min'), ival_T)
def test_conv_secondly(self):
# frequency conversion tests: from Secondly Frequency
ival_S = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_S_end_of_year = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_quarter = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_month = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_week = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
ival_S_end_of_day = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_bus = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_hour = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=59, second=59)
ival_S_end_of_minute = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=59)
ival_S_to_A = Period(freq='A', year=2007)
ival_S_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_S_to_M = Period(freq='M', year=2007, month=1)
ival_S_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_S_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_S_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_S_to_H = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_S_to_T = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
assert_equal(ival_S.asfreq('A'), ival_S_to_A)
assert_equal(ival_S_end_of_year.asfreq('A'), ival_S_to_A)
assert_equal(ival_S.asfreq('Q'), ival_S_to_Q)
assert_equal(ival_S_end_of_quarter.asfreq('Q'), ival_S_to_Q)
assert_equal(ival_S.asfreq('M'), ival_S_to_M)
assert_equal(ival_S_end_of_month.asfreq('M'), ival_S_to_M)
assert_equal(ival_S.asfreq('WK'), ival_S_to_W)
assert_equal(ival_S_end_of_week.asfreq('WK'), ival_S_to_W)
assert_equal(ival_S.asfreq('D'), ival_S_to_D)
assert_equal(ival_S_end_of_day.asfreq('D'), ival_S_to_D)
assert_equal(ival_S.asfreq('B'), ival_S_to_B)
assert_equal(ival_S_end_of_bus.asfreq('B'), ival_S_to_B)
assert_equal(ival_S.asfreq('H'), ival_S_to_H)
assert_equal(ival_S_end_of_hour.asfreq('H'), ival_S_to_H)
assert_equal(ival_S.asfreq('Min'), ival_S_to_T)
assert_equal(ival_S_end_of_minute.asfreq('Min'), ival_S_to_T)
assert_equal(ival_S.asfreq('S'), ival_S)
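# Illustrative sketch (not part of the original test classes): all of the
# conversion tests above exercise Period.asfreq. Converting to a coarser
# frequency collapses a period into the one that contains it, while converting
# to a finer frequency needs an explicit 'S' (start) or 'E' (end) anchor.
def _asfreq_demo():
    p = Period(freq='H', year=2007, month=1, day=1, hour=0)
    assert_equal(p.asfreq('D'), Period(freq='D', year=2007, month=1, day=1))
    assert_equal(p.asfreq('Min', 'S'),
                 Period(freq='Min', year=2007, month=1, day=1, hour=0, minute=0))
    assert_equal(p.asfreq('Min', 'E'),
                 Period(freq='Min', year=2007, month=1, day=1, hour=0, minute=59))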
class TestPeriodIndex(TestCase):
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_make_time_series(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index)
self.assert_(isinstance(series, TimeSeries))
def test_to_timestamp(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index, name='foo')
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
result = series.to_timestamp('D', 'end')
self.assert_(result.index.equals(exp_index))
self.assertEquals(result.name, 'foo')
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-DEC')
result = series.to_timestamp('D', 'start')
self.assert_(result.index.equals(exp_index))
def _get_with_delta(delta, freq='A-DEC'):
return date_range(to_datetime('1/1/2001') + delta,
to_datetime('12/31/2009') + delta, freq=freq)
delta = timedelta(hours=23)
result = series.to_timestamp('H', 'end')
exp_index = _get_with_delta(delta)
self.assert_(result.index.equals(exp_index))
delta = timedelta(hours=23, minutes=59)
result = series.to_timestamp('T', 'end')
exp_index = _get_with_delta(delta)
self.assert_(result.index.equals(exp_index))
result = series.to_timestamp('S', 'end')
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
self.assert_(result.index.equals(exp_index))
def test_constructor(self):
ii = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
assert_equal(len(ii), 9)
ii = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009')
assert_equal(len(ii), 4 * 9)
ii = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
assert_equal(len(ii), 12 * 9)
ii = PeriodIndex(freq='D', start='1/1/2001', end='12/31/2009')
assert_equal(len(ii), 365 * 9 + 2)
ii = PeriodIndex(freq='B', start='1/1/2001', end='12/31/2009')
assert_equal(len(ii), 261 * 9)
ii = PeriodIndex(freq='H', start='1/1/2001', end='12/31/2001 23:00')
assert_equal(len(ii), 365 * 24)
ii = PeriodIndex(freq='Min', start='1/1/2001', end='1/1/2001 23:59')
assert_equal(len(ii), 24 * 60)
ii = PeriodIndex(freq='S', start='1/1/2001', end='1/1/2001 23:59:59')
assert_equal(len(ii), 24 * 60 * 60)
start = Period('02-Apr-2005', 'B')
i1 = PeriodIndex(start=start, periods=20)
assert_equal(len(i1), 20)
assert_equal(i1.freq, start.freq)
assert_equal(i1[0], start)
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), 10)
assert_equal(i1.freq, end_intv.freq)
assert_equal(i1[-1], end_intv)
end_intv = Period('2006-12-31', '1w')
i2 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), len(i2))
self.assert_((i1 == i2).all())
assert_equal(i1.freq, i2.freq)
end_intv = Period('2006-12-31', ('w', 1))
i2 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), len(i2))
self.assert_((i1 == i2).all())
assert_equal(i1.freq, i2.freq)
try:
PeriodIndex(start=start, end=end_intv)
raise AssertionError('Cannot allow mixed freq for start and end')
except ValueError:
pass
end_intv = Period('2005-05-01', 'B')
i1 = PeriodIndex(start=start, end=end_intv)
try:
PeriodIndex(start=start)
raise AssertionError('Must specify periods if missing start or end')
except ValueError:
pass
def test_shift(self):
ii1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
ii2 = PeriodIndex(freq='A', start='1/1/2002', end='12/1/2010')
assert_equal(len(ii1), len(ii2))
assert_equal(ii1.shift(1).values, ii2.values)
ii1 =
|
PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
|
pandas.tseries.period.PeriodIndex
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import os.path as osp
import pickle
import cv2
import numpy as np
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import jsk_apc2015_common
def get_object_sizes(data_dir):
cache_file = 'object_sizes.pkl'
if osp.exists(cache_file):
return pickle.load(open(cache_file, 'rb'))
img_shape = None
objects = jsk_apc2015_common.get_object_list()
df = []
for obj in objects:
mask_files = os.listdir(osp.join(data_dir, obj, 'masks'))
for f in mask_files:
if f.startswith('NP'):
continue
mask = cv2.imread(osp.join(data_dir, obj, 'masks', f), 0)
if img_shape is None:
img_shape = mask.shape
else:
assert img_shape == mask.shape
mask = (mask > 127).astype(int)
size = mask.sum()
df.append([objects.index(obj), obj, f, size])
df =
|
pd.DataFrame(df)
|
pandas.DataFrame
|
"""Tests for the sdv.constraints.tabular module."""
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
ColumnFormula, CustomConstraint, GreaterThan, UniqueCombinations)
def dummy_transform():
pass
def dummy_reverse_transform():
pass
def dummy_is_valid():
pass
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and revert_transform + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid'
# Run
instance = CustomConstraint(
transform=dummy_transform,
reverse_transform=dummy_reverse_transform,
is_valid=is_valid_fqn
)
# Assert
assert instance.transform == dummy_transform
assert instance.reverse_transform == dummy_reverse_transform
assert instance.is_valid == dummy_is_valid
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
It is expected to create a new Constraint instance and receive the names of
the columns that need to produce unique combinations.
Side effects:
- instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test__valid_separator_valid(self):
"""Test ``_valid_separator`` for a valid separator.
If the separator and data are valid, result is ``True``.
Input:
- Table data (pandas.DataFrame)
Output:
- True (bool).
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance._separator = '#'
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
is_valid = instance._valid_separator(table_data, instance._separator, columns)
# Assert
assert is_valid
def test__valid_separator_non_valid_separator_contained(self):
"""Test ``_valid_separator`` passing a column that contains the separator.
If any of the columns contains the separator string, result is ``False``.
Input:
- Table data (pandas.DataFrame) with a column that contains the separator string ('#')
Output:
- False (bool).
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance._separator = '#'
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', '#', 'f'],
'c': ['g', 'h', 'i']
})
is_valid = instance._valid_separator(table_data, instance._separator, columns)
# Assert
assert not is_valid
def test__valid_separator_non_valid_name_joined_exists(self):
"""Test ``_valid_separator`` passing a column whose name is obtained after joining
the column names using the separator.
If the column name obtained after joining the column names using the separator
already exists, result is ``False``.
Input:
- Table data (pandas.DataFrame) with a column name that will be obtained by joining
the column names and the separator.
Output:
- False (bool).
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance._separator = '#'
# Run
table_data = pd.DataFrame({
'b#c': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
is_valid = instance._valid_separator(table_data, instance._separator, columns)
# Assert
assert not is_valid
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b#c': ['d#g', 'e#h', 'f#i']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(
|
pd.DataFrame({'a': ['a', 'b', 'c']})
|
pandas.DataFrame
|
import pandas as pd
import pytest
from datetime import timedelta, date, datetime
from string import ascii_letters
from pandas.testing import assert_frame_equal, assert_series_equal
from siuba.dply.verbs import bind_cols, bind_rows
from .helpers import data_frame
@pytest.mark.skip
def test_bind_cols_shallow_copies():
# https://github.com/tidyverse/dplyr/blob/main/tests/testthat/test-bind.R#L3
pass
@pytest.mark.skip
def test_bind_cols_lists():
# see https://github.com/tidyverse/dplyr/issues/1104
# the siuba analog would probably be dictionaries?
exp = data_frame(x = 1, y = "a", z = 2)
pass
# Note: omitting other bind_cols list-based tests
@pytest.mark.skip
def test_that_bind_cols_repairs_names():
pass
@pytest.mark.skip
def test_that_bind_cols_honors_name_repair():
pass
# rows ------------------------------------------------------------------------
@pytest.fixture
def df_var():
today = date.today()
now = datetime.now()
return data_frame(
l = [True, False, False],
i = [1, 1, 2],
d = [today + timedelta(days=i) for i in [1, 1, 2]],
f = pd.Categorical(["a", "a", "b"]),
n = [1.5, 1.5, 2.5],
t = [now + timedelta(seconds=i) for i in [1, 1, 2]],
c = ["a", "a", "b"],
)
def test_bind_rows_equiv_to_concat(df_var):
exp = pd.concat([df_var, df_var, df_var], axis=0)
res = bind_rows(df_var, df_var, df_var)
assert_frame_equal(res, exp)
def test_bind_rows_reorders_columns(df_var):
new_order = list(df_var.columns[3::-1]) + list(df_var.columns[:3:-1])
df_var_scramble = df_var[new_order]
assert_frame_equal(
bind_rows(df_var, df_var_scramble),
bind_rows(df_var, df_var)
)
@pytest.mark.skip
def test_bind_rows_ignores_null():
pass
def test_bind_rows_list_columns():
vals = [[1,2], [1,2,3]]
dfl = data_frame(x = vals)
res = bind_rows(dfl, dfl)
exp = data_frame(x = vals*2, _index = [0,1]*2)
assert_frame_equal(res, exp)
@pytest.mark.xfail
def test_bind_rows_list_of_dfs():
# https://github.com/tidyverse/dplyr/issues/1389
df = data_frame(x = 1)
res = bind_rows([df, df], [df, df])
assert len(res) == 4
assert_frame_equal(res, bind_rows(*[df]*4))
def test_bind_rows_handles_dfs_no_rows():
df1 = data_frame(x = 1, y = pd.Categorical(["a"]))
df0 = df1.loc[pd.Index([]), :]
assert_frame_equal(bind_rows(df0), df0)
assert_frame_equal(bind_rows(df0, df0), df0)
assert_frame_equal(bind_rows(df0, df1), df1)
def test_bind_rows_handles_dfs_no_cols():
df1 = data_frame(x = 1, y = pd.Categorical(["a"]))
df0 = df1.loc[:,pd.Index([])]
assert_frame_equal(bind_rows(df0), df0)
assert bind_rows(df0, df0).shape == (2, 0)
@pytest.mark.skip
def test_bind_rows_lists_with_nulls():
pass
@pytest.mark.skip
def test_bind_rows_lists_with_list_values():
pass
def test_that_bind_rows_order_even_no_cols():
df2 = data_frame(x = 2, y = "b")
df1 = df2.loc[:, pd.Index([])]
res = bind_rows(df1, df2).convert_dtypes()
indx = [0,0]
assert_series_equal(res.x, pd.Series([pd.NA, 2], index=indx, dtype="Int64", name="x"))
assert_series_equal(res.y, pd.Series([pd.NA, "b"], index=indx, dtype="string", name="y"))
# Column coercion -------------------------------------------------------------
# Note: I think most of these are handled by pandas or unavoidable
@pytest.mark.xfail
def test_bind_rows_creates_column_of_identifiers():
df = data_frame(x = [1,2,3], y = ["a", "b", "c"])
data1 = df.iloc[1:,]
data2 = df.iloc[:1,]
out = bind_rows(data1, data2, _id = "col")
# Note: omitted test of bind_rows(list(...))
assert out.columns[0] == "col"
# TODO(question): should it use 0 indexing? Would say yes, since then it just
# corresponds to the arg index
assert (out.col == ["0", "0", "1"]).all()
out_labelled = bind_rows(zero = data1, one = data2)
assert out_labelled.col == ["zero", "zero", "one"]
@pytest.mark.xfail
def test_bind_cols_accepts_null():
df1 = data_frame(a = list(range(10)), b = list(range(10)))
df2 = data_frame(c = list(range(10)), d = list(range(10)))
res1 = bind_cols(df1, df2)
res2 = bind_cols(None, df1, df2)
res3 = bind_cols(df1, None, df2)
res4 = bind_cols(df1, df2, None)
assert_frame_equal(res1, res2)
|
assert_frame_equal(res1, res3)
|
pandas.testing.assert_frame_equal
|
import re
import requests
import sys
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
from pdb import set_trace as pb
max_fallback = 2
class Currency:
def __init__(self):
self.data = {}
self.data_hist = {}
def get(self, currency_pair):
'''
Parameters
----------
currency_pair : str
Returns
-------
dictionary of the currency pair
'''
if currency_pair not in self.data:
curr = get_historical_currency(currency_pair)
self.data[currency_pair] = curr.T.to_dict()[curr.index[0]]
return self.data[currency_pair]
def get_hist(self, currency_pair, dates):
if currency_pair not in self.data_hist:
self.data_hist[currency_pair] = get_historical_currency(currency_pair, dates)
return self.data_hist[currency_pair]
def fill(self):
'''
Fill entire data cross pair
'''
if self.data == {}: self.get('USD')
i = list(self.data.keys())[0]
for k in self.data[i].keys():
self.get(k)
def get_historical_currency(base, date=pd.Timestamp.today().strftime('%Y-%m-%d')):
'''
Parameters
----------
base : str
currency base
date : str/datetime - list
list of dates
Returns
-------
pandas dataframe of currency pairs
Example
-------
get_historical_currency(
'USD',
pd.bdate_range('2017-01-03', '2019-01-04')
)
'''
if type(date) in [list, pd.Series, pd.core.indexes.datetimes.DatetimeIndex]:
return pd.concat([get_historical_currency(base=base, date=d) for d in date]).sort_index()
date = pd.to_datetime(date).strftime('%Y-%m-%d')
url = 'https://www.xe.com/currencytables/?from={base_currency}&date={date}'.format(
base_currency=base,
date=date
)
count = 0
while count<=10:
try:
curr = pd.read_html(url)
assert curr[0].shape[1] >= 4
break
except:
count+=1
curr = curr[0].iloc[:,]
curr['date'] = date
try:
curr = curr.iloc[:,[4,0,2]]
except:
print(curr)
print(date)
assert False
curr.columns=['date','currency','value']
curr = curr.pivot_table(values='value', index='date', columns='currency')
return curr
def _clean_bb_ticker(symbol, fallback):
if fallback == 0:
exchange_dict = {
'CN': 'TO',
'AU': 'AX',
'HK': 'HK',
'LN': 'L',
'TI': 'IS',
'SW': 'SW',
'US': None,
}
elif fallback == 1:
exchange_dict = {
'CN': 'V',
}
else:
exchange_dict = {}
symbol = symbol.upper()
symbol = symbol.replace(' EQUITY', '')
str_split = symbol.split(' ')
if len(str_split)==1: return symbol
symb, exchange = str_split
if exchange.upper() in exchange_dict:
correct_symbol = exchange_dict[exchange.upper()]
else:
print('Did not find symbol: {} in exchange_dict ({})'.format(exchange.upper(), symb))
correct_symbol = exchange.upper()
if correct_symbol != None:
symbol = symb+'.'+correct_symbol
else:
symbol = symb
return symbol
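# Illustrative examples for the mapping above (hedged: based only on the
# exchange_dict entries defined in _clean_bb_ticker, not on any external spec):
def _clean_bb_ticker_examples():
    assert _clean_bb_ticker('BHP AU Equity', 0) == 'BHP.AX'   # AU maps to Yahoo '.AX' suffix
    assert _clean_bb_ticker('AAPL US Equity', 0) == 'AAPL'    # US symbols keep no suffix
    assert _clean_bb_ticker('XYZ CN Equity', 1) == 'XYZ.V'    # fallback=1 maps CN to '.V'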
def statistics(symbols, currency=None, date=None, **args):
'''
Parameters
----------
symbols : str/list/pd.Series
symbols
convert_currency : None - str
convert to currency
e.g. ['USD', 'IDR', 'GBP', 'ETH', 'CAD',
'JPY', 'HUF', 'MYR', 'SEK', 'SGD', 'HKD',
'AUD', 'CHF', 'CNY', 'NZD', 'THB', 'EUR',
'RUB', 'INR', 'MXN', 'BTC', 'PHP', 'ZAR']
date : None, str/datetime
convert market cap and other price measures to
a previous date. Does not adjust for share count
changes
Returns
-------
pandas dataframe of stats from ticker
'''
convert_currency = currency
if '_curr' in args:
curr = args['_curr']
else:
curr = None
if type(symbols) in [list, pd.Series, set]:
global _currency
_currency = Currency()
return pd.concat([statistics(symb, currency=currency) for symb in symbols], sort=True)
elif not '_currency' in globals():
_currency = Currency()
if 'fallback' in args:
fallback = args['fallback']
else:
fallback = 0
ticker = _clean_bb_ticker(symbols, fallback)
url = 'https://finance.yahoo.com/quote/{ticker}/key-statistics'.format(
ticker=ticker
)
req = requests.get(url)
soup = BeautifulSoup(req.text, 'lxml')
main = soup.find_all('tr')
data = {}
dig_dict = {'B': 1000000000,'M': 1000000,'K': 1000}
for i in main:
table_cells = i.find_all('td')
if len(table_cells)==2:
k, v = table_cells
k = str(k.find_all('span')[0].getText())
try:
v = str(v.getText())
except:
v = np.nan
try:
pd.to_datetime(v)
isdate = True
except:
isdate = False
try:
if pd.isnull(v):
pass
elif str(v[-1]).upper() in dig_dict and str(v[:-1]).replace(',','').replace('.','').replace('-','').isdigit():
v = float(v[:-1])*dig_dict[v[-1].upper()]
elif (str(v[-1]) == '%') and (str(v)[:-1].replace(',','').replace('.','').replace('-','').isdigit()):
v = float(v[:-1])*1.0/100.0
elif (str(v).replace(',','').replace('.','').replace('-','').isdigit()):
v = float(v)
elif isdate:
v = pd.to_datetime(v).date().strftime('%Y-%m-%d')
except:
pass
data[k] = v
if data == {} and 'retry' not in args and fallback < max_fallback:
fallback += 1
data = statistics(symbols, fallback=fallback)
data.index = [symbols]
elif data == {} and 'retry' not in args:
data = statistics(symbols.split(' ')[0]+' Equity', retry=True)
else:
data = pd.DataFrame([data], index=[symbols])
if 'local_currency' not in data.columns:
spans = [i for i in soup.find_all('span') if 'Currency in' in i.get_text()]
spans = [i.get_text().split('Currency in ')[-1] for i in spans]
if spans!=[]:
data['local_currency'] = spans[0]
else:
data['local_currency'] = None
if convert_currency != None:
currency_divider = []
for iid, row in data.iterrows():
curr = _currency.get(row['local_currency'])
currency_divider.append(1/curr[convert_currency])
data['currency_divider'] = currency_divider
for col in ['EBITDA', 'Gross Profit', 'Levered Free Cash Flow', 'Market Cap (intraday)', 'Revenue',
'Operating Cash Flow', 'Revenue Per Share', 'Gross Profit', 'Net Income Avi to Common',
'Diluted EPS', 'Total Cash', 'Total Cash Per Share', 'Total Debt']:
if col in data.columns:
data[col] = pd.to_numeric(data[col].replace('N/A', np.nan), errors='ignore')/data['currency_divider']
if date != None:
prices = download(symbol=symbols, start_date=pd.to_datetime(date), end_date=pd.Timestamp.today().date())
multiplier = prices['Close'].iloc[0]/prices['Close'].iloc[-1]
for col in ['Market Cap (intraday)']:
if col in data.columns:
data[col]*=multiplier
return data
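# Usage sketch (assumption: live network access to Yahoo Finance; the exact set
# of returned columns depends on what the key-statistics page exposes):
#
#     stats = statistics(['AAPL US Equity', 'BHP AU Equity'], currency='USD')
#     stats[['local_currency', 'Market Cap (intraday)']]
#
# Each row is indexed by the original input symbol.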
def get_currency(ticker):
'''
Parameters
----------
ticker : str
ticker
Returns
-------
currency that the ticker is priced in
'''
return statistics(ticker)['local_currency'].iloc[0]
def download(symbol, start_date, end_date, interval='1d', events='history', currency=None, **args):
'''
Parameters
----------
symbol : str/list/pd.Series
list of symbols
start_date : str/datetime
start date
end_date : str/datetime
end date
interval : str
'1d'
events : str
'history', 'div'
currency : str
currency to convert to
Returns
-------
pandas dataframe of prices
Example
-------
df = get_prices('AAPL', '2019-01-01', '2019-01-31')
'''
if 'fallback' in args:
fallback = args['fallback']
else:
fallback = 0
if type(symbol) is pd.Series:
symbol = symbol.tolist()
if '_currency' in args:
_currency = args['_currency']
else:
_currency = Currency()
if currency != None:
dates = pd.bdate_range(start_date, end_date)
_currency.get_hist(currency.upper(), dates)
if type(symbol) is list:
output = {}
for symb in symbol:
output[symb] = download(
symbol=symb,
start_date=start_date,
end_date=end_date,
interval=interval,
events=events,
currency=currency,
_currency=_currency,
)
comb = pd.concat(output, axis=1, sort=True)
comb.columns.names=[None, None]
comb.index.name='Date'
return comb
if not '_currency' in globals(): _currency = Currency()
symbol = _clean_bb_ticker(symbol, fallback)
sd = pd.to_datetime(start_date)
sd = ((sd - pd.to_datetime('1970-01-01')).days)*24*60*60
ed = pd.to_datetime(end_date)
ed = ((ed - pd.to_datetime('1970-01-01')).days)*24*60*60
crumble_link = 'https://finance.yahoo.com/quote/{0}/history?p={0}'
crumble_regex = r'CrumbStore":{"crumb":"(.*?)"}'
cookie_regex = r'set-cookie: (.*?);'
quote_link = 'https://query1.finance.yahoo.com/v7/finance/download/{}?period1={}&period2={}&interval={}&events={}&crumb={}'
link = crumble_link.format(symbol)
session = requests.Session()
proxy = '{}.{}.{}.{}:{}'.format(
np.random.randint(10,99),
np.random.randint(10,99),
np.random.randint(0,9),
np.random.randint(10,999),
np.random.randint(1000,9999),
)
response = session.get(link, proxies={'http': 'http://{}'.format(proxy)})
# get crumbs
text = str(response.content)
match = re.search(crumble_regex, text)
try:
crumbs = match.group(1)
except:
return
|
pd.DataFrame()
|
pandas.DataFrame
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
"""
This module provides ideas for evaluating some machine learning algorithms.
"""
from __future__ import print_function
import operator
import warnings
import time
import pickle
import json
import numpy as np
import pandas as pd
#import matplotlib.pyplot as plt
# import plotly.plotly as py
import plotly.graph_objs as go
import cufflinks as cf # Needed
#sklearn warning
warnings.filterwarnings("ignore", category=DeprecationWarning)
from collections import OrderedDict
from plotly.offline.offline import _plot_html
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
#Classification algorithms
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
#Ensemble algorithms
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.ensemble import BaggingClassifier
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
# Regression algorithms
from sklearn.svm import SVR
from sklearn.neural_network import MLPRegressor
from sklearn.linear_model import BayesianRidge
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
#Ensemble algorithms
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import BaggingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
class Evaluate:
""" A class for resampling and evaluation """
def __init__(self, definer=None, preparer=None, selector=None):
self.definer = definer
self.preparer = preparer
self.selector = selector
if definer is not None:
self.problem_type = definer.problem_type
self.plot_html = None
self.report = None
self.raw_report = None
self.best_pipelines = None
self.pipelines = None
self.estimators = None
self.X_train = None
self.y_train = None
self.X_test = None
self.y_test = None
self.y_pred = None
self.metrics = dict()
self.feature_importance = dict()
self.test_size = 0.3
self.num_folds = 10
self.seed = 7
def pipeline(self, list_models):
self.build_pipelines(list_models)
self.split_data(self.test_size, self.seed)
self.evaluate_pipelines()
self.set_best_pipelines()
# self.plot_metrics('AdaBoostClassifier')
#[m() for m in evaluators]
return self
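# Usage sketch (hedged; assumes `definer` exposes X, y, problem_type and
# n_features, and that `preparer` and `selector` are sklearn-compatible
# transformers, as this class uses them below):
#
#     ev = Evaluate(definer=my_definer, preparer=my_preparer, selector=my_selector)
#     ev.pipeline(['LogisticRegression', 'RandomForestClassifier'])
#     ev.report          # DataFrame of model / mean CV score / std / time
#     ev.estimators      # dict of fitted best estimators keyed by model name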
def set_models(self, list_models=None):
models = []
rs = 1
if self.problem_type == "classification":
# Ensemble Methods
if 'AdaBoostClassifier' in list_models:
models.append( ('AdaBoostClassifier', AdaBoostClassifier(random_state=rs)) )
if 'GradientBoostingClassifier' in list_models:
models.append( ('GradientBoostingClassifier', GradientBoostingClassifier(random_state=rs)) )
if 'BaggingClassifier' in list_models:
models.append( ('BaggingClassifier', BaggingClassifier(random_state=rs)))
if 'RandomForestClassifier' in list_models:
models.append( ('RandomForestClassifier', RandomForestClassifier(random_state=rs)) )
if 'ExtraTreesClassifier' in list_models:
models.append( ('ExtraTreesClassifier', ExtraTreesClassifier(random_state=rs)) )
# Non linear Methods
if 'KNeighborsClassifier' in list_models:
models.append( ('KNeighborsClassifier', KNeighborsClassifier()) )
if 'DecisionTreeClassifier' in list_models:
models.append( ('DecisionTreeClassifier', DecisionTreeClassifier(random_state=rs)) )
if 'MLPClassifier' in list_models:
models.append( ('MLPClassifier', MLPClassifier(max_iter=1000,random_state=rs)) )
if 'SVC' in list_models:
models.append( ('SVC', SVC(random_state=rs)) )
# Linear Methods
if 'LinearDiscriminantAnalysis' in list_models:
models.append( ('LinearDiscriminantAnalysis', LinearDiscriminantAnalysis()) )
if 'GaussianNB' in list_models:
models.append( ('GaussianNB', GaussianNB()) )
if 'LogisticRegression' in list_models:
models.append( ('LogisticRegression', LogisticRegression()) )
if 'XGBoostClassifier' in list_models:
models.append( ('XGBoostClassifier', XGBClassifier(n_jobs=-1)) )
if 'LGBMClassifier' in list_models:
models.append( ('LGBMClassifier', LGBMClassifier()) )
# Voting
estimators = list()
estimators.append( ("Voting_GradientBoostingClassifier", GradientBoostingClassifier(random_state=rs)) )
estimators.append( ("Voting_ExtraTreesClassifier", ExtraTreesClassifier(random_state=rs)) )
voting = VotingClassifier(estimators)
if 'VotingClassifier' in list_models:
models.append( ('VotingClassifier', voting) )
elif self.problem_type == "regression":
# Ensemble Methods
if 'AdaBoostRegressor' in list_models:
models.append( ('AdaBoostRegressor', AdaBoostRegressor(random_state=rs)))
if 'GradientBoostingRegressor' in list_models:
models.append( ('GradientBoostingRegressor', GradientBoostingRegressor(random_state=rs)) )
if 'BaggingRegressor' in list_models:
models.append( ('BaggingRegressor', BaggingRegressor(random_state=rs)))
if 'RandomForestRegressor' in list_models:
models.append( ('RandomForestRegressor',RandomForestRegressor(random_state=rs)) )
if 'ExtraTreesRegressor' in list_models:
models.append( ('ExtraTreesRegressor', ExtraTreesRegressor(random_state=rs)) )
# Non linear Methods
if 'KNeighborsRegressor' in list_models:
models.append( ('KNeighborsRegressor', KNeighborsRegressor()) )
if 'DecisionTreeRegressor' in list_models:
models.append( ('DecisionTreeRegressor', DecisionTreeRegressor(random_state=rs)) )
if 'MLPRegressor' in list_models:
models.append( ('MLPRegressor', MLPRegressor(max_iter=1000, random_state=rs)) )
if 'SVR' in list_models:
models.append( ('SVR', SVR()) )
# Linear Methods
if 'LinearRegression' in list_models:
models.append( ('LinearRegression', LinearRegression()) )
if 'BayesianRidge' in list_models:
models.append( ('BayesianRidge', BayesianRidge()) )
if 'XGBoostRegressor' in list_models:
models.append( ('XGBoostRegressor', XGBRegressor(n_jobs=-1)) )
if 'LGBMRegressor' in list_models:
models.append( ('LGBMRegressor', LGBMRegressor()) )
return models
def split_data(self, test_size=0.30, seed=7):
""" Need to fill """
X_train, X_test, y_train, y_test = train_test_split(
self.definer.X, self.definer.y, test_size=test_size, random_state=seed)
self.X_train = X_train
self.X_test = X_test
self.y_train = y_train
self.y_test = y_test
# return X_train, X_test, y_train, y_test
def build_pipelines(self, list_models=None):
pipelines = []
models = self.set_models(list_models)
if self.definer.n_features > 200:
for m in models:
pipelines.append((m[0],
Pipeline([
#('preparer', FunctionTransformer(self.preparer)),
('preparer', self.preparer),
('selector', self.selector),
m,
])
))
else:
for m in models:
pipelines.append((m[0],
Pipeline([
#('preparer', FunctionTransformer(self.preparer)),
('preparer', self.preparer),
# ('selector', self.selector),
m,
])
))
self.pipelines = pipelines
return pipelines
def evaluate_pipelines(self, ax=None):
test_size = self.test_size
num_folds = self.num_folds
seed = self.seed
if self.definer.problem_type == 'classification':
scoring = 'accuracy'
else:
scoring = 'r2'
#pipelines = self.build_pipelines(self.set_models())
#pipelines = self.pipelines
#self.report = {}
#report_element = {}
self.report = [["Model", "Mean", "STD", "Time"]]
results = []
names = []
grid_search = dict()
for name, model in self.pipelines:
print("Modeling...", name)
kfold = KFold(n_splits=num_folds, shuffle=True, random_state=seed)
start = time.time()
# cv_results = cross_val_score(model, self.X_train, self.y_train, cv=kfold, \
# scoring=scoring)
params = dict()
# name = 'LogisticRegression'
for k, v in model.get_params().items():
# params[name+'__'+k] = [v]
params[k] = [v]
grid_search_t = GridSearchCV(model, params, n_jobs=-1,
verbose=1, cv=kfold, return_train_score=True,
scoring=scoring)
grid_search_t.fit(self.X_train, self.y_train)
end = time.time()
duration = end - start
# save the model to disk
#filename = name+'.ml'
#pickle.dump(model, open('./models/'+filename, 'wb'))
# print(cv_results)
#results.append(cv_results)
mean = grid_search_t.cv_results_['mean_test_score'][0]
std = grid_search_t.cv_results_['std_test_score'][0]
# mean = cv_results.mean()
# std = cv_results.std()
cv_results = []
for i in range(num_folds):
name_t = 'split' + str(i) + '_test_score'
cv_results.append(grid_search_t.cv_results_[name_t][0])
d = {'name': name, 'values': cv_results, 'mean': round(mean, 3),
'std': round(std, 3)}
results.append(d)
grid_search[name] = grid_search_t.best_estimator_
#results['result'] = cv_results
#names.append(name)
#report_element[name] = {'mean':mean, 'std':std}
#self.report.update(report_element)
#report_print = "Model: {}, mean: {}, std: {}".format(name,
#mean, std)
self.report.append([name, round(mean, 3), round(std, 3),
round(duration, 3)])
print("Score ", mean)
print("---------------------")
#print(report_print)
self.raw_report = sorted(results, key=lambda k: k['mean'], reverse=True)
self.estimators = grid_search
#print(self.raw_report)
headers = self.report.pop(0)
df_report = pd.DataFrame(self.report, columns=headers)
#print(df_report)
#print(self.report)
#self.sort_report(self.report)
self.sort_report(df_report)
#self.plotModels(results, names)
def sort_report(self, report):
"""" Choose the best two algorithms"""
#sorted_t = sorted(report.items(), key=operator.itemgetter(1))
report.sort_values(['Mean'], ascending=[False], inplace=True)
#self.bestAlgorithms = sorted_t[-2:]
self.report = report.copy()
#print(self.report)
def get_metrics(self):
if self.problem_type == 'classification':
models = self.estimators.keys()
for name_model in models:
metric_model = dict()
estimator = self.estimators[name_model]
y_pred = estimator.predict(self.X_test.values)
# print(f'The accuracy of the {name_model} is:', accuracy_score(y_pred, self.y_test))
cm = confusion_matrix(list(self.y_test.reset_index(drop=True)), list(y_pred))
cm_n = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
metric_model['confusion_matrix_normalized'] = cm_n.tolist()
metric_model['confusion_matrix'] = cm.tolist()
metric_model['accuracy'] = accuracy_score(y_pred, self.y_test)
metric_model['estimator_classes'] = estimator.classes_.tolist()
self.metrics[name_model] = metric_model
# print(self.metrics)
def get_feature_importance(self):
non_tree_based_models = ['KNeighborsClassifier', 'MLPClassifier', 'SVC',
'LinearDiscriminantAnalysis', 'GaussianNB',
'LogisticRegression', 'KNeighborsRegressor',
'MLPRegressor', 'SVR', 'LinearRegression',
'BayesianRidge']
models = self.estimators.keys()
if self.problem_type == 'classification':
for name_model in models:
feature_imp = {'feature': [], 'importance':[]}
if name_model in non_tree_based_models:
# estimator = self.estimators[name_model]
# y_pred = estimator.predict(self.X_test.values)
kbest = SelectKBest(score_func=chi2, k=self.X_train.shape[1])
kbest = kbest.fit(self.X_train, self.y_train)
print(kbest.scores_)
feature_importance = kbest.scores_
feature_names = list(self.X_train.columns)
for score, name in sorted(zip(feature_importance, feature_names), reverse=True):
feature_imp['feature'].append(name)
feature_imp['importance'].append(score)
df_fi =
|
pd.DataFrame(feature_imp)
|
pandas.DataFrame
|
"""
Discriminating GH13 ASs and SHs with Random Forest.
"""
# Imports
#=====================#
import pandas as pd
import numpy as np
from scipy import stats
import random
from Bio import SeqIO
import os
import subprocess
from imblearn.under_sampling import RandomUnderSampler
from Bio.Blast.Applications import NcbiblastpCommandline
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import KFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
import warnings
warnings.filterwarnings("ignore")
import bioinformatics as bioinf
# Prepare sequences and data
#=====================================================#
GH13_df = pd.read_csv('results_final/ncbi_subtypes.csv')
GH13_SH = GH13_df[(GH13_df.ncbi_pred_class==0)]
accession_SH = GH13_SH.Accession.tolist()
accession_all = bioinf.get_accession('fasta/initial_blast/nrblast_all.fasta')
GH13 = [1 if x in accession_SH else 0 for x in accession_all]
# class labels
y = pd.Series(GH13)
GH13_not_SH = y[y==0]
GH13_yes_SH = y[y==1]
# Derive features for machine learning with one-hot encoding
#============================================================#
cat_domain_fasta = 'fasta/GH13_positions_only/GH13_cat.fasta'
sequence_df = bioinf.fasta_to_df(cat_domain_fasta)
X_features = pd.DataFrame() # empty dataframe for storing features
for i in range(len(sequence_df.columns)):
# Convert amino acids to integers
X_resid = list(sequence_df.iloc[:,i])
labelencoder = LabelEncoder()
X_label = list(labelencoder.fit_transform(X_resid))
X_resid_unique = sorted(set(X_resid))
X_label_unique = sorted(set(X_label))
# Map integer labels to amino acids
label_resid = [X_label.index(num) for num in X_label_unique]
label_resid = [X_resid[num] for num in label_resid]
# Convert labels to binary features (one-hot encoding)
onehotencoder = OneHotEncoder()
X_label = pd.DataFrame(X_label) # convert to 2D array
X_encoded = onehotencoder.fit_transform(X_label).toarray()
X_encoded = pd.DataFrame(X_encoded)
# Name encoded features (residue + position, e.g G434)
X_encoded.columns = ['{0}{1}'.format(res,i+1) for res in label_resid]
if X_encoded.columns[0][0:1]=='-' :
del X_encoded['-{0}'.format(i+1)] # remove encoded features from gaps
# Append features to dataframe store
for col in X_encoded.columns:
X_features[col] = X_encoded[col]
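# Quick check of the encoding above (illustrative, not part of the original
# pipeline): every retained column is a binary indicator named residue +
# alignment position, e.g. 'G434' is 1 for sequences with glycine at position 434.
# print(X_features.shape)                 # (number of sequences, number of indicator columns)
# print(X_features.columns[:5].tolist())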
# Randomly split data to validation set and test set
#====================================================#
# Test set data (10% of total data)
SH_test_size = int(0.1 * len(GH13_yes_SH))
AS_test_size = int(0.1 * len(GH13_not_SH))
SH_test_indices = random.sample(list(GH13_yes_SH.index), SH_test_size)
AS_test_indices = random.sample(list(GH13_not_SH.index), AS_test_size)
test_indices = SH_test_indices + AS_test_indices
test_indices = sorted(test_indices)
# Validation set data (90% of total data)
val_indices = [x for x in list(y.index) if x not in test_indices]
# X (features) and y for validation and test sets
X_val = X_features.iloc[val_indices,:]
y_val = y.iloc[val_indices]
X_test_sep = X_features.iloc[test_indices,:]
y_test_sep = y.iloc[test_indices]
# Apply random forests to validation set using all features
#=============================================================#
# Empty lists for storing final results
sens_store, spec_store, acc_store, mcc_store, featimp_store = [], [], [], [], []
# Function for evaluating performance
def evalPerf(y_test, y_pred):
'''Return (sensitivity, specificity, accuracy, MCC, p_value)'''
cm = confusion_matrix(y_test, y_pred)
tn, tp, fn, fp = cm[0][0], cm[1][1], cm[1][0], cm[0][1]
n = tp + fp + tn + fn
accuracy = (tp + tn)/n * 100
mcc = ((tp*tn) - (fp*fn))/np.sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn))
sens = tp/(tp + fn) * 100 if tp + fn != 0 else 0
spec = tn/(tn + fp) * 100 if tn + fp != 0 else 0
if tp == 1 or fp == 0 or fn ==0 or tn == 1:
p_value = 0
else:
table = np.array([[tp, fp], [fn, tn]]) # AS and SH have same contingency table
p_value = stats.chi2_contingency(table)[1]
return [sens, spec, accuracy, mcc, p_value]
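# Quick sanity check for evalPerf (illustrative only, not part of the original
# analysis): with tp=8, fp=1, fn=1, tn=10 the accuracy should be 90%.
def _evalPerf_sanity_check():
    y_true = [1]*9 + [0]*11
    y_pred = [1]*8 + [0] + [1] + [0]*10
    sens, spec, accuracy, mcc, p_value = evalPerf(y_true, y_pred)
    assert round(accuracy, 1) == 90.0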
# 100 repetitions of 10-fold cross validation
for r in range(100):
RUS = RandomUnderSampler(random_state=None)
X_select, y_select = RUS.fit_resample(X_val, y_val)
X_select, y_select = pd.DataFrame(X_select), pd.Series(y_select)
# 10-fold cross validation
kf = KFold(n_splits=10, shuffle=True, random_state=None)
kf_indices = kf.split(X_select)
for train_index, test_index in kf_indices:
X_train, y_train = X_select.iloc[train_index, :], y_select.iloc[train_index]
X_test, y_test = X_select.iloc[test_index, :], y_select.iloc[test_index]
# Fit random forest classifier to training data
classifier = RandomForestClassifier(n_estimators=800, n_jobs=-1)
classifier.fit(X_train, y_train)
# Test classifier and evaluate performance
y_pred = classifier.predict(X_test)
sens, spec, accuracy, mcc, pvalue = evalPerf(y_test, y_pred)
featimp = list(classifier.feature_importances_)
# Save results
sens_store.append(sens)
spec_store.append(spec)
acc_store.append(accuracy)
mcc_store.append(mcc)
featimp_store.append(featimp)
# Average results over all cross-validation folds (100 repetitions x 10 folds)
store = [np.mean(sens_store), np.std(sens_store), np.mean(spec_store), np.std(spec_store),
np.mean(acc_store), np.std(acc_store), np.mean(mcc_store), np.std(mcc_store)]
store = pd.DataFrame(store, index=['sens_mean', 'sens_std', 'spec_mean', 'spec_std',
'acc_mean', 'acc_std', 'mcc_mean', 'mcc_std'])
featimp_mean = pd.DataFrame(featimp_store).mean(axis=0)
featimp_std = pd.DataFrame(featimp_store).std(axis=0)
store_featimp = pd.DataFrame([X_val.columns, featimp_mean, featimp_std],
index=['features', 'mean', 'std']).transpose()
# Write results to spreadsheet
store.to_csv('results_final/ml_rf_pred/perf_all.csv')
store_featimp.to_csv('results_final/ml_rf_pred/featimp_all.csv')
# Use only top 50 features
#===================================#
# Top 50 features
top50_index = list(store_featimp.sort_values(by='mean', ascending=False).iloc[:50,:].index)
X_val_top50 = X_val.iloc[:,top50_index]
# Empty lists for storing final results
sens_store, spec_store, acc_store, mcc_store, featimp_store = [], [], [], [], []
# 100 repetitions of 10-fold cross validation
for r in range(100):
RUS = RandomUnderSampler(random_state=None)
X_select, y_select = RUS.fit_resample(X_val_top50, y_val)
X_select, y_select = pd.DataFrame(X_select),
|
pd.Series(y_select)
|
pandas.Series
|
# Implementation of the Rabbits Grass Weeds model in Python
import simpy
import random
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.colors
# Register the colors: white - rabbit, green - grass, violet - weeds
map_colors = matplotlib.colors.ListedColormap(["black", "white", "green", "violet"])
plt.register_cmap(cmap=map_colors)
# Define the rabbit class
class Agent_rabbit:
def __init__(self, color, energy, born_energy, born_p, grass_energy, weed_energy, city):
self.city = city
self.color = color
self.first_energy = energy
self.energy = energy
self.born_energy = born_energy
self.born_p = born_p
self.grass_energy = grass_energy
self.weed_energy = weed_energy
self.loc = self.gen_loc()
self.city.env.process(self.iteration())
def gen_loc(self):
while True:
loc = (random.randrange(self.city.city_dim),
random.randrange(self.city.city_dim))
if loc not in self.city.occupied:
self.city.occupied[loc] = self
return(loc)
# The rabbit picks a random neighbouring cell, checks whether there is food there and eats it if so
def eating(self):
dx = random.sample([-1,0,1],1)[0]
dy = random.sample([-1,0,1],1)[0]
if dx != 0 or dy != 0:
ref_loc = ((self.loc[0] + dx) % self.city.city_dim,
(self.loc[1] + dy) % self.city.city_dim)
if ref_loc in self.city.occupied:
# If the encountered cell is grass
if self.city.occupied[ref_loc].color == 2:
self.energy += self.grass_energy
self.city.occupied[ref_loc] = self
del self.city.occupied[self.loc]
self.loc = ref_loc
# If the encountered cell is a weed
if self.city.occupied[ref_loc].color == 3:
self.energy += self.weed_energy
self.city.occupied[ref_loc] = self
del self.city.occupied[self.loc]
self.loc = ref_loc
# Additionally, after eating a patch the rabbit moves onto its cell
return(self.energy > 0)
def move(self):
yield self.city.env.timeout(random.random())
new_loc = self.gen_loc()
del self.city.occupied[self.loc]
self.loc = new_loc
def die(self):
yield self.city.env.timeout(random.random())
del self.city.occupied[self.loc]
def iteration(self):
yield self.city.env.timeout(random.random())
while True:
yield self.city.env.timeout(1)
# Check whether the rabbit still has any energy; if so, carry on
if self.energy>0:
# Every move costs the rabbit one energy point
self.energy -= 1
# The rabbit tries to eat something, i.e. to gain energy
jedzenie = self.eating()
if jedzenie:
# If the rabbit has enough energy and luck is on its side
# (for the assumed probability), it reproduces :)
if self.energy>self.born_energy and self.born_p>random.uniform(0,1):
Agent_rabbit(self.color, self.first_energy, self.born_energy, self.born_p,
self.grass_energy, self.weed_energy, self.city)
# Reproduction costs additional energy
self.energy = self.energy-self.born_energy
# Count the newborn rabbit in the final population
self.city.rabbit_final += 1
# If the rabbit still has strength, it moves on
# (registered as a simpy process so that the yield inside move() actually runs)
if self.energy >= 1:
self.city.env.process(self.move())
# If the rabbit has run out of energy, it dies
else:
self.city.env.process(self.die())
# Define the class for growing grass and weeds
class Agent:
def __init__(self, color, energy, prob, city):
self.city = city
self.color = color
self.energy = energy
self.prob = prob
self.loc = self.gen_loc()
self.city.env.process(self.born())
def gen_loc(self):
while True:
loc = (random.randrange(self.city.city_dim),
random.randrange(self.city.city_dim))
if loc not in self.city.occupied:
self.city.occupied[loc] = self
return(loc)
# Grass or a weed grows at a random location with a given probability
def born(self):
yield self.city.env.timeout(random.random())
if random.uniform(0, 1) < self.prob:
Agent(self.color, self.energy, self.prob, self.city)
class City:
def __init__(self, city_dim, grass_density, weed_density, max_iter, rabbit_num, rabbit_energy,
rabbit_born_energy, rabbit_born_p, grass_energy, weed_energy, grass_prob, weed_prob):
self.city_dim = city_dim
self.grass_density = grass_density
self.weed_density = weed_density
self.max_iter = max_iter
self.rabbit_num = rabbit_num
self.rabbit_energy = rabbit_energy
self.rabbit_born_energy = rabbit_born_energy
self.rabbit_born_p = rabbit_born_p
self.grass_energy = grass_energy
self.weed_energy = weed_energy
self.grass_prob = grass_prob
self.weed_prob = weed_prob
self.rabbit_final = rabbit_num
def plot(self, start):
if start:
plt.subplot(1, 2, 1)
plt.title("Start")
else:
plt.subplot(1, 2, 2)
plt.title("Stop")
data = np.zeros((self.city_dim, self.city_dim))
for agent in self.occupied:
data[agent[0], agent[1]] = self.occupied[agent].color
plt.imshow(data, cmap=map_colors, interpolation="none")
def run(self, plotting=True):
self.occupied = dict()
self.env = simpy.Environment()
grass_count = int(self.city_dim * self.city_dim * self.grass_density)
weed_count = int(self.city_dim * self.city_dim * self.weed_density)
for i in range(self.rabbit_num):
Agent_rabbit(1, self.rabbit_energy, self.rabbit_born_energy, self.rabbit_born_p, self.grass_energy, self.weed_energy, self)
for i in range(grass_count):
Agent(2, self.grass_energy, self.grass_prob, self)
for i in range(weed_count):
Agent(3, self.weed_energy, self.weed_prob, self)
if plotting:
plt.figure(1)
self.plot(True)
self.env.run(until=self.max_iter)
if plotting:
self.plot(False)
plt.show()
# Return the number of rabbits in the final population
return(self.rabbit_final)
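# Usage sketch (illustrative, not part of the original experiment loop below):
# a single simulation on a 50x50 map that returns the final rabbit count.
def _single_run_demo():
    city = City(city_dim=50, grass_density=0.05, weed_density=0.05,
                max_iter=500, rabbit_num=150, rabbit_energy=8,
                rabbit_born_energy=7, rabbit_born_p=0.4,
                grass_energy=3, weed_energy=1,
                grass_prob=0.3, weed_prob=0.5)
    return city.run(plotting=False)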
## Parameter settings
DIM = 50 # Map dimension
GRASS_DENSITY_FROM = 0.02 # Minimum fraction of the map covered by grass
GRASS_DENSITY_TO = 0.13 # Maximum fraction of the map covered by grass
WEED_DENSITY_FROM = 0.02 # Minimum fraction of the map covered by weeds
WEED_DENSITY_TO = 0.13 # Maximum fraction of the map covered by weeds
RAB_EN_FROM = 5 # Minimum initial energy of a rabbit
RAB_EN_TO = 11 # Maximum initial energy of a rabbit
RAB_NUM = 150 # Number of rabbits in the initial population
MAX_ITER = 500 # Maximum number of iterations
GRASS_P = 0.3 # Probability that new grass appears
WEED_P = 0.5 # Probability that a new weed appears
GRASS_EN = 3 # Energy gained from eating grass
WEED_EN = 1 # Energy gained from eating a weed
RAB_BORN_EN = 7 # Energy a rabbit needs in order to reproduce
RAB_P = 0.4 # Probability of rabbit reproduction
## Build parameter combinations
grass_dt = pd.DataFrame({'grass_dt': np.arange(GRASS_DENSITY_FROM, GRASS_DENSITY_TO, 0.01)})
weed_dt = pd.DataFrame({'weed_dt': np.arange(WEED_DENSITY_FROM, WEED_DENSITY_TO, 0.01)})
rab_energy = pd.DataFrame({'rab_energy': np.arange(RAB_EN_FROM, RAB_EN_TO, 1)})
wyniki =
|
pd.DataFrame(columns=['grass_dt', 'weed_dt', 'rab_energy', 'rab_number'])
|
pandas.DataFrame
|
import pandas as __pd
import datetime as __dt
from dateutil import relativedelta as __rd
from multiprocessing import Pool as __Pool
import multiprocessing as __mp
import requests as __requests
from seffaflik.__ortak.__araclar import make_requests as __make_requests
from seffaflik.__ortak import __dogrulama as __dogrulama
__first_part_url = "production/"
def santraller(tarih=__dt.datetime.now().strftime("%Y-%m-%d")):
"""
İlgili tarihte EPİAŞ sistemine kayıtlı YEKDEM santral bilgilerini vermektedir.
Parametre
----------
tarih : %YYYY-%AA-%GG formatında tarih (Varsayılan: bugün)
Geri Dönüş Değeri
-----------------
Santral Bilgileri(Id, Adı, EIC Kodu, Kısa Adı)
"""
if __dogrulama.__tarih_dogrulama(tarih):
try:
particular_url = __first_part_url + "renewable-sm-licensed-power-plant-list?period=" + tarih
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["powerPlantList"])
df.rename(index=str, columns={"id": "Id", "name": "Adı", "eic": "EIC Kodu",
"shortName": "Kısa Adı"}, inplace=True)
df = df[["Id", "Adı", "EIC Kodu", "Kısa Adı"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
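# Usage sketch (assumption: network access to the EPİAŞ Transparency API):
#
#     santraller("2021-06-01")
#
# returns a DataFrame with the columns ["Id", "Adı", "EIC Kodu", "Kısa Adı"],
# or an empty DataFrame if the request fails.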
def kurulu_guc(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarih aralığına tekabül eden aylar için EPİAŞ sistemine kayıtlı YEKDEM santrallerin kaynak bazlı toplam
kurulu güç bilgisini vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
Geri Dönüş Değeri
-----------------
Kurulu Güç Bilgisi (Tarih, Kurulu Güç)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
ilk = __dt.datetime.strptime(baslangic_tarihi[:7], '%Y-%m')
son = __dt.datetime.strptime(bitis_tarihi[:7], '%Y-%m')
date_list = []
while ilk <= son and ilk <= __dt.datetime.today():
date_list.append(ilk.strftime("%Y-%m-%d"))
ilk = ilk + __rd.relativedelta(months=+1)
with __Pool(__mp.cpu_count()) as p:
df_list = p.map(__yekdem_kurulu_guc, date_list)
return __pd.concat(df_list, sort=False)
def lisansli_uevm(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarih aralığı için saatlik YEKDEM kapsamındaki lisanslı santrallerin kaynak bazında uzlaştırmaya esas veriş
miktarı (UEVM) bilgisini vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
Geri Dönüş Değeri
-----------------
Saatlik YEKDEM Lisanslı UEVM (MWh)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = \
__first_part_url + "renewable-sm-licensed-injection-quantity" + "?startDate=" + baslangic_tarihi + \
"&endDate=" + bitis_tarihi
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["renewableSMProductionList"])
df["Saat"] = df["date"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["date"].apply(lambda d: d[:10]))
df.rename(index=str,
columns={"canalType": "Kanal Tipi", "riverType": "Nehir Tipi", "biogas": "Biyogaz",
"biomass": "Biyokütle", "landfillGas": "Çöp Gazı", "sun": "Güneş",
"geothermal": "Jeotermal", "reservoir": "Rezervuarlı", "wind": "Rüzgar",
"total": "Toplam", "others": "Diğer"},
inplace=True)
df = df[
["Tarih", "Saat", "Rüzgar", "Jeotermal", "Rezervuarlı", "Kanal Tipi", "Nehir Tipi", "Çöp Gazı",
"Biyogaz", "Güneş", "Biyokütle", "Diğer", "Toplam"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def lisanssiz_uevm(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarih aralığı için saatlik YEKDEM kapsamındaki lisanssiz santrallerin kaynak bazında uzlaştırmaya esas veriş
miktarı (UEVM) bilgisini vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
Geri Dönüş Değeri
-----------------
Saatlik YEKDEM Lisanssiz UEVM (MWh)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = \
__first_part_url + "renewable-unlicenced-generation-amount" + "?startDate=" + baslangic_tarihi + \
"&endDate=" + bitis_tarihi
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["renewableUnlicencedGenerationAmountList"])
df["Saat"] = df["date"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["date"].apply(lambda d: d[:10]))
df.rename(index=str,
columns={"canalType": "Kanal Tipi", "riverType": "Nehir Tipi", "biogas": "Biyogaz",
"biomass": "Biyokütle", "lfg": "Çöp Gazı", "sun": "Güneş",
"geothermal": "Jeotermal", "reservoir": "Rezervuarlı", "wind": "Rüzgar",
"total": "Toplam", "others": "Diğer"},
inplace=True)
df = df[
["Tarih", "Saat", "Rüzgar", "Kanal Tipi", "Biyogaz", "Güneş", "Biyokütle", "Diğer", "Toplam"]]
except (KeyError, TypeError):
            return __pd.DataFrame()
        else:
            return df
""" Matrix profile anomaly detection.
Reference:
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. (2016, December).
Matrix profile I: all pairs similarity joins for time series: a unifying view that includes motifs, discords and shapelets.
In Data Mining (ICDM), 2016 IEEE 16th International Conference on (pp. 1317-1322). IEEE.
"""
# Authors: <NAME>, 2018.
import math
import numpy as np
import pandas as pd
import scipy.signal as sps
from tqdm import tqdm
from .BaseDetector import BaseDetector
# -------------
# CLASSES
# -------------
class MatrixProfileAD(BaseDetector):
""" Anomaly detection in time series using the matrix profile
Parameters
----------
m : int (default=10)
Window size.
contamination : float (default=0.1)
Estimate of the expected percentage of anomalies in the data.
Comments
--------
- This only works on time series data.
"""
def __init__(self, m=10, contamination=0.1,
tol=1e-8, verbose=False):
super(MatrixProfileAD, self).__init__()
self.m = int(m)
self.contamination = float(contamination)
self.tol = float(tol)
self.verbose = bool(verbose)
def ab_join(self, T, split):
""" Compute the ABjoin and BAjoin side-by-side,
where `split` determines the splitting point.
"""
# algorithm options
excZoneLen = int(np.round(self.m * 0.5))
radius = 1.1
dataLen = len(T)
proLen = dataLen - self.m + 1
# change Nan and Inf to zero
T = np.nan_to_num(T)
# precompute the mean, standard deviation
        s = pd.Series(T)
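        # The remainder of ab_join is not shown in this excerpt. A common matrix-profile
        # precomputation at this point (an illustrative sketch, not necessarily the
        # original implementation) derives the rolling mean and standard deviation:
        #     mu = s.rolling(self.m).mean().values[self.m - 1:]
        #     sigma = s.rolling(self.m).std(ddof=0).values[self.m - 1:]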
import ast
import numpy as np
import pandas as pd
from pathlib import Path
from itertools import zip_longest
def from_np_array(array_string):
array_string = ','.join(array_string.replace('[ ', '[').split())
return np.array(ast.literal_eval(array_string))
class Dataset:
EPISODE = 'episode'
REWARD = 'reward'
STATE = 'state'
ACTION = 'action'
NEW = 'new_state'
FAILED = 'failed'
DONE = 'done'
_INDEX = 'index'
DEFAULT_COLUMNS = [EPISODE, REWARD, STATE, ACTION, NEW, FAILED, DONE]
DEFAULT_COLUMNS_WO_EPISODE = [REWARD, STATE, ACTION, NEW, FAILED, DONE]
DEFAULT_ARRAY_CAST = [STATE, ACTION, NEW]
def __init__(self, *columns, group_name=None, name=None):
self.group_name = group_name if group_name is not None\
else self.EPISODE
self.columns = self.DEFAULT_COLUMNS if len(columns) == 0\
else list(columns)
self.columns_wo_group = [cname for cname in self.columns
if cname != self.group_name]
self.columns = [self.group_name] + self.columns_wo_group
self.df = pd.DataFrame(columns=self.columns)
self.df.index.name = Dataset._INDEX
self.name = name
def __getattr__(self, item):
if item in self.__dict__:
return getattr(self, item)
return getattr(self.df, item)
def _complete_args(self, args):
return [[arg] for _, arg in zip_longest(self.columns, args)]
def _list_wrap(self, args):
if isinstance(args, dict):
return {argname: [arg] for argname, arg in args.items()}
else:
return [[arg] for arg in args]
def add_entry(self, *args, **kwargs):
entry = {kw: [arg] for kw, arg in zip(self.columns, args)}
entry.update({kw: [arg] for kw, arg in kwargs.items()})
        # DataFrame.append was removed in pandas 2.0; pd.concat is the equivalent operation.
        self.df = pd.concat([self.df, pd.DataFrame(entry)], ignore_index=True)
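    # Illustrative usage sketch (not part of the original module; argument order
    # follows DEFAULT_COLUMNS):
    #     ds = Dataset()
    #     ds.add_entry(0, 1.0, [0.0], [1.0], [0.1], False, False)
    #     ds.df  # one-row DataFrame with the default columns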
def add_group(self, group, group_number=None):
if group.get(self.group_name) is None:
if group_number is None:
group_number = self.df[self.group_name].max() + 1
                if pd.isna(group_number):
import pandas as pd
import numpy as np
import pickle
from scipy.stats import ranksums, chisquare
import numpy as np
# PART 1 ----------------------------------------------------------------------
with open('/project/M-ABeICU176709/delirium/data/inputs/master/ids/ids_train.pickle', 'rb') as f :
ids_train = pickle.load(f)
with open('/project/M-ABeICU176709/delirium/data/inputs/master/ids/ids_validation.pickle', 'rb') as f :
ids_validation = pickle.load(f)
with open('/project/M-ABeICU176709/delirium/data/inputs/master/ids/ids_calibration.pickle', 'rb') as f :
ids_calibration = pickle.load(f)
with open('/project/M-ABeICU176709/delirium/data/inputs/master/ids/ids_test.pickle', 'rb') as f :
ids_test = pickle.load(f)
ids_all = ids_train + ids_validation + ids_calibration + ids_test
ADMISSIONS = pd.read_pickle('/project/M-ABeICU176709/ABeICU/data/ADMISSIONS.pickle', compression = 'zip')
ADMISSIONS = ADMISSIONS[(ADMISSIONS['ADMISSION_ID'].isin(ids_all))]
ADMISSIONS['ICU_ADMIT_DATETIME'] = pd.to_datetime(ADMISSIONS['ICU_ADMIT_DATETIME'])
ADMISSIONS['ICU_DISCH_DATETIME'] = pd.to_datetime(ADMISSIONS['ICU_DISCH_DATETIME'])
ADMISSIONS = ADMISSIONS.loc[ADMISSIONS['ADMISSION_ID'].isin(ids_all)].reset_index(drop=True)
ADMISSIONS = ADMISSIONS[['ADMISSION_ID', 'ICU_ADMIT_DATETIME', 'ICU_DISCH_DATETIME', 'ICU_EXPIRE_FLAG', 'DELIRIUM_FLAG']]
ADMISSIONS['delta'] = ADMISSIONS.apply(lambda x:
(x['ICU_DISCH_DATETIME'] - x['ICU_ADMIT_DATETIME']).total_seconds() / 86400, axis=1)
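# Note: the row-wise apply above can be written vectorised as
#   ADMISSIONS['delta'] = (ADMISSIONS['ICU_DISCH_DATETIME']
#                          - ADMISSIONS['ICU_ADMIT_DATETIME']).dt.total_seconds() / 86400
# which yields the same length of stay in days.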
wdel = list(ADMISSIONS.loc[ADMISSIONS['DELIRIUM_FLAG'] == 1]['ADMISSION_ID'].unique())
wodel = list(ADMISSIONS.loc[ADMISSIONS['DELIRIUM_FLAG'] == 0]['ADMISSION_ID'].unique())
p1_wdel = ADMISSIONS.loc[ADMISSIONS['ADMISSION_ID'].isin(wdel)].copy()
p1_wodel = ADMISSIONS.loc[ADMISSIONS['ADMISSION_ID'].isin(wodel)].copy()
# p-value LOS
p1_wdel_np = p1_wdel['delta'].to_numpy()
p1_wodel_np = p1_wodel['delta'].to_numpy()
_, pval_los = ranksums(p1_wdel_np, p1_wodel_np)
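# ranksums performs a two-sided Wilcoxon rank-sum test; pval_los is the p-value for the
# difference in ICU length of stay (days) between admissions with and without delirium.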
# PART 2 ----------------------------------------------------------------------
files = [
'master_train.pickle',
'master_validation.pickle',
'master_calibration.pickle',
'master_test.pickle'
]
PATH = '/project/M-ABeICU176709/delirium/data/inputs/master/'
df = pd.DataFrame()
for f in files:
print(f)
    temp = pd.read_pickle(PATH+f, compression='zip')
import requests
import pandas as pd
import numpy as np
import time
from pandas.tseries.offsets import Day
from urllib import parse
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from concurrent.futures import ThreadPoolExecutor
import time
import random
class Driver(object):
def __init__(self):
chrome_options = Options()
        chrome_options.add_argument('--no-sandbox') # avoids the "DevToolsActivePort file does not exist" error
        chrome_options.add_argument('window-size=1920x3000') # set the browser resolution
        # proxy IP pool
        # chrome_options.add_argument("--proxy-server=http://172.16.17.32:8086")
        chrome_options.add_argument('--disable-gpu') # recommended in Chrome's docs to work around a bug
        chrome_options.add_argument('--hide-scrollbars') # hide scrollbars, for some special pages
        chrome_options.add_argument('blink-settings=imagesEnabled=false') # do not load images, speeds things up
        #chrome_options.add_argument('--headless') # run without a visible window; on Linux without display support startup fails unless this is set
self.driver = webdriver.Chrome(options=chrome_options)
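# Note: each Driver instance launches its own Chrome process; with the ThreadPoolExecutor
# imported above, the intended pattern is presumably one Driver per worker thread
# (an assumption based on the imports, not stated in the original source).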
class Load_Data(object):
def __init__(self):
'''
chrome_options = Options()
        chrome_options.add_argument('--no-sandbox') # avoids the "DevToolsActivePort file does not exist" error
        chrome_options.add_argument('window-size=1920x3000') # set the browser resolution
        # proxy IP pool
        # chrome_options.add_argument("--proxy-server=http://172.16.17.32:8086")
        chrome_options.add_argument('--disable-gpu') # recommended in Chrome's docs to work around a bug
        chrome_options.add_argument('--hide-scrollbars') # hide scrollbars, for some special pages
        chrome_options.add_argument('blink-settings=imagesEnabled=false') # do not load images, speeds things up
        chrome_options.add_argument('--headless') # run without a visible window; required on Linux hosts without a display, otherwise startup fails
self.driver = webdriver.Chrome(options=chrome_options)
'''
        # Regions: tree structure
self.area_tree={
"area_name":[],
"area_parent":[],
"area_price":[],
"area_x_value":[],
"area_y_value":[],
"area_url_val":[],
"area_stage":[]
}
        # Detailed data for each residential complex
        self.estate_obj={
            'estate_name':[], # complex name
            'estate_house_resources':[], # number of listings
            'estate_sales_count':[], # sales count
            'estate_activity_rate':[], # activity rating
            'estate_property_rate':[], # property-management rating
            'estate_education_rate':[], # education rating
            'estate_plate_rate':[], # district ("plate") rating
            #'estate_search_rate':[], # search popularity
            'estate_basic_info':[], # basic information
            'estate_amenities_info':[], # amenities information
            'estate_traffic_info':[], # transport information
            "estate_around_instrument_info":[] # surrounding facilities information
        }
        # Monthly housing price data
        self.detail_obj={
            'detail_estate':[], # complex name
            'detail_date':[], # date
            'detail_price':[] # price
        }
def get_block_list(self):
block_url='https://fangjia.fang.com/fangjia/map/getmapdata/hz?district=&commerce=&x1=undefined&y1=undefined&x2=undefined&y2=undefined&v=20150116&newcode='
block_res=self.get_req(block_url)
self.block_list=block_res.json()['project']
print("杭州市区数:",len(self.block_list))
return self.block_list
def load_block(self,block,driver):
self.area_tree["area_name"].append(block["name"])
self.area_tree["area_price"].append(block["price"])
self.area_tree["area_x_value"].append(block["px"])
self.area_tree["area_y_value"].append(block["py"])
self.area_tree["area_url_val"].append(block["url"])
self.area_tree["area_parent"].append('杭州')
self.area_tree["area_stage"].append(1)
estate_url='https://fangjia.fang.com/fangjia/map/getmapdata/hz?district=%s&commerce=&x1=undefined&y1=undefined&x2=undefined&y2=undefined&v=20150116&newcode=' % parse.quote(block['name'])
estate_res=self.get_req(estate_url)
estate_list=estate_res.json()
print(block["name"],"区有",len(estate_list['project']),"个片区")
for estate in estate_list['project']:
self.load_estate(estate,block,driver)
def load_estate(self,estate,block,driver):
self.area_tree["area_name"].append(estate["name"])
self.area_tree['area_price'].append(estate["price"])
self.area_tree['area_x_value'].append(estate["px"])
self.area_tree['area_y_value'].append(estate["py"])
self.area_tree['area_url_val'].append(estate["url"])
self.area_tree['area_parent'].append(block["name"])
self.area_tree['area_stage'].append(2)
valiage_url='https://fangjia.fang.com/fangjia/map/getmapdata/hz?district=%s&commerce=%s&x1=%s&y1=%s&x2=%s&y2=%s&v=20150116&newcode=' % (parse.quote(block['name']),parse.quote(estate['name']),block['px'],block['py'],estate['px'],estate['py'])
valiage_res=self.get_req(valiage_url)
valiage_list=valiage_res.json()
print(estate["name"],"片区有",len(valiage_list['project']),"个小区")
for valiage in valiage_list['project']:
self.load_valiage(valiage,estate,block,driver)
def load_valiage(self,valiage,estate,block,driver):
self.area_tree['area_name'].append(valiage["name"])
self.area_tree['area_price'].append(valiage["price"])
self.area_tree['area_x_value'].append(valiage["px"])
self.area_tree['area_y_value'].append(valiage["py"])
self.area_tree['area_url_val'].append(valiage["url"])
self.area_tree['area_parent'].append(estate["name"])
self.area_tree['area_stage'].append(3)
self.estate_obj['estate_name'].append(valiage["name"])
print(valiage["name"])
url_res=self.get_page_url(valiage,driver)
if url_res:
pass
else:
            # captcha encountered: retry this entry
self.load_valiage(valiage,estate,block,driver)
return False
star_res,estate_param = self.get_page_star(driver)
detail_res=self.get_detail_info(estate_param,valiage,estate,driver)
#self.get_grade_data()
self.get_history_price(valiage)
def get_page_url(self,valiage,driver):
driver.get("http:%s" % valiage["url"])
return True
# try:
# self.estate_obj['estate_search_rate'].append(driver.find_element_by_xpath('/html/body/div[3]/div[3]/div[2]/div[3]/ul/li[1]/b').text)
# except Exception as e:
# print("获取搜索指数报错:",e)
# if driver.current_url.find('code')>-1:
# #driver.find_element_by_xpath('//*[@id="verify_page"]/div/div[2]/p').text=="请输入图片中的验证码:":
        #     # re-process this entry
# return False
# else:
# self.estate_obj['estate_search_rate'].append(" ")
# return True
def get_page_star(self,driver):
try:
estate_param=driver.find_element_by_xpath('//*[@id="pc<PASSWORD>"]').get_attribute('href')
driver.get(estate_param)
            # click to expose the statically rendered elements
js = "document.getElementById('main').children[1].style.display='block'"
driver.execute_script(js)
self.estate_obj['estate_house_resources'].append(driver.find_element_by_xpath('//*[@id="<PASSWORD>C<PASSWORD>"]/a[1]/div/p[2]').text)
self.estate_obj['estate_sales_count'].append(driver.find_element_by_xpath('//*[@id="<PASSWORD>"]/a[2]/div/p[2]').text)
tag=driver.find_element_by_xpath('//*[@id="main"]/div[2]')
try:
driver.find_element_by_xpath('//*[@id="main"]/div[1]').click()
                self.estate_obj['estate_activity_rate'].append(tag.text.split('\n')[1].split(':')[1].replace(' ','')) # activity rating
                self.estate_obj['estate_property_rate'].append(tag.text.split('\n')[2].split(':')[1].replace(' ','')) # property-management rating
                self.estate_obj['estate_education_rate'].append(tag.text.split('\n')[3].split(':')[1].replace(' ','')) # education rating
                self.estate_obj['estate_plate_rate'].append(tag.text.split('\n')[4].split(':')[1].replace(' ','')) # district ("plate") rating
except Exception as e:
print("点击隐藏标签报错:",e)
self.estate_obj['estate_activity_rate'].append(" ")
self.estate_obj['estate_property_rate'].append(" ")
self.estate_obj['estate_education_rate'].append(" ")
self.estate_obj['estate_plate_rate'].append(" ")
            #=================== too many requests triggers captcha mode =====================#
            #=================== could be solved with image recognition =====================#
            #=================== or by setting a breakpoint and typing the code in manually =====================#
            #=================== after a few captcha entries it stops working and pages can no longer be requested =====================#
return True,estate_param
except Exception as e:
print("获取小区评星报错:",e)
if driver.current_url.find('code')>-1:
#driver.find_element_by_xpath('//*[@id="verify_page"]/div/div[2]/p').text=="请输入图片中的验证码:":
                # re-process this entry
return False,''
else:
self.estate_obj['estate_house_resources'].append(" ")
self.estate_obj['estate_sales_count'].append(" ")
return True,estate_param
def get_detail_info(self,estate_param,valiage,estate,driver):
try:
            # all detail fields, still to be cleaned downstream
#driver.get(estate_param+"/xiangqing/")
de=driver.find_element_by_xpath('//*[@id="kesfxqxq_A01_03_01"]/a')
driver.get(de.get_attribute('href'))
basic_info=driver.find_element_by_xpath('/html/body/div[3]/div[4]/div/div[2]/div[2]').text
amenities_info=driver.find_element_by_xpath('/html/body/div[3]/div[4]/div/div[3]/div[2]/dl').text
traffic_info=driver.find_element_by_xpath('//*[@id="trafficBox"]/div[2]/dl/dt').text
around_instrument=driver.find_element_by_xpath('/html/body/div[3]/div[4]/div/div[5]/div[2]/dl').text
self.estate_obj['estate_basic_info'].append(basic_info)
self.estate_obj['estate_amenities_info'].append(amenities_info)
self.estate_obj['estate_traffic_info'].append(traffic_info)
self.estate_obj['estate_around_instrument_info'].append(around_instrument)
return True
except Exception as e:
print("获取小区信息报错:",e)
if driver.current_url.find('code')>-1:
#driver.find_element_by_xpath('//*[@id="verify_page"]/div/div[2]/p').text=="请输入图片中的验证码:":
                # re-process this entry
#self.load_valiage(valiage,estate,block)
return False
else:
self.estate_obj['estate_basic_info'].append(" ")
self.estate_obj['estate_amenities_info'].append(" ")
self.estate_obj['estate_traffic_info'].append(" ")
self.estate_obj['estate_around_instrument_info'].append(" ")
return True
'''
def get_grade_data(self):
try:
            # enrich the rating data
driver.get(estate_param+"/pingji/")
basic_info=driver.find_element_by_xpath('/html/body/div[3]/div[4]/div[1]/div[2]/div[2]/dl').text
amenities_info=driver.find_element_by_xpath('/html/body/div[3]/div[4]/div[1]/div[3]/div[2]/dl').text
traffic_info=driver.find_element_by_xpath('/html/body/div[3]/div[4]/div[1]/div[4]/div[2]/dl').text
around_instrument=driver.find_element_by_xpath('/html/body/div[3]/div[4]/div[1]/div[5]/div[2]/dl').text
self.estate_obj['estate_basic_info'].append(basic_info)
self.estate_obj['estate_amenities_info'].append(amenities_info)
self.estate_obj['estate_traffic_info'].append(traffic_info)
self.estate_obj['estate_around_instrument_info'].append(around_instrument)
except Exception as e:
print("获取小区评级报错:",e)
'''
def get_history_price(self,valiage):
detail_url='https://fangjia.fang.com/fangjia/common/ajaxdetailtrenddata/hz?dataType=proj&projcode=%s&year=100' % valiage["url"].split('/')[-1].split('.')[0]
detail_res=self.get_req(detail_url)
        detail_list=detail_res.json() # two years of detailed data
for detail in detail_list:
#print(valiage["name"],"小区数据",len(detail_list))
self.detail_obj['detail_estate'].append(valiage["name"])
self.detail_obj['detail_price'].append(detail[1])
timeArray = time.localtime(detail[0]/1000)
otherStyleTime = time.strftime("%Y-%m", timeArray)
self.detail_obj['detail_date'].append(otherStyleTime)
def data2csv(self):
geography_data = {
"name":pd.Series(self.area_tree['area_name']),
"parent":pd.Series(self.area_tree['area_parent']),
"price": pd.Series(self.area_tree['area_price']),
"longitude":pd.Series(self.area_tree['area_x_value']),
"latitude": pd.Series(self.area_tree['area_y_value']),
"url":pd.Series(self.area_tree['area_url_val']),
"area_stage":pd.Series(self.area_tree['area_stage'])
}
geography_df = pd.DataFrame(geography_data,index=None)
geography_df.to_csv('./data/geography_tree.csv',index=False)
horse_info_data = {
"name":pd.Series(self.estate_obj['estate_name']),
"house_resources":pd.Series(self.estate_obj['estate_house_resources']),
"sales_count":pd.Series(self.estate_obj['estate_sales_count']),
"activity_rate":pd.Series(self.estate_obj['estate_activity_rate']),
"property_rate":pd.Series(self.estate_obj['estate_property_rate']),
"education_rate":pd.Series(self.estate_obj['estate_education_rate']),
"plate_rate":pd.Series(self.estate_obj['estate_plate_rate']),
#"search_rate":pd.Series(self.estate_obj['estate_search_rate']),
"basic_info":pd.Series(self.estate_obj['estate_basic_info']),
"amenities_info":pd.Series(self.estate_obj['estate_amenities_info']),
"traffic_info":
|
pd.Series(self.estate_obj['estate_traffic_info'])
|
pandas.Series
|
""" test the scalar Timestamp """
import pytz
import pytest
import dateutil
import calendar
import locale
import numpy as np
from dateutil.tz import tzutc
from pytz import timezone, utc
from datetime import datetime, timedelta
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.tseries import offsets
from pandas._libs.tslibs import conversion
from pandas._libs.tslibs.timezones import get_timezone, dateutil_gettz as gettz
from pandas.errors import OutOfBoundsDatetime
from pandas.compat import long, PY3
from pandas.compat.numpy import np_datetime64_compat
from pandas import Timestamp, Period, Timedelta, NaT
class TestTimestampProperties(object):
def test_properties_business(self):
ts = Timestamp('2017-10-01', freq='B')
control = Timestamp('2017-10-01')
assert ts.dayofweek == 6
assert not ts.is_month_start # not a weekday
assert not ts.is_quarter_start # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_start
assert control.is_quarter_start
ts = Timestamp('2017-09-30', freq='B')
control = Timestamp('2017-09-30')
assert ts.dayofweek == 5
assert not ts.is_month_end # not a weekday
assert not ts.is_quarter_end # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_end
assert control.is_quarter_end
def test_fields(self):
def check(value, equal):
# that we are int/long like
assert isinstance(value, (int, long))
assert value == equal
# GH 10050
ts = Timestamp('2015-05-10 09:06:03.000100001')
check(ts.year, 2015)
check(ts.month, 5)
check(ts.day, 10)
check(ts.hour, 9)
check(ts.minute, 6)
check(ts.second, 3)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 100)
check(ts.nanosecond, 1)
check(ts.dayofweek, 6)
check(ts.quarter, 2)
check(ts.dayofyear, 130)
check(ts.week, 19)
check(ts.daysinmonth, 31)
check(ts.daysinmonth, 31)
# GH 13303
ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern')
check(ts.year, 2014)
check(ts.month, 12)
check(ts.day, 31)
check(ts.hour, 23)
check(ts.minute, 59)
check(ts.second, 0)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 0)
check(ts.nanosecond, 0)
check(ts.dayofweek, 2)
check(ts.quarter, 4)
check(ts.dayofyear, 365)
check(ts.week, 1)
check(ts.daysinmonth, 31)
ts = Timestamp('2014-01-01 00:00:00+01:00')
starts = ['is_month_start', 'is_quarter_start', 'is_year_start']
for start in starts:
assert getattr(ts, start)
ts = Timestamp('2014-12-31 23:59:59+01:00')
ends = ['is_month_end', 'is_year_end', 'is_quarter_end']
for end in ends:
assert getattr(ts, end)
# GH 12806
@pytest.mark.parametrize('data',
[Timestamp('2017-08-28 23:00:00'),
Timestamp('2017-08-28 23:00:00', tz='EST')])
@pytest.mark.parametrize('time_locale', [
None] if tm.get_locales() is None else [None] + tm.get_locales())
def test_names(self, data, time_locale):
# GH 17354
# Test .weekday_name, .day_name(), .month_name
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
assert data.weekday_name == 'Monday'
if time_locale is None:
expected_day = 'Monday'
expected_month = 'August'
else:
with tm.set_locale(time_locale, locale.LC_TIME):
expected_day = calendar.day_name[0].capitalize()
expected_month = calendar.month_name[8].capitalize()
assert data.day_name(time_locale) == expected_day
assert data.month_name(time_locale) == expected_month
# Test NaT
nan_ts = Timestamp(NaT)
assert np.isnan(nan_ts.day_name(time_locale))
assert np.isnan(nan_ts.month_name(time_locale))
@pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern', 'Asia/Tokyo'])
def test_is_leap_year(self, tz):
# GH 13727
dt = Timestamp('2000-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
assert isinstance(dt.is_leap_year, bool)
dt = Timestamp('1999-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
dt = Timestamp('2004-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
dt = Timestamp('2100-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
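        # 2000 (divisible by 400) and 2004 (divisible by 4) are leap years, while 2100 is
        # not: century years are leap years only when divisible by 400.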
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013, 12, 31)
        result = Timestamp(d)
import pandas
import numpy
import sys
import unittest
import os
import copy
import warnings
import tempfile
from isatools import isatab
sys.path.append("..")
import nPYc
from nPYc.enumerations import AssayRole, SampleType
from nPYc.utilities._nmr import qcCheckBaseline
from generateTestDataset import generateTestDataset
class test_nmrdataset_synthetic(unittest.TestCase):
def setUp(self):
self.noSamp = numpy.random.randint(50, high=100, size=None)
self.noFeat = numpy.random.randint(200, high=400, size=None)
self.dataset = generateTestDataset(self.noSamp, self.noFeat, dtype='NMRDataset',
variableType=nPYc.enumerations.VariableType.Spectral,
sop='GenericNMRurine')
def test_getsamplemetadatafromfilename(self):
"""
Test we are parsing NPC MS filenames correctly (PCSOP.081).
"""
# Create an empty object with simple filenames
dataset = nPYc.NMRDataset('', fileType='empty')
dataset.sampleMetadata['Sample File Name'] = ['Test1_serum_Rack1_SLT_090114/101',
'Test_serum_Rack10_SLR_090114/10',
'Test2_serum_Rack100_DLT_090114/102',
'Test2_urine_Rack103_MR_090114/20',
'Test2_serum_Rack010_JTP_090114/80',
'Test1_water_Rack10_TMP_090114/90']
dataset._getSampleMetadataFromFilename(dataset.Attributes['filenameSpec'])
rack = pandas.Series([1, 10, 100, 103, 10, 10],
name='Rack',
dtype=int)
pandas.testing.assert_series_equal(dataset.sampleMetadata['Rack'], rack)
study = pandas.Series(['Test1', 'Test', 'Test2', 'Test2', 'Test2', 'Test1'],
name='Study',
dtype=str)
pandas.testing.assert_series_equal(dataset.sampleMetadata['Study'], study)
def test_nmrdataset_raises(self):
self.assertRaises(NotImplementedError, nPYc.NMRDataset, '', fileType='Unknown import type')
self.assertRaises(TypeError, nPYc.NMRDataset, '', fileType='Bruker', bounds='not a list')
self.assertRaises(TypeError, nPYc.NMRDataset, '', fileType='Bruker', calibrateTo='not a number')
self.assertRaises(TypeError, nPYc.NMRDataset, '', fileType='Bruker', variableSize=0.1)
def test_load_npc_lims_masking_reruns(self):
limspath = os.path.join('..', '..', 'npc-standard-project', 'Derived_Worklists', 'UnitTest1_NMR_urine_PCSOP.011.csv')
dataset = nPYc.NMRDataset('', 'empty')
dataset.sampleMetadata = pandas.DataFrame([], columns=['Sample File Name'])
dataset.sampleMetadata['Sample File Name'] = ['UnitTest1_Urine_Rack1_SLL_270814/10', 'UnitTest1_Urine_Rack1_SLL_270814/12', 'UnitTest1_Urine_Rack1_SLL_270814/20', 'UnitTest1_Urine_Rack1_SLL_270814/30', 'UnitTest1_Urine_Rack1_SLL_270814/40','UnitTest1_Urine_Rack1_SLL_270814/51', 'UnitTest1_Urine_Rack1_SLL_270814/52', 'UnitTest1_Urine_Rack1_SLL_270814/50', 'UnitTest1_Urine_Rack1_SLL_270814/60', 'UnitTest1_Urine_Rack1_SLL_270814/70', 'UnitTest1_Urine_Rack1_SLL_270814/80', 'UnitTest1_Urine_Rack1_SLL_270814/81', 'UnitTest1_Urine_Rack1_SLL_270814/90']
dataset.intensityData = numpy.zeros((13, 2))
dataset.intensityData[:, 0] = numpy.arange(1, 14, 1)
dataset.initialiseMasks()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# warning
dataset.addSampleInfo(descriptionFormat='NPC LIMS', filePath=limspath)
# check
assert issubclass(w[0].category, UserWarning)
assert "previous acquisitions masked, latest is kept" in str(w[0].message)
with self.subTest(msg='Masking of reruns'):
expectedMask = numpy.array([False, True, True, True, True, False, True, False, True, True, False, True, True], dtype=bool)
numpy.testing.assert_array_equal(dataset.sampleMask, expectedMask)
def test_updateMasks_samples(self):
from nPYc.enumerations import VariableType, DatasetLevel, AssayRole, SampleType
dataset = generateTestDataset(18, 5, dtype='NMRDataset',
variableType=nPYc.enumerations.VariableType.Spectral,
sop='GenericNMRurine')
dataset.Attributes.pop('LWFailThreshold', None)
dataset.Attributes.pop('baselineCheckRegion', None)
dataset.Attributes.pop('solventPeakCheckRegion', None)
dataset.sampleMetadata['AssayRole'] = pandas.Series([AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference],
name='AssayRole',
dtype=object)
dataset.sampleMetadata['SampleType'] = pandas.Series([SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.ExternalReference,
SampleType.MethodReference],
name='SampleType',
dtype=object)
with self.subTest(msg='Default Parameters'):
expectedSampleMask = numpy.array([True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True], dtype=bool)
dataset.initialiseMasks()
dataset.updateMasks(filterFeatures=False)
numpy.testing.assert_array_equal(expectedSampleMask, dataset.sampleMask)
with self.subTest(msg='Export SP and ER'):
expectedSampleMask = numpy.array([False, False, False, False, False, True, True, True, True, True, True, False, False, False, False, False, True, False], dtype=bool)
dataset.initialiseMasks()
dataset.updateMasks(filterFeatures=False,
sampleTypes=[SampleType.StudyPool, SampleType.ExternalReference],
assayRoles=[AssayRole.PrecisionReference])
numpy.testing.assert_array_equal(expectedSampleMask, dataset.sampleMask)
with self.subTest(msg='Export Dilution Samples only'):
expectedSampleMask = numpy.array([True, True, True, True, True, False, False, False, False, False, False, False, False, False, False, False, False, False], dtype=bool)
dataset.initialiseMasks()
dataset.updateMasks(filterFeatures=False,
sampleTypes=[SampleType.StudyPool],
assayRoles=[AssayRole.LinearityReference])
numpy.testing.assert_array_equal(expectedSampleMask, dataset.sampleMask)
def test_updateMasks_features(self):
noSamp = 10
noFeat = numpy.random.randint(1000, high=10000, size=None)
dataset = generateTestDataset(noSamp, noFeat, dtype='NMRDataset',
variableType=nPYc.enumerations.VariableType.Spectral,
sop='GenericNMRurine')
dataset.Attributes.pop('LWFailThreshold', None)
dataset.Attributes.pop('baselineCheckRegion', None)
dataset.Attributes.pop('solventPeakCheckRegion', None)
ppm = numpy.linspace(-10, 10, noFeat)
dataset.featureMetadata = pandas.DataFrame(ppm, columns=['ppm'])
with self.subTest(msg='Single range'):
ranges = (-1.1, 1.2)
dataset.initialiseMasks()
dataset.updateMasks(filterFeatures=True,
filterSamples=False,
exclusionRegions=ranges)
expectedFeatureMask = numpy.logical_or(ppm < ranges[0],
ppm > ranges[1])
numpy.testing.assert_array_equal(expectedFeatureMask, dataset.featureMask)
with self.subTest(msg='Reversed range'):
ranges = (7.1, 1.92)
dataset.initialiseMasks()
dataset.updateMasks(filterFeatures=True,
filterSamples=False,
exclusionRegions=ranges)
expectedFeatureMask = numpy.logical_or(ppm < ranges[1],
ppm > ranges[0])
numpy.testing.assert_array_equal(expectedFeatureMask, dataset.featureMask)
with self.subTest(msg='list of ranges'):
ranges = [(-5,-1), (1,5)]
dataset.initialiseMasks()
dataset.updateMasks(filterFeatures=True,
filterSamples=False,
exclusionRegions=ranges)
expectedFeatureMask1 = numpy.logical_or(ppm < ranges[0][0],
ppm > ranges[0][1])
expectedFeatureMask2 = numpy.logical_or(ppm < ranges[1][0],
ppm > ranges[1][1])
expectedFeatureMask = numpy.logical_and(expectedFeatureMask1,
expectedFeatureMask2)
numpy.testing.assert_array_equal(expectedFeatureMask, dataset.featureMask)
def test_updateMasks_raises(self):
with self.subTest(msg='No Ranges'):
self.dataset.Attributes['exclusionRegions'] = None
self.assertRaises(ValueError, self.dataset.updateMasks, filterFeatures=True, filterSamples=False, exclusionRegions=None)
def test_updateMasks_warns(self):
with self.subTest(msg='Range low == high'):
self.dataset.Attributes['exclusionRegions'] = None
self.assertWarnsRegex(UserWarning, 'Low \(1\.10\) and high \(1\.10\) bounds are identical, skipping region', self.dataset.updateMasks, filterFeatures=True, filterSamples=False, exclusionRegions=(1.1,1.1))
def test_nmrQCchecks(self):
self.dataset.Attributes.pop('LWFailThreshold', None)
self.dataset.Attributes.pop('baselineCheckRegion', None)
self.dataset.Attributes.pop('solventPeakCheckRegion', None)
with self.subTest('Calibration'):
bounds = numpy.std(self.dataset.sampleMetadata['Delta PPM']) * 3
self.dataset.sampleMetadata.loc[0::30, 'Delta PPM'] = bounds * 15
self.dataset._nmrQCChecks()
# Check mask
expected = numpy.zeros_like(self.dataset.sampleMask, dtype=bool)
expected[0::30] = True
numpy.testing.assert_array_equal(expected, self.dataset.sampleMetadata['CalibrationFail'].values)
# Check other tests have not happened
# Commented out assuming the test nmr dataset obtained with generateTestDataset always has these columns
#for skipedCheck in ['LineWidthFail', 'BaselineFail', 'WaterPeakFail']:
# self.assertFalse(skipedCheck in self.dataset.sampleMetadata.columns)
with self.subTest('Line Width'):
self.dataset.Attributes['LWFailThreshold'] = 2
self.dataset.sampleMetadata['Line Width (Hz)'] = 1.5
self.dataset.sampleMetadata.loc[0::5, 'Line Width (Hz)'] = 3
self.dataset._nmrQCChecks()
expected = numpy.zeros_like(self.dataset.sampleMask, dtype=bool)
expected[0::5] = True
numpy.testing.assert_array_equal(expected, self.dataset.sampleMetadata['LineWidthFail'].values)
# Check other tests have not happened
# Commented out assuming the test nmr dataset obtained with generateTestDataset always has these columns
#for skipedCheck in ['BaselineFail', 'WaterPeakFail']:
# self.assertFalse(skipedCheck in self.dataset.sampleMetadata.columns)
with self.subTest('Baseline'):
self.dataset.Attributes['baselineCheckRegion'] = [(-2, -0.5), (9.5, 12.5)]
self.dataset.intensityData[0,:] = 100
self.dataset.intensityData[2,:] = -100
self.dataset._nmrQCChecks()
expected = numpy.zeros_like(self.dataset.sampleMask, dtype=bool)
expected[0] = True
expected[2] = True
numpy.testing.assert_array_equal(expected, self.dataset.sampleMetadata['BaselineFail'].values)
# Check other tests have not happened
# Commented out assuming the test nmr dataset obtained with generateTestDataset always has these columns
#self.assertFalse('WaterPeakFail' in self.dataset.sampleMetadata.columns)
with self.subTest('Solvent Peak'):
self.dataset.Attributes['solventPeakCheckRegion'] = [(-2, -0.5), (9.5, 12.5)]
self.dataset._nmrQCChecks()
expected = numpy.zeros_like(self.dataset.sampleMask, dtype=bool)
expected[0] = True
# expected[2] = True
numpy.testing.assert_array_equal(expected, self.dataset.sampleMetadata['SolventPeakFail'].values)
def test_baselineAreaAndNeg(self):
"""
        Validate the baseline/water-peak checks: create random spectra plus rows that should always fail, i.e. negative, extremely high, and a diagonal ramp.
"""
variableSize = 20000
X = numpy.random.rand(86, variableSize)*1000
        X = numpy.r_[X, numpy.full((1, variableSize), -10000)] # add a negative-value row (numpy.r_ is shorthand for vstack here)
        X = numpy.r_[X, numpy.full((1, variableSize), 200000)] # add an extreme high-value row
        a1 = numpy.arange(0, variableSize, 1)[numpy.newaxis] # diagonal ramp, i.e. another known fail
        X = numpy.concatenate((X, a1), axis=0) # concatenate into X
        X = numpy.r_[X, numpy.random.rand(2, variableSize) * 10000]
        # add more potential fails: random, but with more variability than the 86 baseline rows above
        # create the ppm axis
        ppm = numpy.linspace(-1, 10, variableSize)
ppm_high = numpy.where(ppm >= 9.5)[0]
ppm_low = numpy.where(ppm <= -0.5)[0]
high_baseline = qcCheckBaseline(X[:, ppm_high], 0.05)
low_baseline = qcCheckBaseline(X[:, ppm_low], 0.05)
baseline_fail_calculated = high_baseline | low_baseline
baseline_fail_expected = numpy.zeros(91, dtype=bool)
baseline_fail_expected[86:89] = True
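        # Rows 86-88 are the constant -10000 row, the constant 200000 row and the diagonal
        # ramp appended above, so exactly those three spectra are expected to fail.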
numpy.testing.assert_array_equal(baseline_fail_expected, baseline_fail_calculated)
class test_nmrdataset_bruker(unittest.TestCase):
def setUp(self):
"""
setup the pulseprogram and path for purpose of testing NMR bruker data functions
"""
self.pulseProgram = 'noesygppr1d'
self.path = os.path.join('..', '..', 'npc-standard-project', 'unitTest_Data', 'nmr')
def test_addSampleInfo_npclims(self):
with self.subTest(msg='Urine dataset (UnitTest1).'):
dataPath = os.path.join('..', '..', 'npc-standard-project', 'Raw_Data', 'nmr', 'UnitTest1')
limsFilePath = os.path.join('..', '..', 'npc-standard-project', 'Derived_Worklists', 'UnitTest1_NMR_urine_PCSOP.011.csv')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
dataset = nPYc.NMRDataset(dataPath, pulseProgram='noesygppr1d', sop='GenericNMRurine')
dataset.sampleMetadata.sort_values('Sample File Name', inplace=True)
sortIndex = dataset.sampleMetadata.index.values
dataset.intensityData = dataset.intensityData[sortIndex, :]
dataset.sampleMetadata = dataset.sampleMetadata.reset_index(drop=True)
expected = copy.deepcopy(dataset.sampleMetadata)
dataset.addSampleInfo(descriptionFormat='NPC LIMS', filePath=limsFilePath)
testSeries = ['Sample ID', 'Status', 'AssayRole', 'SampleType']
expected['Sample ID'] = ['UT1_S2_u1', 'UT1_S3_u1', 'UT1_S4_u1', 'UT1_S4_u2', 'UT1_S4_u3',
'UT1_S4_u4', 'External Reference Sample', 'Study Pool Sample']
expected['Status'] = ['Sample', 'Sample', 'Sample', 'Sample', 'Sample', 'Sample', 'Long Term Reference', 'Study Reference']
expected['AssayRole'] = [AssayRole.Assay, AssayRole.Assay, AssayRole.Assay, AssayRole.Assay,
AssayRole.Assay, AssayRole.Assay, AssayRole.PrecisionReference, AssayRole.PrecisionReference]
expected['SampleType'] = [SampleType.StudySample, SampleType.StudySample, SampleType.StudySample, SampleType.StudySample,
SampleType.StudySample, SampleType.StudySample, SampleType.ExternalReference, SampleType.StudyPool]
for series in testSeries:
with self.subTest(msg='Testing %s' % series):
pandas.testing.assert_series_equal(dataset.sampleMetadata[series], expected[series])
with self.subTest(msg='Serum dataset (UnitTest3).'):
dataPath = os.path.join('..', '..', 'npc-standard-project', 'Raw_Data', 'nmr', 'UnitTest3')
limsFilePath = os.path.join('..', '..', 'npc-standard-project', 'Derived_Worklists', 'UnitTest3_NMR_serum_PCSOP.012.csv')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
dataset = nPYc.NMRDataset(dataPath, pulseProgram='cpmgpr1d', sop='GenericNMRurine') # Use blood sop to avoid calibration of empty spectra
dataset.sampleMetadata.sort_values('Sample File Name', inplace=True)
sortIndex = dataset.sampleMetadata.index.values
dataset.intensityData = dataset.intensityData[sortIndex, :]
dataset.sampleMetadata = dataset.sampleMetadata.reset_index(drop=True)
expected = copy.deepcopy(dataset.sampleMetadata)
dataset.addSampleInfo(descriptionFormat='NPC LIMS', filePath=limsFilePath)
testSeries = ['Sample ID', 'Status', 'AssayRole', 'SampleType']
expected['Sample ID'] = ['UT3_S7', 'UT3_S8', 'UT3_S6', 'UT3_S5', 'UT3_S4', 'UT3_S3', 'UT3_S2', 'External Reference Sample', 'Study Pool Sample', 'UT3_S1']
expected['Status'] = ['Sample', 'Sample', 'Sample', 'Sample', 'Sample', 'Sample', 'Sample', 'Long Term Reference', 'Study Reference', 'nan']
expected['AssayRole'] = [AssayRole.Assay, AssayRole.Assay, AssayRole.Assay, AssayRole.Assay, AssayRole.Assay, AssayRole.Assay,
AssayRole.Assay, AssayRole.PrecisionReference, AssayRole.PrecisionReference, AssayRole.Assay]
expected['SampleType'] = [SampleType.StudySample, SampleType.StudySample, SampleType.StudySample, SampleType.StudySample, SampleType.StudySample,
SampleType.StudySample, SampleType.StudySample, SampleType.ExternalReference, SampleType.StudyPool, SampleType.StudySample]
for series in testSeries:
with self.subTest(msg='Testing %s' % series):
pandas.testing.assert_series_equal(dataset.sampleMetadata[series], expected[series])
class test_nmrdataset_ISATAB(unittest.TestCase):
def test_exportISATAB(self):
nmrData = nPYc.NMRDataset('', fileType='empty')
raw_data = {
'Acquired Time': ['2016-08-09 01:36:23', '2016-08-09 01:56:23', '2016-08-09 02:16:23', '2016-08-09 02:36:23', '2016-08-09 02:56:23'],
'AssayRole': ['AssayRole.LinearityReference', 'AssayRole.LinearityReference',
'AssayRole.LinearityReference', 'AssayRole.Assay', 'AssayRole.Assay'],
#'SampleType': ['SampleType.StudyPool', 'SampleType.StudyPool', 'SampleType.StudyPool','SampleType.StudySample', 'SampleType.StudySample'],
'Status': ['SampleType.StudyPool', 'SampleType.StudyPool', 'SampleType.StudyPool','SampleType.StudySample', 'SampleType.StudySample'],
'Subject ID': ['', '', '', 'SCANS-120', 'SCANS-130'],
'Sampling ID': ['', '', '', 'T0-7-S', 'T0-9-S'],
'Sample File Name': ['sfn1', 'sfn2', 'sfn3', 'sfn4', 'sfn5'],
'Study': ['TestStudy', 'TestStudy', 'TestStudy', 'TestStudy', 'TestStudy'],
'Gender': ['', '', '', 'Female', 'Male'],
'Age': ['', '', '', '55', '66'],
'Sampling Date': ['', '', '', '27/02/2006', '28/02/2006'],
'Sample batch': ['', '', '', 'SB 1', 'SB 2'],
'Batch': ['1', '2', '3', '4', '5'],
'Run Order': ['0', '1', '2', '3', '4'],
'Instrument': ['QTOF 2', 'QTOF 2', 'QTOF 2', 'QTOF 2', 'QTOF 2'],
'Assay data name': ['', '', '', 'SS_LNEG_ToF02_S1W4', 'SS_LNEG_ToF02_S1W5']
}
nmrData.sampleMetadata = pandas.DataFrame(raw_data,
columns=['Acquired Time', 'AssayRole', 'Status', 'Subject ID',
'Sampling ID', 'Study', 'Gender', 'Age', 'Sampling Date',
'Sample batch', 'Batch',
'Run Order', 'Instrument', 'Assay data name','Sample File Name'])
with tempfile.TemporaryDirectory() as tmpdirname:
details = {
'investigation_identifier' : "i1",
'investigation_title' : "Give it a title",
'investigation_description' : "Add a description",
'investigation_submission_date' : "2016-11-03", #use today if not specified
'investigation_public_release_date' : "2016-11-03",
'first_name' : "Noureddin",
'last_name' : "Sadawi",
'affiliation' : "University",
'study_filename' : "my_nmr_study",
'study_material_type' : "Serum",
'study_identifier' : "s1",
'study_title' : "Give the study a title",
'study_description' : "Add study description",
'study_submission_date' : "2016-11-03",
'study_public_release_date' : "2016-11-03",
'assay_filename' : "my_nmr_assay"
}
nmrData.initialiseMasks()
nmrData.exportDataset(destinationPath=tmpdirname, isaDetailsDict=details, saveFormat='ISATAB')
investigatio_file = os.path.join(tmpdirname,'i_investigation.txt')
numerrors = 0
with open(investigatio_file) as fp:
report = isatab.validate(fp)
numerrors = len(report['errors'])
#self.assertTrue(os.path.exists(a))
self.assertEqual(numerrors, 0, msg="ISATAB Validator found {} errors in the ISA-Tab archive".format(numerrors))
class test_nmrdataset_initialiseFromCSV(unittest.TestCase):
def test_init(self):
noSamp = numpy.random.randint(5, high=10, size=None)
noFeat = numpy.random.randint(500, high=1000, size=None)
dataset = generateTestDataset(noSamp, noFeat, dtype='NMRDataset', sop='GenericNMRurine')
dataset.name = 'Testing'
with tempfile.TemporaryDirectory() as tmpdirname:
dataset.exportDataset(destinationPath=tmpdirname, saveFormat='CSV', withExclusions=False)
pathName = os.path.join(tmpdirname, 'Testing_sampleMetadata.csv')
rebuitData = nPYc.NMRDataset(pathName, fileType='CSV Export')
numpy.testing.assert_array_equal(rebuitData.intensityData, dataset.intensityData)
for column in ['Sample File Name', 'SampleType', 'AssayRole', 'Acquired Time', 'Run Order']:
                pandas.testing.assert_series_equal(rebuitData.sampleMetadata[column], dataset.sampleMetadata[column], check_dtype=False)
#!/usr/bin/env python3
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import util
import pandas as pd
import sklearn as skl
from sklearn.model_selection import StratifiedKFold
import scipy.optimize as optz
from sklearn.metrics import auc, roc_curve
#from sklearn.metrics import average_precision_score
from sklearn.metrics import precision_recall_curve
import sys
import os
from six.moves import range
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
#sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
class FindWeight():
def __init__(self):
self.W=None
@staticmethod
def norm_weight(Weights=None):
if Weights is None:
return (0.5, 0.5)
else:
tot=sum(Weights)
w1=Weights[0]*1.0/tot
w2=Weights[1]*1.0/tot
return (w1, w2)
@staticmethod
def auto_weight(y):
w1=sum(y)
w0=len(y)-w1
return FindWeight.norm_weight([1.0/w1, 1.0/w0])
@staticmethod
def f(W, X, y, penalty, Weights=None):
"""If Weights [Weight(y==1), Weight(y==1)] array is None, samples are equally weight,
otherwise, the LR prob is no longer the real probability of the sample, needs to be corrected
by passing the same Weights array to predict()"""
#l_weighted=False # if we weight, then the logistic formula no longer predicts the probability
W2=np.sum(W[1:]*W[1:])
W=np.reshape(W,(-1,1))
q=np.exp(np.clip(np.dot(X, W), -100, 100))
q=np.clip(q/(1.0+q), 1e-15, 1-1e-15)
# cross-entropy
w1, w2 = FindWeight.norm_weight(Weights)
return w1*np.sum(-np.log(q[y==1]))+w2*np.sum(-np.log(1-q[y==0]))+penalty*W2
@staticmethod
def accuracy(W, X, y):
q=np.exp(np.dot(X, W))
q=q/(1.0+q)
y_pred=np.array(q>=0.5, dtype=int)
n=len(y)
pos=sum(y)
neg=n-pos
R_w=y*0.5/pos+(1-y)*0.5/neg
#print pos, neg, sum(y_pred == y)*1.0/n, R_w
r=1-np.sum((y_pred != y)*R_w)
return r
@staticmethod
def F1(W, X, y):
q=np.exp(np.dot(X, W))
q=q/(1.0+q)
y_pred=np.array(q>=0.5, dtype=int)
n=len(y)
pos=sum(y)
neg=n-pos
tp=sum(y[y_pred>0.5])
precision=tp*1.0/(sum(y_pred)+1e-5)
recall=tp*1.0/(pos+1e-5)
f1=2*precision*recall/(precision+recall+1e-5)
return f1
@staticmethod
def metrics(W, X, y):
q=np.exp(np.dot(X, W))
q=q/(1.0+q)
y_pred=np.array(q>=0.5, dtype=int)
n=len(y)
P=sum(y)
N=n-P
TP=sum(y[y_pred>=0.5])
FP=sum(y_pred>=0.5)-TP
TN=sum(1-y[y_pred<0.5])
FN=sum(y_pred<0.5)-TN
precision=TP*1.0/(TP+FP+1e-5)
recall=TP*1.0/(P+1e-5)
accuracy=(TP+TN)/(n+1e-5)
avg_accuracy=(TP/(P+1e-5)+TN/(N+1e-5))/2
F1=2*precision*recall/(precision+recall+1e-5)
MCC=(TP*TN-FP*FN)/np.sqrt((TP+FP)*(TP+FN)*(TN+FP)*(TN+FN)+1e-5)
return {'accuracy':accuracy, 'avg_accuracy':avg_accuracy, 'F1':F1, 'MCC':MCC}
def fit(self, X, y, penalty=0.0, signs=1, Weights=None):
n,m=X.shape
kwargs={'maxiter':1000, 'ftol':1e-6}
bounds=[(None,None)]
if type(signs) is int:
signs=[signs]*(m-1)
for x in signs:
if x>0.01:
bounds.append((0, None)) # must be >=0
elif x<-0.01:
bounds.append((None, 0)) # must be <=0
else:
bounds.append((None, None)) # no constrain
#W0=np.zeros(m)
# set initial vector to point from the center of 0 to 1
m0=X[y==0].mean(axis=0)
m1=X[y==1].mean(axis=0)
W0=m1-m0
W0/=np.sqrt(sum(W0*W0))
for j,x in enumerate(signs):
if x>0:
W0[j+1]=abs(W0[j+1])
else:
W0[j+1]=-abs(W0[j+1])
res=optz.minimize(FindWeight.f, W0, (X, y, penalty, Weights), method='L-BFGS-B', bounds=bounds, options=kwargs)
best_score=res.fun
self.W=res.x
def predict(self, X):
q=np.exp(np.dot(X, self.W))
q=q/(1.0+q)
return q
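    # Minimal usage sketch (illustrative only): fit() expects X to already contain a
    # leading bias column of ones, as evidence_weight() below adds before calling it.
    #     fw = FindWeight()
    #     Xb = np.hstack([np.ones((X.shape[0], 1)), X])
    #     fw.fit(Xb, y, penalty=0.1)
    #     probs = fw.predict(Xb)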
def auc(self, y, y_pred):
fpr, tpr, thresholds=roc_curve(y, y_pred, pos_label=1)
return auc(fpr, tpr)
@staticmethod
def evidence_weight(X, y, signs=1, folds=5, lb_penalty=-5, ub_penalty=5, num_penalty=11, Weights=None):
"""Give X (n*m) and y (n*1), returns weights (n+1) elements, and estimated weighted accuracy
Weights is for sample weight, for unbalanced case"""
fw=FindWeight()
R_penalty=np.logspace(lb_penalty, ub_penalty, num=num_penalty)
out=[]
n,m=X.shape
#print(n,m)
X=np.hstack([np.ones([n,1]), X]) # add 1 as a dummie column for bias
kf=StratifiedKFold(folds, shuffle=True)
for k_train, k_test in kf.split(X, y):
X_train=X[k_train]
y_train=y[k_train]
X_test=X[k_test]
y_test=y[k_test]
for penalty in R_penalty:
fw.fit(X_train, y_train, penalty=penalty, signs=signs, Weights=Weights)
#y_pred=fw.predict(X_test)
#score=fw.auc(y[k_test], y_pred)
score_cross_entropy=FindWeight.f(fw.W, X_test, y_test, 0, Weights)
#score_error_rate=1-FindWeight.accuracy(fw.W, X_test, y_test)
#f1=FindWeight.F1(fw.W, X_test, y_test)
c=FindWeight.metrics(fw.W, X_test, y_test)
#print(c)
out.append({'Penalty':penalty, 'ErrorRate':(1-c['avg_accuracy']), 'CrossEntropy':score_cross_entropy, 'F1':c['F1'], 'MCC':c['MCC']})
        t_score=pd.DataFrame(out)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/14 18:19
Desc: Sina Finance - stock options
https://stock.finance.sina.com.cn/option/quotes.html
Options - CFFEX - CSI 300 index
https://stock.finance.sina.com.cn/futures/view/optionsCffexDP.php
Options - SSE - 50ETF
Options - SSE - 300ETF
https://stock.finance.sina.com.cn/option/quotes.html
"""
import json
import datetime
from typing import Dict, List, Tuple
import requests
from bs4 import BeautifulSoup
import pandas as pd
# Options - CFFEX - CSI 300 index
def option_cffex_hs300_list_sina() -> Dict[str, List[str]]:
"""
    Sina Finance - CFFEX - CSI 300 index - all contracts; the first contract returned is the dominant (most liquid) one
    Sina Finance currently only provides CFFEX data for the CSI 300 index
    :return: all CFFEX CSI 300 index option contracts
:rtype: dict
"""
url = "https://stock.finance.sina.com.cn/futures/view/optionsCffexDP.php"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
symbol = soup.find(attrs={"id": "option_symbol"}).find("li").text
temp_attr = soup.find(attrs={"id": "option_suffix"}).find_all("li")
contract = [item.text for item in temp_attr]
return {symbol: contract}
def option_cffex_hs300_spot_sina(symbol: str = "io2104") -> pd.DataFrame:
"""
    CFFEX - CSI 300 index - specified contract - real-time quotes
    https://stock.finance.sina.com.cn/futures/view/optionsCffexDP.php
    :param symbol: contract code; see the option_cffex_hs300_list_sina function
    :type symbol: str
    :return: real-time call and put quotes for the specified CFFEX CSI 300 index contract
:rtype: pd.DataFrame
"""
url = "https://stock.finance.sina.com.cn/futures/api/openapi.php/OptionService.getOptionData"
params = {
"type": "futures",
"product": "io",
"exchange": "cffex",
"pinzhong": symbol,
}
r = requests.get(url, params=params)
data_text = r.text
data_json = json.loads(data_text[data_text.find("{") : data_text.rfind("}") + 1])
option_call_df = pd.DataFrame(
data_json["result"]["data"]["up"],
columns=[
"看涨合约-买量",
"看涨合约-买价",
"看涨合约-最新价",
"看涨合约-卖价",
"看涨合约-卖量",
"看涨合约-持仓量",
"看涨合约-涨跌",
"行权价",
"看涨合约-标识",
],
)
option_put_df = pd.DataFrame(
data_json["result"]["data"]["down"],
columns=[
"看跌合约-买量",
"看跌合约-买价",
"看跌合约-最新价",
"看跌合约-卖价",
"看跌合约-卖量",
"看跌合约-持仓量",
"看跌合约-涨跌",
"看跌合约-标识",
],
)
data_df = pd.concat([option_call_df, option_put_df], axis=1)
data_df['看涨合约-买量'] = pd.to_numeric(data_df['看涨合约-买量'])
data_df['看涨合约-买价'] = pd.to_numeric(data_df['看涨合约-买价'])
data_df['看涨合约-最新价'] = pd.to_numeric(data_df['看涨合约-最新价'])
data_df['看涨合约-卖价'] = pd.to_numeric(data_df['看涨合约-卖价'])
data_df['看涨合约-卖量'] = pd.to_numeric(data_df['看涨合约-卖量'])
data_df['看涨合约-持仓量'] = pd.to_numeric(data_df['看涨合约-持仓量'])
data_df['看涨合约-涨跌'] = pd.to_numeric(data_df['看涨合约-涨跌'])
data_df['行权价'] = pd.to_numeric(data_df['行权价'])
data_df['看跌合约-买量'] = pd.to_numeric(data_df['看跌合约-买量'])
data_df['看跌合约-买价'] = pd.to_numeric(data_df['看跌合约-买价'])
data_df['看跌合约-最新价'] = pd.to_numeric(data_df['看跌合约-最新价'])
data_df['看跌合约-卖价'] = pd.to_numeric(data_df['看跌合约-卖价'])
data_df['看跌合约-卖量'] = pd.to_numeric(data_df['看跌合约-卖量'])
data_df['看跌合约-持仓量'] = pd.to_numeric(data_df['看跌合约-持仓量'])
data_df['看跌合约-涨跌'] = pd.to_numeric(data_df['看跌合约-涨跌'])
return data_df
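# Usage sketch (illustrative only; both calls hit the Sina endpoints, so network access is
# required, and the helper below is not part of the original module):
def _demo_cffex_hs300():
    contracts = option_cffex_hs300_list_sina()
    spot_df = option_cffex_hs300_spot_sina(symbol="io2104")
    return contracts, spot_df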
def option_cffex_hs300_daily_sina(symbol: str = "io2202P4350") -> pd.DataFrame:
"""
    Sina Finance - CFFEX - CSI 300 index - specified contract - daily quotes
    :param symbol: full contract code (including the call/put marker); can be taken from the call identifier column returned by ak.option_cffex_hs300_spot_sina
    :type symbol: str
    :return: daily-frequency data
:rtype: pd.DataFrame
"""
year = datetime.datetime.now().year
month = datetime.datetime.now().month
day = datetime.datetime.now().day
url = f"https://stock.finance.sina.com.cn/futures/api/jsonp.php/var%20_{symbol}{year}_{month}_{day}=/FutureOptionAllService.getOptionDayline"
params = {"symbol": symbol}
r = requests.get(url, params=params)
data_text = r.text
data_df = pd.DataFrame(
eval(data_text[data_text.find("[") : data_text.rfind("]") + 1])
)
data_df.columns = ["open", "high", "low", "close", "volume", "date"]
data_df = data_df[[
"date",
"open",
"high",
"low",
"close",
"volume",
]]
data_df['date'] = pd.to_datetime(data_df['date']).dt.date
data_df['open'] = pd.to_numeric(data_df['open'])
data_df['high'] = pd.to_numeric(data_df['high'])
data_df['low'] = pd.to_numeric(data_df['low'])
data_df['close'] = pd.to_numeric(data_df['close'])
data_df['volume'] = pd.to_numeric(data_df['volume'])
return data_df
# Options - SSE - 50ETF
def option_sse_list_sina(symbol: str = "50ETF", exchange: str = "null") -> List[str]:
"""
    Sina Finance - options - SSE - 50ETF - list of contract expiry months
    https://stock.finance.sina.com.cn/option/quotes.html
    :param symbol: 50ETF or 300ETF
    :type symbol: str
    :param exchange: null
    :type exchange: str
    :return: contract expiry months
:rtype: list
"""
url = "http://stock.finance.sina.com.cn/futures/api/openapi.php/StockOptionService.getStockName"
params = {"exchange": f"{exchange}", "cate": f"{symbol}"}
r = requests.get(url, params=params)
data_json = r.json()
date_list = data_json["result"]["data"]["contractMonth"]
return ["".join(i.split("-")) for i in date_list][1:]
def option_sse_expire_day_sina(
trade_date: str = "202102", symbol: str = "50ETF", exchange: str = "null"
) -> Tuple[str, int]:
"""
    Expiry date and remaining days to expiry for the given expiry month and product
    :param trade_date: expiry month, e.g. 202002, 202003, 202006, 202009
    :type trade_date: str
    :param symbol: 50ETF or 300ETF
    :type symbol: str
    :param exchange: null
    :type exchange: str
    :return: (expiry date, remaining days)
:rtype: tuple
"""
url = "http://stock.finance.sina.com.cn/futures/api/openapi.php/StockOptionService.getRemainderDay"
params = {
"exchange": f"{exchange}",
"cate": f"{symbol}",
"date": f"{trade_date[:4]}-{trade_date[4:]}",
}
r = requests.get(url, params=params)
data_json = r.json()
data = data_json["result"]["data"]
if int(data["remainderDays"]) < 0:
url = "http://stock.finance.sina.com.cn/futures/api/openapi.php/StockOptionService.getRemainderDay"
params = {
"exchange": f"{exchange}",
"cate": f"{'XD' + symbol}",
"date": f"{trade_date[:4]}-{trade_date[4:]}",
}
r = requests.get(url, params=params)
data_json = r.json()
data = data_json["result"]["data"]
return data["expireDay"], int(data["remainderDays"])
def option_sse_codes_sina(symbol: str = "看涨期权", trade_date: str = "202202", underlying: str = "510050") -> pd.DataFrame:
"""
    Shanghai Stock Exchange - codes of all call and put contracts
    :param symbol: choice of {"看涨期权", "看跌期权"} (call options or put options)
    :type symbol: str
    :param trade_date: option expiry month, e.g. "202002"
    :type trade_date: str
    :param underlying: underlying product code, ChinaAMC SSE 50ETF: 510050 or Huatai-PineBridge CSI 300ETF: 510300
    :type underlying: str
    :return: codes of the call or put contracts
    :rtype: pandas.DataFrame
"""
if symbol == "看涨期权":
url = "".join(
["http://hq.sinajs.cn/list=OP_UP_", underlying, str(trade_date)[-4:]]
)
else:
url = "".join(
["http://hq.sinajs.cn/list=OP_DOWN_", underlying, str(trade_date)[-4:]]
)
headers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'Host': 'hq.sinajs.cn',
'Pragma': 'no-cache',
'Referer': 'https://stock.finance.sina.com.cn/',
'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="97", "Chromium";v="97"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'Sec-Fetch-Dest': 'script',
'Sec-Fetch-Mode': 'no-cors',
'Sec-Fetch-Site': 'cross-site',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
}
r = requests.get(url, headers=headers)
data_text = r.text
data_temp = data_text.replace('"', ",").split(",")
temp_list = [i[7:] for i in data_temp if i.startswith("CON_OP_")]
temp_df = pd.DataFrame(temp_list)
temp_df.reset_index(inplace=True)
temp_df['index'] = temp_df.index + 1
temp_df.columns = [
'序号',
'期权代码',
]
return temp_df
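# Usage sketch (illustrative only; requires network access, the helper name is hypothetical):
def _demo_sse_quote():
    codes_df = option_sse_codes_sina(symbol="看涨期权", trade_date="202202", underlying="510050")
    first_code = codes_df["期权代码"].iloc[0]
    return option_sse_spot_price_sina(symbol=first_code)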
def option_sse_spot_price_sina(symbol: str = "10003720") -> pd.DataFrame:
"""
    Sina Finance - options - real-time option quotes
    :param symbol: option code
    :type symbol: str
    :return: option price and volume data
:rtype: pandas.DataFrame
"""
url = f"http://hq.sinajs.cn/list=CON_OP_{symbol}"
headers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'Host': 'hq.sinajs.cn',
'Pragma': 'no-cache',
'Referer': 'https://stock.finance.sina.com.cn/',
'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="97", "Chromium";v="97"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'Sec-Fetch-Dest': 'script',
'Sec-Fetch-Mode': 'no-cors',
'Sec-Fetch-Site': 'cross-site',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
}
r = requests.get(url, headers=headers)
data_text = r.text
data_list = data_text[data_text.find('"') + 1 : data_text.rfind('"')].split(",")
field_list = [
"买量",
"买价",
"最新价",
"卖价",
"卖量",
"持仓量",
"涨幅",
"行权价",
"昨收价",
"开盘价",
"涨停价",
"跌停价",
"申卖价五",
"申卖量五",
"申卖价四",
"申卖量四",
"申卖价三",
"申卖量三",
"申卖价二",
"申卖量二",
"申卖价一",
"申卖量一",
"申买价一",
"申买量一 ",
"申买价二",
"申买量二",
"申买价三",
"申买量三",
"申买价四",
"申买量四",
"申买价五",
"申买量五",
"行情时间",
"主力合约标识",
"状态码",
"标的证券类型",
"标的股票",
"期权合约简称",
"振幅",
"最高价",
"最低价",
"成交量",
"成交额",
]
data_df = pd.DataFrame(list(zip(field_list, data_list)), columns=["字段", "值"])
return data_df
def option_sse_underlying_spot_price_sina(symbol: str = "sh510300") -> pd.DataFrame:
"""
    Real-time quotes for the option's underlying instrument
    :param symbol: sh510050 or sh510300
    :type symbol: str
    :return: information about the option's underlying instrument
:rtype: pandas.DataFrame
"""
url = f"http://hq.sinajs.cn/list={symbol}"
headers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Cache-Control': 'no-cache',
'Host': 'hq.sinajs.cn',
'Pragma': 'no-cache',
'Proxy-Connection': 'keep-alive',
'Referer': 'http://vip.stock.finance.sina.com.cn/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
}
r = requests.get(url, headers=headers)
data_text = r.text
data_list = data_text[data_text.find('"') + 1 : data_text.rfind('"')].split(",")
field_list = [
"证券简称",
"今日开盘价",
"昨日收盘价",
"最近成交价",
"最高成交价",
"最低成交价",
"买入价",
"卖出价",
"成交数量",
"成交金额",
"买数量一",
"买价位一",
"买数量二",
"买价位二",
"买数量三",
"买价位三",
"买数量四",
"买价位四",
"买数量五",
"买价位五",
"卖数量一",
"卖价位一",
"卖数量二",
"卖价位二",
"卖数量三",
"卖价位三",
"卖数量四",
"卖价位四",
"卖数量五",
"卖价位五",
"行情日期",
"行情时间",
"停牌状态",
]
data_df = pd.DataFrame(list(zip(field_list, data_list)), columns=["字段", "值"])
return data_df
def option_sse_greeks_sina(symbol: str = "10003045") -> pd.DataFrame:
"""
    Basic option information table (including greeks)
    :param symbol: contract code
    :type symbol: str
    :return: basic option information table
:rtype: pandas.DataFrame
"""
url = f"http://hq.sinajs.cn/list=CON_SO_{symbol}"
headers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Cache-Control': 'no-cache',
'Host': 'hq.sinajs.cn',
'Pragma': 'no-cache',
'Proxy-Connection': 'keep-alive',
'Referer': 'http://vip.stock.finance.sina.com.cn/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
}
r = requests.get(url, headers=headers)
data_text = r.text
data_list = data_text[data_text.find('"') + 1: data_text.rfind('"')].split(",")
field_list = [
"期权合约简称",
"成交量",
"Delta",
"Gamma",
"Theta",
"Vega",
"隐含波动率",
"最高价",
"最低价",
"交易代码",
"行权价",
"最新价",
"理论价值",
]
data_df = pd.DataFrame(
list(zip(field_list, [data_list[0]] + data_list[4:])), columns=["字段", "值"]
)
return data_df
def option_sse_minute_sina(symbol: str = "10003720") -> pd.DataFrame:
"""
    Minute-level data for the specified option on the current trading day; only the current trading day is available, historical minute data cannot be fetched
    https://stock.finance.sina.com.cn/option/quotes.html
    :param symbol: option code
    :type symbol: str
    :return: minute-level data of the specified option for the current trading day
:rtype: pandas.DataFrame
"""
url = "https://stock.finance.sina.com.cn/futures/api/openapi.php/StockOptionDaylineService.getOptionMinline"
params = {"symbol": f"CON_OP_{symbol}"}
headers = {
'accept': '*/*',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
'cache-control': 'no-cache',
'pragma': 'no-cache',
'referer': 'https://stock.finance.sina.com.cn/option/quotes.html',
'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="97", "Chromium";v="97"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest': 'script',
'sec-fetch-mode': 'no-cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36',
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
temp_df = data_json["result"]["data"]
data_df = pd.DataFrame(temp_df)
data_df.columns = ["时间", "价格", "成交", "持仓", "均价", "日期"]
data_df = data_df[[
"日期",
"时间",
"价格",
"成交",
"持仓",
"均价"
]]
data_df['日期'] = pd.to_datetime(data_df['日期']).dt.date
data_df['日期'] = data_df['日期'].ffill()
data_df['价格'] = pd.to_numeric(data_df['价格'])
data_df['成交'] = pd.to_numeric(data_df['成交'])
data_df['持仓'] = pd.to_numeric(data_df['持仓'])
data_df['均价'] = pd.to_numeric(data_df['均价'])
return data_df
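# --- Hypothetical usage sketch (not part of the original module) ---------------
# Resamples the intraday quotes returned by option_sse_minute_sina to 5-minute mean
# prices; the helper name and the "时间戳" column are illustrative additions.
def _example_minute_resample(symbol: str = "10003720") -> pd.DataFrame:
    minute_df = option_sse_minute_sina(symbol=symbol)
    minute_df["时间戳"] = pd.to_datetime(minute_df["日期"].astype(str) + " " + minute_df["时间"])
    return minute_df.set_index("时间戳")["价格"].resample("5min").mean().to_frame()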
def option_sse_daily_sina(symbol: str = "10003889") -> pd.DataFrame:
"""
Daily data for the specified option
:param symbol: option code
:type symbol: str
:return: all daily historical data for the specified option
:rtype: pandas.DataFrame
"""
url = "http://stock.finance.sina.com.cn/futures/api/jsonp_v2.php//StockOptionDaylineService.getSymbolInfo"
params = {"symbol": f"CON_OP_{symbol}"}
headers = {
'accept': '*/*',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
'cache-control': 'no-cache',
'pragma': 'no-cache',
'referer': 'https://stock.finance.sina.com.cn/option/quotes.html',
'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="97", "Chromium";v="97"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest': 'script',
'sec-fetch-mode': 'no-cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36',
}
r = requests.get(url, params=params, headers=headers)
data_text = r.text
data_json = json.loads(data_text[data_text.find("(") + 1 : data_text.rfind(")")])
temp_df = pd.DataFrame(data_json)
temp_df.columns = ["日期", "开盘", "最高", "最低", "收盘", "成交量"]
temp_df['日期'] = pd.to_datetime(temp_df['日期']).dt.date
temp_df['开盘'] = pd.to_numeric(temp_df['开盘'])
temp_df['最高'] = pd.to_numeric(temp_df['最高'])
temp_df['最低'] = pd.to_numeric(temp_df['最低'])
temp_df['收盘'] = pd.to_numeric(temp_df['收盘'])
temp_df['成交量'] = pd.to_numeric(temp_df['成交量'])
return temp_df
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import pytest
import numpy as np
import pandas
from pandas.testing import assert_index_equal
import matplotlib
import modin.pandas as pd
import sys
from modin.pandas.test.utils import (
NROWS,
RAND_LOW,
RAND_HIGH,
df_equals,
arg_keys,
name_contains,
test_data,
test_data_values,
test_data_keys,
axis_keys,
axis_values,
int_arg_keys,
int_arg_values,
create_test_dfs,
eval_general,
generate_multiindex,
extra_test_parameters,
)
from modin.config import NPartitions
NPartitions.put(4)
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
def eval_setitem(md_df, pd_df, value, col=None, loc=None):
if loc is not None:
col = pd_df.columns[loc]
value_getter = value if callable(value) else (lambda *args, **kwargs: value)
eval_general(
md_df, pd_df, lambda df: df.__setitem__(col, value_getter(df)), __inplace__=True
)
@pytest.mark.parametrize(
"dates",
[
["2018-02-27 09:03:30", "2018-02-27 09:04:30"],
["2018-02-27 09:03:00", "2018-02-27 09:05:00"],
],
)
@pytest.mark.parametrize("subset", ["a", "b", ["a", "b"], None])
def test_asof_with_nan(dates, subset):
data = {"a": [10, 20, 30, 40, 50], "b": [None, None, None, None, 500]}
index = pd.DatetimeIndex(
[
"2018-02-27 09:01:00",
"2018-02-27 09:02:00",
"2018-02-27 09:03:00",
"2018-02-27 09:04:00",
"2018-02-27 09:05:00",
]
)
modin_where = pd.DatetimeIndex(dates)
pandas_where = pandas.DatetimeIndex(dates)
compare_asof(data, index, modin_where, pandas_where, subset)
@pytest.mark.parametrize(
"dates",
[
["2018-02-27 09:03:30", "2018-02-27 09:04:30"],
["2018-02-27 09:03:00", "2018-02-27 09:05:00"],
],
)
@pytest.mark.parametrize("subset", ["a", "b", ["a", "b"], None])
def test_asof_without_nan(dates, subset):
data = {"a": [10, 20, 30, 40, 50], "b": [70, 600, 30, -200, 500]}
index = pd.DatetimeIndex(
[
"2018-02-27 09:01:00",
"2018-02-27 09:02:00",
"2018-02-27 09:03:00",
"2018-02-27 09:04:00",
"2018-02-27 09:05:00",
]
)
modin_where = pd.DatetimeIndex(dates)
pandas_where = pandas.DatetimeIndex(dates)
compare_asof(data, index, modin_where, pandas_where, subset)
@pytest.mark.parametrize(
"lookup",
[
[60, 70, 90],
[60.5, 70.5, 100],
],
)
@pytest.mark.parametrize("subset", ["col2", "col1", ["col1", "col2"], None])
def test_asof_large(lookup, subset):
data = test_data["float_nan_data"]
index = list(range(NROWS))
modin_where = pd.Index(lookup)
pandas_where = pandas.Index(lookup)
compare_asof(data, index, modin_where, pandas_where, subset)
def compare_asof(
data, index, modin_where: pd.Index, pandas_where: pandas.Index, subset
):
modin_df = pd.DataFrame(data, index=index)
pandas_df = pandas.DataFrame(data, index=index)
df_equals(
modin_df.asof(modin_where, subset=subset),
pandas_df.asof(pandas_where, subset=subset),
)
df_equals(
modin_df.asof(modin_where.values, subset=subset),
pandas_df.asof(pandas_where.values, subset=subset),
)
df_equals(
modin_df.asof(list(modin_where.values), subset=subset),
pandas_df.asof(list(pandas_where.values), subset=subset),
)
df_equals(
modin_df.asof(modin_where.values[0], subset=subset),
pandas_df.asof(pandas_where.values[0], subset=subset),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_first_valid_index(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.first_valid_index() == (pandas_df.first_valid_index())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("n", int_arg_values, ids=arg_keys("n", int_arg_keys))
def test_head(data, n):
# Test normal dataframe head
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.head(n), pandas_df.head(n))
df_equals(modin_df.head(len(modin_df) + 1), pandas_df.head(len(pandas_df) + 1))
# Test head when we call it from a QueryCompilerView
modin_result = modin_df.loc[:, ["col1", "col3", "col3"]].head(n)
pandas_result = pandas_df.loc[:, ["col1", "col3", "col3"]].head(n)
df_equals(modin_result, pandas_result)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iat(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
modin_df.iat()
@pytest.mark.gpu
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iloc(request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if not name_contains(request.node.name, ["empty_data"]):
# Scalar
np.testing.assert_equal(modin_df.iloc[0, 1], pandas_df.iloc[0, 1])
# Series
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.iloc[1:, 0], pandas_df.iloc[1:, 0])
df_equals(modin_df.iloc[1:2, 0], pandas_df.iloc[1:2, 0])
# DataFrame
df_equals(modin_df.iloc[[1, 2]], pandas_df.iloc[[1, 2]])
# See issue #80
# df_equals(modin_df.iloc[[1, 2], [1, 0]], pandas_df.iloc[[1, 2], [1, 0]])
df_equals(modin_df.iloc[1:2, 0:2], pandas_df.iloc[1:2, 0:2])
# Issue #43
modin_df.iloc[0:3, :]
# Write Item
modin_df.iloc[[1, 2]] = 42
pandas_df.iloc[[1, 2]] = 42
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.iloc[0] = modin_df.iloc[1]
pandas_df.iloc[0] = pandas_df.iloc[1]
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.iloc[:, 0] = modin_df.iloc[:, 1]
pandas_df.iloc[:, 0] = pandas_df.iloc[:, 1]
df_equals(modin_df, pandas_df)
# From issue #1775
df_equals(
modin_df.iloc[lambda df: df.index.get_indexer_for(df.index[:5])],
pandas_df.iloc[lambda df: df.index.get_indexer_for(df.index[:5])],
)
else:
with pytest.raises(IndexError):
modin_df.iloc[0, 1]
@pytest.mark.gpu
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_index(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.index, pandas_df.index)
modin_df_cp = modin_df.copy()
pandas_df_cp = pandas_df.copy()
modin_df_cp.index = [str(i) for i in modin_df_cp.index]
pandas_df_cp.index = [str(i) for i in pandas_df_cp.index]
df_equals(modin_df_cp.index, pandas_df_cp.index)
@pytest.mark.gpu
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_indexing_duplicate_axis(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.index = pandas_df.index = [i // 3 for i in range(len(modin_df))]
assert any(modin_df.index.duplicated())
assert any(pandas_df.index.duplicated())
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.loc[0], pandas_df.loc[0])
df_equals(modin_df.iloc[0, 0:4], pandas_df.iloc[0, 0:4])
df_equals(
modin_df.loc[0, modin_df.columns[0:4]],
pandas_df.loc[0, pandas_df.columns[0:4]],
)
@pytest.mark.gpu
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_keys(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.keys(), pandas_df.keys())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_loc(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
key1 = modin_df.columns[0]
key2 = modin_df.columns[1]
# Scalar
df_equals(modin_df.loc[0, key1], pandas_df.loc[0, key1])
# Series
df_equals(modin_df.loc[0], pandas_df.loc[0])
df_equals(modin_df.loc[1:, key1], pandas_df.loc[1:, key1])
df_equals(modin_df.loc[1:2, key1], pandas_df.loc[1:2, key1])
# DataFrame
df_equals(modin_df.loc[[1, 2]], pandas_df.loc[[1, 2]])
# List-like of booleans
indices = [i % 3 == 0 for i in range(len(modin_df.index))]
columns = [i % 5 == 0 for i in range(len(modin_df.columns))]
modin_result = modin_df.loc[indices, columns]
pandas_result = pandas_df.loc[indices, columns]
df_equals(modin_result, pandas_result)
modin_result = modin_df.loc[:, columns]
pandas_result = pandas_df.loc[:, columns]
df_equals(modin_result, pandas_result)
modin_result = modin_df.loc[indices]
pandas_result = pandas_df.loc[indices]
df_equals(modin_result, pandas_result)
# See issue #80
# df_equals(modin_df.loc[[1, 2], ['col1']], pandas_df.loc[[1, 2], ['col1']])
df_equals(modin_df.loc[1:2, key1:key2], pandas_df.loc[1:2, key1:key2])
# From issue #421
df_equals(modin_df.loc[:, [key2, key1]], pandas_df.loc[:, [key2, key1]])
df_equals(modin_df.loc[[2, 1], :], pandas_df.loc[[2, 1], :])
# From issue #1023
key1 = modin_df.columns[0]
key2 = modin_df.columns[-2]
df_equals(modin_df.loc[:, key1:key2], pandas_df.loc[:, key1:key2])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.loc[[1, 2]] = 42
pandas_df_copy.loc[[1, 2]] = 42
df_equals(modin_df_copy, pandas_df_copy)
# From issue #1775
df_equals(
modin_df.loc[lambda df: df.iloc[:, 0].isin(list(range(1000)))],
pandas_df.loc[lambda df: df.iloc[:, 0].isin(list(range(1000)))],
)
# From issue #1374
with pytest.raises(KeyError):
modin_df.loc["NO_EXIST"]
def test_loc_multi_index():
modin_df = pd.read_csv(
"modin/pandas/test/data/blah.csv", header=[0, 1, 2, 3], index_col=0
)
pandas_df = pandas.read_csv(
"modin/pandas/test/data/blah.csv", header=[0, 1, 2, 3], index_col=0
)
df_equals(modin_df.loc[1], pandas_df.loc[1])
df_equals(modin_df.loc[1, "Presidents"], pandas_df.loc[1, "Presidents"])
df_equals(
modin_df.loc[1, ("Presidents", "Pure mentions")],
pandas_df.loc[1, ("Presidents", "Pure mentions")],
)
assert (
modin_df.loc[1, ("Presidents", "Pure mentions", "IND", "all")]
== pandas_df.loc[1, ("Presidents", "Pure mentions", "IND", "all")]
)
df_equals(modin_df.loc[(1, 2), "Presidents"], pandas_df.loc[(1, 2), "Presidents"])
tuples = [
("bar", "one"),
("bar", "two"),
("bar", "three"),
("bar", "four"),
("baz", "one"),
("baz", "two"),
("baz", "three"),
("baz", "four"),
("foo", "one"),
("foo", "two"),
("foo", "three"),
("foo", "four"),
("qux", "one"),
("qux", "two"),
("qux", "three"),
("qux", "four"),
]
modin_index = pd.MultiIndex.from_tuples(tuples, names=["first", "second"])
pandas_index = pandas.MultiIndex.from_tuples(tuples, names=["first", "second"])
frame_data = np.random.randint(0, 100, size=(16, 100))
modin_df = pd.DataFrame(
frame_data,
index=modin_index,
columns=["col{}".format(i) for i in range(100)],
)
pandas_df = pandas.DataFrame(
frame_data,
index=pandas_index,
columns=["col{}".format(i) for i in range(100)],
)
df_equals(modin_df.loc["bar", "col1"], pandas_df.loc["bar", "col1"])
assert modin_df.loc[("bar", "one"), "col1"] == pandas_df.loc[("bar", "one"), "col1"]
df_equals(
modin_df.loc["bar", ("col1", "col2")],
pandas_df.loc["bar", ("col1", "col2")],
)
# From issue #1456
transposed_modin = modin_df.T
transposed_pandas = pandas_df.T
df_equals(
transposed_modin.loc[transposed_modin.index[:-2], :],
transposed_pandas.loc[transposed_pandas.index[:-2], :],
)
# From issue #1610
df_equals(modin_df.loc[modin_df.index], pandas_df.loc[pandas_df.index])
df_equals(modin_df.loc[modin_df.index[:7]], pandas_df.loc[pandas_df.index[:7]])
@pytest.mark.parametrize("index", [["row1", "row2", "row3"]])
@pytest.mark.parametrize("columns", [["col1", "col2"]])
def test_loc_assignment(index, columns):
md_df, pd_df = create_test_dfs(index=index, columns=columns)
for i, ind in enumerate(index):
for j, col in enumerate(columns):
value_to_assign = int(str(i) + str(j))
md_df.loc[ind][col] = value_to_assign
pd_df.loc[ind][col] = value_to_assign
df_equals(md_df, pd_df)
@pytest.fixture
def loc_iter_dfs():
columns = ["col1", "col2", "col3"]
index = ["row1", "row2", "row3"]
return create_test_dfs(
{col: ([idx] * len(index)) for idx, col in enumerate(columns)},
columns=columns,
index=index,
)
@pytest.mark.parametrize("reverse_order", [False, True])
@pytest.mark.parametrize("axis", [0, 1])
def test_loc_iter_assignment(loc_iter_dfs, reverse_order, axis):
if reverse_order and axis:
pytest.xfail(
"Due to internal sorting of lookup values assignment order is lost, see GH-#2552"
)
md_df, pd_df = loc_iter_dfs
select = [slice(None), slice(None)]
select[axis] = sorted(pd_df.axes[axis][:-1], reverse=reverse_order)
select = tuple(select)
pd_df.loc[select] = pd_df.loc[select] + pd_df.loc[select]
md_df.loc[select] = md_df.loc[select] + md_df.loc[select]
df_equals(md_df, pd_df)
@pytest.mark.parametrize("reverse_order", [False, True])
@pytest.mark.parametrize("axis", [0, 1])
def test_loc_order(loc_iter_dfs, reverse_order, axis):
md_df, pd_df = loc_iter_dfs
select = [slice(None), slice(None)]
select[axis] = sorted(pd_df.axes[axis][:-1], reverse=reverse_order)
select = tuple(select)
df_equals(pd_df.loc[select], md_df.loc[select])
@pytest.mark.gpu
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_loc_nested_assignment(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
key1 = modin_df.columns[0]
key2 = modin_df.columns[1]
modin_df[key1].loc[0] = 500
pandas_df[key1].loc[0] = 500
df_equals(modin_df, pandas_df)
modin_df[key2].loc[0] = None
pandas_df[key2].loc[0] = None
df_equals(modin_df, pandas_df)
def test_iloc_assignment():
modin_df = pd.DataFrame(index=["row1", "row2", "row3"], columns=["col1", "col2"])
pandas_df = pandas.DataFrame(
index=["row1", "row2", "row3"], columns=["col1", "col2"]
)
modin_df.iloc[0]["col1"] = 11
modin_df.iloc[1]["col1"] = 21
modin_df.iloc[2]["col1"] = 31
modin_df.iloc[0]["col2"] = 12
modin_df.iloc[1]["col2"] = 22
modin_df.iloc[2]["col2"] = 32
pandas_df.iloc[0]["col1"] = 11
pandas_df.iloc[1]["col1"] = 21
pandas_df.iloc[2]["col1"] = 31
pandas_df.iloc[0]["col2"] = 12
pandas_df.iloc[1]["col2"] = 22
pandas_df.iloc[2]["col2"] = 32
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iloc_nested_assignment(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
key1 = modin_df.columns[0]
key2 = modin_df.columns[1]
modin_df[key1].iloc[0] = 500
pandas_df[key1].iloc[0] = 500
df_equals(modin_df, pandas_df)
modin_df[key2].iloc[0] = None
pandas_df[key2].iloc[0] = None
df_equals(modin_df, pandas_df)
def test_loc_series():
md_df, pd_df = create_test_dfs({"a": [1, 2], "b": [3, 4]})
pd_df.loc[pd_df["a"] > 1, "b"] = np.log(pd_df["b"])
md_df.loc[md_df["a"] > 1, "b"] = np.log(md_df["b"])
df_equals(pd_df, md_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pop(request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
temp_modin_df = modin_df.copy()
temp_pandas_df = pandas_df.copy()
modin_popped = temp_modin_df.pop(key)
pandas_popped = temp_pandas_df.pop(key)
df_equals(modin_popped, pandas_popped)
df_equals(temp_modin_df, temp_pandas_df)
def test_reindex():
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
pandas_df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.reindex([0, 3, 2, 1]), pandas_df.reindex([0, 3, 2, 1]))
df_equals(modin_df.reindex([0, 6, 2]), pandas_df.reindex([0, 6, 2]))
df_equals(
modin_df.reindex(["col1", "col3", "col4", "col2"], axis=1),
pandas_df.reindex(["col1", "col3", "col4", "col2"], axis=1),
)
df_equals(
modin_df.reindex(["col1", "col7", "col4", "col8"], axis=1),
pandas_df.reindex(["col1", "col7", "col4", "col8"], axis=1),
)
df_equals(
modin_df.reindex(index=[0, 1, 5], columns=["col1", "col7", "col4", "col8"]),
pandas_df.reindex(index=[0, 1, 5], columns=["col1", "col7", "col4", "col8"]),
)
df_equals(
modin_df.T.reindex(["col1", "col7", "col4", "col8"], axis=0),
pandas_df.T.reindex(["col1", "col7", "col4", "col8"], axis=0),
)
def test_reindex_like():
df1 = pd.DataFrame(
[
[24.3, 75.7, "high"],
[31, 87.8, "high"],
[22, 71.6, "medium"],
[35, 95, "medium"],
],
columns=["temp_celsius", "temp_fahrenheit", "windspeed"],
index=pd.date_range(start="2014-02-12", end="2014-02-15", freq="D"),
)
df2 = pd.DataFrame(
[[28, "low"], [30, "low"], [35.1, "medium"]],
columns=["temp_celsius", "windspeed"],
index=pd.DatetimeIndex(["2014-02-12", "2014-02-13", "2014-02-15"]),
)
with pytest.warns(UserWarning):
df2.reindex_like(df1)
def test_rename_sanity():
source_df = pandas.DataFrame(test_data["int_data"])[
["col1", "index", "col3", "col4"]
]
mapping = {"col1": "a", "index": "b", "col3": "c", "col4": "d"}
modin_df = pd.DataFrame(source_df)
df_equals(modin_df.rename(columns=mapping), source_df.rename(columns=mapping))
renamed2 = source_df.rename(columns=str.lower)
df_equals(modin_df.rename(columns=str.lower), renamed2)
modin_df = pd.DataFrame(renamed2)
df_equals(modin_df.rename(columns=str.upper), renamed2.rename(columns=str.upper))
# index
data = {"A": {"foo": 0, "bar": 1}}
# gets sorted alphabetical
df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
assert_index_equal(
modin_df.rename(index={"foo": "bar", "bar": "foo"}).index,
df.rename(index={"foo": "bar", "bar": "foo"}).index,
)
assert_index_equal(
modin_df.rename(index=str.upper).index, df.rename(index=str.upper).index
)
# Using the `mapper` functionality with `axis`
assert_index_equal(
modin_df.rename(str.upper, axis=0).index, df.rename(str.upper, axis=0).index
)
assert_index_equal(
modin_df.rename(str.upper, axis=1).columns,
df.rename(str.upper, axis=1).columns,
)
# have to pass something
with pytest.raises(TypeError):
modin_df.rename()
# partial columns
renamed = source_df.rename(columns={"col3": "foo", "col4": "bar"})
modin_df = pd.DataFrame(source_df)
assert_index_equal(
modin_df.rename(columns={"col3": "foo", "col4": "bar"}).index,
source_df.rename(columns={"col3": "foo", "col4": "bar"}).index,
)
# other axis
renamed = source_df.T.rename(index={"col3": "foo", "col4": "bar"})
assert_index_equal(
source_df.T.rename(index={"col3": "foo", "col4": "bar"}).index,
modin_df.T.rename(index={"col3": "foo", "col4": "bar"}).index,
)
# index with name
index = pandas.Index(["foo", "bar"], name="name")
renamer = pandas.DataFrame(data, index=index)
modin_df = pd.DataFrame(data, index=index)
renamed = renamer.rename(index={"foo": "bar", "bar": "foo"})
modin_renamed = modin_df.rename(index={"foo": "bar", "bar": "foo"})
assert_index_equal(renamed.index, modin_renamed.index)
assert renamed.index.name == modin_renamed.index.name
def test_rename_multiindex():
tuples_index = [("foo1", "bar1"), ("foo2", "bar2")]
tuples_columns = [("fizz1", "buzz1"), ("fizz2", "buzz2")]
index = pandas.MultiIndex.from_tuples(tuples_index, names=["foo", "bar"])
columns = pandas.MultiIndex.from_tuples(tuples_columns, names=["fizz", "buzz"])
frame_data = [(0, 0), (1, 1)]
df = pandas.DataFrame(frame_data, index=index, columns=columns)
modin_df = pd.DataFrame(frame_data, index=index, columns=columns)
#
# without specifying level -> across all levels
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
modin_renamed = modin_df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
assert_index_equal(renamed.index, modin_renamed.index)
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
assert_index_equal(renamed.columns, modin_renamed.columns)
assert renamed.index.names == modin_renamed.index.names
assert renamed.columns.names == modin_renamed.columns.names
#
# with specifying a level
# dict
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0)
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0
)
assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz")
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz"
)
assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1)
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1
)
assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz")
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz"
)
assert_index_equal(renamed.columns, modin_renamed.columns)
# function
func = str.upper
renamed = df.rename(columns=func, level=0)
modin_renamed = modin_df.rename(columns=func, level=0)
assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level="fizz")
modin_renamed = modin_df.rename(columns=func, level="fizz")
assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level=1)
modin_renamed = modin_df.rename(columns=func, level=1)
assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level="buzz")
modin_renamed = modin_df.rename(columns=func, level="buzz")
assert_index_equal(renamed.columns, modin_renamed.columns)
# index
renamed = df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
modin_renamed = modin_df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
assert_index_equal(modin_renamed.index, renamed.index)
@pytest.mark.xfail(reason="Pandas does not pass this test")
def test_rename_nocopy():
source_df = pandas.DataFrame(test_data["int_data"])[
["col1", "index", "col3", "col4"]
]
modin_df = pd.DataFrame(source_df)
modin_renamed = modin_df.rename(columns={"col3": "foo"}, copy=False)
modin_renamed["foo"] = 1
assert (modin_df["col3"] == 1).all()
def test_rename_inplace():
source_df = pandas.DataFrame(test_data["int_data"])[
["col1", "index", "col3", "col4"]
]
modin_df = pd.DataFrame(source_df)
df_equals(
modin_df.rename(columns={"col3": "foo"}),
source_df.rename(columns={"col3": "foo"}),
)
frame = source_df.copy()
modin_frame = modin_df.copy()
frame.rename(columns={"col3": "foo"}, inplace=True)
modin_frame.rename(columns={"col3": "foo"}, inplace=True)
df_equals(modin_frame, frame)
def test_rename_bug():
# rename set ref_locs, and set_index was not resetting
frame_data = {0: ["foo", "bar"], 1: ["bah", "bas"], 2: [1, 2]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df = df.rename(columns={0: "a"})
df = df.rename(columns={1: "b"})
df = df.set_index(["a", "b"])
df.columns = ["2001-01-01"]
modin_df = modin_df.rename(columns={0: "a"})
modin_df = modin_df.rename(columns={1: "b"})
modin_df = modin_df.set_index(["a", "b"])
modin_df.columns = ["2001-01-01"]
df_equals(modin_df, df)
def test_rename_axis():
data = {"num_legs": [4, 4, 2], "num_arms": [0, 0, 2]}
index = ["dog", "cat", "monkey"]
modin_df = pd.DataFrame(data, index)
pandas_df = pandas.DataFrame(data, index)
df_equals(modin_df.rename_axis("animal"), pandas_df.rename_axis("animal"))
df_equals(
modin_df.rename_axis("limbs", axis="columns"),
pandas_df.rename_axis("limbs", axis="columns"),
)
modin_df.rename_axis("limbs", axis="columns", inplace=True)
pandas_df.rename_axis("limbs", axis="columns", inplace=True)
df_equals(modin_df, pandas_df)
new_index = pd.MultiIndex.from_product(
[["mammal"], ["dog", "cat", "monkey"]], names=["type", "name"]
)
modin_df.index = new_index
pandas_df.index = new_index
df_equals(
modin_df.rename_axis(index={"type": "class"}),
pandas_df.rename_axis(index={"type": "class"}),
)
df_equals(
modin_df.rename_axis(columns=str.upper),
pandas_df.rename_axis(columns=str.upper),
)
df_equals(
modin_df.rename_axis(columns=[str.upper(o) for o in modin_df.columns.names]),
pandas_df.rename_axis(columns=[str.upper(o) for o in pandas_df.columns.names]),
)
with pytest.raises(ValueError):
df_equals(
modin_df.rename_axis(str.upper, axis=1),
pandas_df.rename_axis(str.upper, axis=1),
)
def test_rename_axis_inplace():
test_frame = pandas.DataFrame(test_data["int_data"])
modin_df = pd.DataFrame(test_frame)
result = test_frame.copy()
modin_result = modin_df.copy()
no_return = result.rename_axis("foo", inplace=True)
modin_no_return = modin_result.rename_axis("foo", inplace=True)
assert no_return is modin_no_return
df_equals(modin_result, result)
result = test_frame.copy()
modin_result = modin_df.copy()
no_return = result.rename_axis("bar", axis=1, inplace=True)
modin_no_return = modin_result.rename_axis("bar", axis=1, inplace=True)
assert no_return is modin_no_return
df_equals(modin_result, result)
def test_reorder_levels():
data = np.random.randint(1, 100, 12)
modin_df = pd.DataFrame(
data,
index=pd.MultiIndex.from_tuples(
[
(num, letter, color)
for num in range(1, 3)
for letter in ["a", "b", "c"]
for color in ["Red", "Green"]
],
names=["Number", "Letter", "Color"],
),
)
pandas_df = pandas.DataFrame(
data,
index=pandas.MultiIndex.from_tuples(
[
(num, letter, color)
for num in range(1, 3)
for letter in ["a", "b", "c"]
for color in ["Red", "Green"]
],
names=["Number", "Letter", "Color"],
),
)
df_equals(
modin_df.reorder_levels(["Letter", "Color", "Number"]),
pandas_df.reorder_levels(["Letter", "Color", "Number"]),
)
def test_reindex_multiindex():
data1, data2 = np.random.randint(1, 20, (5, 5)), np.random.randint(10, 25, 6)
index = np.array(["AUD", "BRL", "CAD", "EUR", "INR"])
modin_midx = pd.MultiIndex.from_product(
[["Bank_1", "Bank_2"], ["AUD", "CAD", "EUR"]], names=["Bank", "Curency"]
)
pandas_midx = pandas.MultiIndex.from_product(
[["Bank_1", "Bank_2"], ["AUD", "CAD", "EUR"]], names=["Bank", "Curency"]
)
modin_df1, modin_df2 = (
pd.DataFrame(data=data1, index=index, columns=index),
pd.DataFrame(data2, modin_midx),
)
pandas_df1, pandas_df2 = (
pandas.DataFrame(data=data1, index=index, columns=index),
pandas.DataFrame(data2, pandas_midx),
)
'''
(c) 2018, <EMAIL> - Fork from QSTK
https://charlesg.github.io/pftk/
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license.
@author: <NAME>
@contact: <EMAIL>
@summary: Backtester
'''
# Python imports
from datetime import timedelta
# 3rd Party Imports
import pandas as pand
import numpy as np
from copy import deepcopy
# Pftk imports
from pftk.pftkutil import tsutil as tsu
def _calculate_leverage(values_by_stock, ts_leverage, ts_long_exposure, ts_short_exposure, ts_net_exposure):
"""
@summary calculates leverage based on the dataframe values_by_stock
and returns the updated timeseries of leverage
@param values_by_stock: Dataframe containing the values held in
in each stock in the portfolio
@param ts_leverage: time series of leverage values
@param ts_long_exposure, ts_short_exposure, ts_net_exposure: time series of exposure values
@return: updated time series of leverage, long exposure, short exposure and net exposure
"""
for r_index, r_val in values_by_stock.iterrows():
f_long = 0
f_short = 0
for val in r_val.values[:-1]:
if np.isnan(val) == False:
if val >= 0:
f_long = f_long + val
else:
f_short = f_short + val
f_lev = (f_long + abs(f_short)) / (f_long + r_val.values[-1] + f_short)
f_net = (f_long - abs(f_short)) / (f_long + r_val.values[-1] + f_short)
f_long_ex = (f_long) / (f_long + r_val.values[-1] + f_short)
f_short_ex = (abs(f_short)) / (f_long + r_val.values[-1] + f_short)
if np.isnan(f_lev): f_lev = 0
if np.isnan(f_net): f_net = 0
if np.isnan(f_long): f_long = 0
if np.isnan(f_short): f_short = 0
ts_leverage = ts_leverage.append(pand.Series(f_lev, index = [r_index] ))
ts_long_exposure = ts_long_exposure.append(pand.Series(f_long_ex, index = [r_index] ))
ts_short_exposure = ts_short_exposure.append(pand.Series(f_short_ex, index = [r_index] ))
ts_net_exposure = ts_net_exposure.append(pand.Series(f_net, index = [r_index] ))
return ts_leverage, ts_long_exposure, ts_short_exposure, ts_net_exposure
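# Worked example of the leverage arithmetic above (illustrative numbers only): with
# long holdings worth 70, short holdings worth -30 and a final (cash) column of 60,
# the denominator is 70 + 60 + (-30) = 100, so
#   gross leverage = (70 + 30) / 100 = 1.0
#   net exposure   = (70 - 30) / 100 = 0.4
#   long exposure  =        70 / 100 = 0.7
#   short exposure =        30 / 100 = 0.3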
def _monthly_turnover(ts_orders, ts_fund):
order_val_month = 0
last_date = ts_orders.index[0]
b_first_month = True
ts_turnover = "None"
for date in ts_orders.index:
if last_date.month == date.month:
order_val_month += ts_orders.loc[date]
else:
if b_first_month == True:
ts_turnover = pand.Series(order_val_month, index=[last_date])
b_first_month = False
else:
ts_turnover = ts_turnover.append(pand.Series(order_val_month, index=[last_date]))
order_val_month = ts_orders.loc[date]
last_date = date
if type(ts_turnover) != type("None"):
ts_turnover = ts_turnover.append(pand.Series(order_val_month, index=[last_date]))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Gates are traditionally used to subset single cell data in one
or two dimensional space by hand-drawn polygons in a manual and laborious
process. cytopy attempts to emulate this using autonomous gates, driven
by unsupervised learning algorithms. The gate module contains the
classes that provide the infrastructure to apply these algorithms
to the context of single cell data whilst interacting with the underlying
database that houses our analysis.
Copyright 2020 <NAME>
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished
to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import typing
from cytopy.flow.transform import apply_transform
from .geometry import ThresholdGeom, PolygonGeom, inside_polygon, \
create_convex_hull, create_polygon, ellipse_to_polygon, probablistic_ellipse
from .population import Population, merge_multiple_gate_populations
from ..flow.sampling import faithful_downsampling, density_dependent_downsampling, upsample_knn, uniform_downsampling
from ..flow.dim_reduction import dimensionality_reduction
from ..flow.build_models import build_sklearn_model
from sklearn.cluster import *
from sklearn.mixture import *
from hdbscan import HDBSCAN
from shapely.geometry import Polygon as ShapelyPoly
from shapely.ops import cascaded_union
from string import ascii_uppercase
from collections import Counter
from typing import List, Dict
from functools import reduce
from KDEpy import FFTKDE
from detecta import detect_peaks
from scipy.signal import savgol_filter
import pandas as pd
import numpy as np
import mongoengine
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, cytopy"
__credits__ = ["<NAME>", "<NAME>", "<NAME>", "<NAME>"]
__license__ = "MIT"
__version__ = "2.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
class Child(mongoengine.EmbeddedDocument):
"""
Base class for a gate child population. This is representative of the 'population' of cells
identified when a gate is first defined and will be used as a template to annotate
the populations identified in new data.
"""
name = mongoengine.StringField()
signature = mongoengine.DictField()
meta = {"allow_inheritance": True}
class ChildThreshold(Child):
"""
Child population of a Threshold gate. This is representative of the 'population' of cells
identified when a gate is first defined and will be used as a template to annotate
the populations identified in new data.
Attributes
-----------
name: str
Name of the child
definition: str
Definition of population e.g "+" or "-" for 1 dimensional gate or "++" etc for 2 dimensional gate
geom: ThresholdGeom
Geometric definition for this child population
signature: dict
Average of a population feature space (median of each channel); used to match
children to newly identified populations for annotating
"""
definition = mongoengine.StringField()
geom = mongoengine.EmbeddedDocumentField(ThresholdGeom)
def match_definition(self,
definition: str):
"""
Given a definition, return True or False as to whether it matches this ChildThreshold's
definition. If definition contains multiples separated by a comma, or the ChildThreshold's
definition contains multiple, first split and then compare. Return True if matches any.
Parameters
----------
definition: str
Returns
-------
bool
"""
definition = definition.split(",")
return any([x in self.definition.split(",") for x in definition])
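# Illustrative behaviour of match_definition (examples, not taken from the test suite):
# either side may carry a comma-separated list of quadrant labels and a single shared
# label is enough to match.
#   ChildThreshold(definition="++,-+").match_definition("++")     -> True
#   ChildThreshold(definition="++,-+").match_definition("--,+-")  -> False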
class ChildPolygon(Child):
"""
Child population of a Polgon or Ellipse gate. This is representative of the 'population' of cells
identified when a gate is first defined and will be used as a template to annotate
the populations identified in new data.
Attributes
-----------
name: str
Name of the child
geom: ChildPolygon
Geometric definition for this child population
signature: dict
Average of a population feature space (median of each channel); used to match
children to newly identified populations for annotating
"""
geom = mongoengine.EmbeddedDocumentField(PolygonGeom)
class Gate(mongoengine.Document):
"""
Base class for a Gate. A Gate attempts to separate single cell data in one or
two-dimensional space using unsupervised learning algorithms. The algorithm is fitted
to example data to generate "children"; the populations of cells a user expects to
identify. These children are stored and then when the gate is 'fitted' to new data,
the resulting populations are matched to the expected children.
Attributes
-----------
gate_name: str (required)
Name of the gate
parent: str (required)
Parent population that this gate is applied to
x: str (required)
Name of the x-axis variable forming the one/two dimensional space this gate
is applied to
y: str (optional)
Name of the y-axis variable forming the two dimensional space this gate
is applied to
transform_x: str, optional
Method used to transform the X-axis dimension, supported methods are: logicle, hyperlog, asinh or log
transform_y: str, optional
Method used to transform the Y-axis dimension, supported methods are: logicle, hyperlog, asinh or log
transform_x_kwargs: dict, optional
Additional keyword arguments passed to Transformer object when transforming the x-axis dimension
transform_y_kwargs: dict, optional
Additional keyword arguments passed to Transformer object when transforming the y-axis dimension
sampling: dict (optional)
Options for downsampling data prior to application of gate. Should contain a
key/value pair for desired method e.g ({"method": "uniform"). Available methods
are: 'uniform', 'density' or 'faithful'. See cytopy.flow.sampling for details. Additional
keyword arguments should be provided in the sampling dictionary.
dim_reduction: dict (optional)
Experimental feature. Allows for dimension reduction to be performed prior to
applying gate. Gate will be applied to the resulting embeddings. Provide a dictionary
with a key "method" and the value as any supported method in cytopy.flow.dim_reduction.
Additional keyword arguments should be provided in this dictionary.
ctrl_x: str (optional)
If a value is given here it should be the name of a control specimen commonly associated
to the samples in an Experiment. When given this signals that the gate should use the control
data for the x-axis dimension when predicting population geometry.
ctrl_y: str (optional)
If a value is given here it should be the name of a control specimen commonly associated
to the samples in an Experiment. When given this signals that the gate should use the control
data for the y-axis dimension when predicting population geometry.
ctrl_classifier: str (default='XGBClassifier')
Ignored if both ctrl_x and ctrl_y are None. Specifies which Scikit-Learn or sklearn-like classifier
to use when estimating the control population (see cytopy.data.fcs.FileGroup.load_ctrl_population_df)
ctrl_classifier_params: dict, optional
Parameters used when creating control population classifier
ctrl_prediction_kwargs: dict, optional
Additional keyword arguments passed to cytopy.data.fcs.FileGroup.load_ctrl_population_df call
method: str (required)
Name of the underlying algorithm to use. Should have a value of: "manual", "density",
"quantile" or correspond to the name of an existing class in Scikit-Learn or HDBSCAN.
If you have a method that follows the Scikit-Learn template but isn't currently present
in cytopy and you would like it to be, please contribute to the repository on GitHub
or contact <EMAIL>
method_kwargs: dict
Keyword arguments for initiation of the above method.
"""
gate_name = mongoengine.StringField(required=True)
parent = mongoengine.StringField(required=True)
x = mongoengine.StringField(required=True)
y = mongoengine.StringField(required=False)
transform_x = mongoengine.StringField(required=False, default=None)
transform_y = mongoengine.StringField(required=False, default=None)
transform_x_kwargs = mongoengine.DictField()
transform_y_kwargs = mongoengine.DictField()
sampling = mongoengine.DictField()
dim_reduction = mongoengine.DictField()
ctrl_x = mongoengine.StringField()
ctrl_y = mongoengine.StringField()
ctrl_classifier = mongoengine.StringField(default="XGBClassifier")
ctrl_classifier_params = mongoengine.DictField()
ctrl_prediction_kwargs = mongoengine.DictField()
method = mongoengine.StringField(required=True)
method_kwargs = mongoengine.DictField()
children = mongoengine.EmbeddedDocumentListField(Child)
meta = {
'db_alias': 'core',
'collection': 'gates',
'allow_inheritance': True
}
def __init__(self, *args, **values):
method = values.get("method", None)
assert method is not None, "No method given"
err = f"Module {method} not supported. See docs for supported methods."
assert method in ["manual", "density", "quantile", "time", "AND", "OR", "NOT"] + list(globals().keys()), err
super().__init__(*args, **values)
self.model = None
self.x_transformer = None
self.y_transformer = None
if self.ctrl_classifier:
params = self.ctrl_classifier_params or {}
build_sklearn_model(klass=self.ctrl_classifier, **params)
self.validate()
def transform(self,
data: pd.DataFrame) -> pd.DataFrame:
"""
Transform dataframe prior to gating
Parameters
----------
data: Pandas.DataFrame
Returns
-------
Pandas.DataFrame
Transformed dataframe
"""
if self.transform_x is not None:
kwargs = self.transform_x_kwargs or {}
data, self.x_transformer = apply_transform(data=data,
features=[self.x],
method=self.transform_x,
return_transformer=True,
**kwargs)
if self.transform_y is not None and self.y is not None:
kwargs = self.transform_y_kwargs or {}
data, self.y_transformer = apply_transform(data=data,
features=[self.y],
method=self.transform_y,
return_transformer=True,
**kwargs)
return data
def transform_info(self) -> (dict, dict):
"""
Returns two dictionaries describing the transforms and transform settings applied to each variable
this gate acts upon
Returns
-------
dict, dict
Transform dict ({x-variable: transform, y-variable: transform}),
Transform kwargs dict ({x-variable: transform kwargs, y-variable: transform kwargs})
"""
transforms = [self.transform_x, self.transform_y]
transform_kwargs = [self.transform_x_kwargs, self.transform_y_kwargs]
transforms = {k: v for k, v in zip([self.x, self.y], transforms) if k is not None}
transform_kwargs = {k: v for k, v in zip([self.x, self.y], transform_kwargs) if k is not None}
return transforms, transform_kwargs
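# Illustrative return value of transform_info (assumed settings, not from the docs):
# a gate with x="FSC-A", y="CD4", transform_x="logicle", transform_y="asinh" and no
# transform kwargs would yield
#   ({"FSC-A": "logicle", "CD4": "asinh"}, {"FSC-A": {}, "CD4": {}})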
def _downsample(self,
data: pd.DataFrame) -> pd.DataFrame or None:
"""
Perform down-sampling prior to gating. Returns the down-sampled dataframe;
raises ValueError if the sampling method is not recognised.
Parameters
----------
data: Pandas.DataFrame
Returns
-------
Pandas.DataFrame
Raises
------
AssertionError
If sampling kwargs are missing
"""
data = data.copy()
if self.sampling.get("method", None) == "uniform":
n = self.sampling.get("n", None) or self.sampling.get("frac", None)
assert n is not None, "Must provide 'n' or 'frac' for uniform downsampling"
return uniform_downsampling(data=data, sample_size=n)
if self.sampling.get("method", None) == "density":
kwargs = {k: v for k, v in self.sampling.items()
if k not in ["method", "features"]}
features = [f for f in [self.x, self.y] if f is not None]
return density_dependent_downsampling(data=data,
features=features,
**kwargs)
if self.sampling.get("method", None) == "faithful":
h = self.sampling.get("h", 0.01)
return faithful_downsampling(data=data.values, h=h)
raise ValueError("Invalid downsample method, should be one of: 'uniform', 'density' or 'faithful'")
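# Illustrative sampling configurations (placeholder values) mapping onto the branches
# handled in _downsample above:
#   {"method": "uniform", "n": 10000}   -> uniform_downsampling(sample_size=10000)
#   {"method": "faithful", "h": 0.01}   -> faithful_downsampling(h=0.01)
# For "density", every key other than "method" and "features" is forwarded verbatim to
# density_dependent_downsampling as a keyword argument.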
def _upsample(self,
data: pd.DataFrame,
sample: pd.DataFrame,
populations: List[Population]) -> List[Population]:
"""
Perform up-sampling after gating using KNN. Returns list of Population objects
with index updated to reflect the original data.
Parameters
----------
data: Pandas.DataFrame
Original data, prior to down-sampling
sample: Pandas.DataFrame
Sampled data
populations: list
List of populations with assigned indexes
Returns
-------
list
"""
sample = sample.copy()
sample["label"] = None
for i, p in enumerate(populations):
sample.loc[sample.index.isin(p.index), "label"] = i
sample["label"].fillna(-1, inplace=True)
labels = sample["label"].values
sample.drop("label", axis=1, inplace=True)
new_labels = upsample_knn(sample=sample,
original_data=data,
labels=labels,
features=[i for i in [self.x, self.y] if i is not None],
verbose=self.sampling.get("verbose", True),
scoring=self.sampling.get("upsample_scoring", "balanced_accuracy"),
**self.sampling.get("knn_kwargs", {}))
for i, p in enumerate(populations):
new_idx = data.index.values[np.where(new_labels == i)]
if len(new_idx) == 0:
raise ValueError(f"Up-sampling failed, no events labelled for {p.population_name}")
p.index = new_idx
return populations
def _dim_reduction(self,
data: pd.DataFrame):
"""
Experimental!
Perform dimension reduction prior to gating. Returns dataframe
with appended columns for embeddings
Parameters
----------
data: Pandas.DataFrame
Data to reduce
Returns
-------
Pandas.DataFrame
"""
method = self.dim_reduction.get("method", None)
if method is None:
return data
kwargs = {k: v for k, v in self.dim_reduction.items() if k != "method"}
data = dimensionality_reduction(data=data,
features=kwargs.get("features", data.columns.tolist()),
method=method,
n_components=2,
return_embeddings_only=False,
return_reducer=False,
**kwargs)
self.x = f"{method}1"
self.y = f"{method}2"
return data
def _xy_in_dataframe(self,
data: pd.DataFrame):
"""
Assert that the x and y variables defined for this gate are present in the given
DataFrames columns
Parameters
----------
data: Pandas.DataFrame
Returns
-------
None
Raises
-------
AssertionError
If required columns missing from provided data
"""
assert self.x in data.columns, f"{self.x} missing from given dataframe"
if self.y:
assert self.y in data.columns, f"{self.y} missing from given dataframe"
def reset_gate(self) -> None:
"""
Removes existing children and resets all parameters.
Returns
-------
None
"""
self.children = []
class ThresholdGate(Gate):
"""
ThresholdGate inherits from Gate. A Gate attempts to separate single cell data in one or
two-dimensional space using unsupervised learning algorithms. The algorithm is fitted
to example data to generate "children"; the populations of cells a user expects to
identify. These children are stored and then when the gate is 'fitted' to new data,
the resulting populations are matched to the expected children.
The ThresholdGate subsets data based on the properties of the estimated probability
density function of the underlying data. For each axis, kernel density estimation
(KDEpy.FFTKDE) is used to estimate the PDF and a straight line "threshold" applied
to the region of minimum density to separate populations.
This is achieved using a peak finding algorithm and a smoothing procedure, until either:
* Two predominant "peaks" are found and the threshold is taken as the local minimum
between these peaks
* A single peak is detected and the threshold is applied as either the quantile
given in method_kwargs or the inflection point on the descending curve.
Alternatively the "method" can be "manual" for a static gate to be applied; user should
provide x_threshold and y_threshold (if two-dimensional) to "method_kwargs", or "method"
can be "quantile", where the threshold will be drawn at the given quantile, defined by
"q" in "method_kwargs".
Additional kwargs to control behaviour of ThresholdGate when method is "density"
can be given in method_kwargs:
* kernel (default="gaussian") - kernel used for KDE calculation
(see KDEpy.FFTKDE for available kernels)
* bw (default="silverman") - bandwidth to use for KDE calculation, can either be
"silverman" or "ISJ" or a float value (see KDEpy)
* min_peak_threshold (default=0.05) - percentage of highest recorded peak below
which peaks are ignored. E.g. 0.05 would mean any peak less than 5% of the
highest peak would be ignored.
* peak_boundary (default=0.1) - bounding window around which only the highest peak
is considered. E.g. 0.1 would mean that peaks are assessed within a window the
size of peak_boundary * length of probability vector and only highest peak within
window is kept.
* inflection_point_kwargs - dictionary; see cytopy.data.gate.find_inflection_point
* smoothed_peak_finding_kwargs - dictionary; see cytopy.data.gate.smoothed_peak_finding
ThresholdGate supports control gating, whereby thresholds are fitted to control data
and then applied to primary data.
Attributes
-----------
gate_name: str (required)
Name of the gate
parent: str (required)
Parent population that this gate is applied to
x: str (required)
Name of the x-axis variable forming the one/two dimensional space this gate
is applied to
y: str (optional)
Name of the y-axis variable forming the two dimensional space this gate
is applied to
transform_x: str, optional
Method used to transform the X-axis dimension, supported methods are: logicle, hyperlog, asinh or log
transform_y: str, optional
Method used to transform the Y-axis dimension, supported methods are: logicle, hyperlog, asinh or log
transform_x_kwargs: dict, optional
Additional keyword arguments passed to Transformer object when transforming the x-axis dimension
transform_y_kwargs: dict, optional
Additional keyword arguments passed to Transformer object when transforming the y-axis dimension
sampling: dict (optional)
Options for downsampling data prior to application of gate. Should contain a
key/value pair for desired method e.g. {"method": "uniform"}. Available methods
are: 'uniform', 'density' or 'faithful'. See cytopy.flow.sampling for details. Additional
keyword arguments should be provided in the sampling dictionary.
dim_reduction: dict (optional)
Experimental feature. Allows for dimension reduction to be performed prior to
applying gate. Gate will be applied to the resulting embeddings. Provide a dictionary
with a key "method" and the value as any supported method in cytopy.flow.dim_reduction.
Additional keyword arguments should be provided in this dictionary.
ctrl_x: str (optional)
If a value is given here it should be the name of a control specimen commonly associated
to the samples in an Experiment. When given this signals that the gate should use the control
data for the x-axis dimension when predicting population geometry.
ctrl_y: str (optional)
If a value is given here it should be the name of a control specimen commonly associated
to the samples in an Experiment. When given this signals that the gate should use the control
data for the y-axis dimension when predicting population geometry.
ctrl_classifier: str (default='XGBClassifier')
Ignored if both ctrl_x and ctrl_y are None. Specifies which Scikit-Learn or sklearn-like classifier
to use when estimating the control population (see cytopy.data.fcs.FileGroup.load_ctrl_population_df)
ctrl_classifier_params: dict, optional
Parameters used when creating control population classifier
ctrl_prediction_kwargs: dict, optional
Additional keyword arguments passed to cytopy.data.fcs.FileGroup.load_ctrl_population_df call
method: str (required)
Name of the underlying algorithm to use. Should have a value of: "manual", "density",
or "quantile"
method_kwargs: dict
Keyword arguments for initiation of the above method.
"""
children = mongoengine.EmbeddedDocumentListField(ChildThreshold)
def add_child(self,
child: ChildThreshold) -> None:
"""
Add a new child for this gate. Checks that definition is valid and overwrites geom with gate information.
Parameters
----------
child: ChildThreshold
Returns
-------
None
Raises
------
AssertionError
If invalid definition
"""
if self.y is not None:
definition = child.definition.split(",")
assert all(i in ["++", "+-", "-+", "--"]
for i in definition), "Invalid child definition, should be one of: '++', '+-', '-+', or '--'"
else:
assert child.definition in ["+", "-"], "Invalid child definition, should be either '+' or '-'"
child.geom.x = self.x
child.geom.y = self.y
child.geom.transform_x, child.geom.transform_y = self.transform_x, self.transform_y
child.geom.transform_x_kwargs = self.transform_x_kwargs
child.geom.transform_y_kwargs = self.transform_y_kwargs
self.children.append(child)
def _duplicate_children(self) -> None:
"""
Loop through the children and merge any with the same name.
Returns
-------
None
"""
child_counts = Counter([c.name for c in self.children])
if all([i == 1 for i in child_counts.values()]):
return
updated_children = []
for name, count in child_counts.items():
if count >= 2:
updated_children.append(merge_children([c for c in self.children if c.name == name]))
else:
updated_children.append([c for c in self.children if c.name == name][0])
self.children = updated_children
def label_children(self,
labels: dict) -> None:
"""
Rename children using a dictionary of labels where the key correspond to the existing child name
and the value is the new desired population name. If the same population name is given to multiple
children, these children will be merged.
Parameters
----------
labels: dict
Mapping for new children name
Returns
-------
None
"""
for c in self.children:
c.name = labels.get(c.name)
self._duplicate_children()
def _match_to_children(self,
new_populations: List[Population]) -> List[Population]:
"""
Given a list of newly create Populations, match the Populations to the gates children and
return list of Populations with correct population names.
Parameters
----------
new_populations: list
List of newly created Population objects
Returns
-------
List
"""
labeled = list()
for c in self.children:
matching_populations = [p for p in new_populations if c.match_definition(p.definition)]
if len(matching_populations) == 0:
continue
elif len(matching_populations) > 1:
pop = merge_multiple_gate_populations(matching_populations, new_population_name=c.name)
else:
pop = matching_populations[0]
pop.population_name = c.name
labeled.append(pop)
return labeled
def _quantile_gate(self,
data: pd.DataFrame) -> list:
"""
Fit gate to the given dataframe by simply drawing the threshold at the desired quantile.
Parameters
----------
data: Pandas.DataFrame
Returns
-------
list
List of thresholds (one for each dimension)
Raises
------
AssertionError
If 'q' argument not found in method kwargs and method is 'quantile'
"""
q = self.method_kwargs.get("q", None)
assert q is not None, "Must provide a value for 'q' in method kwargs when using quantile gate"
if self.y is None:
return [data[self.x].quantile(q)]
return [data[self.x].quantile(q), data[self.y].quantile(q)]
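# Illustrative quantile configuration (placeholder value): constructing the gate with
# method="quantile" and method_kwargs={"q": 0.95} draws the threshold at the 95th
# percentile of each dimension handled above.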
def _process_one_peak(self,
x: np.ndarray,
x_grid: np.array,
p: np.array,
peak_idx: int):
"""
Process the results of a single peak detected. Returns the threshold for
the given dimension.
Parameters
----------
x: numpy.ndarray
One dimensional data (feature values) under investigation
x_grid: numpy.ndarray
x grid upon which probability vector is estimated by KDE
p: numpy.ndarray
probability vector as estimated by KDE
peak_idx: int
Index of the detected peak within the probability vector
Returns
-------
float
Raises
------
AssertionError
If 'q' argument not found in method kwargs and use_inflection_point is False
"""
use_inflection_point = self.method_kwargs.get("use_inflection_point", True)
if not use_inflection_point:
q = self.method_kwargs.get("q", None)
assert q is not None, "Must provide a value for 'q' in method kwargs " \
"for desired quantile if use_inflection_point is False"
return np.quantile(x, q)
inflection_point_kwargs = self.method_kwargs.get("inflection_point_kwargs", {})
return find_inflection_point(x=x_grid,
p=p,
peak_idx=peak_idx,
**inflection_point_kwargs)
def _fit(self,
data: pd.DataFrame or dict) -> list:
"""
Internal method to fit threshold density gating to a given dataframe. Returns the
list of thresholds generated and the dataframe the threshold were generated from
(will be the downsampled dataframe if sampling methods defined).
Parameters
----------
data: Pandas.DataFrame
Returns
-------
List
"""
if self.method == "manual":
return self._manual()
self._xy_in_dataframe(data=data)
dims = [i for i in [self.x, self.y] if i is not None]
if self.sampling.get("method", None) is not None:
data = self._downsample(data=data)
if self.method == "quantile":
thresholds = self._quantile_gate(data=data)
else:
thresholds = list()
for d in dims:
thresholds.append(self._find_threshold(data[d].values))
return thresholds
def _find_threshold(self, x: np.ndarray):
"""
Given a single dimension of data find the threshold point according to the
methodology defined for this gate and the number of peaks detected.
Parameters
----------
x: Numpy Array
Returns
-------
float
Raises
------
AssertionError
If no peaks are detected
"""
peaks, x_grid, p = self._density_peak_finding(x)
assert len(peaks) > 0, "No peaks detected"
if len(peaks) == 1:
threshold = self._process_one_peak(x,
x_grid=x_grid,
p=p,
peak_idx=peaks[0])
elif len(peaks) == 2:
threshold = find_local_minima(p=p, x=x_grid, peaks=peaks)
else:
threshold = self._solve_threshold_for_multiple_peaks(x=x, p=p, x_grid=x_grid)
return threshold
def _solve_threshold_for_multiple_peaks(self,
x: np.ndarray,
p: np.ndarray,
x_grid: np.ndarray):
"""
Handle the detection of > 2 peaks by smoothing the estimated PDF and
rerunning the peak finding algorithm
Parameters
----------
x: Numpy Array
One dimensional array of events
p: Numpy Array
Estimated PDF
x_grid: Numpy Array
Grid space PDF was generated in
Returns
-------
float
"""
smoothed_peak_finding_kwargs = self.method_kwargs.get("smoothed_peak_finding_kwargs", {})
smoothed_peak_finding_kwargs["min_peak_threshold"] = smoothed_peak_finding_kwargs.get(
"min_peak_threshold",
self.method_kwargs.get("min_peak_threshold", 0.05))
smoothed_peak_finding_kwargs["peak_boundary"] = smoothed_peak_finding_kwargs.get("peak_boundary",
self.method_kwargs.get(
"peak_boundary",
0.1))
p, peaks = smoothed_peak_finding(p=p, **smoothed_peak_finding_kwargs)
if len(peaks) == 1:
return self._process_one_peak(x,
x_grid=x_grid,
p=p,
peak_idx=peaks[0])
else:
return find_local_minima(p=p, x=x_grid, peaks=peaks)
def _density_peak_finding(self,
x: np.ndarray):
"""
Estimate the underlying PDF of a single dimension using a convolution based
KDE (KDEpy.FFTKDE), then run a peak finding algorithm (detecta.detect_peaks)
Parameters
----------
x: Numpy Array
Returns
-------
(Numpy Array, Numpy Array, Numpy Array)
Index of detected peaks, grid space that PDF is estimated on, and estimated PDF
"""
x_grid, p = (FFTKDE(kernel=self.method_kwargs.get("kernel", "gaussian"),
bw=self.method_kwargs.get("bw", "silverman"))
.fit(x)
.evaluate())
peaks = find_peaks(p=p,
min_peak_threshold=self.method_kwargs.get("min_peak_threshold", 0.05),
peak_boundary=self.method_kwargs.get("peak_boundary", 0.1))
return peaks, x_grid, p
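# Hedged standalone sketch of the density peak-finding step above. The module's find_peaks
# helper is assumed to wrap a peak detector such as detecta.detect_peaks; this sketch calls
# KDEpy and detecta directly. The toy data and the mph value are illustrative, not the gate's defaults.
#
#   import numpy as np
#   from KDEpy import FFTKDE
#   from detecta import detect_peaks
#
#   x = np.concatenate([np.random.normal(0, 1, 500), np.random.normal(4, 1, 500)])
#   x_grid, p = FFTKDE(kernel="gaussian", bw="silverman").fit(x).evaluate()
#   peaks = detect_peaks(p, mph=p.max() * 0.05)   # indices into x_grid/p
#   print(x_grid[peaks])                          # approximate locations of the two modes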
def _manual(self) -> list:
"""
Wrapper called when the manual gating method is used. Searches the method kwargs and returns static thresholds
Returns
-------
List
Raises
------
AssertionError
If x or y threshold is None when required
"""
x_threshold = self.method_kwargs.get("x_threshold", None)
y_threshold = self.method_kwargs.get("y_threshold", None)
assert x_threshold is not None, "Manual threshold gating requires the keyword argument 'x_threshold'"
if self.transform_x:
kwargs = self.transform_x_kwargs or {}
x_threshold = apply_transform(pd.DataFrame({"x": [x_threshold]}),
features=["x"],
method=self.transform_x,
**kwargs).x.values[0]
if self.y:
assert y_threshold is not None, "2D manual threshold gating requires the keyword argument 'y_threshold'"
if self.transform_y:
kwargs = self.transform_y_kwargs or {}
y_threshold = apply_transform(pd.DataFrame({"y": [y_threshold]}),
features=["y"],
method=self.transform_y,
**kwargs).y.values[0]
thresholds = [i for i in [x_threshold, y_threshold] if i is not None]
return [float(i) for i in thresholds]
def _ctrl_fit(self,
primary_data: pd.DataFrame,
ctrl_data: pd.DataFrame):
"""
Estimate the thresholds to apply to some primary data using the given control data
Parameters
----------
primary_data: Pandas.DataFrame
ctrl_data: Pandas.DataFrame
Returns
-------
List
List of thresholds [x dimension threshold, y dimension threshold]
"""
self._xy_in_dataframe(data=primary_data)
self._xy_in_dataframe(data=ctrl_data)
ctrl_data = self.transform(data=ctrl_data)
ctrl_data = self._dim_reduction(data=ctrl_data)
dims = [i for i in [self.x, self.y] if i is not None]
if self.sampling.get("method", None) is not None:
primary_data, ctrl_data = self._downsample(data=primary_data), self._downsample(data=ctrl_data)
thresholds = list()
for d in dims:
fmo_threshold = self._find_threshold(ctrl_data[d].values)
peaks, x_grid, p = self._density_peak_finding(primary_data[d].values)
if len(peaks) == 1:
thresholds.append(fmo_threshold)
else:
if len(peaks) > 2:
t = self._solve_threshold_for_multiple_peaks(x=primary_data[d].values,
p=p,
x_grid=x_grid)
else:
t = find_local_minima(p=p, x=x_grid, peaks=peaks)
if t > fmo_threshold:
thresholds.append(t)
else:
thresholds.append(fmo_threshold)
return thresholds
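# Note on _ctrl_fit above: the control (e.g. FMO) threshold acts as a floor. When the primary
# data shows a single peak the control threshold is used directly; when multiple peaks are found,
# the data-driven threshold is only kept if it exceeds the control threshold.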
def fit(self,
data: pd.DataFrame,
ctrl_data: pd.DataFrame or None = None) -> None:
"""
Fit the gate using a given dataframe. If children already exist will raise an AssertionError
and notify user to call `fit_predict`.
Parameters
----------
data: Pandas.DataFrame
Population data to fit threshold
ctrl_data: Pandas.DataFrame, optional
If provided, thresholds will be calculated using ctrl_data and then applied to data
Returns
-------
None
Raises
------
AssertionError
If gate Children have already been defined i.e. fit has been called previously
"""
data = data.copy()
data = self.transform(data=data)
data = self._dim_reduction(data=data)
assert len(self.children) == 0, "Children already defined for this gate. Call 'fit_predict' to " \
"fit to new data and match populations to children, or call " \
"'predict' to apply static thresholds to new data. If you want to " \
"reset the gate and call 'fit' again, first call 'reset_gate'"
if ctrl_data is not None:
thresholds = self._ctrl_fit(primary_data=data, ctrl_data=ctrl_data)
else:
thresholds = self._fit(data=data)
y_threshold = None
if len(thresholds) > 1:
y_threshold = thresholds[1]
data = apply_threshold(data=data,
x=self.x, x_threshold=thresholds[0],
y=self.y, y_threshold=y_threshold)
for definition, df in data.items():
self.add_child(ChildThreshold(name=definition,
definition=definition,
geom=ThresholdGeom(x_threshold=thresholds[0],
y_threshold=y_threshold)))
return None
def fit_predict(self,
data: pd.DataFrame,
ctrl_data: pd.DataFrame or None = None) -> list:
"""
Fit the gate using a given dataframe and then associate predicted Population objects to
existing children. If no children exist, an AssertionError will be raised prompting the
user to call `fit` method.
Parameters
----------
data: Pandas.DataFrame
Population data to fit threshold to
ctrl_data: Pandas.DataFrame, optional
If provided, thresholds will be calculated using ctrl_data and then applied to data
Returns
-------
List
List of predicted Population objects, labelled according to the gate's child objects
Raises
------
AssertionError
If fit has not been called prior to fit_predict
"""
assert len(self.children) > 0, "No children defined for gate, call 'fit' before calling 'fit_predict'"
data = data.copy()
data = self.transform(data=data)
data = self._dim_reduction(data=data)
if ctrl_data is not None:
thresholds = self._ctrl_fit(primary_data=data, ctrl_data=ctrl_data)
else:
thresholds = self._fit(data=data)
y_threshold = None
if len(thresholds) == 2:
y_threshold = thresholds[1]
results = apply_threshold(data=data,
x=self.x,
y=self.y,
x_threshold=thresholds[0],
y_threshold=y_threshold)
pops = self._generate_populations(data=results,
x_threshold=thresholds[0],
y_threshold=y_threshold)
return self._match_to_children(new_populations=pops)
def predict(self,
data: pd.DataFrame) -> list:
"""
Using existing children associated to this gate, the previously calculated thresholds of
these children will be applied to the given data and then Population objects created and
labelled to match the children of this gate. NOTE: the data will not be fitted and thresholds
applied will be STATIC not data driven. For data driven gates call `fit_predict` method.
Parameters
----------
data: Pandas.DataFrame
Data to apply static thresholds to
Returns
-------
List
List of Population objects
Raises
------
AssertionError
If fit has not been called prior to predict
"""
assert len(self.children) > 0, "Must call 'fit' prior to predict"
self._xy_in_dataframe(data=data)
data = self.transform(data=data)
data = self._dim_reduction(data=data)
if self.y is not None:
data = threshold_2d(data=data,
x=self.x,
y=self.y,
x_threshold=self.children[0].geom.x_threshold,
y_threshold=self.children[0].geom.y_threshold)
else:
data = threshold_1d(data=data, x=self.x, x_threshold=self.children[0].geom.x_threshold)
return self._generate_populations(data=data,
x_threshold=self.children[0].geom.x_threshold,
y_threshold=self.children[0].geom.y_threshold)
def _generate_populations(self,
data: dict,
x_threshold: float,
y_threshold: float or None) -> list:
"""
Generate populations from a standard dictionary of dataframes that have had thresholds applied.
Parameters
----------
data: Pandas.DataFrame
x_threshold: float
y_threshold: float (optional)
Returns
-------
List
List of Population objects
"""
pops = list()
for definition, df in data.items():
pops.append(Population(population_name=definition,
definition=definition,
parent=self.parent,
n=df.shape[0],
source="gate",
index=df.index.values,
signature=df.mean().to_dict(),
geom=ThresholdGeom(x=self.x,
y=self.y,
transform_x=self.transform_x,
transform_y=self.transform_y,
transform_x_kwargs=self.transform_x_kwargs,
transform_y_kwargs=self.transform_y_kwargs,
x_threshold=x_threshold,
y_threshold=y_threshold)))
return pops
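# Hedged usage sketch for the threshold gate implemented above (assuming the enclosing class is
# cytopy's ThresholdGate; marker names are illustrative):
#
#   gate = ThresholdGate(gate_name="cd3_cd14_gate",
#                        parent="Live cells",
#                        x="CD3", y="CD14",
#                        method="density")
#   gate.fit(training_events)                    # detects thresholds, creates children
#   populations = gate.fit_predict(new_events)   # refit to new data, match to children
#   static_pops = gate.predict(new_events)       # reuse the stored thresholds unchanged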
class PolygonGate(Gate):
"""
PolygonGate inherits from Gate. A Gate attempts to separate single cell data in one or
two-dimensional space using unsupervised learning algorithms. The algorithm is fitted
to example data to generate "children"; the populations of cells a user expects to
identify. These children are stored and then when the gate is 'fitted' to new data,
the resulting populations are matched to the expected children.
The PolygonGate subsets data based on the results of an unsupervised learning algorithm
such as a clustering algorithm. PolygonGate supports any clustering algorithm from the
Scikit-Learn machine learning library. Support is extended to any clustering library
that follows the Scikit-Learn template, but currently this only includes HDBSCAN.
Contributions to extend to other libraries are welcome. The name of the class to use
should be provided in "method" along with keyword arguments for initiating this class
in "method_kwargs".
Alternatively the "method" can be "manual" for a static gate to be applied; user should
provide x_values and y_values (if two-dimensional) to "method_kwargs" as two arrays;
these will be interpreted as the x and y coordinates of the polygon to fit to the data.
DOES NOT SUPPORT CONTROL GATING.
Attributes
-----------
gate_name: str (required)
Name of the gate
parent: str (required)
Parent population that this gate is applied to
x: str (required)
Name of the x-axis variable forming the one/two dimensional space this gate
is applied to
y: str (optional)
Name of the y-axis variable forming the two dimensional space this gate
is applied to
transform_x: str, optional
Method used to transform the X-axis dimension, supported methods are: logicle, hyperlog, asinh or log
transform_y: str, optional
Method used to transform the Y-axis dimension, supported methods are: logicle, hyperlog, asinh or log
transform_x_kwargs: dict, optional
Additional keyword arguments passed to Transformer object when transforming the x-axis dimension
transform_y_kwargs: dict, optional
Additional keyword arguments passed to Transformer object when transforming the y-axis dimension
sampling: dict (optional)
Options for downsampling data prior to application of gate. Should contain a
key/value pair for the desired method e.g. {"method": "uniform"}. Available methods
are: 'uniform', 'density' or 'faithful'. See cytopy.flow.sampling for details. Additional
keyword arguments should be provided in the sampling dictionary.
dim_reduction: dict (optional)
Experimental feature. Allows for dimension reduction to be performed prior to
applying gate. Gate will be applied to the resulting embeddings. Provide a dictionary
with a key "method" and the value as any supported method in cytopy.flow.dim_reduction.
Additional keyword arguments should be provided in this dictionary.
method: str (required)
Name of the underlying algorithm to use. Should have a value of: "manual", or correspond
to the name of an existing class in Scikit-Learn or HDBSCAN.
If you have a method that follows the Scikit-Learn template but isn't currently present
in cytopy and you would like it to be, please contribute to the repository on GitHub
or contact <EMAIL>
method_kwargs: dict
Keyword arguments for initiation of the above method.
"""
children = mongoengine.EmbeddedDocumentListField(ChildPolygon)
def __init__(self, *args, **values):
super().__init__(*args, **values)
assert self.y is not None, "Polygon gate expects a y-axis variable"
def _generate_populations(self,
data: pd.DataFrame,
polygons: List[ShapelyPoly]) -> List[Population]:
"""
Given a dataframe and a list of Polygon shapes as generated from the '_fit' method, generate a
list of Population objects.
Parameters
----------
data: Pandas.DataFrame
polygons: list
Returns
-------
List
List of Population objects
"""
pops = list()
for name, poly in zip(ascii_uppercase, polygons):
pop_df = inside_polygon(df=data, x=self.x, y=self.y, poly=poly)
geom = PolygonGeom(x=self.x,
y=self.y,
transform_x=self.transform_x,
transform_y=self.transform_y,
transform_x_kwargs=self.transform_x_kwargs,
transform_y_kwargs=self.transform_y_kwargs,
x_values=poly.exterior.xy[0],
y_values=poly.exterior.xy[1])
pops.append(Population(population_name=name,
source="gate",
parent=self.parent,
n=pop_df.shape[0],
signature=pop_df.mean().to_dict(),
geom=geom,
index=pop_df.index.values))
return pops
def label_children(self,
labels: dict,
drop: bool = True) -> None:
"""
Rename children using a dictionary of labels where the key corresponds to the existing child name
and the value is the new desired population name. If the same population name is given to multiple
children, these children will be merged.
If drop is True, then children that are absent from the given dictionary will be dropped.
Parameters
----------
labels: dict
Mapping for new children name
drop: bool (default=True)
If True, children absent from labels will be dropped
Returns
-------
None
Raises
------
AssertionError
If duplicate labels are provided
"""
assert len(set(labels.values())) == len(labels.values()), \
"Duplicate labels provided. Child merging not available for polygon gates"
if drop:
self.children = [c for c in self.children if c.name in labels.keys()]
for c in self.children:
c.name = labels.get(c.name)
def add_child(self,
child: ChildPolygon) -> None:
"""
Add a new child for this gate. Checks that child is valid and overwrites geom with gate information.
Parameters
----------
child: ChildPolygon
Returns
-------
None
Raises
------
TypeError
x_values or y_values is not type list
"""
child.geom.x = self.x
child.geom.y = self.y
child.geom.transform_x = self.transform_x
child.geom.transform_y = self.transform_y
child.geom.transform_x_kwargs = self.transform_x_kwargs
child.geom.transform_y_kwargs = self.transform_y_kwargs
if not isinstance(child.geom.x_values, list):
raise TypeError("ChildPolygon x_values should be of type list")
if not isinstance(child.geom.y_values, list):
raise TypeError("ChildPolygon y_values should be of type list")
self.children.append(child)
def _match_to_children(self,
new_populations: List[Population]) -> List[Population]:
"""
Given a list of newly created Populations, match the Populations to the gate's children and
return list of Populations with correct population names. Populations are matched to children
based on minimising the hausdorff distance between the set of polygon coordinates defining
the gate as it was originally created and the newly generated gate fitted to new data.
Parameters
-----------
new_populations: list
List of newly created Population objects
Returns
-------
List
"""
matched_populations = list()
for child in self.children:
hausdorff_distances = [child.geom.shape.hausdorff_distance(pop.geom.shape)
for pop in new_populations]
matching_population = new_populations[int(np.argmin(hausdorff_distances))]
matching_population.population_name = child.name
matched_populations.append(matching_population)
return matched_populations
def _manual(self) -> ShapelyPoly:
"""
Wrapper for manual polygon gating. Searches method kwargs for x and y coordinates and returns
polygon.
Returns
-------
Shapely.geometry.Polygon
Raises
------
AssertionError
x_values or y_values missing from method kwargs
"""
x_values, y_values = self.method_kwargs.get("x_values", None), self.method_kwargs.get("y_values", None)
assert x_values is not None and y_values is not None, "For manual polygon gate must provide x_values and " \
"y_values"
if self.transform_x:
kwargs = self.transform_x_kwargs or {}
x_values = apply_transform(pd.DataFrame({"x": x_values}),
features="x",
method=self.transform_x, **kwargs).x.values
if self.transform_y:
kwargs = self.transform_y_kwargs or {}
y_values = apply_transform(pd.DataFrame({"y": y_values}),
features="y",
method=self.transform_y, **kwargs).y.values
return create_polygon(x_values, y_values)
def _fit(self,
data: pd.DataFrame) -> List[ShapelyPoly]:
"""
Internal method for fitting gate to the given data and returning geometric polygons for
captured populations.
Parameters
----------
data: Pandas.DataFrame
Returns
-------
List
List of Shapely polygon's
"""
if self.method == "manual":
return [self._manual()]
kwargs = {k: v for k, v in self.method_kwargs.items() if k != "conf"}
self.model = globals()[self.method](**kwargs)
self._xy_in_dataframe(data=data)
if self.sampling.get("method", None) is not None:
data = self._downsample(data=data)
labels = self.model.fit_predict(data[[self.x, self.y]])
hulls = [create_convex_hull(x_values=data.iloc[np.where(labels == i)][self.x].values,
y_values=data.iloc[np.where(labels == i)][self.y].values)
for i in np.unique(labels)]
hulls = [x for x in hulls if len(x[0]) > 0]
return [create_polygon(*x) for x in hulls]
def fit(self,
data: pd.DataFrame,
ctrl_data: None = None) -> None:
"""
Fit the gate using a given dataframe. This will generate new children using the calculated
polygons. If children already exist will raise an AssertionError and notify user to call
`fit_predict`.
Parameters
----------
data: Pandas.DataFrame
Population data to fit gate to
ctrl_data: None
Redundant parameter, necessary for Gate signature. Ignore.
Returns
-------
None
Raises
------
AssertionError
If Children have already been defined i.e. fit has been called previously without calling
'reset_gate'
"""
assert len(self.children) == 0, "Gate is already defined, call 'reset_gate' to clear children"
data = self.transform(data=data)
data = self._dim_reduction(data=data)
polygons = self._fit(data=data)
for name, poly in zip(ascii_uppercase, polygons):
self.add_child(ChildPolygon(name=name,
geom=PolygonGeom(x_values=poly.exterior.xy[0].tolist(),
y_values=poly.exterior.xy[1].tolist())))
def fit_predict(self,
data: pd.DataFrame,
ctrl_data: None = None) -> List[Population]:
"""
Fit the gate using a given dataframe and then associate predicted Population objects to
existing children. If no children exist, an AssertionError will be raised prompting the
user to call 'fit' method.
Parameters
----------
data: Pandas.DataFrame
Population data to fit gate to
ctrl_data: None
Redundant parameter, necessary for Gate signature. Ignore.
Returns
-------
List
List of predicted Population objects, labelled according to the gate's child objects
Raises
------
AssertionError
If fit has not been previously called
"""
assert len(self.children) > 0, "No children defined for gate, call 'fit' before calling 'fit_predict'"
data = self.transform(data=data)
data = self._dim_reduction(data=data)
return self._match_to_children(self._generate_populations(data=data.copy(),
polygons=self._fit(data=data)))
def predict(self,
data: pd.DataFrame) -> List[Population]:
"""
Using existing children associated to this gate, the previously calculated polygons of
these children will be applied to the given data and then Population objects created and
labelled to match the children of this gate. NOTE: the data will not be fitted and polygons
applied will be STATIC not data driven. For data driven gates call `fit_predict` method.
Parameters
----------
data: Pandas.DataFrame
Data to apply static polygons to
Returns
-------
List
List of Population objects
Raises
------
AssertionError
If fit has not been previously called
"""
data = self.transform(data=data)
data = self._dim_reduction(data=data)
polygons = [create_polygon(c.geom.x_values, c.geom.y_values) for c in self.children]
populations = self._generate_populations(data=data, polygons=polygons)
for p, name in zip(populations, [c.name for c in self.children]):
p.population_name = name
return populations
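# Hedged usage sketch for PolygonGate (column names and clustering settings are illustrative;
# any Scikit-Learn clustering class name can be given as 'method'):
#
#   gate = PolygonGate(gate_name="scatter_gate",
#                      parent="Live cells",
#                      x="FSC-A", y="SSC-A",
#                      method="MiniBatchKMeans",
#                      method_kwargs={"n_clusters": 3})
#   gate.fit(training_events)
#   gate.label_children({"A": "Lymphocytes", "B": "Monocytes", "C": "Debris"})
#   populations = gate.fit_predict(new_events)   # matched to children by Hausdorff distance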
class EllipseGate(PolygonGate):
"""
EllipseGate inherits from PolygonGate. A Gate attempts to separate single cell data in one or
two-dimensional space using unsupervised learning algorithms. The algorithm is fitted
to example data to generate "children"; the populations of cells a user expects to
identify. These children are stored and then when the gate is 'fitted' to new data,
the resulting populations are matched to the expected children.
The EllipseGate uses probabilistic mixture models to subset data into "populations". For
each component of the mixture model the covariance matrix is used to generate a confidence
ellipse, surrounding data and emulating a gate. EllipseGate can use any of the methods
from the Scikit-Learn mixture module. Keyword arguments for the initiation of a class
from this module can be given in "method_kwargs".
DOES NOT SUPPORT CONTROL GATING.
Attributes
-----------
gate_name: str (required)
Name of the gate
parent: str (required)
Parent population that this gate is applied to
x: str (required)
Name of the x-axis variable forming the one/two dimensional space this gate
is applied to
y: str (optional)
Name of the y-axis variable forming the two dimensional space this gate
is applied to
transform_x: str, optional
Method used to transform the X-axis dimension, supported methods are: logicle, hyperlog, asinh or log
transform_y: str, optional
Method used to transform the Y-axis dimension, supported methods are: logicle, hyperlog, asinh or log
transform_x_kwargs: dict, optional
Additional keyword arguments passed to Transformer object when transforming the x-axis dimension
transform_y_kwargs: dict, optional
Additional keyword arguments passed to Transformer object when transforming the y-axis dimension
sampling: dict (optional)
Options for downsampling data prior to application of gate. Should contain a
key/value pair for the desired method e.g. {"method": "uniform"}. Available methods
are: 'uniform', 'density' or 'faithful'. See cytopy.flow.sampling for details. Additional
keyword arguments should be provided in the sampling dictionary.
dim_reduction: dict (optional)
Experimental feature. Allows for dimension reduction to be performed prior to
applying gate. Gate will be applied to the resulting embeddings. Provide a dictionary
with a key "method" and the value as any supported method in cytopy.flow.dim_reduction.
Additional keyword arguments should be provided in this dictionary.
method: str (required)
Name of the underlying algorithm to use. Should have a value of: "manual", or correspond
to the name of an existing class in the Scikit-Learn mixture module.
If you have a method that follows the Scikit-Learn template but isn't currently present
in cytopy and you would like it to be, please contribute to the repository on GitHub
or contact <EMAIL>
method_kwargs: dict
Keyword arguments for initiation of the above method.
"""
children = mongoengine.EmbeddedDocumentListField(ChildPolygon)
def __init__(self, *args, **values):
method = values.get("method", None)
method_kwargs = values.get("method_kwargs", {})
assert method_kwargs.get("covariance_type", "full") == "full", "EllipseGate only supports covariance_type of 'full'"
valid = ["manual", "GaussianMixture", "BayesianGaussianMixture"]
assert method in valid, f"Elliptical gating method should be one of {valid}"
self.conf = method_kwargs.get("conf", 0.95)
super().__init__(*args, **values)
def _manual(self) -> ShapelyPoly:
"""
Wrapper for manual elliptical gating. Searches method kwargs for centroid, width, height, and angle,
and returns polygon.
Returns
-------
Shapely.geometry.Polygon
Raises
------
AssertionError
If axis transformations do not match
TypeError
If centroid, width, height, or angle are of invalid type
ValueError
If centroid, width, height, or angle are missing from method kwargs
"""
centroid = self.method_kwargs.get("centroid", None)
width = self.method_kwargs.get("width", None)
height = self.method_kwargs.get("height", None)
angle = self.method_kwargs.get("angle", None)
if not all([x is not None for x in [centroid, width, height, angle]]):
raise ValueError("Manual elliptical gate requires the following keyword arguments; "
"width, height, angle and centroid")
if self.transform_x:
assert self.transform_x == self.transform_y, "Manual elliptical gate requires that x and y axis are " \
"transformed to the same scale"
kwargs = self.transform_x_kwargs or {}
centroid = apply_transform(pd.DataFrame({"c": list(centroid)}),
features=["c"],
method=self.transform_x,
**kwargs)["c"].values
df = apply_transform(pd.DataFrame({"w": [width], "h": [height], "a": [angle]}),
features=["w", "h", "a"],
method=self.transform_x,
**kwargs)
width, height, angle = df["w"].values[0], df["h"].values[0], df["a"].values[0]
if not (len(centroid) == 2 and all(isinstance(x, float) for x in centroid)):
raise TypeError("Centroid should be a list of two float values")
if not all(isinstance(x, float) for x in [width, height, angle]):
raise TypeError("Width, height, and angle should be of type float")
return ellipse_to_polygon(centroid=centroid,
width=width,
height=height,
angle=angle)
def _fit(self,
data: pd.DataFrame) -> List[ShapelyPoly]:
"""
Internal method for fitting gate to the given data and returning geometric polygons for
captured populations.
Parameters
----------
data: Pandas.DataFrame
Returns
-------
list
List of Shapely polygon's
"""
params = {k: v for k, v in self.method_kwargs.items() if k != "conf"}
self.model = globals()[self.method](**params)
if not self.method_kwargs.get("probabilistic_ellipse", True):
return super()._fit(data=data)
self._xy_in_dataframe(data=data)
if self.sampling.get("method", None) is not None:
data = self._downsample(data=data)
self.model.fit_predict(data[[self.x, self.y]])
ellipses = [probablistic_ellipse(covar, conf=self.conf)
for covar in self.model.covariances_]
polygons = [ellipse_to_polygon(centroid, *ellipse)
for centroid, ellipse in zip(self.model.means_, ellipses)]
return polygons
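# Hedged usage sketch for EllipseGate (parameters are illustrative; 'conf' controls the size of the
# confidence ellipse drawn around each mixture component):
#
#   gate = EllipseGate(gate_name="gmm_gate",
#                      parent="Live cells",
#                      x="CD4", y="CD8",
#                      method="GaussianMixture",
#                      method_kwargs={"n_components": 2, "conf": 0.9})
#   gate.fit(training_events)
#   populations = gate.fit_predict(new_events)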
class BooleanGate(PolygonGate):
"""
The BooleanGate is a special class of Gate that allows for merging, subtraction, and intersection methods.
A BooleanGate should be defined with one of the following string values as its 'method' and a set of
population names as 'populations' in method_kwargs:
* AND - generates a new population containing only events present in every population of a given
set of populations
* OR - generates a new population that is a merger of all unique events from all populations in a given
set of populations
* NOT - generates a new population containing all events in a target population (taken as the
first member of 'populations') that are not present in any of the remaining populations
BooleanGate inherits from the PolygonGate and generates a Population with Polygon geometry. This
allows the user to view the resulting 'gate' as a polygon structure.
"""
populations = mongoengine.ListField(required=True)
def __init__(self,
method: str,
populations: list,
*args,
**kwargs):
if method not in ["AND", "OR", "NOT"]:
raise ValueError("method must be one of: 'OR', 'AND' or 'NOT'")
super().__init__(*args, method=method, populations=populations, **kwargs)
def _or(self, data: List[pd.DataFrame]) -> pd.DataFrame:
"""
OR operation; generates a new population dataframe that merges all unique events from all populations in a given
set of populations.
Parameters
----------
data: list
List of Pandas DataFrames
Returns
-------
Pandas.DataFrame
New population dataframe
"""
idx = np.unique(np.concatenate([df.index.values for df in data], axis=0), axis=0)
return pd.concat(data)
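# Hedged sketch of the boolean semantics described in the BooleanGate docstring, expressed with
# plain pandas indices (dataframes a and b are illustrative populations):
#
#   or_idx  = a.index.union(b.index)         # OR: every unique event from either population
#   and_idx = a.index.intersection(b.index)  # AND: events present in both populations
#   not_idx = a.index.difference(b.index)    # NOT: events in the target (a) absent from b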
#coding=utf-8
import pandas as pd
import numpy as np
import sys
import os
from sklearn import preprocessing
import datetime
import scipy as sc
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.externals import joblib
#import joblib
class FEbase(object):
"""description of class"""
def __init__(self, **kwargs):
pass
def create(self,*DataSetName):
#print (self.__class__.__name__)
(filepath, tempfilename) = os.path.split(DataSetName[0])
(filename, extension) = os.path.splitext(tempfilename)
#bufferstring='savetest2017.csv'
bufferstringoutput=filepath+'/'+filename+'_'+self.__class__.__name__+extension
if(os.path.exists(bufferstringoutput)==False):
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
df_all=self.core(DataSetName)
df_all.to_csv(bufferstringoutput)
return bufferstringoutput
def core(self,df_all,Data_adj_name=''):
return df_all
def real_FE(self):
return 0
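# Hedged usage sketch: FEbase.create() caches its output next to the first input CSV, naming the
# file <input>_<SubclassName>.csv and skipping the computation if that file already exists.
# Subclasses only need to override core(). The file names below are illustrative.
#
#   fe = FE_a23()
#   out_path = fe.create('data/daily.csv', 'data/adj.csv', 'data/limit.csv',
#                        'data/moneyflow.csv', 'data/long.csv')
#   df_features = pd.read_csv(out_path, index_col=0, header=0)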
class FEg30eom0110network(FEbase):
# This version switches to 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
intflag=True
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
# Flag: ST stock or otherwise abnormal
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
## Exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# Price after adjustment (adj_factor applied)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
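# Pattern used below: rank each fundamental cross-sectionally per trade_date (rank(pct=True)),
# lag it by one day within each ts_code (shift(1)) so only information known before the
# prediction day is used, and optionally bucket the percentile into coarse integer bins (*N//1).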
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
if(intflag):
df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
if(intflag):
df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
if(intflag):
df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max',True)
df_all,_=FEsingle.HighLowRange(df_all,8,True)
df_all,_=FEsingle.HighLowRange(df_all,25,True)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
# Flag: at the daily price limit (treated as halted)
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
### Real price range (to distinguish high vs low actual share prices)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
if(intflag):
df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
if(intflag):
df_all['pct_chg_abs_rank']=df_all['pct_chg_abs_rank']*10//2
df_all=FEsingle.PctChgAbsSumRank(df_all,6,True)
df_all=FEsingle.PctChgSumRank(df_all,3,True)
df_all=FEsingle.PctChgSumRank(df_all,6,True)
df_all=FEsingle.PctChgSumRank(df_all,12,True)
df_all=FEsingle.AmountChgRank(df_all,12,True)
#df_all=FEsingle.AmountChgRank(df_all,30)
# Compute cross-sectional ranks for the three price ratios (open/high/low vs pre_close)
dolist=['open','high','low']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
if(intflag):
df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
# Drop stocks whose price is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
# Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
class FEg30eom0110onlinew6d(FEbase):
# This version switches to 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
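# Money-flow features: rank each series against its own trailing 20-day window per ts_code
# (rolling(20).apply with a rank helper), then lag everything by one day below to avoid lookahead.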
df_all['sm_amount_pos']=df_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['lg_amount_pos']=df_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['net_mf_amount_pos']=df_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['sm_amount_pos']=df_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_all['lg_amount_pos']=df_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_all['net_mf_amount_pos']=df_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_all['sm_amount']=df_all.groupby('ts_code')['sm_amount'].shift(1)
df_all['lg_amount']=df_all.groupby('ts_code')['lg_amount'].shift(1)
df_all['net_mf_amount']=df_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
# Flag: ST stock or otherwise abnormal
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
## Exclude STAR Market (688) stocks
#print(df_all)
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# Price after adjustment (adj_factor applied)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
# Flag: at the daily price limit (treated as halted)
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
### Real price range (to distinguish high vs low actual share prices)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
#df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
# Compute cross-sectional ranks for the three price ratios (open/high/low vs pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
# Drop stocks whose price is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
# Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
class FE_a23(FEbase):
# This version switches to 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
# Flag: ST stock or otherwise abnormal
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
## Exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# Price after adjustment (adj_factor applied)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
# Flag: at the daily price limit (treated as halted)
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
### Real price range (to distinguish high vs low actual share prices)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
# Compute cross-sectional ranks for the three price ratios (open/high/low vs pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
# Drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
# Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
# Planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
# Question mark here: to revisit
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') # convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) # the .str accessor is required for string methods
print(df_all)
## Exclude STAR Market (688) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# Price after adjustment (adj_factor applied)
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
# Flag: at the daily price limit (treated as halted)
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
# compute the three ratio ranks (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
# drop stocks whose market value is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
# columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_a29(FEbase):
# this version switches to a 3-day prediction horizon
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
# whether the stock is ST or otherwise abnormal
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
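# Heuristic (inferred): for a regular 10%-limit stock down_limit/up_limit ~= 0.9/1.1 ~= 0.82,
# which falls inside the 0.58-0.85 band, so st_or_otherwrong == 1 marks "normal" names;
# ST shares (~0.95/1.05 ~= 0.90) fall outside the band and are filtered out later.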
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
## exclude STAR Market (688) listings
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# price after applying the adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
# whether the stock hit its price limit
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
### actual price range (distinguishes high- vs. low-priced shares)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
# 1-day
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
# compute the three ratio ranks (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
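# Assumption: FEsingle.OldFeaturesRank(df, cols, n) appears to append n-day lagged copies of the
# listed feature columns (the name suggests ranks of "old", i.e. shifted, features); the exact
# behavior is defined in the FEsingle module and is not shown in this file.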
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
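# Assumption: FEsingle.PredictDaysTrend(df, 5) appears to construct the prediction target over
# the next 5 trading days (the class-level note mentions a 3-day horizon, but the call here uses
# a 5-day window); the exact label construction lives in FEsingle.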
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
# drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>15]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
# columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
# planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
# question mark here: revisit this
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') # convert the original int dtype to text
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) # the .str accessor prefix is required here
print(df_all)
## exclude STAR Market (688) listings
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# price after applying the adjustment factor
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
# whether the stock hit its price limit
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
# 1-day
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
# compute the three ratio ranks (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
# drop stocks whose market value is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
# columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_a29_Volatility(FEbase):
# this version switches to a 3-day prediction horizon
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
# whether the stock is ST or otherwise abnormal
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
## exclude STAR Market (688) listings
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# price after applying the adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
# whether the stock hit its price limit
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
### actual price range (distinguishes high- vs. low-priced shares)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
# 1-day
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
# compute the three ratio ranks (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
# drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>15]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
# columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
# planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
# question mark here: revisit this
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') # convert the original int dtype to text
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) # the .str accessor prefix is required here
print(df_all)
## exclude STAR Market (688) listings
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# price after applying the adjustment factor
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
# whether the stock hit its price limit
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
# 1-day
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
# compute the three ratio ranks (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
# drop stocks whose market value is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
# columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_a31(FEbase):
# this version switches to a 3-day prediction horizon
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
'''
pyjade
A program to export, curate, and transform data from the MySQL database used by the Jane Addams Digital Edition.
'''
import os
import re
import sys
import json
import string
import datetime
import mysql.connector
from diskcache import Cache
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
from tqdm import tqdm
from safeprint import print
'''
Options
'''
try: # Options file setup credit <NAME>
with open(os.path.join('options.json')) as env_file:
ENV = json.loads(env_file.read())
except:
print('"Options.json" not found; please add "options.json" to the current directory.')
'''
SQL Connection
'''
DB = mysql.connector.connect(
host=ENV['SQL']['HOST'],
user=ENV['SQL']['USER'],
passwd=ENV['SQL']['PASSWORD'],
database=ENV['SQL']['DATABASE']
)
CUR = DB.cursor(buffered=True)
'''
Setup
'''
BEGIN = datetime.datetime.now()
TS = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
ITEM_ELEMENTS = ENV['ELEMENT_DICTIONARY']['DCTERMS_IN_USE']
ITEM_ELEMENTS.update(ENV['ELEMENT_DICTIONARY']['DESC_JADE_ELEMENTS'])
TYPES = ENV['ELEMENT_DICTIONARY']['TYPES']
OUT_DIR = 'outputs/'
if not os.path.exists(OUT_DIR):
os.makedirs(OUT_DIR)
DATASET_OPTIONS = ENV['DATASET_OPTIONS']
CRUMBS = DATASET_OPTIONS['EXPORT_SEPARATE_SQL_CRUMBS']
PROP_SET_LIST = DATASET_OPTIONS['PROPERTIES_TO_INCLUDE_FOR_EACH_TYPE']
INCLUDE_PROPS = DATASET_OPTIONS['PROPERTIES_TO_INCLUDE_FOR_EACH_TYPE']
class Dataset():
def __init__(self):
'''
Start building the dataset objects by pulling IDs and types from omek_items
'''
statement = '''
SELECT omek_items.id as item_id, omek_item_types.`name` as 'jade_type', collection_id as 'jade_collection' FROM omek_items
JOIN omek_item_types on omek_items.item_type_id = omek_item_types.id
WHERE public = 1
ORDER BY item_id;
'''
self.omek_items = pd.read_sql(statement,DB)
self.omek_items = self.omek_items.set_index('item_id',drop=False)
self.objects = self.omek_items.copy()
self.objects['item_id'] = self.objects['item_id'].apply(
lambda x: self.convert_to_jade_id(x))
self.objects.rename(columns={'item_id': 'jade_id'},inplace=True)
self.objects = self.objects.set_index('jade_id',drop=False)
self.objects = self.objects[self.objects['jade_type'].isin(
['Text','Event','Person','Organization','Publication']
)]
# Noise is an alternate dataset to record property values that don't fit the regular usage
self.noise = self.objects.copy()
# Note: without inplace=True or reassignment the two drop() calls below are no-ops; later steps
# (e.g. add_location_types) still read 'jade_type' from self.noise, so they are left as written.
self.noise.drop('jade_type',axis=1)
self.noise.drop('jade_collection',axis=1)
def ingest(self,limit=None):
'''
Get the item element texts
'''
statement = f'''
SELECT et.id AS id, et.record_id AS record_id,
et.element_id AS element_id, et.`text` AS el_text,
items.item_type_id AS item_type
FROM omek_element_texts as et
JOIN omek_items AS items ON et.record_id = items.id
WHERE record_type = "Item"
ORDER BY id;
'''
if limit != None:
statement = statement.split(';')[0] + f' LIMIT {str(limit)};'
self.element_texts = pd.read_sql(statement,DB)
# Load environment variables
ELEMENT_IDS = list(ITEM_ELEMENTS.keys())
# Set data structure:
data = {}
noise = {}
# Iterate through the element_texts
iter = tqdm(self.element_texts.iterrows())
iter.set_description("Ingesting item attributes")
for tup in iter:
row = tup[1]
element_id = str(row.loc['element_id'])
if row.loc['record_id'] in self.omek_items.index.values:
jade_type = self.omek_items.loc[row.loc['record_id'],'jade_type']
jade_id = self.convert_to_jade_id(row.loc['record_id'])
# Filter element texts through environment variables
if element_id in ELEMENT_IDS:
if jade_type in TYPES.values():
element_label = ITEM_ELEMENTS[element_id]
# Filters property values through the sets designated in the options
if element_label in INCLUDE_PROPS[jade_type]:
compile_json(data,jade_id,element_label,row.loc['el_text'])
else:
compile_json(noise,jade_id,element_label,row.loc['el_text'])
# if CRUMBS:
# print('Excluded',element_label,'in type',jade_type)
# Add accumulated data to DataFrame
new_df = pd.DataFrame.from_dict(data,orient='index')
new_noise_df = pd.DataFrame.from_dict(noise,orient='index')
self.objects = pd.concat([self.objects,new_df],axis=1)
self.noise = pd.concat([self.noise,new_noise_df],axis=1)
# Add URLs
base_url = "https://digital.janeaddams.ramapo.edu/items/show/"
self.objects.insert(loc=1,column='jade_url',value=[
base_url+id.split('_')[-1] for id in self.objects.index.values
])
self.add_collections(limit)
self.add_tags(limit)
# Remove records with no title fields found
self.objects = self.objects.dropna(subset=['dcterms_title'])
def convert_to_jade_id(self,item_id):
'''
Prepend the type string to the SQL primary key so that locations and items are unique in the same set of relations
'''
if not isinstance(item_id, str):
if item_id in self.omek_items.index.values:
the_type = self.omek_items.at[item_id,"jade_type"]
if the_type in list(TYPES.values()):
return the_type.lower()+"_"+str(item_id)
else:
return "unspecified_"+str(item_id)
else:
return "unpublished_"+str(item_id)
else:
return item_id
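# Example (hypothetical IDs): an omek_items row with id 42 and jade_type "Text" becomes "text_42";
# ids whose type is not listed in TYPES become "unspecified_42", ids absent from omek_items become
# "unpublished_42", and string ids are passed through unchanged.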
def add_tags(self,limit):
'''
Pull tags from the database
'''
statement = f'''
SELECT * FROM omek_records_tags
JOIN omek_tags on omek_records_tags.tag_id = omek_tags.id;
'''
self.tag_df = pd.read_sql(statement,DB)
self.objects = self.objects[:limit].apply(
lambda x : self.add_tag(x),axis=1)
def add_tag(self, row_ser):
'''
Add the tag to the list for each object
'''
new_subj_field = []
id = row_ser.loc['jade_id']
try:
tag_names = self.tag_df.loc[self.tag_df['record_id'] == int(id.split("_")[-1])]
if not tag_names.empty:
for name in tag_names['name'].to_list():
if name not in new_subj_field:
new_subj_field.append(name)
row_ser['dcterms_subject'] = new_subj_field
return row_ser
except:
return row_ser
def add_collections(self,limit):
'''
Pull collections from the database
'''
statement = '''
SELECT omek_collections.id as collection_id, `text` as collection_name FROM omek_collections
JOIN omek_element_texts AS texts ON omek_collections.id = texts.record_id
WHERE record_type = "Collection"
AND element_id = 50
AND public = 1;
'''
self.collection_df = pd.read_sql(statement,DB)
self.collection_df = self.collection_df.set_index('collection_id')
self.objects = self.objects[:limit].apply(
lambda x : self.add_collection(x),
axis=1
)
def add_collection(self,row_ser):
'''
Add the collection to the list for each object
'''
new_collection_field = []
ids = row_ser.loc['jade_collection']
if not isinstance(ids, list):
ids = [ids]
try:
for coll_id in ids:
matches = self.collection_df.at[coll_id,'collection_name']
if isinstance(matches,np.ndarray):
match_list = matches.tolist()
elif isinstance(matches,str):
match_list = [matches]
else:
print("Unrecognized type of collection",type(matches))
for name in match_list:
if name not in new_collection_field:
new_collection_field.append(name)
row_ser['jade_collection'] = new_collection_field
return row_ser
except:
return row_ser
def add_relations(self,limit=None):
'''
Ingest relation data from SQL
'''
# Read from SQL tables omek_item_relations_relations and omek_item_relations_properties
statement = f'''
SELECT relations.id as id, relations.subject_item_id AS subjId, properties.id as relId, properties.label AS relLabel, relations.object_item_id AS objId
FROM omek_item_relations_relations AS relations
JOIN omek_item_relations_properties AS properties ON relations.property_id = properties.id;
'''
if limit != None:
statement = statement.split(';')[0] + f' LIMIT {str(limit)};'
self.relations = pd.read_sql(statement,DB,index_col='id')
# Style relation labels with camel case
self.relations['relLabel'] = self.relations['relLabel'].apply(
lambda x: camel(x))
# Set up data structure
data = {}
noise = {}
# Add the type prefix to the subject and object IDs
self.relations['subjId'] = self.relations['subjId'].apply(
lambda x: self.convert_to_jade_id(x))
self.relations['objId'] = self.relations['objId'].apply(
lambda x: self.convert_to_jade_id(x))
# Iterate through the relation set
iter = tqdm(self.relations.iterrows())
iter.set_description("Adding relations")
for tup in iter:
row = tup[1]
subjId = row['subjId']
relLabel = row['relLabel']
objId = row['objId']
if (
subjId in self.objects.index.values
) and (
objId in self.objects.index.values
):
# print(subjId,objId)
compile_json(data,subjId,relLabel,objId)
else:
compile_json(noise,subjId,relLabel,objId)
# Add locations to the relations
# This is a thorny call bramble that should probably be untangled in a future iteration of the script
locSet = LocationSet()
locSet.ingest(self,limit=limit)
data, noise = self.add_locations(locSet,data,noise)
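# At this point `data` maps each subject jade_id to {relation label: [object jade_ids, ...]}
# (compile_json accumulates values into lists), and `noise` holds triples whose subject or
# object is not part of the curated object set.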
# Add the compiled relation data into the main DataFrame and the noise bin
new_df = pd.DataFrame(data={"jade_relation":list(data.values())},index=list(data.keys()))
self.objects = pd.concat([self.objects,new_df],sort=False,axis=1)
new_noise_df = pd.DataFrame(data={"jade_relation":list(noise.values())},index=list(noise.keys()))
self.noise = pd.concat([self.noise,new_noise_df],sort=False,axis=1)
def add_locations(self,locSet,data,noise):
'''
Add locations from class object already constructed
'''
# Add the type prefix to the location and item IDs
locSet.locations['loc_id'] = locSet.locations['loc_id'].astype(str)
locSet.locations['loc_id'] = locSet.locations['loc_id'].apply(
lambda x : "location_" + str(x))
locSet.locations.rename(columns={'loc_id': 'jade_id'},inplace=True)
# Merge locations table into objects table
self.objects = pd.concat([self.objects,locSet.locations],axis=0)
self.objects = self.objects.set_index('jade_id',drop=False)
self.objects.index.name = None
dataset_ids = self.objects.index.values
self.location_duplicates = locSet.location_duplicates
# Iterate through the location set
iter = tqdm(locSet.locations.iterrows())
iter.set_description("Adding locations")
for tup in iter:
row = tup[1]
# Iterate through the collection of items for each location
for rel in list(row.loc['loc_relation'].items()):
loc_id = row.loc['jade_id']
desc_list = rel[1]
item_id = rel[0]
for desc in desc_list:
# Build up the data structure for the later DataFrame
if item_id in dataset_ids:
compile_json(data,item_id,desc,loc_id)
else:
compile_json(noise,item_id,desc,loc_id)
# Remove relations from locations table as they are now represented in item rows
self.objects = self.objects.drop("loc_relation",axis=1)
# Add location types
self.objects = self.objects.apply(
lambda ser : self.add_location_types(ser),
axis=1
)
self.noise = self.noise.apply(
lambda ser : self.add_location_types(ser),
axis=1
)
self.objects = self.objects.dropna(subset=['jade_id'])
return data, noise
def add_location_types(self,row):
'''
Look for null type values and add the "Location" type when the jade_id prefix is "location"
'''
try:
if pd.isnull(row.loc['jade_type']):
if type(row.loc['jade_id']) == type(""):
if row.loc['jade_id'].split("_")[0] == "location":
row.loc['jade_type'] = "Location"
else:
print("Type null but not location:",row)
else:
print('Dropped type not included:',row['jade_url'])
return row
except:
print("Unknown problem during adding location type for:",row)
def quantify(self):
'''
Run counting functions on properties and relations to create descriptive statistics about the data
'''
self.quant = {}
# Items
self.quant["item_property_count"] = self.objects.count()
# Item properties
self.quantify_properties()
# Item properties by type
self.quantify_properties_by_type()
# Relations (including location relations)
self.quantify_relations()
# Data nesting
self.quant['nesting'] = {}
self.check_nesting(self.objects)
def quantify_properties(self):
'''
Run counts of properties
'''
# Iterate through properties identified for faceting
props = list(DATASET_OPTIONS['SUBSET_PROPERTIES_AND_QUANTITIES'].items())
iter = tqdm(props)
iter.set_description("Quantifying subsets by facet")
for prop, lim in iter:
if prop in self.objects.columns.values:
# Special cases
if prop in ['dcterms_date']:
# Date
dc_dates_ser = self.objects[prop]
dc_dates_ser = dc_dates_ser.apply(unwrap_list)
dc_dates_ser = dc_dates_ser.dropna()
for id in dc_dates_ser.index.values:
try:
date_val = dc_dates_ser[id]
if not isinstance(date_val, list):
date_list = [date_val]
else:
date_list = date_val
for date_string in date_list:
if not isinstance(date_string, str):
date_string = str(date_string)
yearlike = date_string.split('-')[0]
if (
len(yearlike) == 4
) and (
int(yearlike[0]) == 1
) and (
yearlike[3] in '0123456789'
):
year = yearlike
dc_dates_ser[id] = str(year)
else:
print('Dropped unrecognized date value:',id,dc_dates_ser[id])
dc_dates_ser = dc_dates_ser.drop(id) # reassign so the drop actually takes effect
except:
print('Dropped unrecognized date value:',id,dc_dates_ser[id])
dc_dates_ser = dc_dates_ser.drop(id)
if len(dc_dates_ser) > 1:
self.add_to_quant(
dc_dates_ser,
sort_on_property_name=False)
# All others / standard structure
else:
ser = self.objects[prop]
ser = ser.dropna()
if len(ser) > 1:
self.add_to_quant(ser)
def add_to_quant(
self,
series, # A named Series object whose index is the item or location IDs
# and whose values are non-empty strings or lists of strings
sort_on_property_name = False # Default False sorts by largest count. Optional True sorts alphabetically by property name
):
'''
Index the DataFrame's IDs by value of passed property (column name)
'''
property = series.name
# Create an index of jade_ids by property value for the series (column) passed
for id in series.index.values:
cell = series[id]
if isinstance(cell, np.ndarray):
cell = cell.tolist()
if not isinstance(cell, list):
cell = [cell]
for val in cell:
compile_json(
self.quant,
property,
val.strip() if isinstance(val, str) else val,
id)
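# The resulting structure is self.quant[property][value] -> [jade_id, ...]; the block below then
# derives self.quant[property + "_count"] as a mapping of each value to its number of occurrences.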
# Create a dictionary of property values and instance counts
for val in list(self.quant[property].keys()):
compile_json(self.quant,
property+"_count",
val,
len(self.quant[property][val]))
# Sort the dictionary and add it to the dataset object
if not sort_on_property_name:
self.quant[property+"_count"] = dict(
sort_by_item_counts(self.quant[property+"_count"]))
self.quant[property+"_count"] = pd.Series(
self.quant[property+"_count"],
index=list(self.quant[property+"_count"].keys()),
name=property+"_count")
if sort_on_property_name:
self.quant[property+"_count"] = self.quant[property+"_count"].sort_index()
# Go ahead and unwrap the single-integer lists created by compile_json
self.quant[property+"_count"] = self.quant[property+"_count"].apply(unwrap_list)
def quantify_properties_by_type(self):
'''
Create a table of property counts by object type
'''
# Get a copy of the main DataFrame and send each row through the counter
self.quant['prop_table'] = {}
df = self.objects.copy()
df = df.apply(
lambda ser : self.compile_types_by_prop(ser),
axis=1
)
# Make the resulting dict a DataFrame, sort it, and abbreviate column headers
self.quant['prop_table'] = pd.DataFrame.from_dict(
self.quant['prop_table'],
orient='index')
self.quant['prop_table'] = self.quant['prop_table'][[
'Person',
'Text',
'Event',
'Organization',
'Publication',
'Location',
'All Types'
]]
self.quant['prop_table'] = self.quant['prop_table'].sort_index()
self.quant['prop_table'].rename(columns={'Organization':'Org.', 'Publication':'Pub.', 'Location':'Loc.'},inplace=True)
def compile_types_by_prop(self,ser):
'''
Count the properties in the passed series by object type
'''
jade_type = ser.loc['jade_type']
jade_type = unwrap_list(jade_type)
if jade_type in list(INCLUDE_PROPS.keys()):
for prop in ser.index.values:
if prop in INCLUDE_PROPS[jade_type]:
cell = ser.loc[prop]
if not isinstance(cell, list):
cell = [cell]
if not pd.isnull(cell).any():
if prop not in self.quant['prop_table']:
self.quant['prop_table'][prop] = {}
if "All Properties" not in self.quant['prop_table']:
self.quant['prop_table']['All Properties'] = {}
if jade_type not in self.quant['prop_table'][prop]:
self.quant['prop_table'][prop][jade_type] = 1
else:
self.quant['prop_table'][prop][jade_type] += 1
if "All Types" not in self.quant['prop_table'][prop]:
self.quant['prop_table'][prop]["All Types"] = 1
else:
self.quant['prop_table'][prop]["All Types"] += 1
if jade_type not in self.quant['prop_table']['All Properties']:
self.quant['prop_table']['All Properties'][jade_type] = 1
else:
self.quant['prop_table']['All Properties'][jade_type] += 1
return ser
def quantify_relations(self):
'''
Make a list of unique relation triples and a table of the most common subject–object pairs
'''
# Iterate through relations in the Dataset
uniq_rels = {}
count_df_index = []
count_df_columns = []
iter = tqdm(self.objects.index.values)
iter.set_description("Counting unique relations")
for subjId in iter:
row = self.objects.loc[subjId]
row_rels_dict = row.loc['jade_relation']
if not pd.isnull(row_rels_dict):
for relLabel, objIdList in row_rels_dict.items():
for objId in objIdList:
# Find the types of each subject and object
subjType = subjId.split('_')[0].capitalize()
objType = objId.split('_')[0].capitalize()
# Count the unique combinations of subject, relation, and object
rel = " ".join([subjType,relLabel,objType])
if rel not in uniq_rels:
uniq_rels[rel] = 1
else:
uniq_rels[rel] += 1
# Make the dimensions for a dataframe
if subjType not in count_df_index:
count_df_index.append(subjType)
if objType not in count_df_columns:
count_df_columns.append(objType)
# Sort and output simple list
self.quant["unique_relation_list"] = pd.DataFrame.from_dict(
dict(sort_by_item_counts(uniq_rels)),orient='index')
# Make the dataframe
count_df = pd.DataFrame(data=0,index=count_df_index,columns=count_df_columns)
for rel in list(uniq_rels.keys()):
count = uniq_rels[rel]
try:
subjType, relLabel, objType = rel.split(' ')
count_df.at[subjType,objType] += count
except Exception:
print("Error counting relation:",rel)
self.quant["unique_relation_table"] = count_df
def check_nesting(self,df):
'''
Check whether each column in the passed df has repeating values in any of the rows
'''
for prop in df.columns.values:
column_ser = df[prop]
column_ser = column_ser.dropna()
self.is_nested(column_ser)
def is_nested(self,ser):
'''
Does the passed column contain repeating/nested values?
'''
nested = False
for id, val in ser.iteritems():
if (
type(val) == type([])
) or (
type(val) == type({})
):
if len(val) > 1:
nested = True
self.quant['nesting'][ser.name] = nested
def unwrap_nonrepeating_columns(self):
'''
If a column hasn't been marked as nested, take its values out of the list wrappers
'''
for prop in self.objects.columns.values:
if not self.quant['nesting'][prop]:
self.objects[prop] = self.objects[prop].apply(unwrap_list)
def segment_by_type(self,df):
'''
Break up the passed dataframe by object type and return up to six separate frames that only have the properties belonging to their types
'''
type_segments = {}
for type_name in list(PROP_SET_LIST.keys()):
prospective_props = PROP_SET_LIST[type_name]
props_for_this_type = []
for prop in prospective_props:
if prop in df.columns.values:
props_for_this_type.append(prop)
segment_df = df[props_for_this_type]
segment_df = segment_df.loc[lambda text_df: text_df['jade_type'] == type_name, :]
type_segments[type_name] = segment_df
return type_segments
def export_stats(self):
'''
Export results from quantify to an XLSX file
'''
filepath = f'{OUT_DIR}{TS}-batch/'
if not os.path.exists(filepath):
os.makedirs(filepath)
with open(
filepath+"jade_data_stats.md",
'w',
encoding='utf-8'
) as md_writer:
with pd.ExcelWriter(
filepath+"jade_data_stats.xlsx",
encoding='utf-8'
) as excel_writer:
for k in list(self.quant.keys()):
if k.split("_")[-1] in ["count", "list", "table"]:
md_writer.write(f"\n\n## {k}\n"+self.quant[k].to_markdown())
if isinstance(self.quant[k], pd.Series):
df = self.quant[k].apply(lambda x : colons_and_semicolons(x))
df = df.apply(lambda x: zap_illegal_characters(x))
else:
df = self.quant[k].applymap(lambda x : colons_and_semicolons(x))
df = df.applymap(lambda x: zap_illegal_characters(x))
df.to_excel(excel_writer,sheet_name=k)
def export_single_sheet(self):
'''
Export one big sheet that has all the objects and all the properties and relations (contains a lot of blank cells)
'''
filepath = f'{OUT_DIR}{TS}-batch/'
if not os.path.exists(filepath):
os.makedirs(filepath)
with pd.ExcelWriter(
filepath+"jade_data_single_sheet.xlsx",
encoding='utf-8'
) as excel_writer:
df = self.objects.applymap(lambda x : colons_and_semicolons(x))
df = df.applymap(lambda x: zap_illegal_characters(x))
df.to_excel(excel_writer,index=False,sheet_name='jade_data')
def export_complete_dataset(self):
'''
Export a complete, curated dataset, segmented by object type in the XLSX and CSV formats
'''
self.type_segments = self.segment_by_type(self.objects)
filepath = f'{OUT_DIR}{TS}-batch/complete_data/'
self.run_outputs(self.type_segments,filepath)
# filepath = f'{OUT_DIR}{TS}-batch/complete_data/Locations'
# self.run_outputs(self.locations,filepath)
def export_subsets(self):
'''
Manage creation of subsets by property value, using quant information
'''
props = list(DATASET_OPTIONS['SUBSET_PROPERTIES_AND_QUANTITIES'].items())
iter = tqdm(props)
iter.set_description("Exporting subsets by facet")
for prop, lim in iter:
if prop in self.quant:
self.create_subset(
prop,
self.quant[prop],
self.quant[prop+'_count'],
lim)
def create_subset(self,prop,attr_dict,ranked_attr_counts,lim):
'''
Create a subset for the passed property, using indexes in quant
'''
ranked_attr_list = list(ranked_attr_counts.keys())
for val in ranked_attr_list[:lim]:
filtered_jade_ids = attr_dict[val]
count = str(ranked_attr_counts[val])
# Items
df = self.objects[self.objects.index.isin(filtered_jade_ids)]
segmented_subset_dfs = self.segment_by_type(df)
safe_val_string = safen_string(val)
filepath = f'{OUT_DIR}{TS}-batch/filtered_subsets/{prop}/{safe_val_string} {count}/'
self.run_outputs(segmented_subset_dfs,filepath,filename=f'{prop} {safe_val_string} {count}')
def export_crumbs(self):
'''
Export a spreadsheet with noise from the RDBMS that did not conform to regular property usage. Does not yet contain relation noise. May have a bug with location noise, including too many locations. Also has a bug with respect to jade_id and jade_collection, leaving all of the regular values for those properties in.
'''
filepath = f'{OUT_DIR}{TS}-batch/'
if not os.path.exists(filepath):
os.makedirs(filepath)
with pd.ExcelWriter(
filepath+"sql_crumbs.xlsx",
encoding='utf-8'
) as excel_writer:
df = self.noise.applymap(lambda x : colons_and_semicolons(x))
df = df.applymap(lambda x: zap_illegal_characters(x))
df.to_excel(excel_writer,index=False,sheet_name='item_noise')
df = self.location_duplicates.applymap(lambda x : colons_and_semicolons(x))
df = df.applymap(lambda x: zap_illegal_characters(x))
df.to_excel(excel_writer,index=False,sheet_name='location_noise')
def run_outputs(self,type_segment_dfs,filepath,filename='default'):
'''
Manages the outputs specified for the dfs passed
'''
if not os.path.exists(filepath):
os.makedirs(filepath)
tsdfs = type_segment_dfs
if DATASET_OPTIONS['EXPORT_XLSX']:
self.save_xlsx(tsdfs,filepath,filename)
if DATASET_OPTIONS['EXPORT_CSV']:
self.save_csv(tsdfs,filepath,filename)
if DATASET_OPTIONS['EXPORT_JSON']:
self.save_json(tsdfs,filepath,filename)
text_df = tsdfs['Text']
if (
DATASET_OPTIONS['EXPORT_TXT']
) or (
DATASET_OPTIONS['EXPORT_HTML']
):
if len(text_df) > 0:
self.save_txt_and_html(text_df,filepath,filename)
def save_xlsx(self,tsdfs,filepath,filename):
'''
Run an XLSX export, putting multiple tables in a single workbook
'''
with pd.ExcelWriter(
f"{filepath}{'jade_data' if filename == 'default' else filename}.xlsx",
encoding='utf-8'
) as excel_writer:
for name, df in list(tsdfs.items()):
df = df.applymap(lambda x : colons_and_semicolons(x))
df = df.applymap(lambda x: zap_illegal_characters(x))
if len(df) > 0:
df.to_excel(excel_writer,index=False,sheet_name=name)
def save_csv(self,tsdfs,filepath,filename):
'''
Run a CSV export, using a subdirectory for multiples
'''
filepath+=f"{'jade_data' if filename == 'default' else filename}_csv"
if not os.path.exists(filepath):
os.makedirs(filepath)
for name, df in list(tsdfs.items()):
if len(df) > 0:
df.to_csv(f'{filepath}/jade_{name}.csv',index=False)
def save_json(self,tsdfs,filepath,filename):
'''
Run a JSON export, putting all the objects at the same level (no type segments) or wrapping them, depending on options
'''
json_output = {}
if DATASET_OPTIONS['WRAP_JSON_RECORDS_IN_TYPE_BRANCHES']:
for name, df in list(tsdfs.items()):
json_output[name] = json.loads(df.to_json(orient='index'))
if not DATASET_OPTIONS['WRAP_JSON_RECORDS_IN_TYPE_BRANCHES']:
for name, df in list(tsdfs.items()):
json_output.update(json.loads(df.to_json(orient='index')))
with open(filepath+f"{'jade_data' if filename == 'default' else filename}.json",'w') as fileref:
fileref.write(json.dumps(json_output))
def save_txt_and_html(self,df,filepath,filename):
'''
Run export of texts, using subdirectories by format
'''
if DATASET_OPTIONS['EXPORT_TXT']:
txt_filepath = filepath+f"{'jade_texts' if filename == 'default' else filename}_txt/"
if not os.path.exists(txt_filepath):
os.makedirs(txt_filepath)
if DATASET_OPTIONS['EXPORT_HTML']:
html_filepath = filepath+f"{'jade_texts' if filename == 'default' else filename}_html/"
if not os.path.exists(html_filepath):
os.makedirs(html_filepath)
# Iterate through the text column
text_ser = df["jade_text"]
text_ser = text_ser.dropna()
text_ser = text_ser.apply(unwrap_list)
for jade_id, val in text_ser.iteritems():
# Manage whether values are wrapped in lists
val_list = val if isinstance(val, list) else [val]
for val in val_list:
if not pd.isnull(val):
# Check whether value is html
is_html = False
if "<" in val:
if ">" in val:
is_html = True
# Run HTML and TXT exports
if is_html:
soup = BeautifulSoup(val,'html.parser')
if DATASET_OPTIONS['EXPORT_HTML']:
with open(html_filepath+jade_id+'.html','w',encoding='utf-8') as html_ref:
html_ref.write(soup.prettify())
if DATASET_OPTIONS['EXPORT_TXT']:
with open(txt_filepath+jade_id+'.txt','w',encoding='utf-8') as txt_ref:
txt_ref.write(text_with_newlines(soup))
else:
if DATASET_OPTIONS['EXPORT_TXT']:
with open(txt_filepath+jade_id+'.txt','w',encoding='utf-8') as txt_ref:
txt_ref.write(val)
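# The export methods above read their switches from a DATASET_OPTIONS mapping defined
# elsewhere in the project. A minimal sketch, assuming only the keys referenced in this
# class (values are illustrative):
# DATASET_OPTIONS = {
#     'EXPORT_XLSX': True,
#     'EXPORT_CSV': True,
#     'EXPORT_JSON': True,
#     'EXPORT_TXT': True,
#     'EXPORT_HTML': True,
#     'WRAP_JSON_RECORDS_IN_TYPE_BRANCHES': False,
#     'SUBSET_PROPERTIES_AND_QUANTITIES': {'jade_collection': 10},
# }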
class LocationSet():
'''
A class to hold locations in the few seconds before they get subsumed into the dataset object
'''
# A dummy init function
def __init__(self):
pass
# Ingest location data from SQL
def ingest(self,dataset,limit=None):
# Read from SQL table omek_locations
statement = f'''
SELECT * FROM omek_locations;
'''
if limit != None:
statement = statement.split(';')[0] + f' LIMIT {str(limit)};'
self.omek_locations = pd.read_sql(statement,DB)
# Set up data structure for later DataFrame
data = {}
noise = {}
ids = []
retrieved = []
# Convert item IDs
self.omek_locations['item_id'] = self.omek_locations['item_id'].apply(
lambda x: dataset.convert_to_jade_id(x))
# Read data retrieved from SQL
iter = tqdm(self.omek_locations.iterrows())
iter.set_description("Ingesting locations")
for tup in iter:
row = tup[1]
loc_id = row.loc['id']
if (
loc_id not in retrieved
) and (
row.loc['item_id'] in dataset.objects.index.values
):
cluster_address_versions = {}
# Check for duplicates
addr_fp = fingerprint(row.loc["address"])
cluster_statement = f'''
SELECT * FROM omek_locations
WHERE latitude = {row.loc['latitude']}
AND longitude = {row.loc['longitude']};
'''
cluster = pd.read_sql(cluster_statement,DB)
# Combine duplicates
for cluster_tup in cluster.iterrows():
cluster_row = cluster_tup[1]
if fingerprint(cluster_row.loc['address']) == addr_fp:
# Keep track of addresses to choose most common style below
if cluster_row.loc["address"] not in cluster_address_versions:
cluster_address_versions[cluster_row.loc["address"]] = 1
else:
cluster_address_versions[cluster_row.loc["address"]] += 1
# Group item-location relations, styling descriptions with camel case and defining blanks
cluster_loc_id = cluster_row.loc['id']
cluster_item_id = cluster_row.loc['item_id']
if cluster_row.loc['description'] in ('', None):
cluster_desc = 'noDescription'
else:
cluster_desc = camel(cluster_row.loc['description'])
# Put approved forms in the curated data
compile_json(
data,
loc_id,
"loc_relation",
dataset.convert_to_jade_id(cluster_item_id),
cluster_desc)
# Keep track of which rows have been combined
compile_json(
noise,
loc_id,
"set_of_dup_loc_ids_with_assoc_item_ids",
cluster_loc_id,
cluster_item_id)
retrieved.append(cluster_loc_id)
# Update address for row to most commonly used capitalization and punctuation
chosen_style = sort_by_item_counts(cluster_address_versions)[0][0]
data[loc_id]['jade_address'] = chosen_style
noise[loc_id]['jade_address'] = chosen_style
# Add in other properties
data[loc_id]['loc_id'] = loc_id
# data[loc_id]['jade_zoom_level'] = row.loc['zoom_level']
# data[loc_id]['jade_map_type'] = row.loc['map_type']
data[loc_id]['jade_latitude'] = row.loc['latitude']
data[loc_id]['jade_longitude'] = row.loc['longitude']
# Create DataFrame
self.locations = pd.DataFrame.from_dict(data,orient='index')
self.location_duplicates =
|
pd.DataFrame.from_dict(noise,orient='index')
|
pandas.DataFrame.from_dict
|
import os
import sys
sys.path = [os.path.join(os.path.abspath(os.getcwd()), 'auto_ml')] + sys.path
os.environ['is_test_suite'] = 'True'
import numpy as np
import pandas as pd
from sklearn.datasets import load_boston
from sklearn.metrics import brier_score_loss, mean_squared_error
from sklearn.model_selection import train_test_split
from auto_ml import Predictor
def get_boston_regression_dataset():
boston = load_boston()
df_boston =
|
pd.DataFrame(boston.data)
|
pandas.DataFrame
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
# In[2]:
def add_taxi_Ndays_rolling(df, days, shift):
"""
This function calculates and adds additional columns for rolling average taxi_in/taxi_out time per airport/carrier per day.
Args:
df - df to process as DataFrame
days - Days to calculate rolling number for taxi_in, taxi_out time
shift - Days to offset calculation into the past.
Output:
processed DataFrame is returned back
"""
cols={'origin':['origin_airport_id', 'taxi_out'],
'destination':['dest_airport_id','taxi_in'],
'carrier_taxi_out':['mkt_carrier_fl_num', 'taxi_out'],
'carrier_taxi_in':['mkt_carrier_fl_num', 'taxi_in']}
df = df.sort_values(['fl_date']) #Sorting by fl_date just in case it was not sorted before.
#It is important for rolling average
#Iterating over the keys in cols, which hold the column names we are interested in.
for key in cols.keys():
#First we calculate average taxi time per airport per day
df_taxi=df[[cols[key][0], 'fl_date', cols[key][1]]].groupby([cols[key][0], 'fl_date']).mean().reset_index()
#Based on our average taxi time we can calculate rolling average
df_taxi_roll=df_taxi.groupby([cols[key][0]]).rolling(days, on='fl_date', min_periods=2
).agg({cols[key][1]:'mean'}).shift(shift).reset_index()
#Renaming column to avoid collision during merging
df_taxi_roll.rename(columns={cols[key][1]: str(days) +'d ' + cols[key][1] + ' by ' + cols[key][0]}, inplace=True)
#Merging with initial DataFrame
df=df.merge(df_taxi_roll, on=[cols[key][0], 'fl_date' ] , how='left')
return df
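# Minimal usage sketch: a synthetic frame with the columns the function expects
# (names taken from the function body above; values are illustrative only).
flights_example = pd.DataFrame({
    'fl_date': pd.to_datetime(['2019-01-01', '2019-01-02', '2019-01-03', '2019-01-04'] * 2),
    'origin_airport_id': [100] * 4 + [200] * 4,
    'dest_airport_id': [200] * 4 + [100] * 4,
    'mkt_carrier_fl_num': [1] * 4 + [2] * 4,
    'taxi_out': [10, 12, 14, 16, 20, 22, 24, 26],
    'taxi_in': [5, 6, 7, 8, 9, 10, 11, 12],
})
# With real data the call would look like the line below (left commented, as a 3-day
# window over four synthetic days is not meaningful):
# flights_example = add_taxi_Ndays_rolling(flights_example, days=3, shift=1)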
# In[3]:
def add_traffic_rolling(df, days, shift):
"""
This function calculates and adds additional columns for the rolling average number of flights per airport per day.
Args:
df - DataFrame to process.
days - Days as integer to calculate rolling average
shift - Days to offset calculation into the past.
Output:
dataframe - initial dataframe with additional column
"""
cols = ['origin_airport_id', 'dest_airport_id']
df = df.sort_values(['fl_date']) #Sorting by fl_date just in case it was not sorted before.
#It is important for rolling average
for item in cols:
#Now calculating traffic per airport per day. Also will calculate the N-day rolling average.
count_flight=df[[item, 'fl_date', 'mkt_carrier']].groupby([item, 'fl_date'
]).count().reset_index()
count_flights_roll= count_flight.groupby([item]).rolling(days, on='fl_date', min_periods=2
).agg({'mkt_carrier':'mean'}).shift(shift).reset_index()
#Renaming to avoid collision during merging
count_flights_roll.rename(columns={'mkt_carrier': str(days) + 'd roll flts ' + item}, inplace=True)
#Merging
df=df.merge(count_flights_roll, on=[item, 'fl_date' ] , how='left')
return df
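# Usage sketch for the same synthetic frame as above; note that add_traffic_rolling also
# expects a 'mkt_carrier' column (used only for counting flights):
# flights_example['mkt_carrier'] = 'XX'
# flights_example = add_traffic_rolling(flights_example, days=3, shift=1)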
# In[4]:
def return_outlier_limits(df,column):
"""
Function calculates the Interquartile Range (IQR) in order to return the upper and lower limits beyond which a value is considered an outlier.
A limit is defined as 1.5 times the IQR below Quartile 1 (Q1) or above Quartile 3 (Q3).
Args:
df - Pandas DataFrame
column - Column of DataFrame with the aforementioned outliers, input as a string.
Output:
List with lower and upper outlier limits.
"""
# The .describe() method for Pandas DataFrames outputs a Pandas Series; index number 4 corresponds to
# Quartile 1, index number 6 to Quartile 3. The Inter-Quartile Range (IQR) is then calculated as Q3 - Q1.
Q1 = df[column].describe()[4]
Q3 = df[column].describe()[6]
IQR = float(Q3 - Q1)
# An outlier threshold is calculated as 1.5 times the IQR.
outlier_threshold = 1.5 * IQR
lower_limit = Q1 - outlier_threshold
upper_limit = Q3 + outlier_threshold
limits = [lower_limit, upper_limit]
return limits
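# Minimal sketch: with the five values below the quartiles fall on data points
# (Q1 = 10, Q3 = 30, IQR = 20), so the limits are Q1 - 30 = -20 and Q3 + 30 = 60.
example_delays = pd.DataFrame({'arr_delay': [0, 10, 20, 30, 40]})
print(return_outlier_limits(example_delays, 'arr_delay'))  # [-20.0, 60.0]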
# In[5]:
def remove_outliers(df, column):
"""
Function removes rows with outliers from a dataframe, as defined by the return_outlier_limits function.
Args:
df - Pandas DataFrame
column - Column of DataFrame with the aforementioned outliers, input as a string.
Output:
Processed DataFrame is returned (subset of original).
"""
# Call the return_outlier_limits function to return list `limits` with two values: limits[0] corresponds to the lower limit,
# limits[1] to the upper limit.
limits = return_outlier_limits(df,column)
# Use boolean operators to define subset of column values that exclude outliers
df_no_outliers = df[(df[column] > limits[0]) & (df[column] < limits[1])]
return df_no_outliers
# In[6]:
def replace_nan_with_mean(df,column,include_outliers=False):
"""
This function replaces all NaN values for a given column in a dataframe with the mean of the column values.
Args:
df - Pandas DataFrame
column - Column of DataFrame, input as a string.
include_outliers - If True, calculates mean of all values,
if False, does not consider outliers when calculating mean. Defaults to False.
Output:
Processed DataFrame is returned.
"""
if include_outliers == False:
df_no_outliers = remove_outliers(df,column)
mean = df_no_outliers[column].mean()
else:
mean = df[column].mean()
# Replace NaN values with previously calculated mean, using .fillna() Pandas method.
df[column].fillna(mean,inplace=True)
# Return processed DataFrame
return df
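# Minimal sketch: with include_outliers=False the 500 is excluded, so the missing value
# is filled with (0 + 10 + 20 + 30) / 4 = 15.0.
example_nan = pd.DataFrame({'arr_delay': [0.0, 10.0, 20.0, 30.0, float('nan'), 500.0]})
example_nan = replace_nan_with_mean(example_nan, 'arr_delay')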
# In[7]:
def make_dates_ordinal(df, dates_column):
"""
This function converts the dates of a DataFrame column to integers, in order to easily fit the data to a regression model.
More specifically, the function toordinal() returns the proleptic Gregorian ordinal of a date.
In simple terms, datetime.toordinal() returns the day count from the date 01/01/01.
Though the Gregorian calendar was not followed before October 1582, several computer
systems apply it to dates that come even before October 1582.
Python's date class also does the same.
Args:
df - Pandas DataFrame
dates_column - column of DataFrame, input as a string. All values in column must be
of type datetime64[ns].
Output:
Processed DataFrame is returned.
"""
# The function imports the required datetime module.
import datetime as dt
# Applies datetime.toordinal() function to desired column of DataFrame.
df[dates_column] = df[dates_column].map(dt.datetime.toordinal)
# Returns processed DataFrame
return df
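# Minimal sketch: ordinals count days from 0001-01-01, so the two dates below become
# 737060 and 737061.
example_dates = pd.DataFrame({'fl_date': pd.to_datetime(['2019-01-01', '2019-01-02'])})
example_dates = make_dates_ordinal(example_dates, 'fl_date')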
# In[8]:
def distill_features(df, desired_features = ['fl_date','mkt_carrier_fl_num','origin_airport_id','dest_airport_id','crs_dep_time',
'crs_arr_time','crs_elapsed_time','distance','arr_delay']):
df = df[desired_features]
return df
# In[9]:
def make_month_dummies(df, date_column):
"""
This function adds dummy variable columns for months.
Args:
df - Dataframe to be processed.
date_column - Column with dates, input as a string.
Output:
Dataframe with dummy variables.
"""
df['month']=df[date_column].dt.month
df =
|
pd.get_dummies(df, columns=['month'])
|
pandas.get_dummies
|
import pandas as pd
import numpy as np
import statsmodels.api as sm
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from scipy import stats
import plotly.graph_objs as go
import cufflinks
cufflinks.go_offline()
def make_hist(df, x, category=None):
"""
Make an interactive histogram, optionally segmented by `category`
:param df: dataframe of data
:param x: string of column to use for plotting
:param category: string representing column to segment by
:return figure: a plotly histogram to show with iplot or plot
"""
if category is not None:
data = []
for name, group in df.groupby(category):
data.append(go.Histogram(dict(x=group[x], name=name)))
else:
data = [go.Histogram(dict(x=df[x]))]
layout = go.Layout(
yaxis=dict(title="Count"),
xaxis=dict(title=x.replace("_", " ").title()),
title=f"{x.replace('_', ' ').title()} Distribution by {category.replace('_', ' ').title()}"
if category
else f"{x.replace('_', ' ').title()} Distribution",
)
figure = go.Figure(data=data, layout=layout)
return figure
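# Minimal usage sketch, wrapped in a function so nothing runs on import; the column
# names 'claps' and 'publication' are illustrative assumptions, not requirements.
def _example_make_hist():
    df = pd.DataFrame({'claps': [10, 50, 120, 30],
                       'publication': ['A', 'A', 'B', 'B']})
    # Returns a plotly Figure; display it with iplot(fig) or fig.show()
    return make_hist(df, x='claps', category='publication')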
def make_cum_plot(df, y, category=None, ranges=False):
"""
Make an interactive cumulative plot, optionally segmented by `category`
:param df: dataframe of data, must have a `published_date` column
:param y: string of column to use for plotting or list of two strings for double y axis
:param category: string representing column to segment by
:param ranges: boolean for whether to add range slider and range selector
:return figure: a plotly plot to show with iplot or plot
"""
if category is not None:
data = []
for i, (name, group) in enumerate(df.groupby(category)):
group.sort_values("published_date", inplace=True)
data.append(
go.Scatter(
x=group["published_date"],
y=group[y].cumsum(),
mode="lines+markers",
text=group["title"],
name=name,
marker=dict(size=10, opacity=0.8, symbol=i + 2),
)
)
else:
df.sort_values("published_date", inplace=True)
if len(y) == 2:
data = [
go.Scatter(
x=df["published_date"],
y=df[y[0]].cumsum(),
name=y[0].title(),
mode="lines+markers",
text=df["title"],
marker=dict(
size=10,
color="blue",
opacity=0.6,
line=dict(color="black"),
),
),
go.Scatter(
x=df["published_date"],
y=df[y[1]].cumsum(),
yaxis="y2",
name=y[1].title(),
mode="lines+markers",
text=df["title"],
marker=dict(
size=10,
color="red",
opacity=0.6,
line=dict(color="black"),
),
),
]
else:
data = [
go.Scatter(
x=df["published_date"],
y=df[y].cumsum(),
mode="lines+markers",
text=df["title"],
marker=dict(
size=12,
color="blue",
opacity=0.6,
line=dict(color="black"),
),
)
]
if len(y) == 2:
layout = go.Layout(
xaxis=dict(title="Published Date", type="date"),
yaxis=dict(title=y[0].replace("_", " ").title(), color="blue"),
yaxis2=dict(
title=y[1].replace("_", " ").title(),
color="red",
overlaying="y",
side="right",
),
font=dict(size=14),
title=f"Cumulative {y[0].title()} and {y[1].title()}",
)
else:
layout = go.Layout(
xaxis=dict(title="Published Date", type="date"),
yaxis=dict(title=y.replace("_", " ").title()),
font=dict(size=14),
title=f"Cumulative {y.replace('_', ' ').title()} by {category.replace('_', ' ').title()}"
if category is not None
else f"Cumulative {y.replace('_', ' ').title()}",
)
# Add a rangeselector and rangeslider for a date xaxis
if ranges:
rangeselector = dict(
buttons=list(
[
dict(count=1, label="1m", step="month", stepmode="backward"),
dict(count=6, label="6m", step="month", stepmode="backward"),
dict(count=1, label="1y", step="year", stepmode="backward"),
dict(step="all"),
]
)
)
rangeslider = dict(visible=True)
layout["xaxis"]["rangeselector"] = rangeselector
layout["xaxis"]["rangeslider"] = rangeslider
layout["width"] = 1000
layout["height"] = 600
figure = go.Figure(data=data, layout=layout)
return figure
def make_scatter_plot(
df,
x,
y,
fits=None,
xlog=False,
ylog=False,
category=None,
scale=None,
sizeref=2,
annotations=None,
ranges=False,
title_override=None,
):
"""
Make an interactive scatterplot, optionally segmented by `category`
:param df: dataframe of data
:param x: string of column to use for xaxis
:param y: string of column to use for yaxis
:param fits: list of strings of fits
:param xlog: boolean for making a log xaxis
:param ylog: boolean for making a log yaxis
:param category: string representing categorical column to segment by, this must be a categorical
:param scale: string representing numerical column to size and color markers by, this must be numerical data
:param sizeref: float or integer for setting the size of markers according to the scale, only used if scale is set
:param annotations: text to display on the plot (dictionary)
:param ranges: boolean for whether to add a range slider and selector
:param title_override: String to override the title
:return figure: a plotly plot to show with iplot or plot
"""
if category is not None:
title = f"{y.replace('_', ' ').title()} vs {x.replace('_', ' ').title()} by {category.replace('_', ' ').title()}"
data = []
for i, (name, group) in enumerate(df.groupby(category)):
data.append(
go.Scatter(
x=group[x],
y=group[y],
mode="markers",
text=group["title"],
name=name,
marker=dict(size=8, symbol=i + 2),
)
)
else:
if scale is not None:
title = f"{y.replace('_', ' ').title()} vs {x.replace('_', ' ').title()} Scaled by {scale.title()}"
data = [
go.Scatter(
x=df[x],
y=df[y],
mode="markers",
text=df["title"],
marker=dict(
size=df[scale],
line=dict(color="black", width=0.5),
sizemode="area",
sizeref=sizeref,
opacity=0.8,
colorscale="Viridis",
color=df[scale],
showscale=True,
sizemin=2,
),
)
]
else:
df.sort_values(x, inplace=True)
title = f"{y.replace('_', ' ').title()} vs {x.replace('_', ' ').title()}"
data = [
go.Scatter(
x=df[x],
y=df[y],
mode="markers",
text=df["title"],
marker=dict(
size=12, color="blue", opacity=0.8, line=dict(color="black")
),
name="observations",
)
]
if fits is not None:
for fit in fits:
data.append(
go.Scatter(
x=df[x],
y=df[fit],
text=df["title"],
mode="lines+markers",
marker=dict(size=8, opacity=0.6),
line=dict(dash="dash"),
name=fit,
)
)
title += " with Fit"
layout = go.Layout(
annotations=annotations,
xaxis=dict(
title=x.replace("_", " ").title() + (" (log scale)" if xlog else ""),
type="log" if xlog else None,
),
yaxis=dict(
title=y.replace("_", " ").title() + (" (log scale)" if ylog else ""),
type="log" if ylog else None,
),
font=dict(size=14),
title=title if title_override is None else title_override,
)
# Add a rangeselector and rangeslider for a date xaxis
if ranges:
rangeselector = dict(
buttons=list(
[
dict(count=1, label="1m", step="month", stepmode="backward"),
dict(count=6, label="6m", step="month", stepmode="backward"),
dict(count=1, label="1y", step="year", stepmode="backward"),
dict(step="all"),
]
)
)
rangeslider = dict(visible=True)
layout["xaxis"]["rangeselector"] = rangeselector
layout["xaxis"]["rangeslider"] = rangeslider
layout["width"] = 1000
layout["height"] = 600
figure = go.Figure(data=data, layout=layout)
return figure
def make_linear_regression(df, x, y, intercept_0):
"""
Create a linear regression, either with the intercept set to 0 or
the intercept allowed to be fitted
:param df: dataframe with data
:param x: string or list of strings for the name of the column with x data
:param y: string for the name of the column with y data
:param intercept_0: boolean indicating whether to set the intercept to 0
"""
if isinstance(x, list):
lin_model = LinearRegression()
lin_model.fit(df[x], df[y])
slopes, intercept = lin_model.coef_, lin_model.intercept_
df["predicted"] = lin_model.predict(df[x])
r2 = lin_model.score(df[x], df[y])
rmse = np.sqrt(mean_squared_error(y_true=df[y], y_pred=df["predicted"]))
equation = f'{y.replace("_", " ")} ='
names = ["r2", "rmse", "intercept"]
values = [r2, rmse, intercept]
for i, (p, s) in enumerate(zip(x, slopes)):
if (i + 1) % 3 == 0:
equation += f'<br>{s:.2f} * {p.replace("_", " ")} +'
else:
equation += f' {s:.2f} * {p.replace("_", " ")} +'
names.append(p)
values.append(s)
equation += f" {intercept:.2f}"
annotations = [
dict(
x=0.4 * df.index.max(),
y=0.9 * df[y].max(),
showarrow=False,
text=equation,
font=dict(size=10),
)
]
df["index"] = list(df.index)
figure = make_scatter_plot(
df, x="index", y=y, fits=["predicted"], annotations=annotations
)
summary = pd.DataFrame({"name": names, "value": values})
else:
if intercept_0:
lin_reg = sm.OLS(df[y], df[x]).fit()
df["fit_values"] = lin_reg.fittedvalues
summary = lin_reg.summary()
slope = float(lin_reg.params)
equation = f"${y.replace('_', ' ')} = {slope:.2f} * {x.replace('_', ' ')}$"
else:
lin_reg = stats.linregress(df[x], df[y])
intercept, slope = lin_reg.intercept, lin_reg.slope
params = ["pvalue", "rvalue", "slope", "intercept"]
values = []
for p in params:
values.append(getattr(lin_reg, p))
summary = pd.DataFrame({"param": params, "value": values})
df["fit_values"] = df[x] * slope + intercept
equation = f"${y.replace('_', ' ')} = {slope:.2f} * {x.replace('_', ' ')} + {intercept:.2f}$"
annotations = [
dict(
x=0.75 * df[x].max(),
y=0.9 * df[y].max(),
showarrow=False,
text=equation,
font=dict(size=32),
)
]
figure = make_scatter_plot(
df, x=x, y=y, fits=["fit_values"], annotations=annotations
)
return figure, summary
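# Minimal usage sketch, wrapped in a function so nothing runs on import. The frame needs a
# 'title' column because make_scatter_plot uses it for hover text; the other column names
# are illustrative assumptions.
def _example_make_linear_regression():
    df = pd.DataFrame({'read_time': [2, 4, 6, 8, 10],
                       'claps': [15, 35, 50, 80, 95],
                       'title': list('abcde')})
    figure, summary = make_linear_regression(df, x='read_time', y='claps', intercept_0=False)
    return figure, summary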
def make_poly_fits(df, x, y, degree=6):
"""
Generate fits and make interactive plot with fits
:param df: dataframe with data
:param x: string representing x data column
:param y: string representing y data column
:param degree: integer degree of fits to go up to
:return fit_stats: dataframe with information about fits
:return figure: interactive plotly figure that can be shown with iplot or plot
"""
# Don't want to alter original data frame
df = df.copy()
fit_list = []
rmse = []
fit_params = []
# Make each fit
for i in range(1, degree + 1):
fit_name = f"fit degree = {i}"
fit_list.append(fit_name)
z, res, *rest = np.polyfit(df[x], df[y], i, full=True)
fit_params.append(z)
df.loc[:, fit_name] = np.poly1d(z)(df[x])
rmse.append(np.sqrt(res[0]))
fit_stats =
|
pd.DataFrame({"fit": fit_list, "rmse": rmse, "params": fit_params})
|
pandas.DataFrame
|
"""
Tests for DatetimeIndex timezone-related methods
"""
from datetime import date, datetime, time, timedelta, tzinfo
import dateutil
from dateutil.tz import gettz, tzlocal
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import conversion, timezones
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
Timestamp,
bdate_range,
date_range,
isna,
to_datetime,
)
import pandas._testing as tm
class FixedOffset(tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name):
self.__offset = timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return timedelta(0)
fixed_off = FixedOffset(-420, "-07:00")
fixed_off_no_name = FixedOffset(-330, None)
class TestDatetimeIndexTimezones:
# -------------------------------------------------------------
# DatetimeIndex.tz_convert
def test_tz_convert_nat(self):
# GH#5546
dates = [pd.NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize("US/Pacific")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Eastern"))
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="UTC"))
dates = ["2010-12-01 00:00", "2010-12-02 00:00", pd.NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize("US/Pacific")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
expected = ["2010-12-01 03:00", "2010-12-02 03:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
idx = idx + pd.offsets.Hour(5)
expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
idx = idx.tz_convert("US/Pacific")
expected = ["2010-12-01 05:00", "2010-12-02 05:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific"))
idx = idx + np.timedelta64(3, "h")
expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
expected = ["2010-12-01 11:00", "2010-12-02 11:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
@pytest.mark.parametrize("prefix", ["", "dateutil/"])
def test_dti_tz_convert_compat_timestamp(self, prefix):
strdates = ["1/1/2012", "3/1/2012", "4/1/2012"]
idx = DatetimeIndex(strdates, tz=prefix + "US/Eastern")
conv = idx[0].tz_convert(prefix + "US/Pacific")
expected = idx.tz_convert(prefix + "US/Pacific")[0]
assert conv == expected
def test_dti_tz_convert_hour_overflow_dst(self):
# Regression test for:
# https://github.com/pandas-dev/pandas/issues/13306
# sorted case US/Eastern -> UTC
ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2009-05-12 09:50:32"]
tt = DatetimeIndex(ts).tz_localize("US/Eastern")
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2009-05-12 13:50:32"]
tt = DatetimeIndex(ts).tz_localize("UTC")
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2008-05-12 09:50:32"]
tt = DatetimeIndex(ts).tz_localize("US/Eastern")
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2008-05-12 13:50:32"]
tt = DatetimeIndex(ts).tz_localize("UTC")
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
@pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_tz_convert_hour_overflow_dst_timestamps(self, tz):
# Regression test for GH#13306
# sorted case US/Eastern -> UTC
ts = [
Timestamp("2008-05-12 09:50:00", tz=tz),
Timestamp("2008-12-12 09:50:35", tz=tz),
Timestamp("2009-05-12 09:50:32", tz=tz),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = [
Timestamp("2008-05-12 13:50:00", tz="UTC"),
Timestamp("2008-12-12 14:50:35", tz="UTC"),
Timestamp("2009-05-12 13:50:32", tz="UTC"),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = [
Timestamp("2008-05-12 09:50:00", tz=tz),
Timestamp("2008-12-12 09:50:35", tz=tz),
Timestamp("2008-05-12 09:50:32", tz=tz),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = [
Timestamp("2008-05-12 13:50:00", tz="UTC"),
Timestamp("2008-12-12 14:50:35", tz="UTC"),
Timestamp("2008-05-12 13:50:32", tz="UTC"),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
@pytest.mark.parametrize("freq, n", [("H", 1), ("T", 60), ("S", 3600)])
def test_dti_tz_convert_trans_pos_plus_1__bug(self, freq, n):
# Regression test for tslib.tz_convert(vals, tz1, tz2).
# See https://github.com/pandas-dev/pandas/issues/4496 for details.
idx = date_range(datetime(2011, 3, 26, 23), datetime(2011, 3, 27, 1), freq=freq)
idx = idx.tz_localize("UTC")
idx = idx.tz_convert("Europe/Moscow")
expected = np.repeat(np.array([3, 4, 5]), np.array([n, n, 1]))
tm.assert_index_equal(idx.hour, Index(expected))
def test_dti_tz_convert_dst(self):
for freq, n in [("H", 1), ("T", 60), ("S", 3600)]:
# Start DST
idx = date_range(
"2014-03-08 23:00", "2014-03-09 09:00", freq=freq, tz="UTC"
)
idx = idx.tz_convert("US/Eastern")
expected = np.repeat(
np.array([18, 19, 20, 21, 22, 23, 0, 1, 3, 4, 5]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
idx = date_range(
"2014-03-08 18:00", "2014-03-09 05:00", freq=freq, tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
expected = np.repeat(
np.array([23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
# End DST
idx = date_range(
"2014-11-01 23:00", "2014-11-02 09:00", freq=freq, tz="UTC"
)
idx = idx.tz_convert("US/Eastern")
expected = np.repeat(
np.array([19, 20, 21, 22, 23, 0, 1, 1, 2, 3, 4]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
idx = date_range(
"2014-11-01 18:00", "2014-11-02 05:00", freq=freq, tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
expected = np.repeat(
np.array([22, 23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
np.array([n, n, n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
# daily
# Start DST
idx = date_range("2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="UTC")
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx.hour, Index([19, 19]))
idx = date_range(
"2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx.hour, Index([5, 5]))
# End DST
idx = date_range("2014-11-01 00:00", "2014-11-02 00:00", freq="D", tz="UTC")
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx.hour, Index([20, 20]))
idx = date_range(
"2014-11-01 00:00", "2014-11-02 000:00", freq="D", tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx.hour, Index([4, 4]))
def test_tz_convert_roundtrip(self, tz_aware_fixture):
tz = tz_aware_fixture
idx1 = date_range(start="2014-01-01", end="2014-12-31", freq="M", tz="UTC")
exp1 = date_range(start="2014-01-01", end="2014-12-31", freq="M")
idx2 = date_range(start="2014-01-01", end="2014-12-31", freq="D", tz="UTC")
exp2 = date_range(start="2014-01-01", end="2014-12-31", freq="D")
idx3 = date_range(start="2014-01-01", end="2014-03-01", freq="H", tz="UTC")
exp3 = date_range(start="2014-01-01", end="2014-03-01", freq="H")
idx4 = date_range(start="2014-08-01", end="2014-10-31", freq="T", tz="UTC")
exp4 = date_range(start="2014-08-01", end="2014-10-31", freq="T")
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3), (idx4, exp4)]:
converted = idx.tz_convert(tz)
reset = converted.tz_convert(None)
tm.assert_index_equal(reset, expected)
assert reset.tzinfo is None
expected = converted.tz_convert("UTC").tz_localize(None)
expected = expected._with_freq("infer")
tm.assert_index_equal(reset, expected)
def test_dti_tz_convert_tzlocal(self):
# GH#13583
# tz_convert doesn't affect to internal
dti = date_range(start="2001-01-01", end="2001-03-01", tz="UTC")
dti2 = dti.tz_convert(dateutil.tz.tzlocal())
tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)
dti = date_range(start="2001-01-01", end="2001-03-01", tz=dateutil.tz.tzlocal())
dti2 = dti.tz_convert(None)
tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)
@pytest.mark.parametrize(
"tz",
[
"US/Eastern",
"dateutil/US/Eastern",
pytz.timezone("US/Eastern"),
gettz("US/Eastern"),
],
)
def test_dti_tz_convert_utc_to_local_no_modify(self, tz):
rng = date_range("3/11/2012", "3/12/2012", freq="H", tz="utc")
rng_eastern = rng.tz_convert(tz)
# Values are unmodified
tm.assert_numpy_array_equal(rng.asi8, rng_eastern.asi8)
assert timezones.tz_compare(rng_eastern.tz, timezones.maybe_get_tz(tz))
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_tz_convert_unsorted(self, tzstr):
dr = date_range("2012-03-09", freq="H", periods=100, tz="utc")
dr = dr.tz_convert(tzstr)
result = dr[::-1].hour
exp = dr.hour[::-1]
tm.assert_almost_equal(result, exp)
# -------------------------------------------------------------
# DatetimeIndex.tz_localize
def test_dti_tz_localize_nonexistent_raise_coerce(self):
# GH#13057
times = ["2015-03-08 01:00", "2015-03-08 02:00", "2015-03-08 03:00"]
index = DatetimeIndex(times)
tz = "US/Eastern"
with pytest.raises(pytz.NonExistentTimeError, match="|".join(times)):
index.tz_localize(tz=tz)
with pytest.raises(pytz.NonExistentTimeError, match="|".join(times)):
index.tz_localize(tz=tz, nonexistent="raise")
result = index.tz_localize(tz=tz, nonexistent="NaT")
test_times = ["2015-03-08 01:00-05:00", "NaT", "2015-03-08 03:00-04:00"]
dti = to_datetime(test_times, utc=True)
expected = dti.tz_convert("US/Eastern")
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_tz_localize_ambiguous_infer(self, tz):
# November 6, 2011, fall back, repeat 2 AM hour
# With no repeated hours, we cannot infer the transition
dr = date_range(datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour())
with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"):
dr.tz_localize(tz)
# With repeated hours, we can infer the transition
dr = date_range(
datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour(), tz=tz
)
times = [
"11/06/2011 00:00",
"11/06/2011 01:00",
"11/06/2011 01:00",
"11/06/2011 02:00",
"11/06/2011 03:00",
]
di = DatetimeIndex(times)
localized = di.tz_localize(tz, ambiguous="infer")
expected = dr._with_freq(None)
tm.assert_index_equal(expected, localized)
tm.assert_index_equal(expected, DatetimeIndex(times, tz=tz, ambiguous="infer"))
# When there is no dst transition, nothing special happens
dr = date_range(datetime(2011, 6, 1, 0), periods=10, freq=pd.offsets.Hour())
localized = dr.tz_localize(tz)
localized_infer = dr.tz_localize(tz, ambiguous="infer")
tm.assert_index_equal(localized, localized_infer)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_tz_localize_ambiguous_times(self, tz):
# March 13, 2011, spring forward, skip from 2 AM to 3 AM
dr = date_range(datetime(2011, 3, 13, 1, 30), periods=3, freq=pd.offsets.Hour())
with pytest.raises(pytz.NonExistentTimeError, match="2011-03-13 02:30:00"):
dr.tz_localize(tz)
# after dst transition, it works
dr = date_range(
datetime(2011, 3, 13, 3, 30), periods=3, freq=pd.offsets.Hour(), tz=tz
)
# November 6, 2011, fall back, repeat 2 AM hour
dr = date_range(datetime(2011, 11, 6, 1, 30), periods=3, freq=pd.offsets.Hour())
with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"):
dr.tz_localize(tz)
# UTC is OK
dr = date_range(
datetime(2011, 3, 13), periods=48, freq=pd.offsets.Minute(30), tz=pytz.utc
)
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_tz_localize_pass_dates_to_utc(self, tzstr):
strdates = ["1/1/2012", "3/1/2012", "4/1/2012"]
idx = DatetimeIndex(strdates)
conv = idx.tz_localize(tzstr)
fromdates = DatetimeIndex(strdates, tz=tzstr)
assert conv.tz == fromdates.tz
tm.assert_numpy_array_equal(conv.values, fromdates.values)
@pytest.mark.parametrize("prefix", ["", "dateutil/"])
def test_dti_tz_localize(self, prefix):
tzstr = prefix + "US/Eastern"
dti = pd.date_range(start="1/1/2005", end="1/1/2005 0:00:30.256", freq="L")
dti2 = dti.tz_localize(tzstr)
dti_utc = pd.date_range(
start="1/1/2005 05:00", end="1/1/2005 5:00:30.256", freq="L", tz="utc"
)
tm.assert_numpy_array_equal(dti2.values, dti_utc.values)
dti3 = dti2.tz_convert(prefix + "US/Pacific")
tm.assert_numpy_array_equal(dti3.values, dti_utc.values)
dti = pd.date_range(start="11/6/2011 1:59", end="11/6/2011 2:00", freq="L")
with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"):
dti.tz_localize(tzstr)
dti = pd.date_range(start="3/13/2011 1:59", end="3/13/2011 2:00", freq="L")
with pytest.raises(pytz.NonExistentTimeError, match="2011-03-13 02:00:00"):
dti.tz_localize(tzstr)
@pytest.mark.parametrize(
"tz",
[
"US/Eastern",
"dateutil/US/Eastern",
pytz.timezone("US/Eastern"),
gettz("US/Eastern"),
],
)
def test_dti_tz_localize_utc_conversion(self, tz):
# Localizing to time zone should:
# 1) check for DST ambiguities
# 2) convert to UTC
rng = date_range("3/10/2012", "3/11/2012", freq="30T")
converted = rng.tz_localize(tz)
expected_naive = rng + pd.offsets.Hour(5)
tm.assert_numpy_array_equal(converted.asi8, expected_naive.asi8)
# DST ambiguity, this should fail
rng = date_range("3/11/2012", "3/12/2012", freq="30T")
# Is this really how it should fail??
with pytest.raises(pytz.NonExistentTimeError, match="2012-03-11 02:00:00"):
rng.tz_localize(tz)
def test_dti_tz_localize_roundtrip(self, tz_aware_fixture):
# note: this tz tests that a tz-naive index can be localized
# and de-localized successfully, when there are no DST transitions
# in the range.
idx = date_range(start="2014-06-01", end="2014-08-30", freq="15T")
tz = tz_aware_fixture
localized = idx.tz_localize(tz)
# cant localize a tz-aware object
with pytest.raises(
TypeError, match="Already tz-aware, use tz_convert to convert"
):
localized.tz_localize(tz)
reset = localized.tz_localize(None)
assert reset.tzinfo is None
expected = idx._with_freq(None)
tm.assert_index_equal(reset, expected)
def test_dti_tz_localize_naive(self):
rng = date_range("1/1/2011", periods=100, freq="H")
conv = rng.tz_localize("US/Pacific")
exp = date_range("1/1/2011", periods=100, freq="H", tz="US/Pacific")
tm.assert_index_equal(conv, exp._with_freq(None))
def test_dti_tz_localize_tzlocal(self):
# GH#13583
offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1))
offset = int(offset.total_seconds() * 1000000000)
dti = date_range(start="2001-01-01", end="2001-03-01")
dti2 = dti.tz_localize(dateutil.tz.tzlocal())
tm.assert_numpy_array_equal(dti2.asi8 + offset, dti.asi8)
dti = date_range(start="2001-01-01", end="2001-03-01", tz=dateutil.tz.tzlocal())
dti2 = dti.tz_localize(None)
tm.assert_numpy_array_equal(dti2.asi8 - offset, dti.asi8)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_tz_localize_ambiguous_nat(self, tz):
times = [
"11/06/2011 00:00",
"11/06/2011 01:00",
"11/06/2011 01:00",
"11/06/2011 02:00",
"11/06/2011 03:00",
]
di = DatetimeIndex(times)
localized = di.tz_localize(tz, ambiguous="NaT")
times = [
"11/06/2011 00:00",
np.NaN,
np.NaN,
"11/06/2011 02:00",
"11/06/2011 03:00",
]
di_test = DatetimeIndex(times, tz="US/Eastern")
# left dtype is datetime64[ns, US/Eastern]
# right is datetime64[ns, tzfile('/usr/share/zoneinfo/US/Eastern')]
tm.assert_numpy_array_equal(di_test.values, localized.values)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_tz_localize_ambiguous_flags(self, tz):
# November 6, 2011, fall back, repeat 2 AM hour
# Pass in flags to determine right dst transition
dr = date_range(
datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour(), tz=tz
)
times = [
"11/06/2011 00:00",
"11/06/2011 01:00",
"11/06/2011 01:00",
"11/06/2011 02:00",
"11/06/2011 03:00",
]
# Test tz_localize
di =
|
DatetimeIndex(times)
|
pandas.DatetimeIndex
|
import pytest
import numpy as np
import pandas as pd
from six import StringIO
from dae.tools.generate_histogram import (
ScoreHistogramInfo,
GenerateScoresHistograms,
)
# pytestmark = pytest.mark.xfail
class MyStringIO(StringIO):
def __add__(self, other):
return ""
@pytest.fixture
def score_files():
score = pd.DataFrame({"SCORE": [1, 2, 3, 4, 4, 5, 6]})
rankscore = pd.DataFrame({"RANKSCORE": [1, 10, 100, 100, 1000, 10000]})
rankscore_zero_start = pd.DataFrame(
{"RANKSCORE_0": [0, 1, 10, 100, 100, 1000, 10000]}
)
return [score, rankscore, rankscore_zero_start]
@pytest.fixture
def score_files_by_chunks():
score = [
|
pd.DataFrame({"SCORE": [1, 2, 3]})
|
pandas.DataFrame
|
import warnings
from pandas import Series
# This warning is alerting that the regex uses a capturing group but the match is not used.
warnings.filterwarnings("ignore", 'This pattern has match groups')
class Evaluator:
series: Series
def __init__(self, series: Series):
self.series = series
self.unique_series = list(self.series.dropna().unique())
def series_match(self, pattern: str):
"""
Evaluate if all series match the pattern
"""
if len(self.unique_series) == 0:
return False
return Series(
self.unique_series).astype(str).str.match(pattern).eq(True).all()
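# Minimal sketch (illustrative values):
# >>> Evaluator(Series(['12', '34'])).series_match(r'\d{2}')
# True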
def series_contains(self, pattern: str):
"""
Evaluate if the series contains the pattern
"""
return (
|
Series(self.unique_series)
|
pandas.Series
|
""" Module for data preprocessing.
"""
import datetime
import warnings
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Set
from typing import Union
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
from sklearn.utils.validation import check_is_fitted
__all__ = [
'ColumnSelector',
'ColumnDropper',
'ColumnRename',
'NaDropper',
'Clip',
'DatetimeTransformer',
'NumericTransformer',
'TimeframeExtractor',
'DateExtractor',
'ValueMapper',
'Sorter',
'Fill',
'TimeOffsetTransformer',
'ConditionedDropper',
'ZeroVarianceDropper',
'SignalSorter',
'ColumnSorter',
'DifferentialCreator'
]
class ColumnSelector(BaseEstimator, TransformerMixin):
"""Transformer to select a list of columns by their name.
Example:
>>> data = pd.DataFrame({'a': [0], 'b': [0]})
>>> ColumnSelector(keys=['a']).transform(data)
pd.DataFrame({'a': [0]})
"""
def __init__(self, keys: List[str]):
"""Creates ColumnSelector.
Transformer to select a list of columns for further processing.
Args:
keys (List[str]): List of columns to extract.
"""
self._keys = keys
def fit(self, X, y=None):
return self
def transform(self, X):
"""Extracts the columns from `X`.
Args:
X (pd.DataFrame): Dataframe.
Returns:
pd.DataFrame: Returns a DataFrame only containing the selected
features.
"""
return X.loc[:, self._keys]
class ColumnDropper(BaseEstimator, TransformerMixin):
"""Transformer to drop a list of columns by their name.
Example:
>>> data = pd.DataFrame({'a': [0], 'b': [0]})
>>> ColumnDropper(columns=['b']).transform(data)
pd.DataFrame({'a': [0]})
"""
def __init__(self,
*,
columns: Union[List[str], Set[str]],
verbose: bool = False):
"""Creates ColumnDropper.
Transformer to drop a list of columns from the data frame.
Args:
keys (list): List of columns names to drop.
"""
self.columns = set(columns)
self.verbose = verbose
def fit(self, X, y=None):
return self
def transform(self, X):
"""Drops a list of columns of `X`.
Args:
X (pd.DataFrame): Dataframe.
Returns:
pd.DataFrame: Returns the dataframe without the dropped features.
"""
cols = set(X.columns.to_list())
if len(m := self.columns - cols) > 0:
warnings.warn(f'Columns {m} not found in dataframe.')
if self.verbose:
print(f'New columns: {cols - self.columns}. '
f'Removed: {self.columns}.')
return X.drop(self.columns, axis=1, errors='ignore')
class ColumnRename(BaseEstimator, TransformerMixin):
"""Transformer to rename column with a function.
Example:
>>> data = pd.DataFrame({'a.b.c': [0], 'd.e.f': [0]})
>>> ColumnRename(lambda x: x.split('.')[-1]).transform(data)
pd.DataFrame({'c': [0], 'f': [0]})
"""
def __init__(self, mapper: Callable[[str], str]):
"""Create ColumnRename.
Transformer to rename columns by a mapper function.
Args:
mapper (lambda): Mapper rename function.
Example:
Given column with name: a.b.c
lambda x: x.split('.')[-1]
Returns c
"""
self.mapper = mapper
def fit(self, X, y=None):
return self
def transform(self, X):
"""Renames a columns in `X` with a mapper function.
Args:
X (pd.DataFrame): Dataframe.
Returns:
pd.DataFrame: Returns the dataframe with the renamed columns.
"""
# split the column name
# use the last item as new name
return X.rename(columns=self.mapper)
class NaDropper(BaseEstimator, TransformerMixin):
"""Transformer that drops rows with na values.
Example:
>>> data = pd.DataFrame({'a': [0, 1], 'b': [0, np.nan]})
>>> NaDropper().transform(data)
pd.DataFrame({'a': [0], 'b': [0]})
"""
def __init__(self):
pass
def fit(self, X, y=None):
return self
def transform(self, X):
return X.dropna()
class Clip(BaseEstimator, TransformerMixin):
"""Transformer that clips values by a lower and upper bound.
Example:
>>> data = pd.DataFrame({'a': [-0.1, 1.2], 'b': [0.5, 0.6]})
>>> Clip().transform(data)
pd.DataFrame({'a': [0, 1], 'b': [0.5, 0.6]})
"""
def __init__(self, lower: float = 0.0, upper: float = 1.0):
"""Creates Clip.
Transformer that clips a numeric column to the threshold if the
threshold is exceeded. Works with an upper and lower threshold. Wrapper
for pd.DataFrame.clip.
Args:
lower (float, optional): lower limit. Defaults to 0.
upper (float, optional): upper limit. Defaults to 1.
"""
self.upper = upper
self.lower = lower
def fit(self, X, y=None):
return self
def transform(self, X):
return X.clip(lower=self.lower, upper=self.upper, axis=0)
class ColumnTSMapper(BaseEstimator, TransformerMixin):
def __init__(self,
cols: List[str],
timedelta: pd.Timedelta = pd.Timedelta(250, 'ms'),
classes: List[str] = None,
verbose: bool = False):
"""Creates ColumnTSMapper.
Expects the timestamp column to be of type pd.Timestamp.
Args:
cols (List[str]): names of [0] timestamp column, [1] sensor names,
[2] sensor values.
timedelta (pd.Timedelta): Timedelta to resample with.
classes (List[str]): List of sensor names.
verbose (bool, optional): Whether to allow prints.
"""
super().__init__()
self._cols = cols
self._timedelta = timedelta
self._verbose = verbose
if classes is not None:
self.classes_ = classes
def fit(self, X, y=None):
"""Gets the unique values in the sensor name column that
are needed to expand the dataframe.
Args:
X (pd.DataFrame): Dataframe.
y (array-like, optional): Labels. Defaults to None.
Returns:
ColumnTSMapper: Returns this.
"""
classes = X[self._cols[1]].unique()
self.classes_ = np.hstack(['Timestamp', classes])
return self
def transform(self, X):
"""Performs the mapping to equidistant timestamps.
Args:
X (pd.DataFrame): Dataframe.
Raises:
ValueError: Raised if column is not found in `X`.
Returns:
pd.DataFrame: Returns the remapped dataframe.
"""
# check is fit had been called
check_is_fitted(self)
# check if all columns exist
if not all([item in X.columns for item in self._cols]):
raise ValueError(
f'Columns {self._cols} not found in DataFrame '
f'{X.columns.to_list()}.')
# split sensors into individual columns
# create new dataframe with all _categories
# use timestamp index, to use resample later on
# initialized with na
sensors = pd.DataFrame(
None, columns=self.classes_, index=X[self._cols[0]])
# group by sensor
groups = X.groupby([self._cols[1]])
# write sensor values to sensors which is indexed by the timestamp
for g in groups:
sensors.loc[g[1][self._cols[0]], g[0]
] = g[1][self._cols[2]].to_numpy()
sensors = sensors.apply(pd.to_numeric, errors='ignore')
# fill na, important before resampling
# otherwise mean affects more samples than necessary
# first: forward fill to next valid observation
# second: backward fill first missing rows
sensors = sensors.fillna(method='ffill').fillna(method='bfill')
# resamples to equidistant timeframe
# take avg if multiple samples in the same timeframe
sensors = sensors.resample(self._timedelta).mean()
sensors = sensors.fillna(method='ffill').fillna(method='bfill')
# FIXME: to avoid nans in model, but needs better fix
sensors = sensors.fillna(value=0.0)
# move index to column and use rangeindex
sensors['Timestamp'] = sensors.index
sensors.index = pd.RangeIndex(stop=sensors.shape[0])
if self._verbose:
start, end = sensors.iloc[0, 0], sensors.iloc[-1, 0]
print('ColumnTSMapper: ')
print(f'{sensors.shape[0]} rows. '
f'Mapped to {self._timedelta.total_seconds()}s interval '
f'from {start} to {end}.')
return sensors
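# Minimal usage sketch for ColumnTSMapper; input is expected in long format, one row per
# sensor reading. Column and sensor names below are illustrative assumptions:
# >>> raw = pd.DataFrame({
# ...     'Timestamp': pd.to_datetime(['2021-07-02 16:30:00.000',
# ...                                  '2021-07-02 16:30:00.100',
# ...                                  '2021-07-02 16:30:00.300']),
# ...     'Sensor': ['temp', 'pressure', 'temp'],
# ...     'Value': [20.0, 1.0, 21.0]})
# >>> mapper = ColumnTSMapper(cols=['Timestamp', 'Sensor', 'Value'])
# >>> resampled = mapper.fit(raw).transform(raw)  # one row per 250 ms, sensors as columns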
class DatetimeTransformer(BaseEstimator, TransformerMixin):
"""Transforms a list of columns to datetime.
Example:
>>> data = pd.DataFrame({'dt': ['2021-07-02 16:30:00']})
>>> data = DatetimeTransformer(columns=['dt']).transform(data)
>>> data.dtypes
dt datetime64[ns]
"""
def __init__(self, *, columns: List[str], dt_format: str = None):
"""Creates DatetimeTransformer.
Parses a list of column to pd.Timestamp.
Args:
columns (list): List of columns names.
dt_format (str): Optional format string.
"""
super().__init__()
self._columns = columns
self._format = dt_format
def fit(self, X, y=None):
return self
def transform(self, X):
"""Parses `columns` to datetime.
Args:
X (pd.DataFrame): Dataframe.
Raises:
ValueError: Raised if columns are missing in `X`.
Returns:
pd.DataFrame: Returns the dataframe with datetime columns.
"""
X = X.copy()
# check if columns in dataframe
if len(diff := set(self._columns) - set(X.columns)):
raise ValueError(
f'Columns {diff} not found in DataFrame with columns '
f'{X.columns.to_list()}.')
# parse to pd.Timestamp
X[self._columns] = X[self._columns].apply(
lambda x: pd.to_datetime(x, format=self._format), axis=0)
# column wise
return X
class NumericTransformer(BaseEstimator, TransformerMixin):
"""Transforms a list of columns to numeric datatype.
Example:
>>> data = pd.DataFrame({'a': [0], 'b': ['1']})
>>> data.dtypes
a int64
b object
>>> data = NumericTransformer().transform(data)
>>> data.dtypes
a int64
b int64
"""
def __init__(self, *, columns: Optional[List[str]] = None):
"""Creates NumericTransformer.
Parses a list of column to numeric datatype. If None, all are
attempted to be parsed.
Args:
columns (list): List of columns names.
dt_format (str): Optional format string.
"""
super().__init__()
self._columns = columns
def fit(self, X, y=None):
return self
def transform(self, X):
"""Parses `columns` to numeric.
Args:
X (pd.DataFrame): Dataframe.
Raises:
ValueError: Raised if columns are missing in `X`.
Returns:
pd.DataFrame: Returns the dataframe with datetime columns.
"""
X = X.copy()
# transform all columns
if self._columns is None:
columns = X.columns.to_list()
else:
columns = self._columns
if len((diff := list(set(columns) - set(cols := X.columns)))):
raise ValueError(f'Columns found: {cols.to_list()}. '
f'Columns missing: {diff}.')
# parse to numeric
# column wise
X[columns] = X[columns].apply(pd.to_numeric, axis=0)
return X
class TimeframeExtractor(BaseEstimator, TransformerMixin):
"""Drops sampes that are not between a given start and end time.
Limits are inclusive.
Example:
>>> data = pd.DataFrame(
{'dates': [datetime.datetime(2021, 7, 2, 9, 50, 0),
datetime.datetime(2021, 7, 2, 11, 0, 0),
datetime.datetime(2021, 7, 2, 12, 10, 0)],
'values': [0, 1, 2]})
>>> TimeframeExtractor(time_column='dates',
start_time= datetime.time(10, 0, 0),
end_time=datetime.time(12, 0, 0)
).transform(data)
pd.DataFrame({'dates': datetime.datetime(2021, 7, 2, 11, 0, 0),
'values': [1]})
"""
def __init__(self,
*,
time_column: str,
start_time: datetime.time,
end_time: datetime.time,
invert: bool = False,
verbose: bool = False):
"""Creates TimeframeExtractor.
Drops samples that are not in between `start_time` and `end_time` in
`time_column`.
Args:
time_column (str): Column name of the timestamp column.
start_time (datetime.time): Start time.
end_time (datetime.time): End time.
invert (bool): Whether to invert the range.
verbose (bool, optional): Whether to allow prints.
"""
super().__init__()
self._start = start_time
self._end = end_time
self._column = time_column
self._negate = invert
self._verbose = verbose
def fit(self, X, y=None):
return self
def transform(self, X):
"""Drops rows from the dataframe if they are not in between
`start_time` and `end_time`. Limits are inclusive. Reindexes the
dataframe.
Args:
X (pd.DataFrame): Dataframe.
Returns:
pd.DataFrame: Returns the new dataframe.
"""
X = X.copy()
rows_before = X.shape[0]
dates = pd.to_datetime(X[self._column])
if self._negate:
X = X.loc[~((dates.dt.time >= self._start) &
(dates.dt.time <= self._end)), :]
else:
X = X.loc[(dates.dt.time >= self._start) &
(dates.dt.time <= self._end), :]
X.index = pd.RangeIndex(0, X.shape[0])
rows_after = X.shape[0]
if self._verbose:
print(
'TimeframeExtractor: \n'
f'{rows_after} rows. Dropped {rows_before - rows_after} '
f'rows which are {"in" if self._negate else "not in"} between '
f'{self._start} and {self._end}.'
)
return X
class DateExtractor(BaseEstimator, TransformerMixin):
""" Drops rows that are not between a start and end date.
Limits are inclusive.
Example:
>>> data = pd.DataFrame(
{'dates': [datetime.datetime(2021, 7, 1, 9, 50, 0),
datetime.datetime(2021, 7, 2, 11, 0, 0),
datetime.datetime(2021, 7, 3, 12, 10, 0)],
'values': [0, 1, 2]})
>>> DateExtractor(date_column='dates',
start_date=datetime.date(2021, 7, 2),
end_date=datetime.date(2021, 7, 2)).transform(data)
pd.DataFrame({'dates': datetime.datetime(2021, 7, 2, 11, 0, 0),
'values': [1]})
"""
def __init__(self,
*,
date_column: str,
start_date: datetime.date,
end_date: datetime.date,
invert: bool = False,
verbose: bool = False):
"""Initializes `DateExtractor`.
Args:
date_column (str): Name of timestamp column.
start_date (datetime.date): Start date.
end_date (datetime.date): End date.
invert (bool): Whether to invert the range.
verbose (bool, optional): Whether to allow prints.
"""
super().__init__()
self._start = start_date
self._end = end_date
self._column = date_column
self._negate = invert
self._verbose = verbose
def fit(self, X, y=None):
return self
def transform(self, X):
"""Drops rows which date is not between `start` and end date.
Bounds are inclusive. Dataframe is reindexed.
Args:
X (pd.Dataframe): Dataframe.
Returns:
pd.Dataframe: Returns the new dataframe.
"""
rows_before = X.shape[0]
dates = pd.to_datetime(X[self._column])
if self._negate:
X = X.loc[~((dates.dt.date >= self._start) &
(dates.dt.date <= self._end)), :]
else:
X = X.loc[(dates.dt.date >= self._start) &
(dates.dt.date <= self._end), :]
X.index = pd.RangeIndex(0, X.shape[0])
rows_after = X.shape[0]
if self._verbose:
print(
'DateExtractor: \n'
f'{rows_after} rows. Dropped {rows_before - rows_after} rows '
f'which are {"in" if self._negate else "not in"} between '
f'{self._start} and {self._end}.'
)
return X
class ValueMapper(BaseEstimator, TransformerMixin):
"""Maps values in `column` according to `classes`. Wrapper for
pd.DataFrame.replace.
Example:
>>> data = pd.DataFrame({'a': [0.0, 1.0, 2.0]})
>>> ValueMapper(columns=['a'], classes={2.0: 1.0}).transform(data)
pd.DataFrame({'a': [0.0, 1.0, 1.0]})
"""
def __init__(self,
*,
columns: List[str],
classes: Dict,
verbose: bool = False):
"""Initialize `ValueMapper`.
Args:
columns (List[str]): Names of columns to remap.
classes (Dict): Dictionary of old and new value.
verbose (bool, optional): Whether to allow prints.
"""
super().__init__()
self._columns = columns
self._classes = classes
self._verbose = verbose
def fit(self, X, y=None):
return self
def transform(self, X):
"""Remaps values in `column` according to `classes`.
Gives UserWarning if unmapped values are found.
Args:
X (pd.DataFrame): Dataframe.
Returns:
pd.DataFrame: Returns the new dataframe with remapped values.
"""
X = X.copy()
# warn if the data contains values that are not covered by the mapping
values = pd.unique(X[self._columns].values.ravel('K'))
if not set(self._classes.keys()).issuperset(values):
warnings.warn(
f'Values {set(values) - set(self._classes.keys())} not mapped.')
X[self._columns] = X[self._columns].replace(self._classes)
return X
class Sorter(BaseEstimator, TransformerMixin):
"""Sorts the dataframe by a list of columns. Wrapper for
pd.DataFrame.sort_values.
Example:
>>> data = pd.DataFrame({'a': [0, 1], 'b': [1, 0]})
>>> Sorter(columns=['b'], ascending=True).transform(data)
pd.DataFrame({'a': [1, 0], 'b': [0, 1]})
"""
def __init__(self,
*,
columns: List[str],
ascending: bool = True,
axis: int = 0):
"""Initialize `Sorter`.
Args:
columns (List[str]): List of column names to sort by.
ascending (bool): Whether to sort ascending.
axis (int): Axis to sort by.
"""
super().__init__()
self._columns = columns
self._ascending = ascending
self._axis = axis
def fit(self, X, y=None):
return self
def transform(self, X):
"""Sorts `X` by `columns`.
Args:
X (pd.DataFrame): Dataframe.
Returns:
pd.DataFrame: Returns the sorted Dataframe.
"""
X = X.copy()
return X.sort_values(by=self._columns,
ascending=self._ascending,
axis=self._axis)
class Fill(BaseEstimator, TransformerMixin):
"""Fills NA values with a constant or 'bfill' / 'ffill'.
Wrapper for df.fillna.
Example:
>>> data = pd.DataFrame({'a': [0.0, np.nan]})
>>> Fill(value=1.0).transform(data)
pd.DataFrame({'a': [0.0, 1.0]})
"""
def __init__(self,
*,
value: Any,
method: str = None):
"""Initialize `Fill`.
Args:
value (Any): Constant to fill NAs.
method (str): Fill method, either 'ffill' or 'bfill'.
"""
super().__init__()
self._value = value
self._method = method
def fit(self, X, y=None):
return self
def transform(self, X):
"""Fills NAs.
Args:
X (pd.DataFrame): Dataframe.
Returns:
pd.DataFrame: Returns the filled dataframe.
"""
X = X.copy()
# pandas does not allow passing `value` and `method` at the same time
if self._method is not None:
return X.fillna(method=self._method)
return X.fillna(self._value)
class TimeOffsetTransformer(BaseEstimator, TransformerMixin):
"""`TimeOffsetTransformer` offsets a datetime by `timedelta`.
Example:
>>> data = pd.DataFrame(
{'dates': [datetime.datetime(2021, 7, 1, 16, 0, 0)]})
>>> TimeOffsetTransformer(time_columns=['dates'],
timedelta=pd.Timedelta(1, 'h')
).transform(data)
pd.DataFrame({'dates': datetime.datetime(2021, 7, 1, 17, 0, 0)})
"""
def __init__(self, *, time_columns: List[str], timedelta: pd.Timedelta):
"""
Initialize `TimeOffsetTransformer`.
Args:
time_columns (List[str]): List of names of columns with timestamps
to offset.
timedelta (pd.Timedelta): Offset.
"""
super().__init__()
self._time_columns = time_columns
self._timedelta = timedelta
def fit(self, X, y=None):
return self
def transform(self, X):
"""Offsets the timestamps in `time_columns` by `timedelta`-
Args:
X (pd.DataFrame): Dataframe.
Returns:
pd.DataFrame: Returns the dataframe.
"""
X = X.copy()
for column in self._time_columns:
X[column] = pd.to_datetime(X[column]) + self._timedelta
return X
class ConditionedDropper(BaseEstimator, TransformerMixin):
"""Module to drop rows in `column` that contain numeric values and are
above `threshold`. If `inverted` is true, values below `threshold` are
dropped.
Example:
>>> data = pd.DataFrame({'a': [0.0, 1.2, 0.5]})
>>> ConditionedDropper(column='a', threshold=0.5).transform(data)
pd.DataFrame({'a': [0.0, 0.5]})
"""
def __init__(self,
*,
column: str,
threshold: float,
invert: bool = False):
"""Initializes `ConditionedDropper`.
Args:
column (str): Column to match condition in.
threshold (float): Threshold.
invert (bool, optional): If False, all values above `threshold`
are dropped; otherwise all values below are dropped.
"""
super().__init__()
self.column = column
self.threshold = threshold
self.inverted = invert
def fit(self, X, y=None):
return self
def transform(self, X):
"""Drops rows if below or above a threshold.
Args:
X (pd.DataFrame): Dataframe.
Returns:
pd.DataFrame: Returns the dataframe.
"""
X = X.copy()
if not self.inverted:
X = X.drop(X[X[self.column] > self.threshold].index)
else:
X = X.drop(X[X[self.column] < self.threshold].index)
X.index = pd.RangeIndex(X.shape[0])
return X
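# Usage sketch (illustrative, not part of the original module): the
# transformers above implement the sklearn fit/transform contract, so they
# can be chained with sklearn.pipeline.Pipeline. The column names and sample
# data below are assumptions made only for this example.
# from sklearn.pipeline import Pipeline
# raw = pd.DataFrame({'Timestamp': ['2021-07-02 16:30:00', '2021-07-02 16:00:00'],
#                     'value': ['1', '2']})
# pipeline = Pipeline([
#     ('to_datetime', DatetimeTransformer(columns=['Timestamp'])),
#     ('to_numeric', NumericTransformer(columns=['value'])),
#     ('sort', Sorter(columns=['Timestamp'])),
#     ('fill', Fill(value=0.0)),
# ])
# prepared = pipeline.fit_transform(raw)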
'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
mode: python; py-indent-offset: 4; tab-width: 4; coding: utf-8
'''
import unittest
import pprint
from numpy import array
from numpy.testing import assert_array_equal, assert_array_almost_equal
from pandas import DataFrame
from pandas.testing import assert_frame_equal
from sos_trades_core.execution_engine.execution_engine import ExecutionEngine
class TestExtendDict(unittest.TestCase):
"""
Extend dict type for GEMSEO test class
"""
def setUp(self):
self.name = 'EE'
self.pp = pprint.PrettyPrinter(indent=4, compact=True)
def test_01_sosdiscipline_simple_dict(self):
exec_eng = ExecutionEngine(self.name)
exec_eng.ns_manager.add_ns('ns_test', self.name)
mod_list = 'sos_trades_core.sos_wrapping.test_discs.disc5dict.Disc5'
disc5_builder = exec_eng.factory.get_builder_from_module(
'Disc5', mod_list)
exec_eng.factory.set_builders_to_coupling_builder(disc5_builder)
exec_eng.configure()
# additional test to verify that values_in are used
values_dict = {}
values_dict['EE.z'] = [3., 0.]
values_dict['EE.dict_out'] = {'key1': 0.5, 'key2': 0.5}
exec_eng.dm.set_values_from_dict(values_dict)
exec_eng.execute()
target = {'EE.z': [3.0, 0.0],
'EE.dict_out': [0.5, 0.5],
'EE.h': [0.75, 0.75]}
res = {}
for key in target:
res[key] = exec_eng.dm.get_value(key)
if target[key] is dict:
self.assertDictEqual(res[key], target[key])
elif target[key] is array:
self.assertListEqual(list(target[key]), list(res[key]))
def test_02_sosdiscipline_simple_dict_and_dataframe(self):
exec_eng = ExecutionEngine(self.name)
exec_eng.ns_manager.add_ns('ns_test', self.name)
mod_list = 'sos_trades_core.sos_wrapping.test_discs.disc4_dict_df.Disc4'
disc4_builder = exec_eng.factory.get_builder_from_module(
'Disc4', mod_list)
exec_eng.factory.set_builders_to_coupling_builder(disc4_builder)
exec_eng.configure()
# -- build input data
values_dict = {}
# built my_dict (private in)
values_dict['EE.Disc4.mydict'] = {'md_1': array([3., 4.])}
# build dict of dataframe (coupling in)
h = {'dataframe': DataFrame(data={'col1': array([0.75, 0.75])})}
values_dict['EE.h'] = h
# store data
exec_eng.dm.set_values_from_dict(values_dict)
# -- exec
exec_eng.execute()
# compare output h (sos_trades format) to reference
rp = exec_eng.root_process.sos_disciplines[0]
z_out, dict_out = rp.get_sosdisc_outputs(["z", "dict_out"])
z_out_target = array([0.75, 1.5])
df_data = {'col1': [1, 2], 'col2': [3, 0.75]}
df = DataFrame(data=df_data)
dict_out_target = {'key1': {'key11': 0.75, 'key12': 0.5, 'key13': 8., 'key14': {'key141': df, 'key142': array([5])}},
'key2': 10.}
assert_array_equal(
z_out, z_out_target, "wrong output z")
self.assertSetEqual(set(dict_out.keys()),
set(dict_out_target.keys()), "Incorrect dict_out keys")
self.assertSetEqual(set(dict_out['key1'].keys()),
set(dict_out_target['key1'].keys()), "Incorrect dict_out['key1'] keys")
self.assertSetEqual(set(dict_out['key1']['key14'].keys()),
set(dict_out_target['key1']['key14'].keys()), "Incorrect dict_out[key1][key14] keys")
self.assertAlmostEqual(
dict_out_target['key1']['key11'],
dict_out['key1']['key11'])
self.assertAlmostEqual(
dict_out_target['key1']['key12'],
dict_out['key1']['key12'])
self.assertAlmostEqual(
dict_out_target['key1']['key13'],
dict_out['key1']['key13'])
assert_array_equal(
dict_out_target['key1']['key14']['key142'],
dict_out['key1']['key14']['key142'])
assert_frame_equal(
dict_out_target['key1']['key14']['key141'],
dict_out['key1']['key14']['key141'])
def test_03_soscoupling_simple_dict(self):
exec_eng = ExecutionEngine(self.name)
exec_eng.ns_manager.add_ns('ns_test', self.name)
mod_list = 'sos_trades_core.sos_wrapping.test_discs.disc4dict.Disc4'
disc4_builder = exec_eng.factory.get_builder_from_module(
'Disc4', mod_list)
mod_list = 'sos_trades_core.sos_wrapping.test_discs.disc5dict.Disc5'
disc5_builder = exec_eng.factory.get_builder_from_module(
'Disc5', mod_list)
exec_eng.factory.set_builders_to_coupling_builder(
[disc4_builder, disc5_builder])
exec_eng.configure()
values_dict = {f'{self.name}.dict_out': {'key1': 3., 'key2': 4.},
f'{self.name}.z': array([4., 5.]),
f'{self.name}.h': array([8., 9.]),
f'{self.name}.Disc4.mydict': {'md_1': array([3., 4.])}
}
exec_eng.dm.set_values_from_dict(values_dict)
exec_eng.execute()
target = {f'{self.name}.dict_out': {'key1': 0.7071119843035847, 'key2': 0.7071119843035847},
f'{self.name}.z': array([0.707111984, 1.41422397]),
f'{self.name}.h': array([0.7071067811865475, 0.7071067811865475]),
f'{self.name}.Disc4.mydict': {'md_1': array([3., 4.])}}
res = {}
for key in target:
res[key] = exec_eng.dm.get_value(key)
if target[key] is dict:
self.assertDictEqual(res[key], target[key])
elif target[key] is array:
self.assertListEqual(list(target[key]), list(res[key]))
def test_04_sosdiscipline_nested_dict(self):
exec_eng = ExecutionEngine(self.name)
exec_eng.ns_manager.add_ns('ns_test', self.name)
mod_list = 'sos_trades_core.sos_wrapping.test_discs.disc5_disc_df.Disc5'
disc5_builder = exec_eng.factory.get_builder_from_module(
'Disc5', mod_list)
exec_eng.factory.set_builders_to_coupling_builder(disc5_builder)
exec_eng.configure()
df_data = {'col1': [1, 2], 'col2': [3, 0.5]}
df = DataFrame(data=df_data)
"""
Assignment 4
Before working on this assignment please read these instructions fully. In the submission area, you will notice that you can click the link to Preview the Grading for each step of the assignment. This is the criteria that will be used for peer grading. Please familiarize yourself with the criteria before beginning the assignment.
This assignment requires that you to find at least two datasets on the web which are related, and that you visualize these datasets to answer a question with the broad topic of weather phenomena (see below) for the region of Ann Arbor, Michigan, United States, or United States more broadly.
You can merge these datasets with data from different regions if you like! For instance, you might want to compare Ann Arbor, Michigan, United States to Ann Arbor, USA. In that case at least one source file must be about Ann Arbor, Michigan, United States.
You are welcome to choose datasets at your discretion, but keep in mind they will be shared with your peers, so choose appropriate datasets. Sensitive, confidential, illicit, and proprietary materials are not good choices for datasets for this assignment. You are welcome to upload datasets of your own as well, and link to them using a third party repository such as github, bitbucket, pastebin, etc. Please be aware of the Coursera terms of service with respect to intellectual property.
Also, you are welcome to preserve data in its original language, but for the purposes of grading you should provide english translations. You are welcome to provide multiple visuals in different languages if you would like!
As this assignment is for the whole course, you must incorporate principles discussed in the first week, such as having as high data-ink ratio (Tufte) and aligning with Cairo’s principles of truth, beauty, function, and insight.
Here are the assignment instructions:
State the region and the domain category that your data sets are about (e.g., Ann Arbor, Michigan, United States and weather phenomena).
You must state a question about the domain category and region that you identified as being interesting.
You must provide at least two links to available datasets. These could be links to files such as CSV or Excel files, or links to websites which might have data in tabular form, such as Wikipedia pages.
You must upload an image which addresses the research question you stated. In addition to addressing the question, this visual should follow Cairo's principles of truthfulness, functionality, beauty, and insightfulness.
You must contribute a short (1-2 paragraph) written justification of how your visualization addresses your stated research question.
What do we mean by weather phenomena? For this category you might want to consider seasonal changes, natural disasters, or historical trends.
Tips
Wikipedia is an excellent source of data, and I strongly encourage you to explore it for new data sources.
Many governments run open data initiatives at the city, region, and country levels, and these are wonderful resources for localized data sources.
Several international agencies, such as the United Nations, the World Bank, the Global Open Data Index are other great places to look for data.
This assignment requires you to convert and clean datafiles. Check out the discussion forums for tips on how to do this from various sources, and share your successes with your fellow students!
Example
Looking for an example? Here's what our course assistant put together for the Ann Arbor, MI, USA area using sports and athletics as the topic. Example Solution File
"""
# pip3 install mplleaflet
from matplotlib import cm
from matplotlib.ticker import FuncFormatter
import mpl_toolkits.axes_grid1.inset_locator as mpl_il
import pandas as pd
import mplleaflet
from matplotlib.artist import Artist
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import sys
"""
1. region and domain
region: China, Japan, Korea
domain: Air transport, passengers carried (1970~2017)
2. Create a research question about the domain category and region that you identified.
How many passengers have been carried by air transport in China, Japan, and Korea since 1970?
3. Links
china http://data.un.org/Data.aspx?d=WDI&f=Indicator_Code:IS.AIR.PSGR;Country_Code:CHN;Time_Code:1970,1971,1972,1973,1974,1975,1976,1977,1978,1979,1980,1981,1982,1983,1984,1985,1986,1987,1988,1989,1990,1991,1992,1993,1994,1995,1996,1997,1998,1999,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017&c=0,1,2,3,4,5&s=Country_Name:asc,Year:desc&v=1
japan http://data.un.org/Data.aspx?d=WDI&f=Indicator_Code:IS.AIR.PSGR;Country_Code:JPN;Time_Code:1970,1971,1972,1973,1974,1975,1976,1977,1978,1979,1980,1981,1982,1983,1984,1985,1986,1987,1988,1989,1990,1991,1992,1993,1994,1995,1996,1997,1998,1999,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017&c=0,1,2,3,4,5&s=Country_Name:asc,Year:desc&v=1
korea http://data.un.org/Data.aspx?d=WDI&f=Indicator_Code:IS.AIR.PSGR;Country_Code:KOR;Time_Code:1970,1971,1972,1973,1974,1975,1976,1977,1978,1979,1980,1981,1982,1983,1984,1985,1986,1987,1988,1989,1990,1991,1992,1993,1994,1995,1996,1997,1998,1999,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017&c=0,1,2,3,4,5&s=Country_Name:asc,Year:desc&v=1
4.Provide a short (1-2 paragraphs) justification of how your visual addresses your research question.
This visualization addresses the question of how many passengers were carried by air transport. Because passenger numbers differ greatly between the countries, the y-axis, which matplotlib would otherwise display in scientific notation ('e'), is formatted in millions (M) for better readability. A vertical line on the x-axis marks 2017, the final year of data, and a dotted grid guides the reader through the yearly values.
This graph shows the increase in aircraft use by Korean, Chinese and Japanese passengers from 1970 to 2017. Korean and Japanese passenger growth has been modest, while Chinese passengers have increased rapidly since 2000.
5.Describe your design choices for your visual in regards to Cairo's principle of truthfulness.
Describe your design choices for your visual in regards to Cairo's principle of beauty.
Describe your design choices for your visual in regards to Cairo's principle of functionality.
Describe your design choices for your visual in regards to Cairo's principle of insightfulness.
truthfulness : The passenger numbers for each country are shown as reported, without distortion.
beauty : Harmonious colors were used.
functionality : Distinct colors per country and a legend make the series easy to distinguish.
insightfulness : It clearly shows the gap between the number of Chinese passengers and the numbers in the other countries.
"""
# remove unnecessary columns and rows from the dataframe
def remove_extra_cols_rows(df):
df = df[df['Indicator Code'] != 'footnoteSeqID']
df = df[df['Indicator Code'] != '1']
# axis=1: columns, 0: rows
df.drop(['Indicator Code', 'Time Code',
'Value Footnotes'], axis=1, inplace=True)
return df
# the values are too large, so format the scientific-notation (e) tick labels in units of M (millions)
# https://matplotlib.org/examples/pylab_examples/custom_ticker1.html
def millions(x, pos):
# return '%1.1fM' % (x*1e-6)
return '%1dM' % (x*1e-6)
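# Usage sketch (assumed continuation, not in the original excerpt): the
# formatter is attached to the y-axis so ticks read e.g. '100M' instead of 1e8.
#   ax = plt.gca()
#   ax.yaxis.set_major_formatter(FuncFormatter(millions))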
plt.figure()
plt.style.use('seaborn-colorblind')
df1 = pd.read_csv('UNdata_Export_china.csv')
df1 = remove_extra_cols_rows(df1)
df2 = pd.read_csv('UNdata_Export_japan.csv')
df2 = remove_extra_cols_rows(df2)
df3 = pd.read_csv('UNdata_Export_korea.csv')
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from numba import njit
import vectorbt as vbt
from tests.utils import record_arrays_close
from vectorbt.generic.enums import range_dt, drawdown_dt
from vectorbt.portfolio.enums import order_dt, trade_dt, log_dt
day_dt = np.timedelta64(86400000000000)
example_dt = np.dtype([
('id', np.int64),
('col', np.int64),
('idx', np.int64),
('some_field1', np.float64),
('some_field2', np.float64)
], align=True)
records_arr = np.asarray([
(0, 0, 0, 10, 21),
(1, 0, 1, 11, 20),
(2, 0, 2, 12, 19),
(3, 1, 0, 13, 18),
(4, 1, 1, 14, 17),
(5, 1, 2, 13, 18),
(6, 2, 0, 12, 19),
(7, 2, 1, 11, 20),
(8, 2, 2, 10, 21)
], dtype=example_dt)
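# 3 records for each of columns 0-2 ('a', 'b', 'c'); column 3 ('d') has none,
# which is why the tests below expect a col_range of [-1, -1] and a count of 0 for it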
records_nosort_arr = np.concatenate((
records_arr[0::3],
records_arr[1::3],
records_arr[2::3]
))
group_by = pd.Index(['g1', 'g1', 'g2', 'g2'])
wrapper = vbt.ArrayWrapper(
index=['x', 'y', 'z'],
columns=['a', 'b', 'c', 'd'],
ndim=2,
freq='1 days'
)
wrapper_grouped = wrapper.replace(group_by=group_by)
records = vbt.records.Records(wrapper, records_arr)
records_grouped = vbt.records.Records(wrapper_grouped, records_arr)
records_nosort = records.replace(records_arr=records_nosort_arr)
records_nosort_grouped = vbt.records.Records(wrapper_grouped, records_nosort_arr)
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# col_mapper.py ############# #
class TestColumnMapper:
def test_col_arr(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
records.col_mapper.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_get_col_arr(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_arr(),
records.col_mapper.col_arr
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])
)
def test_col_range(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_range,
np.array([
[0, 3]
])
)
np.testing.assert_array_equal(
records.col_mapper.col_range,
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
def test_get_col_range(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_range(),
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_range(),
np.array([[0, 6]])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_range(),
np.array([[0, 6], [6, 9]])
)
def test_col_map(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[0],
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[1],
np.array([3])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[1],
np.array([3, 3, 3, 0])
)
def test_get_col_map(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[0],
records.col_mapper.col_map[0]
)
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[1],
records.col_mapper.col_map[1]
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[1],
np.array([6])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[1],
np.array([6, 3])
)
def test_is_sorted(self):
assert records.col_mapper.is_sorted()
assert not records_nosort.col_mapper.is_sorted()
# ############# mapped_array.py ############# #
mapped_array = records.map_field('some_field1')
mapped_array_grouped = records_grouped.map_field('some_field1')
mapped_array_nosort = records_nosort.map_field('some_field1')
mapped_array_nosort_grouped = records_nosort_grouped.map_field('some_field1')
mapping = {x: 'test_' + str(x) for x in pd.unique(mapped_array.values)}
mp_mapped_array = mapped_array.replace(mapping=mapping)
mp_mapped_array_grouped = mapped_array_grouped.replace(mapping=mapping)
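# `mapping` relabels each distinct mapped value (10.0-14.0) as 'test_<value>'; the
# mp_* variants are used below to exercise value_counts(mapping=...) and the
# mapping-aware stats() output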
class TestMappedArray:
def test_config(self, tmp_path):
assert vbt.MappedArray.loads(mapped_array.dumps()) == mapped_array
mapped_array.save(tmp_path / 'mapped_array')
assert vbt.MappedArray.load(tmp_path / 'mapped_array') == mapped_array
def test_mapped_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
mapped_array.values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
def test_id_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.id_arr,
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
def test_col_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
mapped_array.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_idx_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].idx_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.idx_arr,
np.array([0, 1, 2, 0, 1, 2, 0, 1, 2])
)
def test_is_sorted(self):
assert mapped_array.is_sorted()
assert mapped_array.is_sorted(incl_id=True)
assert not mapped_array_nosort.is_sorted()
assert not mapped_array_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert mapped_array.sort().is_sorted()
assert mapped_array.sort().is_sorted(incl_id=True)
assert mapped_array.sort(incl_id=True).is_sorted(incl_id=True)
assert mapped_array_nosort.sort().is_sorted()
assert mapped_array_nosort.sort().is_sorted(incl_id=True)
assert mapped_array_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = mapped_array['a'].values >= mapped_array['a'].values.mean()
np.testing.assert_array_equal(
mapped_array['a'].apply_mask(mask_a).id_arr,
np.array([1, 2])
)
mask = mapped_array.values >= mapped_array.values.mean()
filtered = mapped_array.apply_mask(mask)
np.testing.assert_array_equal(
filtered.id_arr,
np.array([2, 3, 4, 5, 6])
)
np.testing.assert_array_equal(filtered.col_arr, mapped_array.col_arr[mask])
np.testing.assert_array_equal(filtered.idx_arr, mapped_array.idx_arr[mask])
assert mapped_array_grouped.apply_mask(mask).wrapper == mapped_array_grouped.wrapper
assert mapped_array_grouped.apply_mask(mask, group_by=False).wrapper.grouper.group_by is None
def test_map_to_mask(self):
@njit
def every_2_nb(inout, idxs, col, mapped_arr):
inout[idxs[::2]] = True
np.testing.assert_array_equal(
mapped_array.map_to_mask(every_2_nb),
np.array([True, False, True, True, False, True, True, False, True])
)
def test_top_n_mask(self):
np.testing.assert_array_equal(
mapped_array.top_n_mask(1),
np.array([False, False, True, False, True, False, True, False, False])
)
def test_bottom_n_mask(self):
np.testing.assert_array_equal(
mapped_array.bottom_n_mask(1),
np.array([True, False, False, True, False, False, False, False, True])
)
def test_top_n(self):
np.testing.assert_array_equal(
mapped_array.top_n(1).id_arr,
np.array([2, 4, 6])
)
def test_bottom_n(self):
np.testing.assert_array_equal(
mapped_array.bottom_n(1).id_arr,
np.array([0, 3, 8])
)
def test_to_pd(self):
target = pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
index=wrapper.index,
columns=wrapper.columns
)
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(),
target['a']
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(),
target
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0.),
target.fillna(0.)
)
mapped_array2 = vbt.MappedArray(
wrapper,
records_arr['some_field1'].tolist() + [1],
records_arr['col'].tolist() + [2],
idx_arr=records_arr['idx'].tolist() + [2]
)
with pytest.raises(Exception):
_ = mapped_array2.to_pd()
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(ignore_index=True),
pd.Series(np.array([10., 11., 12.]), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0, ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., 0.],
[11., 14., 11., 0.],
[12., 13., 10., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 12.],
[11., 11.],
[12., 10.],
[13., np.nan],
[14., np.nan],
[13., np.nan],
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_apply(self):
@njit
def cumsum_apply_nb(idxs, col, a):
return np.cumsum(a)
np.testing.assert_array_equal(
mapped_array['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
mapped_array.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert mapped_array_grouped.apply(cumsum_apply_nb).wrapper == \
mapped_array.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert mapped_array.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_reduce(self):
@njit
def mean_reduce_nb(col, a):
return np.mean(a)
assert mapped_array['a'].reduce(mean_reduce_nb) == 11.
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0.),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0., wrap_kwargs=dict(dtype=np.int_)),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, wrap_kwargs=dict(to_timedelta=True)),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(mean_reduce_nb),
pd.Series([12.166666666666666, 11.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
assert mapped_array_grouped['g1'].reduce(mean_reduce_nb) == 12.166666666666666
pd.testing.assert_series_equal(
mapped_array_grouped[['g1']].reduce(mean_reduce_nb),
pd.Series([12.166666666666666], index=pd.Index(['g1'], dtype='object')).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
mapped_array_grouped.reduce(mean_reduce_nb, group_by=False)
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, group_by=group_by),
mapped_array_grouped.reduce(mean_reduce_nb)
)
def test_reduce_to_idx(self):
@njit
def argmin_reduce_nb(col, a):
return np.argmin(a)
assert mapped_array['a'].reduce(argmin_reduce_nb, returns_idx=True) == 'x'
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True),
pd.Series(np.array(['x', 'x', 'z', np.nan], dtype=object), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 0, 2, -1], dtype=int), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 2], dtype=int), index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
def test_reduce_to_array(self):
@njit
def min_max_reduce_nb(col, a):
return np.array([np.min(a), np.max(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(min_max_reduce_nb, returns_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([10., 12.], index=pd.Index(['min', 'max'], dtype='object'), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
index=pd.Index(['min', 'max'], dtype='object'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, fill_value=0.),
pd.DataFrame(
np.array([
[10., 13., 10., 0.],
[12., 14., 12., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(to_timedelta=True)),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
columns=wrapper.columns
) * day_dt
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame(
np.array([
[10., 10.],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True, group_by=False)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, group_by=group_by),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g1'].reduce(min_max_reduce_nb, returns_array=True),
pd.Series([10., 14.], name='g1')
)
pd.testing.assert_frame_equal(
mapped_array_grouped[['g1']].reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame([[10.], [14.]], columns=pd.Index(['g1'], dtype='object'))
)
def test_reduce_to_idx_array(self):
@njit
def idxmin_idxmax_reduce_nb(col, a):
return np.array([np.argmin(a), np.argmax(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['min', 'max'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.DataFrame(
{
'a': ['x', 'z'],
'b': ['x', 'y'],
'c': ['z', 'x'],
'd': [np.nan, np.nan]
},
index=pd.Index(['min', 'max'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 0, 2, -1],
[2, 1, 0, -1]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 2],
[1, 0]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_nth(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth(0),
pd.Series(np.array([10., 13., 12., np.nan]), index=wrapper.columns).rename('nth')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth(-1),
pd.Series(np.array([12., 13., 10., np.nan]), index=wrapper.columns).rename('nth')
)
with pytest.raises(Exception):
_ = mapped_array.nth(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth(0),
pd.Series(np.array([10., 12.]), index=pd.Index(['g1', 'g2'], dtype='object')).rename('nth')
)
def test_nth_index(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth_index(0),
pd.Series(
np.array(['x', 'x', 'x', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth_index(-1),
pd.Series(
np.array(['z', 'z', 'z', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
with pytest.raises(Exception):
_ = mapped_array.nth_index(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth_index(0),
pd.Series(
np.array(['x', 'x'], dtype='object'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('nth_index')
)
def test_min(self):
assert mapped_array['a'].min() == mapped_array['a'].to_pd().min()
pd.testing.assert_series_equal(
mapped_array.min(),
mapped_array.to_pd().min().rename('min')
)
pd.testing.assert_series_equal(
mapped_array_grouped.min(),
pd.Series([10., 10.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('min')
)
def test_max(self):
assert mapped_array['a'].max() == mapped_array['a'].to_pd().max()
pd.testing.assert_series_equal(
mapped_array.max(),
mapped_array.to_pd().max().rename('max')
)
pd.testing.assert_series_equal(
mapped_array_grouped.max(),
pd.Series([14., 12.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('max')
)
def test_mean(self):
assert mapped_array['a'].mean() == mapped_array['a'].to_pd().mean()
pd.testing.assert_series_equal(
mapped_array.mean(),
mapped_array.to_pd().mean().rename('mean')
)
pd.testing.assert_series_equal(
mapped_array_grouped.mean(),
pd.Series([12.166667, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('mean')
)
def test_median(self):
assert mapped_array['a'].median() == mapped_array['a'].to_pd().median()
pd.testing.assert_series_equal(
mapped_array.median(),
mapped_array.to_pd().median().rename('median')
)
pd.testing.assert_series_equal(
mapped_array_grouped.median(),
pd.Series([12.5, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('median')
)
def test_std(self):
assert mapped_array['a'].std() == mapped_array['a'].to_pd().std()
pd.testing.assert_series_equal(
mapped_array.std(),
mapped_array.to_pd().std().rename('std')
)
pd.testing.assert_series_equal(
mapped_array.std(ddof=0),
mapped_array.to_pd().std(ddof=0).rename('std')
)
pd.testing.assert_series_equal(
mapped_array_grouped.std(),
pd.Series([1.4719601443879746, 1.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('std')
)
def test_sum(self):
assert mapped_array['a'].sum() == mapped_array['a'].to_pd().sum()
pd.testing.assert_series_equal(
mapped_array.sum(),
mapped_array.to_pd().sum().rename('sum')
)
pd.testing.assert_series_equal(
mapped_array_grouped.sum(),
pd.Series([73.0, 33.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('sum')
)
def test_count(self):
assert mapped_array['a'].count() == mapped_array['a'].to_pd().count()
pd.testing.assert_series_equal(
mapped_array.count(),
mapped_array.to_pd().count().rename('count')
)
pd.testing.assert_series_equal(
mapped_array_grouped.count(),
pd.Series([6, 3], index=pd.Index(['g1', 'g2'], dtype='object')).rename('count')
)
def test_idxmin(self):
assert mapped_array['a'].idxmin() == mapped_array['a'].to_pd().idxmin()
pd.testing.assert_series_equal(
mapped_array.idxmin(),
mapped_array.to_pd().idxmin().rename('idxmin')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmin(),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmin')
)
def test_idxmax(self):
assert mapped_array['a'].idxmax() == mapped_array['a'].to_pd().idxmax()
pd.testing.assert_series_equal(
mapped_array.idxmax(),
mapped_array.to_pd().idxmax().rename('idxmax')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmax(),
pd.Series(
np.array(['y', 'x'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmax')
)
def test_describe(self):
pd.testing.assert_series_equal(
mapped_array['a'].describe(),
mapped_array['a'].to_pd().describe()
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=None),
mapped_array.to_pd().describe(percentiles=None)
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=[]),
mapped_array.to_pd().describe(percentiles=[])
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=np.arange(0, 1, 0.1)),
mapped_array.to_pd().describe(percentiles=np.arange(0, 1, 0.1))
)
pd.testing.assert_frame_equal(
mapped_array_grouped.describe(),
pd.DataFrame(
np.array([
[6., 3.],
[12.16666667, 11.],
[1.47196014, 1.],
[10., 10.],
[11.25, 10.5],
[12.5, 11.],
[13., 11.5],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object'),
index=mapped_array.describe().index
)
)
def test_value_counts(self):
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(),
pd.Series(
np.array([1, 1, 1]),
index=pd.Float64Index([10.0, 11.0, 12.0], dtype='float64'),
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(mapping=mapping),
pd.Series(
np.array([1, 1, 1]),
index=pd.Index(['test_10.0', 'test_11.0', 'test_12.0'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.value_counts(),
pd.DataFrame(
np.array([
[1, 0, 1, 0],
[1, 0, 1, 0],
[1, 0, 1, 0],
[0, 2, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.value_counts(),
pd.DataFrame(
np.array([
[1, 1],
[1, 1],
[1, 1],
[2, 0],
[1, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
mapped_array2 = mapped_array.replace(mapped_arr=[4, 4, 3, 2, np.nan, 4, 3, 2, 1])
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=False),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[1, 0, 1, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 3.0, 2.0, 1.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([1.0, 2.0, 3.0, 4.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, ascending=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0]
]),
index=pd.Float64Index([1.0, np.nan, 2.0, 3.0, 4.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True),
pd.DataFrame(
np.array([
[0.2222222222222222, 0.1111111111111111, 0.0, 0.0],
[0.0, 0.1111111111111111, 0.1111111111111111, 0.0],
[0.1111111111111111, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.1111111111111111, 0.0, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True, dropna=True),
pd.DataFrame(
np.array([
[0.25, 0.125, 0.0, 0.0],
[0.0, 0.125, 0.125, 0.0],
[0.125, 0.0, 0.125, 0.0],
[0.0, 0.0, 0.125, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0], dtype='float64'),
columns=wrapper.columns
)
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
ma = mapped_array_nosort
ma_grouped = mapped_array_nosort_grouped
else:
ma = mapped_array
ma_grouped = mapped_array_grouped
np.testing.assert_array_equal(
ma['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
ma['a'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
np.testing.assert_array_equal(
ma['b'].id_arr,
np.array([3, 4, 5])
)
np.testing.assert_array_equal(
ma['b'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'a']].id_arr,
np.array([0, 1, 2, 0, 1, 2])
)
np.testing.assert_array_equal(
ma[['a', 'a']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'b']].id_arr,
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
ma[['a', 'b']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = ma.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped['g1'].wrapper.ndim == 2
assert ma_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert ma_grouped['g2'].wrapper.ndim == 2
assert ma_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped[['g1']].wrapper.ndim == 2
assert ma_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert ma_grouped[['g1', 'g2']].wrapper.ndim == 2
assert ma_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_magic(self):
a = vbt.MappedArray(
wrapper,
records_arr['some_field1'],
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
a_inv = vbt.MappedArray(
wrapper,
records_arr['some_field1'][::-1],
records_arr['col'][::-1],
id_arr=records_arr['id'][::-1],
idx_arr=records_arr['idx'][::-1]
)
b = records_arr['some_field2']
a_bool = vbt.MappedArray(
wrapper,
records_arr['some_field1'] > np.mean(records_arr['some_field1']),
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
b_bool = records_arr['some_field2'] > np.mean(records_arr['some_field2'])
assert a ** a == a ** 2
with pytest.raises(Exception):
_ = a * a_inv
# binary ops
# comparison ops
np.testing.assert_array_equal((a == b).values, a.values == b)
np.testing.assert_array_equal((a != b).values, a.values != b)
np.testing.assert_array_equal((a < b).values, a.values < b)
np.testing.assert_array_equal((a > b).values, a.values > b)
np.testing.assert_array_equal((a <= b).values, a.values <= b)
np.testing.assert_array_equal((a >= b).values, a.values >= b)
# arithmetic ops
np.testing.assert_array_equal((a + b).values, a.values + b)
np.testing.assert_array_equal((a - b).values, a.values - b)
np.testing.assert_array_equal((a * b).values, a.values * b)
np.testing.assert_array_equal((a ** b).values, a.values ** b)
np.testing.assert_array_equal((a % b).values, a.values % b)
np.testing.assert_array_equal((a // b).values, a.values // b)
np.testing.assert_array_equal((a / b).values, a.values / b)
# __r*__ is only called if the left object does not have an __*__ method
np.testing.assert_array_equal((10 + a).values, 10 + a.values)
np.testing.assert_array_equal((10 - a).values, 10 - a.values)
np.testing.assert_array_equal((10 * a).values, 10 * a.values)
np.testing.assert_array_equal((10 ** a).values, 10 ** a.values)
np.testing.assert_array_equal((10 % a).values, 10 % a.values)
np.testing.assert_array_equal((10 // a).values, 10 // a.values)
np.testing.assert_array_equal((10 / a).values, 10 / a.values)
# mask ops
np.testing.assert_array_equal((a_bool & b_bool).values, a_bool.values & b_bool)
np.testing.assert_array_equal((a_bool | b_bool).values, a_bool.values | b_bool)
np.testing.assert_array_equal((a_bool ^ b_bool).values, a_bool.values ^ b_bool)
np.testing.assert_array_equal((True & a_bool).values, True & a_bool.values)
np.testing.assert_array_equal((True | a_bool).values, True | a_bool.values)
np.testing.assert_array_equal((True ^ a_bool).values, True ^ a_bool.values)
# unary ops
np.testing.assert_array_equal((-a).values, -a.values)
np.testing.assert_array_equal((+a).values, +a.values)
np.testing.assert_array_equal((abs(-a)).values, abs((-a.values)))
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Mean', 'Std', 'Min', 'Median', 'Max', 'Min Index', 'Max Index'
], dtype='object')
pd.testing.assert_series_equal(
mapped_array.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
2.25, 11.777777777777779, 0.859116756396542, 11.0, 11.666666666666666, 12.666666666666666
],
index=stats_index[:-2],
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
3, 11.0, 1.0, 10.0, 11.0, 12.0, 'x', 'z'
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
6, 12.166666666666666, 1.4719601443879746, 10.0, 12.5, 14.0, 'x', 'y'
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 11)
pd.testing.assert_index_equal(stats_df.index, mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
def test_stats_mapping(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Value Counts: test_10.0',
'Value Counts: test_11.0', 'Value Counts: test_12.0',
'Value Counts: test_13.0', 'Value Counts: test_14.0'
], dtype='object')
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
2.25, 0.5, 0.5, 0.5, 0.5, 0.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='a'),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
3, 1, 1, 1, 0, 0
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
6, 1, 1, 1, 2, 1
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
mapped_array.stats(settings=dict(mapping=mapping))
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mp_mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 9)
pd.testing.assert_index_equal(stats_df.index, mp_mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# base.py ############# #
class TestRecords:
def test_config(self, tmp_path):
assert vbt.Records.loads(records['a'].dumps()) == records['a']
assert vbt.Records.loads(records.dumps()) == records
records.save(tmp_path / 'records')
assert vbt.Records.load(tmp_path / 'records') == records
def test_records(self):
pd.testing.assert_frame_equal(
records.records,
pd.DataFrame.from_records(records_arr)
)
def test_recarray(self):
np.testing.assert_array_equal(records['a'].recarray.some_field1, records['a'].values['some_field1'])
np.testing.assert_array_equal(records.recarray.some_field1, records.values['some_field1'])
def test_records_readable(self):
pd.testing.assert_frame_equal(
records.records_readable,
pd.DataFrame([
[0, 'a', 'x', 10.0, 21.0], [1, 'a', 'y', 11.0, 20.0], [2, 'a', 'z', 12.0, 19.0],
[3, 'b', 'x', 13.0, 18.0], [4, 'b', 'y', 14.0, 17.0], [5, 'b', 'z', 13.0, 18.0],
[6, 'c', 'x', 12.0, 19.0], [7, 'c', 'y', 11.0, 20.0], [8, 'c', 'z', 10.0, 21.0]
], columns=pd.Index(['Id', 'Column', 'Timestamp', 'some_field1', 'some_field2'], dtype='object'))
)
def test_is_sorted(self):
assert records.is_sorted()
assert records.is_sorted(incl_id=True)
assert not records_nosort.is_sorted()
assert not records_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert records.sort().is_sorted()
assert records.sort().is_sorted(incl_id=True)
assert records.sort(incl_id=True).is_sorted(incl_id=True)
assert records_nosort.sort().is_sorted()
assert records_nosort.sort().is_sorted(incl_id=True)
assert records_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = records['a'].values['some_field1'] >= records['a'].values['some_field1'].mean()
record_arrays_close(
records['a'].apply_mask(mask_a).values,
np.array([
(1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
mask = records.values['some_field1'] >= records.values['some_field1'].mean()
filtered = records.apply_mask(mask)
record_arrays_close(
filtered.values,
np.array([
(2, 0, 2, 12., 19.), (3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.),
(5, 1, 2, 13., 18.), (6, 2, 0, 12., 19.)
], dtype=example_dt)
)
assert records_grouped.apply_mask(mask).wrapper == records_grouped.wrapper
def test_map_field(self):
np.testing.assert_array_equal(
records['a'].map_field('some_field1').values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
records.map_field('some_field1').values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
assert records_grouped.map_field('some_field1').wrapper == \
records.map_field('some_field1', group_by=group_by).wrapper
assert records_grouped.map_field('some_field1', group_by=False).wrapper.grouper.group_by is None
def test_map(self):
@njit
def map_func_nb(record):
return record['some_field1'] + record['some_field2']
np.testing.assert_array_equal(
records['a'].map(map_func_nb).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map(map_func_nb).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map(map_func_nb).wrapper == \
records.map(map_func_nb, group_by=group_by).wrapper
assert records_grouped.map(map_func_nb, group_by=False).wrapper.grouper.group_by is None
def test_map_array(self):
arr = records_arr['some_field1'] + records_arr['some_field2']
np.testing.assert_array_equal(
records['a'].map_array(arr[:3]).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map_array(arr).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map_array(arr).wrapper == \
records.map_array(arr, group_by=group_by).wrapper
assert records_grouped.map_array(arr, group_by=False).wrapper.grouper.group_by is None
def test_apply(self):
@njit
def cumsum_apply_nb(records):
return np.cumsum(records['some_field1'])
np.testing.assert_array_equal(
records['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
records.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert records_grouped.apply(cumsum_apply_nb).wrapper == \
records.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert records_grouped.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_count(self):
assert records['a'].count() == 3
pd.testing.assert_series_equal(
records.count(),
pd.Series(
np.array([3, 3, 3, 0]),
index=wrapper.columns
).rename('count')
)
assert records_grouped['g1'].count() == 6
pd.testing.assert_series_equal(
records_grouped.count(),
pd.Series(
np.array([6, 3]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('count')
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
r = records_nosort
r_grouped = records_nosort_grouped
else:
r = records
r_grouped = records_grouped
record_arrays_close(
r['a'].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
pd.testing.assert_index_equal(
r['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
record_arrays_close(
r[['a', 'a']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(0, 1, 0, 10., 21.), (1, 1, 1, 11., 20.), (2, 1, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
record_arrays_close(
r[['a', 'b']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.), (5, 1, 2, 13., 18.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = r.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped['g1'].wrapper.ndim == 2
assert r_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert r_grouped['g2'].wrapper.ndim == 2
assert r_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped[['g1']].wrapper.ndim == 2
assert r_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert r_grouped[['g1', 'g2']].wrapper.ndim == 2
assert r_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_filtering(self):
filtered_records = vbt.Records(wrapper, records_arr[[0, -1]])
record_arrays_close(
filtered_records.values,
np.array([(0, 0, 0, 10., 21.), (8, 2, 2, 10., 21.)], dtype=example_dt)
)
# a
record_arrays_close(
filtered_records['a'].values,
np.array([(0, 0, 0, 10., 21.)], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['a'].map_field('some_field1').id_arr,
np.array([0])
)
assert filtered_records['a'].map_field('some_field1').min() == 10.
assert filtered_records['a'].count() == 1.
# b
record_arrays_close(
filtered_records['b'].values,
np.array([], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['b'].map_field('some_field1').id_arr,
np.array([])
)
assert np.isnan(filtered_records['b'].map_field('some_field1').min())
assert filtered_records['b'].count() == 0.
# c
record_arrays_close(
filtered_records['c'].values,
np.array([(8, 0, 2, 10., 21.)], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['c'].map_field('some_field1').id_arr,
np.array([8])
)
assert filtered_records['c'].map_field('some_field1').min() == 10.
assert filtered_records['c'].count() == 1.
# d
record_arrays_close(
filtered_records['d'].values,
np.array([], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['d'].map_field('some_field1').id_arr,
np.array([])
)
assert np.isnan(filtered_records['d'].map_field('some_field1').min())
assert filtered_records['d'].count() == 0.
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count'
], dtype='object')
pd.testing.assert_series_equal(
records.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 2.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
records.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 3
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
records.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 6
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
records['c'].stats(),
records.stats(column='c')
)
pd.testing.assert_series_equal(
records['c'].stats(),
records.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
records_grouped['g2'].stats(),
records_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
records_grouped['g2'].stats(),
records.stats(column='g2', group_by=group_by)
)
stats_df = records.stats(agg_func=None)
assert stats_df.shape == (4, 4)
pd.testing.assert_index_equal(stats_df.index, records.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# ranges.py ############# #
ts = pd.DataFrame({
'a': [1, -1, 3, -1, 5, -1],
'b': [-1, -1, -1, 4, 5, 6],
'c': [1, 2, 3, -1, -1, -1],
'd': [-1, -1, -1, -1, -1, -1]
}, index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6)
])
ranges = vbt.Ranges.from_ts(ts, wrapper_kwargs=dict(freq='1 days'))
ranges_grouped = vbt.Ranges.from_ts(ts, wrapper_kwargs=dict(freq='1 days', group_by=group_by))
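# `from_ts` treats -1 as a gap: every consecutive run of non-gap values in `ts` becomes one
# range record (see test_to_mask below, where the recovered mask is simply `ts != -1`).
# Based on test_from_ts and test_records_readable, the range_dt fields appear to be
# (id, col, start_idx, end_idx, status), with status 1 = Closed and 0 = Open.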
class TestRanges:
def test_mapped_fields(self):
for name in range_dt.names:
np.testing.assert_array_equal(
getattr(ranges, name).values,
ranges.values[name]
)
def test_from_ts(self):
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 1, 1), (1, 0, 2, 3, 1), (2, 0, 4, 5, 1), (3, 1, 3, 5, 0), (4, 2, 0, 3, 1)
], dtype=range_dt)
)
assert ranges.wrapper.freq == day_dt
pd.testing.assert_index_equal(
ranges_grouped.wrapper.grouper.group_by,
group_by
)
def test_records_readable(self):
records_readable = ranges.records_readable
np.testing.assert_array_equal(
records_readable['Range Id'].values,
np.array([
0, 1, 2, 3, 4
])
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'b', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Start Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-01T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['End Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-06T00:00:00.000000000',
'2020-01-04T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Status'].values,
np.array([
'Closed', 'Closed', 'Closed', 'Open', 'Closed'
])
)
def test_to_mask(self):
pd.testing.assert_series_equal(
ranges['a'].to_mask(),
ts['a'] != -1
)
pd.testing.assert_frame_equal(
ranges.to_mask(),
ts != -1
)
pd.testing.assert_frame_equal(
ranges_grouped.to_mask(),
pd.DataFrame(
[
[True, True],
[False, True],
[True, True],
[True, False],
[True, False],
[True, False]
],
index=ts.index,
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_duration(self):
np.testing.assert_array_equal(
ranges['a'].duration.values,
np.array([1, 1, 1])
)
np.testing.assert_array_equal(
ranges.duration.values,
np.array([1, 1, 1, 3, 3])
)
def test_avg_duration(self):
assert ranges['a'].avg_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
ranges.avg_duration(),
pd.Series(
np.array([86400000000000, 259200000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('avg_duration')
)
pd.testing.assert_series_equal(
ranges_grouped.avg_duration(),
pd.Series(
np.array([129600000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_duration')
)
def test_max_duration(self):
assert ranges['a'].max_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
ranges.max_duration(),
pd.Series(
np.array([86400000000000, 259200000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('max_duration')
)
pd.testing.assert_series_equal(
ranges_grouped.max_duration(),
pd.Series(
np.array([259200000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_duration')
)
def test_coverage(self):
assert ranges['a'].coverage() == 0.5
pd.testing.assert_series_equal(
ranges.coverage(),
pd.Series(
np.array([0.5, 0.5, 0.5, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.coverage(),
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage()
)
pd.testing.assert_series_equal(
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage(overlapping=True),
pd.Series(
np.array([1.0, 1.0, 1.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.coverage(normalize=False),
pd.Series(
np.array([3.0, 3.0, 3.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage(overlapping=True, normalize=False),
pd.Series(
np.array([3.0, 3.0, 3.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges_grouped.coverage(),
pd.Series(
np.array([0.4166666666666667, 0.25]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges_grouped.coverage(),
ranges_grouped.replace(records_arr=np.repeat(ranges_grouped.values, 2)).coverage()
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Coverage', 'Overlap Coverage',
'Total Records', 'Duration: Min', 'Duration: Median', 'Duration: Max',
'Duration: Mean', 'Duration: Std'
], dtype='object')
pd.testing.assert_series_equal(
ranges.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'), 1.25, pd.Timedelta('2 days 08:00:00'),
pd.Timedelta('2 days 08:00:00'), pd.Timedelta('2 days 08:00:00'),
pd.Timedelta('2 days 08:00:00'), pd.Timedelta('0 days 00:00:00')
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
ranges.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'), 3, pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('0 days 00:00:00')
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
ranges.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('5 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), 4, pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('1 days 12:00:00'), pd.Timedelta('1 days 00:00:00')
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
ranges['c'].stats(),
ranges.stats(column='c')
)
pd.testing.assert_series_equal(
ranges['c'].stats(),
ranges.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
ranges_grouped['g2'].stats(),
ranges_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
ranges_grouped['g2'].stats(),
ranges.stats(column='g2', group_by=group_by)
)
stats_df = ranges.stats(agg_func=None)
assert stats_df.shape == (4, 11)
pd.testing.assert_index_equal(stats_df.index, ranges.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# drawdowns.py ############# #
ts2 = pd.DataFrame({
'a': [2, 1, 3, 1, 4, 1],
'b': [1, 2, 1, 3, 1, 4],
'c': [1, 2, 3, 2, 1, 2],
'd': [1, 2, 3, 4, 5, 6]
}, index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6)
])
drawdowns = vbt.Drawdowns.from_ts(ts2, wrapper_kwargs=dict(freq='1 days'))
drawdowns_grouped = vbt.Drawdowns.from_ts(ts2, wrapper_kwargs=dict(freq='1 days', group_by=group_by))
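# Based on test_from_ts and test_records_readable below, the drawdown_dt fields appear to be
# (id, col, peak_idx, start_idx, valley_idx, end_idx, peak_val, valley_val, end_val, status),
# with status 1 = Recovered and 0 = Active.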
class TestDrawdowns:
def test_mapped_fields(self):
for name in drawdown_dt.names:
np.testing.assert_array_equal(
getattr(drawdowns, name).values,
drawdowns.values[name]
)
def test_ts(self):
pd.testing.assert_frame_equal(
drawdowns.ts,
ts2
)
pd.testing.assert_series_equal(
drawdowns['a'].ts,
ts2['a']
)
pd.testing.assert_frame_equal(
drawdowns_grouped['g1'].ts,
ts2[['a', 'b']]
)
assert drawdowns.replace(ts=None)['a'].ts is None
def test_from_ts(self):
record_arrays_close(
drawdowns.values,
np.array([
(0, 0, 0, 1, 1, 2, 2.0, 1.0, 3.0, 1), (1, 0, 2, 3, 3, 4, 3.0, 1.0, 4.0, 1),
(2, 0, 4, 5, 5, 5, 4.0, 1.0, 1.0, 0), (3, 1, 1, 2, 2, 3, 2.0, 1.0, 3.0, 1),
(4, 1, 3, 4, 4, 5, 3.0, 1.0, 4.0, 1), (5, 2, 2, 3, 4, 5, 3.0, 1.0, 2.0, 0)
], dtype=drawdown_dt)
)
assert drawdowns.wrapper.freq == day_dt
pd.testing.assert_index_equal(
drawdowns_grouped.wrapper.grouper.group_by,
group_by
)
def test_records_readable(self):
records_readable = drawdowns.records_readable
np.testing.assert_array_equal(
records_readable['Drawdown Id'].values,
np.array([
0, 1, 2, 3, 4, 5
])
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'b', 'b', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Peak Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-02T00:00:00.000000000',
'2020-01-04T00:00:00.000000000', '2020-01-03T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Start Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-04T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Valley Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-05T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['End Timestamp'].values,
np.array([
'2020-01-03T00:00:00.000000000', '2020-01-05T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-06T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Peak Value'].values,
np.array([
2., 3., 4., 2., 3., 3.
])
)
np.testing.assert_array_equal(
records_readable['Valley Value'].values,
np.array([
1., 1., 1., 1., 1., 1.
])
)
np.testing.assert_array_equal(
records_readable['End Value'].values,
np.array([
3., 4., 1., 3., 4., 2.
])
)
np.testing.assert_array_equal(
records_readable['Status'].values,
np.array([
'Recovered', 'Recovered', 'Active', 'Recovered', 'Recovered', 'Active'
])
)
def test_drawdown(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].drawdown.values,
np.array([-0.5, -0.66666667, -0.75])
)
np.testing.assert_array_almost_equal(
drawdowns.drawdown.values,
np.array([-0.5, -0.66666667, -0.75, -0.5, -0.66666667, -0.66666667])
)
pd.testing.assert_frame_equal(
drawdowns.drawdown.to_pd(),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[-0.5, np.nan, np.nan, np.nan],
[np.nan, -0.5, np.nan, np.nan],
[-0.66666669, np.nan, np.nan, np.nan],
[-0.75, -0.66666669, -0.66666669, np.nan]
]),
index=ts2.index,
columns=ts2.columns
)
)
def test_avg_drawdown(self):
assert drawdowns['a'].avg_drawdown() == -0.6388888888888888
pd.testing.assert_series_equal(
drawdowns.avg_drawdown(),
pd.Series(
np.array([-0.63888889, -0.58333333, -0.66666667, np.nan]),
index=wrapper.columns
).rename('avg_drawdown')
)
pd.testing.assert_series_equal(
drawdowns_grouped.avg_drawdown(),
pd.Series(
np.array([-0.6166666666666666, -0.6666666666666666]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_drawdown')
)
def test_max_drawdown(self):
assert drawdowns['a'].max_drawdown() == -0.75
pd.testing.assert_series_equal(
drawdowns.max_drawdown(),
pd.Series(
np.array([-0.75, -0.66666667, -0.66666667, np.nan]),
index=wrapper.columns
).rename('max_drawdown')
)
pd.testing.assert_series_equal(
drawdowns_grouped.max_drawdown(),
pd.Series(
np.array([-0.75, -0.6666666666666666]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_drawdown')
)
def test_recovery_return(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].recovery_return.values,
np.array([2., 3., 0.])
)
np.testing.assert_array_almost_equal(
drawdowns.recovery_return.values,
np.array([2., 3., 0., 2., 3., 1.])
)
pd.testing.assert_frame_equal(
drawdowns.recovery_return.to_pd(),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[2.0, np.nan, np.nan, np.nan],
[np.nan, 2.0, np.nan, np.nan],
[3.0, np.nan, np.nan, np.nan],
[0.0, 3.0, 1.0, np.nan]
]),
index=ts2.index,
columns=ts2.columns
)
)
def test_avg_recovery_return(self):
assert drawdowns['a'].avg_recovery_return() == 1.6666666666666667
pd.testing.assert_series_equal(
drawdowns.avg_recovery_return(),
pd.Series(
np.array([1.6666666666666667, 2.5, 1.0, np.nan]),
index=wrapper.columns
).rename('avg_recovery_return')
)
pd.testing.assert_series_equal(
drawdowns_grouped.avg_recovery_return(),
pd.Series(
np.array([2.0, 1.0]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_recovery_return')
)
def test_max_recovery_return(self):
assert drawdowns['a'].max_recovery_return() == 3.0
pd.testing.assert_series_equal(
drawdowns.max_recovery_return(),
pd.Series(
np.array([3.0, 3.0, 1.0, np.nan]),
index=wrapper.columns
).rename('max_recovery_return')
)
pd.testing.assert_series_equal(
drawdowns_grouped.max_recovery_return(),
pd.Series(
np.array([3.0, 1.0]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_recovery_return')
)
def test_duration(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].duration.values,
np.array([1, 1, 1])
)
np.testing.assert_array_almost_equal(
drawdowns.duration.values,
np.array([1, 1, 1, 1, 1, 3])
)
def test_avg_duration(self):
assert drawdowns['a'].avg_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
drawdowns.avg_duration(),
pd.Series(
np.array([86400000000000, 86400000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('avg_duration')
)
pd.testing.assert_series_equal(
drawdowns_grouped.avg_duration(),
pd.Series(
np.array([86400000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_duration')
)
def test_max_duration(self):
assert drawdowns['a'].max_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
drawdowns.max_duration(),
pd.Series(
np.array([86400000000000, 86400000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('max_duration')
)
pd.testing.assert_series_equal(
drawdowns_grouped.max_duration(),
pd.Series(
np.array([86400000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_duration')
)
def test_coverage(self):
assert drawdowns['a'].coverage() == 0.5
pd.testing.assert_series_equal(
drawdowns.coverage(),
pd.Series(
np.array([0.5, 0.3333333333333333, 0.5, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
drawdowns_grouped.coverage(),
pd.Series(
np.array([0.4166666666666667, 0.25]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('coverage')
)
def test_decline_duration(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].decline_duration.values,
np.array([1., 1., 1.])
)
np.testing.assert_array_almost_equal(
drawdowns.decline_duration.values,
np.array([1., 1., 1., 1., 1., 2.])
)
def test_recovery_duration(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].recovery_duration.values,
np.array([1, 1, 0])
)
np.testing.assert_array_almost_equal(
drawdowns.recovery_duration.values,
np.array([1, 1, 0, 1, 1, 1])
)
def test_recovery_duration_ratio(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].recovery_duration_ratio.values,
np.array([1., 1., 0.])
)
np.testing.assert_array_almost_equal(
drawdowns.recovery_duration_ratio.values,
np.array([1., 1., 0., 1., 1., 0.5])
)
def test_active_records(self):
assert isinstance(drawdowns.active, vbt.Drawdowns)
assert drawdowns.active.wrapper == drawdowns.wrapper
record_arrays_close(
drawdowns['a'].active.values,
np.array([
(2, 0, 4, 5, 5, 5, 4., 1., 1., 0)
], dtype=drawdown_dt)
)
record_arrays_close(
drawdowns['a'].active.values,
drawdowns.active['a'].values
)
record_arrays_close(
drawdowns.active.values,
np.array([
(2, 0, 4, 5, 5, 5, 4.0, 1.0, 1.0, 0), (5, 2, 2, 3, 4, 5, 3.0, 1.0, 2.0, 0)
], dtype=drawdown_dt)
)
def test_recovered_records(self):
assert isinstance(drawdowns.recovered, vbt.Drawdowns)
assert drawdowns.recovered.wrapper == drawdowns.wrapper
record_arrays_close(
drawdowns['a'].recovered.values,
np.array([
(0, 0, 0, 1, 1, 2, 2.0, 1.0, 3.0, 1), (1, 0, 2, 3, 3, 4, 3.0, 1.0, 4.0, 1)
], dtype=drawdown_dt)
)
record_arrays_close(
drawdowns['a'].recovered.values,
drawdowns.recovered['a'].values
)
record_arrays_close(
drawdowns.recovered.values,
np.array([
(0, 0, 0, 1, 1, 2, 2.0, 1.0, 3.0, 1), (1, 0, 2, 3, 3, 4, 3.0, 1.0, 4.0, 1),
(3, 1, 1, 2, 2, 3, 2.0, 1.0, 3.0, 1), (4, 1, 3, 4, 4, 5, 3.0, 1.0, 4.0, 1)
], dtype=drawdown_dt)
)
def test_active_drawdown(self):
assert drawdowns['a'].active_drawdown() == -0.75
pd.testing.assert_series_equal(
drawdowns.active_drawdown(),
pd.Series(
np.array([-0.75, np.nan, -0.3333333333333333, np.nan]),
index=wrapper.columns
).rename('active_drawdown')
)
with pytest.raises(Exception):
drawdowns_grouped.active_drawdown()
def test_active_duration(self):
assert drawdowns['a'].active_duration() == np.timedelta64(86400000000000)
pd.testing.assert_series_equal(
drawdowns.active_duration(),
pd.Series(
np.array([86400000000000, 'NaT', 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('active_duration')
)
with pytest.raises(Exception):
drawdowns_grouped.active_duration()
def test_active_recovery(self):
assert drawdowns['a'].active_recovery() == 0.
pd.testing.assert_series_equal(
drawdowns.active_recovery(),
pd.Series(
np.array([0., np.nan, 0.5, np.nan]),
index=wrapper.columns
).rename('active_recovery')
)
with pytest.raises(Exception):
drawdowns_grouped.active_recovery()
def test_active_recovery_return(self):
assert drawdowns['a'].active_recovery_return() == 0.
pd.testing.assert_series_equal(
drawdowns.active_recovery_return(),
pd.Series(
np.array([0., np.nan, 1., np.nan]),
index=wrapper.columns
).rename('active_recovery_return')
)
with pytest.raises(Exception):
drawdowns_grouped.active_recovery_return()
def test_active_recovery_duration(self):
assert drawdowns['a'].active_recovery_duration() == pd.Timedelta('0 days 00:00:00')
pd.testing.assert_series_equal(
drawdowns.active_recovery_duration(),
pd.Series(
np.array([0, 'NaT', 86400000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('active_recovery_duration')
)
with pytest.raises(Exception):
drawdowns_grouped.active_recovery_duration()
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Coverage [%]', 'Total Records',
'Total Recovered Drawdowns', 'Total Active Drawdowns',
'Active Drawdown [%]', 'Active Duration', 'Active Recovery [%]',
'Active Recovery Return [%]', 'Active Recovery Duration',
'Max Drawdown [%]', 'Avg Drawdown [%]', 'Max Drawdown Duration',
'Avg Drawdown Duration', 'Max Recovery Return [%]',
'Avg Recovery Return [%]', 'Max Recovery Duration',
'Avg Recovery Duration', 'Avg Recovery Duration Ratio'
], dtype='object')
pd.testing.assert_series_equal(
drawdowns.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 44.444444444444436, 1.5, 1.0, 0.5,
54.166666666666664, pd.Timedelta('2 days 00:00:00'), 25.0, 50.0,
pd.Timedelta('0 days 12:00:00'), 66.66666666666666, 58.33333333333333,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 300.0, 250.0,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
drawdowns.stats(settings=dict(incl_active=True)),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 44.444444444444436, 1.5, 1.0, 0.5,
54.166666666666664, pd.Timedelta('2 days 00:00:00'), 25.0, 50.0,
pd.Timedelta('0 days 12:00:00'), 69.44444444444444, 62.962962962962955,
pd.Timedelta('1 days 16:00:00'), pd.Timedelta('1 days 16:00:00'), 300.0, 250.0,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
drawdowns.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 50.0, 3, 2, 1, 75.0, pd.Timedelta('1 days 00:00:00'),
0.0, 0.0, pd.Timedelta('0 days 00:00:00'), 66.66666666666666, 58.33333333333333,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 300.0, 250.0,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
drawdowns.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 41.66666666666667, 5, 4, 1, 66.66666666666666,
58.33333333333333, pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'),
300.0, 250.0, pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=pd.Index([
'Start', 'End', 'Period', 'Coverage [%]', 'Total Records',
'Total Recovered Drawdowns', 'Total Active Drawdowns',
'Max Drawdown [%]', 'Avg Drawdown [%]', 'Max Drawdown Duration',
'Avg Drawdown Duration', 'Max Recovery Return [%]',
'Avg Recovery Return [%]', 'Max Recovery Duration',
'Avg Recovery Duration', 'Avg Recovery Duration Ratio'
], dtype='object'),
name='g1'
)
)
pd.testing.assert_series_equal(
drawdowns['c'].stats(),
drawdowns.stats(column='c')
)
pd.testing.assert_series_equal(
drawdowns['c'].stats(),
drawdowns.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
drawdowns_grouped['g2'].stats(),
drawdowns_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
drawdowns_grouped['g2'].stats(),
drawdowns.stats(column='g2', group_by=group_by)
)
stats_df = drawdowns.stats(agg_func=None)
assert stats_df.shape == (4, 21)
pd.testing.assert_index_equal(stats_df.index, drawdowns.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# orders.py ############# #
close = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6),
datetime(2020, 1, 7),
datetime(2020, 1, 8)
]).vbt.tile(4, keys=['a', 'b', 'c', 'd'])
size = np.full(close.shape, np.nan, dtype=np.float_)
size[:, 0] = [1, 0.1, -1, -0.1, np.nan, 1, -1, 2]
size[:, 1] = [-1, -0.1, 1, 0.1, np.nan, -1, 1, -2]
size[:, 2] = [1, 0.1, -1, -0.1, np.nan, 1, -2, 2]
orders = vbt.Portfolio.from_orders(close, size, fees=0.01, freq='1 days').orders
orders_grouped = orders.regroup(group_by)
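# Column 'd' is deliberately left with an all-NaN size, so it produces no orders and exercises
# the empty-column paths in the tests below. Based on test_records_readable, the order_dt
# fields appear to be (id, col, idx, size, price, fees, side), with side 0 = Buy and 1 = Sell.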
class TestOrders:
def test_mapped_fields(self):
for name in order_dt.names:
np.testing.assert_array_equal(
getattr(orders, name).values,
orders.values[name]
)
def test_close(self):
pd.testing.assert_frame_equal(
orders.close,
close
)
pd.testing.assert_series_equal(
orders['a'].close,
close['a']
)
pd.testing.assert_frame_equal(
orders_grouped['g1'].close,
close[['a', 'b']]
)
assert orders.replace(close=None)['a'].close is None
def test_records_readable(self):
records_readable = orders.records_readable
np.testing.assert_array_equal(
records_readable['Order Id'].values,
np.array([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20
])
)
np.testing.assert_array_equal(
records_readable['Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-02T00:00:00.000000000',
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-07T00:00:00.000000000',
'2020-01-08T00:00:00.000000000', '2020-01-01T00:00:00.000000000',
'2020-01-02T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-04T00:00:00.000000000', '2020-01-06T00:00:00.000000000',
'2020-01-07T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-01T00:00:00.000000000', '2020-01-02T00:00:00.000000000',
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-07T00:00:00.000000000',
'2020-01-08T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'b', 'b',
'b', 'c', 'c', 'c', 'c', 'c', 'c', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Size'].values,
np.array([
1.0, 0.1, 1.0, 0.1, 1.0, 1.0, 2.0, 1.0, 0.1, 1.0, 0.1, 1.0, 1.0,
2.0, 1.0, 0.1, 1.0, 0.1, 1.0, 2.0, 2.0
])
)
np.testing.assert_array_equal(
records_readable['Price'].values,
np.array([
1.0, 2.0, 3.0, 4.0, 6.0, 7.0, 8.0, 1.0, 2.0, 3.0, 4.0, 6.0, 7.0,
8.0, 1.0, 2.0, 3.0, 4.0, 6.0, 7.0, 8.0
])
)
np.testing.assert_array_equal(
records_readable['Fees'].values,
np.array([
0.01, 0.002, 0.03, 0.004, 0.06, 0.07, 0.16, 0.01, 0.002, 0.03,
0.004, 0.06, 0.07, 0.16, 0.01, 0.002, 0.03, 0.004, 0.06, 0.14,
0.16
])
)
np.testing.assert_array_equal(
records_readable['Side'].values,
np.array([
'Buy', 'Buy', 'Sell', 'Sell', 'Buy', 'Sell', 'Buy', 'Sell', 'Sell',
'Buy', 'Buy', 'Sell', 'Buy', 'Sell', 'Buy', 'Buy', 'Sell', 'Sell',
'Buy', 'Sell', 'Buy'
])
)
def test_buy_records(self):
assert isinstance(orders.buy, vbt.Orders)
assert orders.buy.wrapper == orders.wrapper
record_arrays_close(
orders['a'].buy.values,
np.array([
(0, 0, 0, 1., 1., 0.01, 0), (1, 0, 1, 0.1, 2., 0.002, 0),
(4, 0, 5, 1., 6., 0.06, 0), (6, 0, 7, 2., 8., 0.16, 0)
], dtype=order_dt)
)
record_arrays_close(
orders['a'].buy.values,
orders.buy['a'].values
)
record_arrays_close(
orders.buy.values,
np.array([
(0, 0, 0, 1., 1., 0.01, 0), (1, 0, 1, 0.1, 2., 0.002, 0),
(4, 0, 5, 1., 6., 0.06, 0), (6, 0, 7, 2., 8., 0.16, 0),
(9, 1, 2, 1., 3., 0.03, 0), (10, 1, 3, 0.1, 4., 0.004, 0),
(12, 1, 6, 1., 7., 0.07, 0), (14, 2, 0, 1., 1., 0.01, 0),
(15, 2, 1, 0.1, 2., 0.002, 0), (18, 2, 5, 1., 6., 0.06, 0),
(20, 2, 7, 2., 8., 0.16, 0)
], dtype=order_dt)
)
def test_sell_records(self):
assert isinstance(orders.sell, vbt.Orders)
assert orders.sell.wrapper == orders.wrapper
record_arrays_close(
orders['a'].sell.values,
np.array([
(2, 0, 2, 1., 3., 0.03, 1), (3, 0, 3, 0.1, 4., 0.004, 1),
(5, 0, 6, 1., 7., 0.07, 1)
], dtype=order_dt)
)
record_arrays_close(
orders['a'].sell.values,
orders.sell['a'].values
)
record_arrays_close(
orders.sell.values,
np.array([
(2, 0, 2, 1., 3., 0.03, 1), (3, 0, 3, 0.1, 4., 0.004, 1),
(5, 0, 6, 1., 7., 0.07, 1), (7, 1, 0, 1., 1., 0.01, 1),
(8, 1, 1, 0.1, 2., 0.002, 1), (11, 1, 5, 1., 6., 0.06, 1),
(13, 1, 7, 2., 8., 0.16, 1), (16, 2, 2, 1., 3., 0.03, 1),
(17, 2, 3, 0.1, 4., 0.004, 1), (19, 2, 6, 2., 7., 0.14, 1)
], dtype=order_dt)
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Total Records', 'Total Buy Orders', 'Total Sell Orders',
'Min Size', 'Max Size', 'Avg Size', 'Avg Buy Size', 'Avg Sell Size',
'Avg Buy Price', 'Avg Sell Price', 'Total Fees', 'Min Fees', 'Max Fees',
'Avg Fees', 'Avg Buy Fees', 'Avg Sell Fees'
], dtype='object')
pd.testing.assert_series_equal(
orders.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), 5.25, 2.75, 2.5, 0.10000000000000002, 2.0,
0.9333333333333335, 0.9166666666666666, 0.9194444444444446, 4.388888888888889,
4.527777777777779, 0.26949999999999996, 0.002, 0.16, 0.051333333333333335,
0.050222222222222224, 0.050222222222222224
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
orders.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), 7, 4, 3, 0.1, 2.0, 0.8857142857142858,
1.025, 0.7000000000000001, 4.25, 4.666666666666667, 0.33599999999999997,
0.002, 0.16, 0.047999999999999994, 0.057999999999999996, 0.03466666666666667
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
orders.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), 14, 7, 7, 0.1, 2.0, 0.8857142857142858,
0.8857142857142856, 0.8857142857142858, 4.428571428571429, 4.428571428571429,
0.672, 0.002, 0.16, 0.048, 0.048, 0.047999999999999994
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
orders['c'].stats(),
orders.stats(column='c')
)
pd.testing.assert_series_equal(
orders['c'].stats(),
orders.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
orders_grouped['g2'].stats(),
orders_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
orders_grouped['g2'].stats(),
orders.stats(column='g2', group_by=group_by)
)
stats_df = orders.stats(agg_func=None)
assert stats_df.shape == (4, 19)
pd.testing.assert_index_equal(stats_df.index, orders.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# trades.py ############# #
exit_trades = vbt.ExitTrades.from_orders(orders)
exit_trades_grouped = vbt.ExitTrades.from_orders(orders_grouped)
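# Based on test_records_readable below, the trade_dt fields appear to be
# (id, col, size, entry_idx, avg_entry_price, entry_fees, exit_idx, avg_exit_price, exit_fees,
#  pnl, return, direction, status, position_id), with direction 0 = Long / 1 = Short and
# status 0 = Open / 1 = Closed.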
class TestExitTrades:
def test_mapped_fields(self):
for name in trade_dt.names:
if name == 'return':
np.testing.assert_array_equal(
getattr(exit_trades, 'returns').values,
exit_trades.values[name]
)
else:
np.testing.assert_array_equal(
getattr(exit_trades, name).values,
exit_trades.values[name]
)
def test_close(self):
pd.testing.assert_frame_equal(
exit_trades.close,
close
)
pd.testing.assert_series_equal(
exit_trades['a'].close,
close['a']
)
pd.testing.assert_frame_equal(
exit_trades_grouped['g1'].close,
close[['a', 'b']]
)
assert exit_trades.replace(close=None)['a'].close is None
def test_records_arr(self):
record_arrays_close(
exit_trades.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1),
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2),
(4, 1, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, -1.95, -1.7875, 1, 1, 3),
(5, 1, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, -0.296, -2.71333333, 1, 1, 3),
(6, 1, 1., 5, 6., 0.06, 6, 7., 0.07, -1.13, -0.18833333, 1, 1, 4),
(7, 1, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 1, 0, 5),
(8, 2, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 6),
(9, 2, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 6),
(10, 2, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 7),
(11, 2, 1., 6, 7., 0.07, 7, 8., 0.08, -1.15, -0.16428571, 1, 1, 8),
(12, 2, 1., 7, 8., 0.08, 7, 8., 0., -0.08, -0.01, 0, 0, 9)
], dtype=trade_dt)
)
reversed_col_orders = orders.replace(records_arr=np.concatenate((
orders.values[orders.values['col'] == 2],
orders.values[orders.values['col'] == 1],
orders.values[orders.values['col'] == 0]
)))
record_arrays_close(
vbt.ExitTrades.from_orders(reversed_col_orders).values,
exit_trades.values
)
def test_records_readable(self):
records_readable = exit_trades.records_readable
np.testing.assert_array_equal(
records_readable['Exit Trade Id'].values,
np.array([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12
])
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'c', 'c', 'c', 'c', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Size'].values,
np.array([
1.0, 0.10000000000000009, 1.0, 2.0, 1.0, 0.10000000000000009, 1.0,
2.0, 1.0, 0.10000000000000009, 1.0, 1.0, 1.0
])
)
np.testing.assert_array_equal(
records_readable['Entry Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-01T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-01T00:00:00.000000000', '2020-01-01T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-01T00:00:00.000000000', '2020-01-01T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-07T00:00:00.000000000',
'2020-01-08T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Avg Entry Price'].values,
np.array([
1.0909090909090908, 1.0909090909090908, 6.0, 8.0,
1.0909090909090908, 1.0909090909090908, 6.0, 8.0,
1.0909090909090908, 1.0909090909090908, 6.0, 7.0, 8.0
])
)
np.testing.assert_array_equal(
records_readable['Entry Fees'].values,
np.array([
0.010909090909090908, 0.0010909090909090918, 0.06, 0.16,
0.010909090909090908, 0.0010909090909090918, 0.06, 0.16,
0.010909090909090908, 0.0010909090909090918, 0.06, 0.07, 0.08
])
)
np.testing.assert_array_equal(
records_readable['Exit Timestamp'].values,
np.array([
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-07T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-07T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-07T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-08T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Avg Exit Price'].values,
np.array([
3.0, 4.0, 7.0, 8.0, 3.0, 4.0, 7.0, 8.0, 3.0, 4.0, 7.0, 8.0, 8.0
])
)
np.testing.assert_array_equal(
records_readable['Exit Fees'].values,
np.array([
0.03, 0.004, 0.07, 0.0, 0.03, 0.004, 0.07, 0.0, 0.03, 0.004, 0.07, 0.08, 0.0
])
)
np.testing.assert_array_equal(
records_readable['PnL'].values,
np.array([
1.8681818181818182, 0.2858181818181821, 0.8699999999999999, -0.16,
-1.9500000000000002, -0.29600000000000026, -1.1300000000000001,
-0.16, 1.8681818181818182, 0.2858181818181821, 0.8699999999999999,
-1.1500000000000001, -0.08
])
)
np.testing.assert_array_equal(
records_readable['Return'].values,
np.array([
1.7125000000000001, 2.62, 0.145, -0.01, -1.7875000000000003,
-2.7133333333333334, -0.18833333333333335, -0.01,
1.7125000000000001, 2.62, 0.145, -0.1642857142857143, -0.01
])
)
np.testing.assert_array_equal(
records_readable['Direction'].values,
np.array([
'Long', 'Long', 'Long', 'Long', 'Short', 'Short', 'Short',
'Short', 'Long', 'Long', 'Long', 'Short', 'Long'
])
)
np.testing.assert_array_equal(
records_readable['Status'].values,
np.array([
'Closed', 'Closed', 'Closed', 'Open', 'Closed', 'Closed', 'Closed',
'Open', 'Closed', 'Closed', 'Closed', 'Closed', 'Open'
])
)
np.testing.assert_array_equal(
records_readable['Position Id'].values,
np.array([
0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9
])
)
def test_duration(self):
np.testing.assert_array_almost_equal(
exit_trades['a'].duration.values,
np.array([2, 3, 1, 1])
)
np.testing.assert_array_almost_equal(
exit_trades.duration.values,
np.array([2, 3, 1, 1, 2, 3, 1, 1, 2, 3, 1, 1, 1])
)
def test_winning_records(self):
assert isinstance(exit_trades.winning, vbt.ExitTrades)
assert exit_trades.winning.wrapper == exit_trades.wrapper
record_arrays_close(
exit_trades['a'].winning.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1)
], dtype=trade_dt)
)
record_arrays_close(
exit_trades['a'].winning.values,
exit_trades.winning['a'].values
)
record_arrays_close(
exit_trades.winning.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1),
(8, 2, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 6),
(9, 2, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 6),
(10, 2, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 7)
], dtype=trade_dt)
)
def test_losing_records(self):
assert isinstance(exit_trades.losing, vbt.ExitTrades)
assert exit_trades.losing.wrapper == exit_trades.wrapper
record_arrays_close(
exit_trades['a'].losing.values,
np.array([
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2)
], dtype=trade_dt)
)
record_arrays_close(
exit_trades['a'].losing.values,
exit_trades.losing['a'].values
)
record_arrays_close(
exit_trades.losing.values,
np.array([
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2),
(4, 1, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, -1.95, -1.7875, 1, 1, 3),
(5, 1, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, -0.296, -2.71333333, 1, 1, 3),
(6, 1, 1., 5, 6., 0.06, 6, 7., 0.07, -1.13, -0.18833333, 1, 1, 4),
(7, 1, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 1, 0, 5),
(11, 2, 1., 6, 7., 0.07, 7, 8., 0.08, -1.15, -0.16428571, 1, 1, 8),
(12, 2, 1., 7, 8., 0.08, 7, 8., 0., -0.08, -0.01, 0, 0, 9)
], dtype=trade_dt)
)
def test_win_rate(self):
assert exit_trades['a'].win_rate() == 0.75
pd.testing.assert_series_equal(
exit_trades.win_rate(),
pd.Series(
np.array([0.75, 0., 0.6, np.nan]),
index=close.columns
).rename('win_rate')
)
pd.testing.assert_series_equal(
exit_trades_grouped.win_rate(),
pd.Series(
np.array([0.375, 0.6]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('win_rate')
)
def test_winning_streak(self):
np.testing.assert_array_almost_equal(
exit_trades['a'].winning_streak.values,
np.array([1, 2, 3, 0])
)
np.testing.assert_array_almost_equal(
exit_trades.winning_streak.values,
np.array([1, 2, 3, 0, 0, 0, 0, 0, 1, 2, 3, 0, 0])
)
def test_losing_streak(self):
np.testing.assert_array_almost_equal(
exit_trades['a'].losing_streak.values,
np.array([0, 0, 0, 1])
)
np.testing.assert_array_almost_equal(
exit_trades.losing_streak.values,
np.array([0, 0, 0, 1, 1, 2, 3, 4, 0, 0, 0, 1, 2])
)
def test_profit_factor(self):
assert exit_trades['a'].profit_factor() == 18.9
pd.testing.assert_series_equal(
exit_trades.profit_factor(),
pd.Series(
np.array([18.9, 0., 2.45853659, np.nan]),
index=ts2.columns
).rename('profit_factor')
)
pd.testing.assert_series_equal(
exit_trades_grouped.profit_factor(),
pd.Series(
np.array([0.81818182, 2.45853659]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('profit_factor')
)
def test_expectancy(self):
assert exit_trades['a'].expectancy() == 0.716
pd.testing.assert_series_equal(
exit_trades.expectancy(),
pd.Series(
np.array([0.716, -0.884, 0.3588, np.nan]),
index=ts2.columns
).rename('expectancy')
)
pd.testing.assert_series_equal(
exit_trades_grouped.expectancy(),
pd.Series(
np.array([-0.084, 0.3588]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('expectancy')
)
def test_sqn(self):
assert exit_trades['a'].sqn() == 1.634155521947584
pd.testing.assert_series_equal(
exit_trades.sqn(),
pd.Series(
np.array([1.63415552, -2.13007307, 0.71660403, np.nan]),
index=ts2.columns
).rename('sqn')
)
pd.testing.assert_series_equal(
exit_trades_grouped.sqn(),
pd.Series(
np.array([-0.20404671, 0.71660403]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('sqn')
)
def test_long_records(self):
assert isinstance(exit_trades.long, vbt.ExitTrades)
assert exit_trades.long.wrapper == exit_trades.wrapper
record_arrays_close(
exit_trades['a'].long.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1),
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2)
], dtype=trade_dt)
)
record_arrays_close(
exit_trades['a'].long.values,
exit_trades.long['a'].values
)
record_arrays_close(
exit_trades.long.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1),
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2),
(8, 2, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 6),
(9, 2, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 6),
(10, 2, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 7),
(12, 2, 1., 7, 8., 0.08, 7, 8., 0., -0.08, -0.01, 0, 0, 9)
], dtype=trade_dt)
)
def test_short_records(self):
assert isinstance(exit_trades.short, vbt.ExitTrades)
assert exit_trades.short.wrapper == exit_trades.wrapper
record_arrays_close(
exit_trades['a'].short.values,
np.array([], dtype=trade_dt)
)
record_arrays_close(
exit_trades['a'].short.values,
exit_trades.short['a'].values
)
record_arrays_close(
exit_trades.short.values,
np.array([
(4, 1, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, -1.95, -1.7875, 1, 1, 3),
(5, 1, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, -0.296, -2.71333333, 1, 1, 3),
(6, 1, 1., 5, 6., 0.06, 6, 7., 0.07, -1.13, -0.18833333, 1, 1, 4),
(7, 1, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 1, 0, 5),
(11, 2, 1., 6, 7., 0.07, 7, 8., 0.08, -1.15, -0.16428571, 1, 1, 8)
], dtype=trade_dt)
)
def test_open_records(self):
assert isinstance(exit_trades.open, vbt.ExitTrades)
assert exit_trades.open.wrapper == exit_trades.wrapper
record_arrays_close(
exit_trades['a'].open.values,
np.array([
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2)
], dtype=trade_dt)
)
record_arrays_close(
exit_trades['a'].open.values,
exit_trades.open['a'].values
)
record_arrays_close(
exit_trades.open.values,
np.array([
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2),
(7, 1, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 1, 0, 5),
(12, 2, 1., 7, 8., 0.08, 7, 8., 0., -0.08, -0.01, 0, 0, 9)
], dtype=trade_dt)
)
def test_closed_records(self):
assert isinstance(exit_trades.closed, vbt.ExitTrades)
assert exit_trades.closed.wrapper == exit_trades.wrapper
record_arrays_close(
exit_trades['a'].closed.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1)
], dtype=trade_dt)
)
record_arrays_close(
exit_trades['a'].closed.values,
exit_trades.closed['a'].values
)
record_arrays_close(
exit_trades.closed.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1),
(4, 1, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, -1.95, -1.7875, 1, 1, 3),
(5, 1, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, -0.296, -2.71333333, 1, 1, 3),
(6, 1, 1., 5, 6., 0.06, 6, 7., 0.07, -1.13, -0.18833333, 1, 1, 4),
(8, 2, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 6),
(9, 2, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 6),
(10, 2, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 7),
(11, 2, 1., 6, 7., 0.07, 7, 8., 0.08, -1.15, -0.16428571, 1, 1, 8)
], dtype=trade_dt)
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'First Trade Start', 'Last Trade End',
'Coverage', 'Overlap Coverage', 'Total Records', 'Total Long Trades',
'Total Short Trades', 'Total Closed Trades', 'Total Open Trades',
'Open Trade PnL', 'Win Rate [%]', 'Max Win Streak', 'Max Loss Streak',
'Best Trade [%]', 'Worst Trade [%]', 'Avg Winning Trade [%]',
'Avg Losing Trade [%]', 'Avg Winning Trade Duration',
'Avg Losing Trade Duration', 'Profit Factor', 'Expectancy', 'SQN'
], dtype='object')
pd.testing.assert_series_equal(
exit_trades.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), pd.Timestamp('2020-01-01 00:00:00'),
|
pd.Timestamp('2020-01-08 00:00:00')
|
pandas.Timestamp
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
import matplotlib.font_manager as fm
import math as m
import matplotlib.dates as mdates
import netCDF4 as nc
from netCDF4 import Dataset
import itertools
import datetime
from scipy.stats import ks_2samp
import matplotlib.colors as colors
import matplotlib.cm as cm
import os
Path_save = '/home/nacorreasa/Maestria/Datos_Tesis/Arrays/'
Horizonte = 'Anio' ##--> 'Anio' for the 2018 and 2019 data and 'EXP' for the data from the experiment onwards.
#------------------------------------------------------------------------------
# Code motivation --------------------------------------------------------------
'Code to determine the frequency and also the fractal dimension (with the aim of revealing relationships'
'between both concepts). The time horizon to work with is defined in the input above.'
'In addition, a scatter plot relating the reflectances to the radiation anomalies is produced.'
#-----------------------------------------------------------------------------
# Paths for the fonts ---------------------------------------------------------
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf' )
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
##########################################################################################
## ------------------ READING THE RADIATION ANOMALY DATA ------------------ ##
##########################################################################################
Anomal_df_975 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/df_AnomalRad_pix975_2018_2019.csv', sep=',')
Anomal_df_348 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/df_AnomalRad_pix348_2018_2019.csv', sep=',')
Anomal_df_350 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/df_AnomalRad_pix350_2018_2019.csv', sep=',')
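# The block below indexes the 975-pixel frame by its timestamp, keeps only the daytime hours
# (06:00-18:00) and groups the records into hourly bins (the open groupby call is completed
# with pd.Grouper(freq="H")).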
Anomal_df_975['fecha_hora'] = pd.to_datetime(Anomal_df_975['fecha_hora'], format="%Y-%m-%d %H:%M", errors='coerce')
Anomal_df_975.index = Anomal_df_975['fecha_hora']
Anomal_df_975 = Anomal_df_975.drop(['fecha_hora'], axis=1)
Anomal_df_975 = Anomal_df_975.between_time('06:00', '18:00') ##--> keep only the daytime observations
Anomal_df_975_h = Anomal_df_975.groupby(
|
pd.Grouper(freq="H")
|
pandas.Grouper
|
import os
import sys
import warnings
if not sys.warnoptions:
warnings.simplefilter("ignore")
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.neural_network import MLPRegressor
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.metrics import mean_squared_log_error
from sklearn.model_selection import GridSearchCV, cross_val_score
print(os.listdir("data"))
train_data = pd.read_csv('data/train.csv')
test_data = pd.read_csv('data/test.csv')
def get_cat_cols(df):
return [col for col in df.columns if df[col].dtype == 'object']
y = np.log1p(train_data.SalePrice)
cand_train_predictors = train_data.drop(['Id', 'SalePrice'], axis=1)
cand_test_predictors = test_data.drop(['Id'], axis=1)
cat_cols = get_cat_cols(cand_train_predictors)
cand_train_predictors[cat_cols] = cand_train_predictors[cat_cols].fillna('NotAvailable')
cand_test_predictors[cat_cols] = cand_test_predictors[cat_cols].fillna('NotAvailable')
encoders = {}
for col in cat_cols:
encoders[col] = LabelEncoder()
val = cand_train_predictors[col].tolist()
val.extend(cand_test_predictors[col].tolist())
encoders[col].fit(val)
cand_train_predictors[col] = encoders[col].transform(cand_train_predictors[col]) + 1
cand_test_predictors[col] = encoders[col].transform(cand_test_predictors[col]) + 1
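# Each LabelEncoder is fit on the union of train and test values, so categories that appear
# only in the test set can still be transformed; the +1 offset presumably keeps the code 0
# free (e.g. for missing values) -- that reading is an assumption, it is not stated here.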
corr_matrix = cand_train_predictors.corr().abs()
upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))
cols_to_drop = [column for column in upper.columns if any(upper[column] > 0.8)]
print('correlated features (will be dropped):', cols_to_drop)
cand_train_predictors = cand_train_predictors.drop(cols_to_drop, axis=1)
cand_test_predictors = cand_test_predictors.drop(cols_to_drop, axis=1)
print(cand_train_predictors.shape)
print(cand_test_predictors.shape)
train_set, test_set = cand_train_predictors.align(cand_test_predictors, join='left', axis=1)
train_set = np.log1p(train_set)
test_set = np.log1p(test_set)
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LassoCV
from sklearn.model_selection import KFold
params = {}
train_set.fillna('NaN', inplace=True)
score_results = []
kfold = KFold(n_splits=10, shuffle=True, random_state=1)  # random_state only takes effect when shuffle=True
imputer = SimpleImputer()
# scaler = RobustScaler(with_scaling=True, with_centering=True, quantile_range=(20., 80.))
scaler = RobustScaler()
select = SelectFromModel(LassoCV(cv=kfold, random_state=1), threshold='median')
regressor = MLPRegressor(early_stopping=True,
activation='identity',
max_iter=10000)
my_model = make_pipeline(imputer, scaler, select, regressor)
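# Pipeline: mean-impute missing values -> RobustScaler -> LassoCV-driven feature selection
# (keeping features whose coefficient magnitude is at or above the median) -> MLP with an
# identity activation, which makes the network effectively a regularised linear model.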
scores = np.sqrt(
-1 * cross_val_score(my_model,
train_set,
y,
scoring='neg_mean_squared_log_error',
verbose=0,
n_jobs=2,
cv=kfold))
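# cross_val_score returns the negated MSLE (greater is better), so sqrt(-1 * score) gives the
# per-fold RMSLE; the mean and standard deviation printed below summarise the 10 folds.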
mean_score = scores.mean()
print(mean_score)
print(scores.std())
regressor = MLPRegressor(early_stopping=True,
activation='identity',
max_iter=10000)
my_model = make_pipeline(imputer, scaler, select, regressor)
my_model.fit(train_set, y)
print(my_model.score(train_set, y))
# sgd
# 0.010535126813418304
# 0.0014979832026646835
# 0.8753998274053919
# adam
# 0.01042968407581037
# 0.0015518828940809886
# 0.8953275894476386
# rmsle: 0.009936016739047336
# rmse: 30724.85212172329
# mae: 16180.395157871164
# [120197.07675005 158456.00818102 180513.20130843 194200.42489418
# 180857.53842551]
train_pred = my_model.predict(train_set)
print('rmsle: ', np.sqrt(mean_squared_log_error(y, train_pred)))
print('rmse: ', np.sqrt(mean_squared_error(train_data.SalePrice, np.expm1(train_pred))))
print('mae: ', mean_absolute_error(train_data.SalePrice, np.expm1(train_pred)))
test_set.fillna('NaN', inplace=True)
predicted_prices = np.expm1(my_model.predict(test_set))
print(predicted_prices[:5])
my_submission =
|
pd.DataFrame({'Id': test_data.Id, 'SalePrice': predicted_prices})
|
pandas.DataFrame
|
import os
import json
import pandas as pd
statements = []
evidences = []
adjective_frequencies = {'sub': {}, 'obj': {}}
_adjective_frequencies = {'sub': {}, 'obj': {}}
adjective_names = {'sub': {}, 'obj': {}}
_adjective_names = {'sub': {}, 'obj': {}}
adjective_pairs = {}
_adjective_pairs = {}
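# Each line of the input file is assumed (inferred from the fields accessed below) to hold one
# INDRA statement shaped roughly like:
#   {"_source": {"belief": <float>,
#                "subj": {"concept": str, "adjectives": [...], "polarity": int or None},
#                "obj":  {"concept": str, "adjectives": [...], "polarity": int or None},
#                "evidence": [{"evidence_context": {"text": str,
#                                                   "subj_adjectives": [...], "obj_adjectives": [...],
#                                                   "subj_polarity": ..., "obj_polarity": ...}}, ...]}}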
with open('../../data/causemos_indra_statements/CauseMos_indra_statements.json', 'r') as f:
lines = f.readlines()
for idx, line in enumerate(lines, 1):
statement = json.loads(line)
#print(json.dumps(statement, indent=4, sort_keys=True))
belief = statement["_source"]["belief"]
evidence = statement["_source"]["evidence"]
for evid_idx, evid in enumerate(evidence, 1):
text = evid["evidence_context"]["text"]
_adjectives = []
for key in ["subj_adjectives", "obj_adjectives"]:
_adj = evid["evidence_context"][key]
_adj = _adj if _adj else []
_adjectives.append(_adj)
_polarities = []
for key in ["subj_polarity", "obj_polarity"]:
_pol = evid["evidence_context"][key]
_pol = _pol if _pol else 0
_polarities.append(_pol)
evidences.append({
'Statement #': idx,
'Evidence #': evid_idx,
'_Sub Adj': ', '.join(_adjectives[0]),
'_Obj Adj': ', '.join(_adjectives[1]),
'_Sub Pol': _polarities[0],
'_Obj Pol': _polarities[1],
'# _Sub Adj': len(_adjectives[0]),
'# _Obj Adj': len(_adjectives[1]),
'Text': text
})
for idx2, key in enumerate(['sub', 'obj']):
if len(_adjectives[idx2]) in _adjective_frequencies[key].keys():
_adjective_frequencies[key][len(_adjectives[idx2])] += 1
else:
_adjective_frequencies[key][len(_adjectives[idx2])] = 1
_adjectives[0] = ['None'] if len(_adjectives[0]) == 0 else _adjectives[0]
_adjectives[1] = ['None'] if len(_adjectives[1]) == 0 else _adjectives[1]
for adj in _adjectives[0]:
if adj in _adjective_names['sub'].keys():
_adjective_names['sub'][adj] += 1
else:
_adjective_names['sub'][adj] = 1
for adj in _adjectives[1]:
if adj in _adjective_names['obj'].keys():
_adjective_names['obj'][adj] += 1
else:
_adjective_names['obj'][adj] = 1
for sub in _adjectives[0]:
for obj in _adjectives[1]:
adj_pair = (sub, obj)
if adj_pair in _adjective_pairs.keys():
_adjective_pairs[adj_pair] += 1
else:
_adjective_pairs[adj_pair] = 1
# print(len(evidence))
# print(json.dumps(statement, indent=4, sort_keys=True))
# exit()
#
# continue
text = evidence[0]["evidence_context"]["text"]
_adjectives = []
for key in ["subj_adjectives", "obj_adjectives"]:
_adj = evidence[0]["evidence_context"][key]
_adj = _adj if _adj else []
_adjectives.append(_adj)
_polarities = []
for key in ["subj_polarity", "obj_polarity"]:
_pol = evidence[0]["evidence_context"][key]
_pol = _pol if _pol else 0
_polarities.append(_pol)
concepts = []
for key in ["subj", "obj"]:
con = statement["_source"][key]["concept"]
concepts.append(con)
adjectives = []
for key in ["subj", "obj"]:
adj = statement["_source"][key]["adjectives"]
adjectives.append(adj)
polarities = []
for key in ["subj", "obj"]:
pol = statement["_source"][key]["polarity"]
polarities.append(pol)
statements.append({
'Statement #': idx,
'Belief': belief,
'Subject': concepts[0],
'Object': concepts[1],
'Sub Adj': ', '.join(adjectives[0]),
'Obj Adj': ', '.join(adjectives[1]),
'Sub Pol': polarities[0],
'Obj Pol': polarities[1],
'_Sub Adj': ', '.join(_adjectives[0]),
'_Obj Adj': ', '.join(_adjectives[1]),
'_Sub Pol': _polarities[0],
'_Obj Pol': _polarities[1],
'# Sub Adj': len(adjectives[0]),
'# Obj Adj': len(adjectives[1]),
'# _Sub Adj': len(_adjectives[0]),
'# _Obj Adj': len(_adjectives[1]),
'# _Evidence': len(evidence),
'Text': text
})
if len(adjectives[0]) > 1 or len(adjectives[1]) > 1:
with open(f'../../data/causemos_indra_statements/multi_adjective/{idx}.json', 'w') as out:
out.write(json.dumps(statement, indent=4, sort_keys=True))
for idx2, key in enumerate(['sub', 'obj']):
if len(adjectives[idx2]) in adjective_frequencies[key].keys():
adjective_frequencies[key][len(adjectives[idx2])] += 1
else:
adjective_frequencies[key][len(adjectives[idx2])] = 1
adjectives[0] = ['None'] if len(adjectives[0]) == 0 else adjectives[0]
adjectives[1] = ['None'] if len(adjectives[1]) == 0 else adjectives[1]
for adj in adjectives[0]:
if adj in adjective_names['sub'].keys():
adjective_names['sub'][adj] += 1
else:
adjective_names['sub'][adj] = 1
for adj in adjectives[1]:
if adj in adjective_names['obj'].keys():
adjective_names['obj'][adj] += 1
else:
adjective_names['obj'][adj] = 1
for sub in adjectives[0]:
for obj in adjectives[1]:
adj_pair = (sub, obj)
if adj_pair in adjective_pairs.keys():
adjective_pairs[adj_pair] += 1
else:
adjective_pairs[adj_pair] = 1
# print(belief)
# print(text)
# print(_adjectives)
# print(_polarities)
# print(adjectives)
# print(_polarities)
# print(concepts)
df_statements = pd.DataFrame(statements)
df_evidences = pd.DataFrame(evidences)
df_statements.to_csv('../../data/causemos_indra_statements/statements.csv', index=False,
columns=['Statement #', 'Sub Adj', '_Sub Adj', 'Sub Pol', '_Sub Pol', 'Subject', 'Obj Adj',
'_Obj Adj', 'Obj Pol', '_Obj Pol', '# Sub Adj', '# _Sub Adj', '# Obj Adj', '# _Obj Adj',
'# _Evidence', 'Text'])
df_evidences.to_csv('../../data/causemos_indra_statements/evidence.csv', index=False,
columns=['Statement #', 'Evidence #', '_Sub Adj', '_Sub Pol', '_Obj Adj', '_Obj Pol', '# _Sub Adj',
'# _Obj Adj', 'Text'])
# df_sub_adj_counts = df_statements.groupby(by='# Sub Adj').count()
# df_obj_adj_counts = df_statements.groupby(by='# Obj Adj').count()
#
# _df_sub_adj_counts = df_statements.groupby(by='# _Sub Adj').count()
# _df_obj_adj_counts = df_statements.groupby(by='# _Obj Adj').count()
#
# df_sub_adj_counts.to_csv('../../data/causemos_indra_statements/sub_adj_counts.csv', index=False)
# df_obj_adj_counts.to_csv('../../data/causemos_indra_statements/obj_adj_counts.csv', index=False)
#
# _df_sub_adj_counts.to_csv('../../data/causemos_indra_statements/_sub_adj_counts.csv', index=False)
# _df_obj_adj_counts.to_csv('../../data/causemos_indra_statements/_obj_adj_counts.csv', index=False)
for idx2, key in enumerate(['sub', 'obj']):
multiplicity = []
frequency = []
for mult, freq in adjective_frequencies[key].items():
multiplicity.append(mult)
frequency.append(freq)
df_freq = pd.DataFrame({'# Adjectives': multiplicity, 'frequency': frequency})
df_freq.to_csv(f'../../data/causemos_indra_statements/{key}_adj_counts.csv', index=False)
multiplicity = []
frequency = []
for mult, freq in _adjective_frequencies[key].items():
multiplicity.append(mult)
frequency.append(freq)
df_freq = pd.DataFrame({'# Adjectives': multiplicity, 'frequency': frequency})
df_freq.to_csv(f'../../data/causemos_indra_statements/_{key}_adj_counts.csv', index=False)
adjective = []
frequency = []
for adj, freq in adjective_names[key].items():
adjective.append(adj)
frequency.append(freq)
df_freq = pd.DataFrame({'Adjective': adjective, 'frequency': frequency})
df_freq.to_csv(f'../../data/causemos_indra_statements/{key}_adjectives.csv', index=False)
adjective = []
frequency = []
for adj, freq in _adjective_names[key].items():
adjective.append(adj)
frequency.append(freq)
df_freq = pd.DataFrame({'Adjective': adjective, 'frequency': frequency})
df_freq.to_csv(f'../../data/causemos_indra_statements/_{key}_adjectives.csv', index=False)
sub = []
obj = []
freq = []
for adj_pair, count in adjective_pairs.items():
sub.append(adj_pair[0])
obj.append(adj_pair[1])
freq.append(count)
df_pair =
|
pd.DataFrame({'Subject': sub, 'Object': obj, 'frequency': freq})
|
pandas.DataFrame
|
# Author: <NAME>
# Email: <EMAIL>
# License: MIT License
import numpy as np
import pandas as pd
import hiplot as hip
import plotly.express as px
import plotly.graph_objects as go
import matplotlib.pyplot as plt
import seaborn as sns
color_scale = px.colors.sequential.Jet
def plot_missing_values(df, _st_):
import plotly.express as px
color_scale = [
[0.0, "rgba(0, 255, 0, 0.25)"],
[0.5, "rgba(0, 255, 0, 0.25)"],
[0.5, "rgba(255, 0, 0, 0.75)"],
[1, "rgba(255, 0, 0, 0.75)"],
]
df_miss_o = df.isnull()
fig = px.imshow(
df_miss_o,
color_continuous_scale=color_scale,
)
fig.update(layout_coloraxis_showscale=False)
_st_.plotly_chart(fig)
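# Minimal usage sketch (assumption: `_st_` only needs a Streamlit-like `plotly_chart` method):
#   import streamlit as st
#   plot_missing_values(df, st)  # missing cells render red, present cells green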
def plot_duplicate_rows(df, _st_):
color_scale = [
[0.0, "rgba(0, 0, 0, 0.25)"],
[0.5, "rgba(0, 0, 0, 0.25)"],
[0.5, "rgba(255, 255, 255, 0.75)"],
[1, "rgba(255, 255, 255, 0.75)"],
]
color_scale = [
[0.0, "rgba(255, 0, 0, 0.75)"],
[0.5, "rgba(255, 0, 0, 0.75)"],
[0.5, "rgba(0, 255, 0, 0.25)"],
[1, "rgba(0, 255, 0, 0.25)"],
]
dupl = df.duplicated()
n_col = len(df.columns)
df_dupl_o =
|
pd.DataFrame([[i] * n_col for i in dupl])
|
pandas.DataFrame
|
'''
This module contains all functions relating to feature engineering
'''
import pandas as pd
import numpy as np
from .structdata import get_cat_feats, get_num_feats, get_date_cols
def drop_missing(data=None, percent=99):
'''
    Drops columns with more than [percent] percent missing data.
Parameters:
-------------------------
data: Pandas DataFrame or Series.
percent: float, Default 99
Percentage of missing values to be in a column before it is eligible for removal.
Returns:
        None - the qualifying columns are dropped from data in place.
'''
if data is None:
raise ValueError("data: Expecting a DataFrame/ numpy2d array, got 'None'")
missing_percent = (data.isna().sum() / data.shape[0]) * 100
cols_2_drop = missing_percent[missing_percent.values > percent].index
print("Dropped {}".format(list(cols_2_drop)))
#Drop missing values
data.drop(cols_2_drop, axis=1, inplace=True)
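# Usage sketch (hypothetical DataFrame `df`): drop, in place, every column that is
# more than 99% missing.
#   drop_missing(df, percent=99)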
def drop_redundant(data):
'''
    Removes features that have the same value in every cell. Also drops a feature if NaN is its second unique value.
Parameters:
-----------------------------
data: DataFrame or named series.
Returns:
        None - the redundant columns are dropped from data in place.
'''
if data is None:
raise ValueError("data: Expecting a DataFrame/ numpy2d array, got 'None'")
#get columns
cols_2_drop = _nan_in_class(data)
print("Dropped {}".format(cols_2_drop))
data.drop(cols_2_drop, axis=1, inplace=True)
def _nan_in_class(data):
cols = []
for col in data.columns:
if len(data[col].unique()) == 1:
cols.append(col)
if len(data[col].unique()) == 2:
            if data[col].isna().any():  # 'np.nan in list(...)' is unreliable because NaN != NaN
cols.append(col)
return cols
def fill_missing_cats(data=None, cat_features=None, missing_encoding=None):
'''
Fill missing values using the mode of the categorical features.
Parameters:
------------------------
data: DataFrame or name Series.
Data set to perform operation on.
cat_features: List, Series, Array.
categorical features to perform operation on. If not provided, we automatically infer the categoricals from the dataset.
missing_encoding: List, Series, Array.
Values used in place of missing. Popular formats are [-1, -999, -99, '', ' ']
'''
if data is None:
raise ValueError("data: Expecting a DataFrame/ numpy2d array, got 'None'")
if cat_features is None:
cat_features = get_cat_feats(data)
temp_data = data.copy()
#change all possible missing values to NaN
if missing_encoding is None:
missing_encoding = ['', ' ', -99, -999]
temp_data.replace(missing_encoding, np.NaN, inplace=True)
for col in cat_features:
most_freq = temp_data[col].mode()[0]
temp_data[col] = temp_data[col].replace(np.NaN, most_freq)
return temp_data
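# Usage sketch (hypothetical column names): replace NaNs and common sentinels
# ('', ' ', -99, -999) in categorical columns with each column's mode.
#   df = fill_missing_cats(df, cat_features=['gender', 'city'])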
def fill_missing_num(data=None, features=None, method='mean'):
'''
    Fill missing values in numerical columns with the specified [method] value.
Parameters:
------------------------------
data: DataFrame or name Series.
The data set to fill
features: list.
List of columns to fill
method: str, Default 'mean'.
method to use in calculating fill value.
'''
if data is None:
raise ValueError("data: Expecting a DataFrame/ numpy2d array, got 'None'")
if features is None:
#get numerical features with missing values
num_feats = get_num_feats(data)
temp_data = data[num_feats].isna().sum()
features = list(temp_data[num_feats][temp_data[num_feats] > 0].index)
print("Found {} with missing values.".format(features))
for feat in features:
        if method == 'mean':
mean = data[feat].mean()
data[feat].fillna(mean, inplace=True)
        elif method == 'median':
median = data[feat].median()
data[feat].fillna(median, inplace=True)
        elif method == 'mode':
mode = data[feat].mode()[0]
data[feat].fillna(mode, inplace=True)
return "Filled all missing values successfully"
def merge_groupby(data=None, cat_features=None, statistics=None, col_to_merge=None):
'''
Performs a groupby on the specified categorical features and merges
the result to the original dataframe.
Parameter:
-----------------------
data: DataFrame
Data set to perform operation on.
cat_features: list, series, 1D-array
categorical features to groupby.
        statistics: list, series, 1D-array, Default ['mean', 'count']
aggregates to perform on grouped data.
col_to_merge: str
The column to merge on the dataset. Must be present in the data set.
Returns:
Dataframe.
'''
if data is None:
raise ValueError("data: Expecting a DataFrame/ numpy2d array, got 'None'")
if statistics is None:
statistics = ['mean', 'count']
if cat_features is None:
cat_features = get_num_feats(data)
if col_to_merge is None:
raise ValueError("col_to_merge: Expecting a string [column to merge on], got 'None'")
df = data.copy()
for cat in cat_features:
temp = df.groupby([cat]).agg(statistics)[col_to_merge]
#rename columns
temp = temp.rename(columns={'mean': cat + '_' + col_to_merge + '_mean', 'count': cat + '_' + col_to_merge + "_count"})
#merge the data sets
df = df.merge(temp, how='left', on=cat)
return df
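# Usage sketch (hypothetical column names): append the per-store mean and count of
# 'price' as new columns on every row.
#   df = merge_groupby(df, cat_features=['store_id'], col_to_merge='price')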
def get_qcut(data=None, col=None, q=None, duplicates='drop', return_type='float64'):
'''
Cuts a series into bins using the pandas qcut function
and returns the resulting bins as a series for merging.
Parameter:
-------------
data: DataFrame, named Series
Data set to perform operation on.
col: str
            column to cut into bins.
q: integer or array of quantiles
            Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternatively an
array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles.
duplicates: Default 'drop',
If bin edges are not unique drop non-uniques.
return_type: dtype, Default (float64)
Dtype of series to return. One of [float64, str, int64]
Returns:
--------
Series, 1D-Array
'''
temp_df = pd.qcut(data[col], q=q, duplicates=duplicates).to_frame().astype('str')
#retrieve only the qcut categories
df = temp_df[col].str.split(',').apply(lambda x: x[0][1:]).astype(return_type)
return df
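# Usage sketch (hypothetical column name): bin 'income' into quartiles and keep the
# lower edge of each bin as a numeric feature.
#   df['income_bin'] = get_qcut(df, col='income', q=4)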
def create_balanced_data(data=None, target=None, categories=None, class_sizes=None, replacement=False ):
'''
Creates a balanced data set from an imbalanced one. Used in a classification task.
Parameter:
----------------------------
data: DataFrame, name series.
The imbalanced dataset.
target: str
Name of the target column.
categories: list
            Unique categories in the target column. If not set, we infer the unique categories from the column.
class_sizes: list
            Size of each specified class. Must be in the same order as the categories parameter.
        replacement: bool, Default False.
            Whether to sample with or without replacement.
'''
if data is None:
raise ValueError("data: Expecting a DataFrame/ numpy2d array, got 'None'")
if target is None:
raise ValueError("target: Expecting a String got 'None'")
if categories is None:
categories = list(data[target].unique())
if class_sizes is None:
#set size for each class to same value
temp_val = int(data.shape[0] / len(data[target].unique()))
class_sizes = [temp_val for _ in list(data[target].unique())]
temp_data = data.copy()
data_category = []
data_class_indx = []
    #get data corresponding to each of the categories
for cat in categories:
data_category.append(temp_data[temp_data[target] == cat])
#sample and get the index corresponding to each category
for class_size, cat in zip(class_sizes, data_category):
data_class_indx.append(cat.sample(class_size, replace=True).index)
#concat data together
new_data = pd.concat([temp_data.loc[indx] for indx in data_class_indx], ignore_index=True).sample(sum(class_sizes)).reset_index(drop=True)
if not replacement:
for indx in data_class_indx:
temp_data.drop(indx, inplace=True)
return new_data
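# Usage sketch (hypothetical target column): resample every class of 'churn' to the
# same size (sampling is done with replacement internally).
#   balanced = create_balanced_data(df, target='churn')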
def to_date(data):
'''
Automatically convert all date time columns to pandas Datetime format
'''
date_cols = get_date_cols(data)
for col in date_cols:
data[col] = pd.to_datetime(data[col])
return data
def haversine_distance(lat1, long1, lat2, long2):
'''
    Calculates the Haversine distance between two locations given their latitudes and longitudes.
The haversine distance is the great-circle distance between two points on a sphere given their longitudes and latitudes.
Parameter:
---------------------------
lat1: scalar,float
Start point latitude of the location.
lat2: scalar,float
End point latitude of the location.
long1: scalar,float
Start point longitude of the location.
long2: scalar,float
End point longitude of the location.
Returns:
        Series: The Haversine distance between (lat1, long1) and (lat2, long2)
'''
lat1, long1, lat2, long2 = map(np.radians, (lat1, long1, lat2, long2))
AVG_EARTH_RADIUS = 6371 # in km
lat = lat2 - lat1
lng = long2 - long1
distance = np.sin(lat * 0.5) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(lng * 0.5) ** 2
harvesine_distance = 2 * AVG_EARTH_RADIUS * np.arcsin(np.sqrt(distance))
harvesine_distance_df = pd.Series(harvesine_distance)
return harvesine_distance_df
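# Usage sketch (hypothetical coordinates): works on scalars or on aligned Series of
# latitudes/longitudes and returns the great-circle distance in kilometres.
#   dist_km = haversine_distance(6.5244, 3.3792, 9.0765, 7.3986)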
def manhattan_distance(lat1, long1, lat2, long2):
'''
Calculates the Manhattan distance between two points.
It is the sum of horizontal and vertical distance between any two points given their latitudes and longitudes.
Parameter:
-------------------
lat1: scalar,float
Start point latitude of the location.
lat2: scalar,float
End point latitude of the location.
long1: scalar,float
Start point longitude of the location.
long2: scalar,float
End point longitude of the location.
Returns: Series
        The Manhattan distance between (lat1, long1) and (lat2, long2)
'''
a = np.abs(lat2 -lat1)
b = np.abs(long1 - long2)
manhattan_distance = a + b
manhattan_distance_df = pd.Series(manhattan_distance)
return manhattan_distance_df
def bearing(lat1, long1, lat2, long2):
'''
Calculates the Bearing between two points.
The bearing is the compass direction to travel from a starting point, and must be within the range 0 to 360.
Parameter:
-------------------------
lat1: scalar,float
Start point latitude of the location.
lat2: scalar,float
End point latitude of the location.
long1: scalar,float
Start point longitude of the location.
long2: scalar,float
End point longitude of the location.
Returns: Series
        The Bearing between (lat1, long1) and (lat2, long2)
'''
AVG_EARTH_RADIUS = 6371
long_delta = np.radians(long2 - long1)
lat1, long1, lat2, long2 = map(np.radians, (lat1, long1, lat2, long2))
y = np.sin(long_delta) * np.cos(lat2)
x = np.cos(lat1) * np.sin(lat2) - np.sin(lat1) * np.cos(lat2) * np.cos(long_delta)
bearing = np.degrees(np.arctan2(y, x))
bearing_df =
|
pd.Series(bearing)
|
pandas.Series
|
from datetime import timedelta
from functools import partial
from operator import attrgetter
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import OutOfBoundsDatetime, conversion
import pandas as pd
from pandas import (
DatetimeIndex, Index, Timestamp, date_range, datetime, offsets,
to_datetime)
from pandas.core.arrays import DatetimeArray, period_array
import pandas.util.testing as tm
class TestDatetimeIndex(object):
@pytest.mark.parametrize('dt_cls', [DatetimeIndex,
DatetimeArray._from_sequence])
def test_freq_validation_with_nat(self, dt_cls):
# GH#11587 make sure we get a useful error message when generate_range
# raises
msg = ("Inferred frequency None from passed values does not conform "
"to passed frequency D")
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01')], freq='D')
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01').value],
freq='D')
def test_categorical_preserves_tz(self):
# GH#18664 retain tz when going DTI-->Categorical-->DTI
# TODO: parametrize over DatetimeIndex/DatetimeArray
# once CategoricalIndex(DTA) works
dti = pd.DatetimeIndex(
[pd.NaT, '2015-01-01', '1999-04-06 15:14:13', '2015-01-01'],
tz='US/Eastern')
ci = pd.CategoricalIndex(dti)
carr = pd.Categorical(dti)
cser = pd.Series(ci)
for obj in [ci, carr, cser]:
result = pd.DatetimeIndex(obj)
tm.assert_index_equal(result, dti)
def test_dti_with_period_data_raises(self):
# GH#23675
data = pd.PeriodIndex(['2016Q1', '2016Q2'], freq='Q')
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(period_array(data))
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(period_array(data))
def test_dti_with_timedelta64_data_deprecation(self):
# GH#23675
data = np.array([0], dtype='m8[ns]')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
def test_construction_caching(self):
df = pd.DataFrame({'dt': pd.date_range('20130101', periods=3),
'dttz': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'dt_with_null': [pd.Timestamp('20130101'), pd.NaT,
pd.Timestamp('20130103')],
'dtns': pd.date_range('20130101', periods=3,
freq='ns')})
assert df.dttz.dtype.tz.zone == 'US/Eastern'
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
result = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(i, result)
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
if str(tz) in ('UTC', 'tzutc()'):
warn = None
else:
warn = FutureWarning
with tm.assert_produces_warning(warn, check_stacklevel=False):
result = DatetimeIndex(i.tz_localize(None).asi8, **kwargs)
expected = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(result, expected)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
tm.assert_index_equal(i2, expected)
# incompat tz/dtype
pytest.raises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_construction_index_with_mixed_timezones(self):
# gh-11488: no tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# Different tz results in Index(dtype=object)
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# length = 1
result = Index([Timestamp('2011-01-01')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# length = 1 with tz
result = Index(
[Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo',
name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_index_with_mixed_timezones_with_NaT(self):
# see gh-11488
result = Index([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# Same tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.NaT,
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# different tz results in Index(dtype=object)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = Index([pd.NaT,
|
Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')
|
pandas.Timestamp
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals, division
import os
import copy
import unittest
import csv
import json
import numpy as np
import pandas as pd
from multiprocessing import set_start_method
from sklearn.exceptions import NotFittedError
from pymatgen import Structure, Lattice, Molecule
from pymatgen.util.testing import PymatgenTest
from matminer.featurizers.composition import ElementProperty
from matminer.featurizers.site import SiteElementalProperty
from matminer.featurizers.structure import DensityFeatures, \
RadialDistributionFunction, PartialRadialDistributionFunction, \
ElectronicRadialDistributionFunction, \
MinimumRelativeDistances, SiteStatsFingerprint, CoulombMatrix, \
SineCoulombMatrix, OrbitalFieldMatrix, GlobalSymmetryFeatures, \
EwaldEnergy, BondFractions, BagofBonds, StructuralHeterogeneity, \
MaximumPackingEfficiency, ChemicalOrdering, StructureComposition, \
Dimensionality, XRDPowderPattern, CGCNNFeaturizer, JarvisCFID, \
GlobalInstabilityIndex, \
StructuralComplexity
# For the CGCNNFeaturizer
try:
import torch
import cgcnn
except ImportError:
torch, cgcnn = None, None
test_dir = os.path.join(os.path.dirname(__file__))
class StructureFeaturesTest(PymatgenTest):
def setUp(self):
self.diamond = Structure(
Lattice([[2.189, 0, 1.264], [0.73, 2.064, 1.264],
[0, 0, 2.528]]), ["C0+", "C0+"], [[2.554, 1.806, 4.423],
[0.365, 0.258, 0.632]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.diamond_no_oxi = Structure(
Lattice([[2.189, 0, 1.264], [0.73, 2.064, 1.264],
[0, 0, 2.528]]), ["C", "C"], [[2.554, 1.806, 4.423],
[0.365, 0.258, 0.632]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.nacl = Structure(
Lattice([[3.485, 0, 2.012], [1.162, 3.286, 2.012],
[0, 0, 4.025]]), ["Na1+", "Cl1-"], [[0, 0, 0],
[2.324, 1.643, 4.025]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.cscl = Structure(
Lattice([[4.209, 0, 0], [0, 4.209, 0], [0, 0, 4.209]]),
["Cl1-", "Cs1+"], [[2.105, 2.1045, 2.1045], [0, 0, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.ni3al = Structure(
Lattice([[3.52, 0, 0], [0, 3.52, 0], [0, 0, 3.52]]),
["Al", ] + ["Ni"] * 3,
[[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=False, site_properties=None)
self.sc = Structure(Lattice([[3.52, 0, 0], [0, 3.52, 0], [0, 0, 3.52]]),
["Al"], [[0, 0, 0]], validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=False)
self.bond_angles = range(5, 180, 5)
def test_density_features(self):
df = DensityFeatures()
f = df.featurize(self.diamond)
self.assertAlmostEqual(f[0], 3.49, 2)
self.assertAlmostEqual(f[1], 5.71, 2)
self.assertAlmostEqual(f[2], 0.25, 2)
f = df.featurize(self.nacl)
self.assertAlmostEqual(f[0], 2.105, 2)
self.assertAlmostEqual(f[1], 23.046, 2)
self.assertAlmostEqual(f[2], 0.620, 2)
nacl_disordered = copy.deepcopy(self.nacl)
nacl_disordered.replace_species({"Cl1-": "Cl0.99H0.01"})
self.assertFalse(df.precheck(nacl_disordered))
structures = [self.diamond, self.nacl, nacl_disordered]
df2 = pd.DataFrame({"structure": structures})
self.assertAlmostEqual(df.precheck_dataframe(df2, "structure"), 2 / 3)
def test_global_symmetry(self):
gsf = GlobalSymmetryFeatures()
self.assertEqual(gsf.featurize(self.diamond), [227, "cubic", 1, True])
def test_dimensionality(self):
cscl = PymatgenTest.get_structure("CsCl")
df = Dimensionality(bonds={("Cs", "Cl"): 3.5})
self.assertEqual(df.featurize(cscl)[0], 1)
df = Dimensionality(bonds={("Cs", "Cl"): 3.7})
self.assertEqual(df.featurize(cscl)[0], 3)
def test_rdf_and_peaks(self):
## Test diamond
rdforig = RadialDistributionFunction().featurize(
self.diamond)
rdf = rdforig[0]
        # Make sure the last bin is cutoff - bin_size
self.assertAlmostEqual(max(rdf['distances']), 19.9)
# Verify bin sizes
self.assertEqual(len(rdf['distribution']), 200)
# Make sure it gets all of the peaks
self.assertEqual(np.count_nonzero(rdf['distribution']), 116)
# Check the values for a few individual peaks
self.assertAlmostEqual(
rdf['distribution'][int(round(1.5 / 0.1))], 15.12755155)
self.assertAlmostEqual(
rdf['distribution'][int(round(2.9 / 0.1))], 12.53193948)
self.assertAlmostEqual(
rdf['distribution'][int(round(19.9 / 0.1))], 0.822126129)
# Repeat test with NaCl (omitting comments). Altering cutoff distance
rdforig = RadialDistributionFunction(cutoff=10).featurize(self.nacl)
rdf = rdforig[0]
self.assertAlmostEqual(max(rdf['distances']), 9.9)
self.assertEqual(len(rdf['distribution']), 100)
self.assertEqual(np.count_nonzero(rdf['distribution']), 11)
self.assertAlmostEqual(
rdf['distribution'][int(round(2.8 / 0.1))], 27.09214168)
self.assertAlmostEqual(
rdf['distribution'][int(round(4.0 / 0.1))], 26.83338723)
self.assertAlmostEqual(
rdf['distribution'][int(round(9.8 / 0.1))], 3.024406467)
# Repeat test with CsCl. Altering cutoff distance and bin_size
rdforig = RadialDistributionFunction(
cutoff=8, bin_size=0.5).featurize(self.cscl)
rdf = rdforig[0]
self.assertAlmostEqual(max(rdf['distances']), 7.5)
self.assertEqual(len(rdf['distribution']), 16)
self.assertEqual(np.count_nonzero(rdf['distribution']), 5)
self.assertAlmostEqual(
rdf['distribution'][int(round(3.5 / 0.5))], 6.741265585)
self.assertAlmostEqual(
rdf['distribution'][int(round(4.0 / 0.5))], 3.937582548)
self.assertAlmostEqual(
rdf['distribution'][int(round(7.0 / 0.5))], 1.805505363)
def test_prdf(self):
# Test a few peaks in diamond
# These expected numbers were derived by performing
# the calculation in another code
distances, prdf = PartialRadialDistributionFunction().compute_prdf(self.diamond)
self.assertEqual(len(prdf.values()), 1)
self.assertAlmostEqual(prdf[('C', 'C')][int(round(1.4 / 0.1))], 0)
self.assertAlmostEqual(prdf[('C', 'C')][int(round(1.5 / 0.1))], 1.32445167622)
self.assertAlmostEqual(max(distances), 19.9)
self.assertAlmostEqual(prdf[('C', 'C')][int(round(19.9 / 0.1))], 0.07197902)
# Test a few peaks in CsCl, make sure it gets all types correctly
distances, prdf = PartialRadialDistributionFunction(cutoff=10).compute_prdf(self.cscl)
self.assertEqual(len(prdf.values()), 4)
self.assertAlmostEqual(max(distances), 9.9)
self.assertAlmostEqual(prdf[('Cs', 'Cl')][int(round(3.6 / 0.1))], 0.477823197)
self.assertAlmostEqual(prdf[('Cl', 'Cs')][int(round(3.6 / 0.1))], 0.477823197)
self.assertAlmostEqual(prdf[('Cs', 'Cs')][int(round(3.6 / 0.1))], 0)
# Do Ni3Al, make sure it captures the antisymmetry of Ni/Al sites
distances, prdf = PartialRadialDistributionFunction(cutoff=10, bin_size=0.5)\
.compute_prdf(self.ni3al)
self.assertEqual(len(prdf.values()), 4)
self.assertAlmostEqual(prdf[('Ni', 'Al')][int(round(2 / 0.5))], 0.125236677)
self.assertAlmostEqual(prdf[('Al', 'Ni')][int(round(2 / 0.5))], 0.37571003)
self.assertAlmostEqual(prdf[('Al', 'Al')][int(round(2 / 0.5))], 0)
# Check the fit operation
featurizer = PartialRadialDistributionFunction()
featurizer.fit([self.diamond, self.cscl, self.ni3al])
self.assertEqual({'Cs', 'Cl', 'C', 'Ni', 'Al'}, set(featurizer.elements_))
featurizer.exclude_elems = ['Cs', 'Al']
featurizer.fit([self.diamond, self.cscl, self.ni3al])
self.assertEqual({'Cl', 'C', 'Ni'}, set(featurizer.elements_))
featurizer.include_elems = ['H']
featurizer.fit([self.diamond, self.cscl, self.ni3al])
self.assertEqual({'H', 'Cl', 'C', 'Ni'}, set(featurizer.elements_))
# Check the feature labels
featurizer.exclude_elems = ()
featurizer.include_elems = ()
featurizer.elements_ = ['Al', 'Ni']
labels = featurizer.feature_labels()
n_bins = len(featurizer._make_bins()) - 1
self.assertEqual(3 * n_bins, len(labels))
self.assertIn('Al-Ni PRDF r=0.00-0.10', labels)
# Check the featurize method
featurizer.elements_ = ['C']
features = featurizer.featurize(self.diamond)
prdf = featurizer.compute_prdf(self.diamond)[1]
self.assertArrayAlmostEqual(features, prdf[('C', 'C')])
# Check the featurize_dataframe
df = pd.DataFrame.from_dict({"structure": [self.diamond, self.cscl]})
featurizer.fit(df["structure"])
df = featurizer.featurize_dataframe(df, col_id="structure")
self.assertEqual(df["Cs-Cl PRDF r=0.00-0.10"][0], 0.0)
self.assertAlmostEqual(df["Cl-Cl PRDF r=19.70-19.80"][1], 0.049, 3)
self.assertEqual(df["Cl-Cl PRDF r=19.90-20.00"][0], 0.0)
# Make sure labels and features are in the same order
featurizer.elements_ = ['Al', 'Ni']
features = featurizer.featurize(self.ni3al)
labels = featurizer.feature_labels()
prdf = featurizer.compute_prdf(self.ni3al)[1]
self.assertEqual((n_bins * 3,), features.shape)
self.assertTrue(labels[0].startswith('Al-Al'))
self.assertTrue(labels[n_bins].startswith('Al-Ni'))
self.assertTrue(labels[2 * n_bins].startswith('Ni-Ni'))
self.assertArrayAlmostEqual(features, np.hstack(
[prdf[('Al', 'Al')], prdf[('Al', 'Ni')], prdf[('Ni', 'Ni')]]))
def test_redf(self):
d = ElectronicRadialDistributionFunction().featurize(
self.diamond)[0]
self.assertAlmostEqual(int(1000 * d["distances"][0]), 25)
self.assertAlmostEqual(int(1000 * d["distribution"][0]), 0)
self.assertAlmostEqual(int(1000 * d["distances"][len(
d["distances"]) - 1]), 6175)
self.assertAlmostEqual(int(1000 * d["distribution"][len(
d["distances"]) - 1]), 0)
d = ElectronicRadialDistributionFunction().featurize(
self.nacl)[0]
self.assertAlmostEqual(int(1000 * d["distances"][0]), 25)
self.assertAlmostEqual(int(1000 * d["distribution"][0]), 0)
self.assertAlmostEqual(int(1000 * d["distances"][56]), 2825)
self.assertAlmostEqual(int(1000 * d["distribution"][56]), -2108)
self.assertAlmostEqual(int(1000 * d["distances"][len(
d["distances"]) - 1]), 9875)
d = ElectronicRadialDistributionFunction().featurize(
self.cscl)[0]
self.assertAlmostEqual(int(1000 * d["distances"][0]), 25)
self.assertAlmostEqual(int(1000 * d["distribution"][0]), 0)
self.assertAlmostEqual(int(1000 * d["distances"][72]), 3625)
self.assertAlmostEqual(int(1000 * d["distribution"][72]), -2194)
self.assertAlmostEqual(int(1000 * d["distances"][len(
d["distances"]) - 1]), 7275)
def test_coulomb_matrix(self):
# flat
cm = CoulombMatrix(flatten=True)
df = pd.DataFrame({"s": [self.diamond, self.nacl]})
with self.assertRaises(NotFittedError):
df = cm.featurize_dataframe(df, "s")
df = cm.fit_featurize_dataframe(df, "s")
labels = cm.feature_labels()
self.assertListEqual(labels,
["coulomb matrix eig 0", "coulomb matrix eig 1"])
self.assertArrayAlmostEqual(df[labels].iloc[0],
[49.169453, 24.546758],
decimal=5)
self.assertArrayAlmostEqual(df[labels].iloc[1],
[153.774731, 452.894322],
decimal=5)
# matrix
species = ["C", "C", "H", "H"]
coords = [[0, 0, 0], [0, 0, 1.203], [0, 0, -1.06], [0, 0, 2.263]]
acetylene = Molecule(species, coords)
morig = CoulombMatrix(flatten=False).featurize(acetylene)
mtarget = [[36.858, 15.835391290, 2.995098235, 1.402827813], \
[15.835391290, 36.858, 1.4028278132103624, 2.9950982], \
[2.9368896127, 1.402827813, 0.5, 0.159279959], \
[1.4028278132, 2.995098235, 0.159279959, 0.5]]
self.assertAlmostEqual(
int(np.linalg.norm(morig - np.array(mtarget))), 0)
m = CoulombMatrix(diag_elems=False,
flatten=False).featurize(acetylene)[0]
self.assertAlmostEqual(m[0][0], 0.0)
self.assertAlmostEqual(m[1][1], 0.0)
self.assertAlmostEqual(m[2][2], 0.0)
self.assertAlmostEqual(m[3][3], 0.0)
def test_sine_coulomb_matrix(self):
# flat
scm = SineCoulombMatrix(flatten=True)
df = pd.DataFrame({"s": [self.sc, self.ni3al]})
with self.assertRaises(NotFittedError):
df = scm.featurize_dataframe(df, "s")
df = scm.fit_featurize_dataframe(df, "s")
labels = scm.feature_labels()
self.assertEqual(labels[0], "sine coulomb matrix eig 0")
self.assertArrayAlmostEqual(
df[labels].iloc[0],
[235.740418, 0.0, 0.0, 0.0],
decimal=5)
self.assertArrayAlmostEqual(
df[labels].iloc[1],
[232.578562, 1656.288171, 1403.106576, 1403.106576],
decimal=5)
# matrix
scm = SineCoulombMatrix(flatten=False)
sin_mat = scm.featurize(self.diamond)
mtarget = [[36.8581, 6.147068], [6.147068, 36.8581]]
self.assertAlmostEqual(
np.linalg.norm(sin_mat - np.array(mtarget)), 0.0, places=4)
scm = SineCoulombMatrix(diag_elems=False, flatten=False)
sin_mat = scm.featurize(self.diamond)[0]
self.assertEqual(sin_mat[0][0], 0)
self.assertEqual(sin_mat[1][1], 0)
def test_orbital_field_matrix(self):
ofm_maker = OrbitalFieldMatrix(flatten=False)
ofm = ofm_maker.featurize(self.diamond)[0]
mtarget = np.zeros((32, 32))
mtarget[1][1] = 1.4789015 # 1.3675444
mtarget[1][3] = 1.4789015 # 1.3675444
mtarget[3][1] = 1.4789015 # 1.3675444
mtarget[3][3] = 1.4789015 # 1.3675444 if for a coord# of exactly 4
for i in range(32):
for j in range(32):
                if i not in [1, 3] and j not in [1, 3]:
self.assertEqual(ofm[i, j], 0.0)
mtarget = np.matrix(mtarget)
self.assertAlmostEqual(
np.linalg.norm(ofm - mtarget), 0.0, places=4)
ofm_maker = OrbitalFieldMatrix(True, flatten=False)
ofm = ofm_maker.featurize(self.diamond)[0]
mtarget = np.zeros((39, 39))
mtarget[1][1] = 1.4789015
mtarget[1][3] = 1.4789015
mtarget[3][1] = 1.4789015
mtarget[3][3] = 1.4789015
mtarget[1][33] = 1.4789015
mtarget[3][33] = 1.4789015
mtarget[33][1] = 1.4789015
mtarget[33][3] = 1.4789015
mtarget[33][33] = 1.4789015
mtarget = np.matrix(mtarget)
self.assertAlmostEqual(
np.linalg.norm(ofm - mtarget), 0.0, places=4)
ofm_flat = OrbitalFieldMatrix(period_tag=False, flatten=True)
self.assertEqual(len(ofm_flat.feature_labels()), 1024)
ofm_flat = OrbitalFieldMatrix(period_tag=True, flatten=True)
self.assertEqual(len(ofm_flat.feature_labels()), 1521)
ofm_vector = ofm_flat.featurize(self.diamond)
for ix in [40, 42, 72, 118, 120, 150, 1288, 1320]:
self.assertAlmostEqual(ofm_vector[ix], 1.4789015345821415)
def test_min_relative_distances(self):
self.assertAlmostEqual(MinimumRelativeDistances().featurize(
self.diamond_no_oxi)[0][0], 1.1052576)
self.assertAlmostEqual(MinimumRelativeDistances().featurize(
self.nacl)[0][0], 0.8891443)
self.assertAlmostEqual(MinimumRelativeDistances().featurize(
self.cscl)[0][0], 0.9877540)
def test_sitestatsfingerprint(self):
# Test matrix.
op_struct_fp = SiteStatsFingerprint.from_preset("OPSiteFingerprint",
stats=None)
opvals = op_struct_fp.featurize(self.diamond)
oplabels = op_struct_fp.feature_labels()
self.assertAlmostEqual(opvals[10][0], 0.9995, places=7)
self.assertAlmostEqual(opvals[10][1], 0.9995, places=7)
opvals = op_struct_fp.featurize(self.nacl)
self.assertAlmostEqual(opvals[18][0], 0.9995, places=7)
self.assertAlmostEqual(opvals[18][1], 0.9995, places=7)
opvals = op_struct_fp.featurize(self.cscl)
self.assertAlmostEqual(opvals[22][0], 0.9995, places=7)
self.assertAlmostEqual(opvals[22][1], 0.9995, places=7)
# Test stats.
op_struct_fp = SiteStatsFingerprint.from_preset("OPSiteFingerprint")
opvals = op_struct_fp.featurize(self.diamond)
print(opvals, '**')
self.assertAlmostEqual(opvals[0], 0.0005, places=7)
self.assertAlmostEqual(opvals[1], 0, places=7)
self.assertAlmostEqual(opvals[2], 0.0005, places=7)
self.assertAlmostEqual(opvals[3], 0.0, places=7)
self.assertAlmostEqual(opvals[4], 0.0005, places=7)
self.assertAlmostEqual(opvals[18], 0.0805, places=7)
self.assertAlmostEqual(opvals[20], 0.9995, places=7)
self.assertAlmostEqual(opvals[21], 0, places=7)
self.assertAlmostEqual(opvals[22], 0.0075, places=7)
self.assertAlmostEqual(opvals[24], 0.2355, places=7)
self.assertAlmostEqual(opvals[-1], 0.0, places=7)
# Test coordination number
cn_fp = SiteStatsFingerprint.from_preset("JmolNN", stats=("mean",))
cn_vals = cn_fp.featurize(self.diamond)
self.assertEqual(cn_vals[0], 4.0)
# Test the covariance
prop_fp = SiteStatsFingerprint(SiteElementalProperty(properties=["Number", "AtomicWeight"]),
stats=["mean"], covariance=True)
# Test the feature labels
labels = prop_fp.feature_labels()
self.assertEqual(3, len(labels))
# Test a structure with all the same type (cov should be zero)
features = prop_fp.featurize(self.diamond)
self.assertArrayAlmostEqual(features, [6, 12.0107, 0])
# Test a structure with only one atom (cov should be zero too)
features = prop_fp.featurize(self.sc)
self.assertArrayAlmostEqual([13, 26.9815386, 0], features)
# Test a structure with nonzero covariance
features = prop_fp.featurize(self.nacl)
self.assertArrayAlmostEqual([14, 29.22138464, 37.38969216], features)
def test_ewald(self):
# Add oxidation states to all of the structures
for s in [self.nacl, self.cscl, self.diamond]:
s.add_oxidation_state_by_guess()
# Test basic
ewald = EwaldEnergy(accuracy=2)
self.assertArrayAlmostEqual(ewald.featurize(self.diamond), [0])
self.assertAlmostEqual(ewald.featurize(self.nacl)[0], -8.84173626, 2)
self.assertLess(ewald.featurize(self.nacl),
ewald.featurize(self.cscl)) # Atoms are closer in NaCl
# Perform Ewald summation by "hand",
# Using the result from GULP
self.assertArrayAlmostEqual([-8.84173626], ewald.featurize(self.nacl), 2)
def test_bondfractions(self):
# Test individual structures with featurize
bf_md = BondFractions.from_preset("MinimumDistanceNN")
bf_md.no_oxi = True
bf_md.fit([self.diamond_no_oxi])
self.assertArrayEqual(bf_md.featurize(self.diamond), [1.0])
self.assertArrayEqual(bf_md.featurize(self.diamond_no_oxi), [1.0])
bf_voronoi = BondFractions.from_preset("VoronoiNN")
bf_voronoi.bbv = float("nan")
bf_voronoi.fit([self.nacl])
bond_fracs = bf_voronoi.featurize(self.nacl)
bond_names = bf_voronoi.feature_labels()
ref = {'Na+ - Na+ bond frac.': 0.25, 'Cl- - Na+ bond frac.': 0.5,
'Cl- - Cl- bond frac.': 0.25}
self.assertDictEqual(dict(zip(bond_names, bond_fracs)), ref)
# Test to make sure dataframe behavior is as intended
s_list = [self.diamond_no_oxi, self.ni3al]
df = pd.DataFrame.from_dict({'s': s_list})
bf_voronoi.fit(df['s'])
df = bf_voronoi.featurize_dataframe(df, 's')
# Ensure all data is properly labelled and organized
self.assertArrayEqual(df['C - C bond frac.'].as_matrix(), [1.0, np.nan])
self.assertArrayEqual(df['Al - Ni bond frac.'].as_matrix(), [np.nan, 0.5])
self.assertArrayEqual(df['Al - Al bond frac.'].as_matrix(), [np.nan, 0.0])
self.assertArrayEqual(df['Ni - Ni bond frac.'].as_matrix(), [np.nan, 0.5])
# Test to make sure bad_bond_values (bbv) are still changed correctly
# and check inplace behavior of featurize dataframe.
bf_voronoi.bbv = 0.0
df =
|
pd.DataFrame.from_dict({'s': s_list})
|
pandas.DataFrame.from_dict
|
"""Unit tests for the reading functionality in dframeio.parquet"""
# pylint: disable=redefined-outer-name
from pathlib import Path
import pandas as pd
import pandera as pa
import pandera.typing
import pytest
from pandas.testing import assert_frame_equal
import dframeio
class SampleDataSchema(pa.SchemaModel):
"""pandera schema of the parquet test dataset"""
registration_dttm: pa.typing.Series[pa.typing.DateTime]
id: pa.typing.Series[pd.Int64Dtype] = pa.Field(nullable=True, coerce=True)
first_name: pa.typing.Series[pa.typing.String]
last_name: pa.typing.Series[pa.typing.String]
email: pa.typing.Series[pa.typing.String]
gender: pa.typing.Series[pa.typing.String] = pa.Field(coerce=True)
ip_address: pa.typing.Series[pa.typing.String]
cc: pa.typing.Series[pa.typing.String]
country: pa.typing.Series[pa.typing.String]
birthdate: pa.typing.Series[pa.typing.String]
salary: pa.typing.Series[pa.typing.Float64] = pa.Field(nullable=True)
title: pa.typing.Series[pa.typing.String]
comments: pa.typing.Series[pa.typing.String] = pa.Field(nullable=True)
@staticmethod
def length():
"""Known length of the data"""
return 5000
@staticmethod
def n_salary_over_150000():
"""Number of rows with salary > 150000"""
return 2384
@pytest.fixture(params=["multifile", "singlefile.parquet", "multifolder"])
def sample_data_path(request):
"""Path of a parquet dataset for testing"""
return Path(__file__).parent / "data" / "parquet" / request.param
def read_sample_dataframe():
"""Read the sample dataframe to pandas and return a cached copy"""
if not hasattr(read_sample_dataframe, "df"):
parquet_file = Path(__file__).parent / "data" / "parquet" / "singlefile.parquet"
backend = dframeio.ParquetBackend(str(parquet_file.parent))
read_sample_dataframe.df = backend.read_to_pandas(parquet_file.name)
return read_sample_dataframe.df.copy()
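# The parsed dataframe is memoised as an attribute on the function itself, so every
# fixture call after the first reuses the same parquet read and only pays for the .copy().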
@pytest.fixture(scope="function")
def sample_dataframe():
"""Provide the sample dataframe"""
return read_sample_dataframe()
@pytest.fixture(scope="function")
def sample_dataframe_dict():
"""Provide the sample dataframe"""
parquet_file = Path(__file__).parent / "data" / "parquet" / "singlefile.parquet"
backend = dframeio.ParquetBackend(str(parquet_file.parent))
return backend.read_to_dict(parquet_file.name)
@pytest.mark.parametrize(
"kwargs, exception",
[
({"base_path": "/some/dir", "partitions": -1}, TypeError),
({"base_path": "/some/dir", "partitions": 2.2}, TypeError),
({"base_path": "/some/dir", "partitions": "abc"}, TypeError),
({"base_path": "/some/dir", "partitions": b"abc"}, TypeError),
({"base_path": "/some/dir", "rows_per_file": b"abc"}, TypeError),
({"base_path": "/some/dir", "rows_per_file": 1.1}, TypeError),
({"base_path": "/some/dir", "rows_per_file": -5}, ValueError),
],
)
def test_init_argchecks(kwargs, exception):
"""Challenge the argument validation of the constructor"""
with pytest.raises(exception):
dframeio.ParquetBackend(**kwargs)
def test_read_to_pandas(sample_data_path):
"""Read a sample dataset into a pandas dataframe"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_pandas(sample_data_path.name)
SampleDataSchema.to_schema().validate(df)
assert len(df) == SampleDataSchema.length()
def test_read_to_pandas_some_columns(sample_data_path):
"""Read a sample dataset into a pandas dataframe, selecting some columns"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_pandas(sample_data_path.name, columns=["id", "first_name"])
SampleDataSchema.to_schema().select_columns(["id", "first_name"]).validate(df)
assert len(df) == SampleDataSchema.length()
def test_read_to_pandas_some_rows(sample_data_path):
"""Read a sample dataset into a pandas dataframe, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_pandas(sample_data_path.name, row_filter="salary > 150000")
SampleDataSchema.to_schema().validate(df)
assert len(df) == SampleDataSchema.n_salary_over_150000()
def test_read_to_pandas_sample(sample_data_path):
"""Read a sample dataset into a pandas dataframe, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_pandas(sample_data_path.name, sample=10)
SampleDataSchema.to_schema().validate(df)
assert len(df) == 10
@pytest.mark.parametrize("limit", [0, 10])
def test_read_to_pandas_limit(sample_data_path, limit):
"""Read a sample dataset into a pandas dataframe, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_pandas(sample_data_path.name, limit=limit)
SampleDataSchema.to_schema().validate(df)
assert len(df) == limit
def test_read_to_pandas_base_path_check(sample_data_path):
"""Try if it isn't possible to read from outside the base path"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
with pytest.raises(ValueError):
backend.read_to_pandas("/tmp")
def test_read_to_dict(sample_data_path):
"""Read a sample dataset into a dictionary"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_dict(sample_data_path.name)
assert isinstance(df, dict)
assert set(df.keys()) == SampleDataSchema.to_schema().columns.keys()
df = pd.DataFrame(df)
SampleDataSchema.to_schema().validate(df)
assert len(df) == SampleDataSchema.length()
def test_read_to_dict_some_columns(sample_data_path):
"""Read a sample dataset into a dictionary, filtering some columns"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_dict(sample_data_path.name, columns=["id", "first_name"])
assert isinstance(df, dict)
assert set(df.keys()) == {"id", "first_name"}
df = pd.DataFrame(df)
SampleDataSchema.to_schema().select_columns(["id", "first_name"]).validate(df)
assert len(df) == SampleDataSchema.length()
def test_read_to_dict_some_rows(sample_data_path):
"""Read a sample dataset into a dictionary, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_dict(sample_data_path.name, row_filter="salary > 150000")
assert isinstance(df, dict)
assert set(df.keys()) == SampleDataSchema.to_schema().columns.keys()
df = pd.DataFrame(df)
SampleDataSchema.to_schema().validate(df)
assert len(df) == SampleDataSchema.n_salary_over_150000()
def test_read_to_dict_limit(sample_data_path):
"""Read a sample dataset into a dictionary, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_dict(sample_data_path.name, columns=["id", "first_name"], limit=10)
assert isinstance(df, dict)
assert set(df.keys()) == {"id", "first_name"}
df = pd.DataFrame(df)
SampleDataSchema.to_schema().select_columns(["id", "first_name"]).validate(df)
assert len(df) == 10
def test_read_to_dict_sample(sample_data_path):
"""Read a sample dataset into a dictionary, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_dict(sample_data_path.name, sample=10)
assert isinstance(df, dict)
assert set(df.keys()) == SampleDataSchema.to_schema().columns.keys()
df = pd.DataFrame(df)
SampleDataSchema.to_schema().validate(df)
assert len(df) == 10
def test_read_to_dict_base_path_check(sample_data_path):
"""Try if it isn't possible to read from outside the base path"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
with pytest.raises(ValueError):
backend.read_to_dict("/tmp")
@pytest.mark.parametrize("old_content", [False, True])
def test_write_replace_df(sample_dataframe, tmp_path_factory, old_content):
"""Write the dataframe, read it again and check identity"""
tempdir = tmp_path_factory.mktemp("test_write_replace_df")
if old_content:
(tempdir / "data.parquet").open("w").close()
backend = dframeio.ParquetBackend(str(tempdir))
backend.write_replace("data.parquet", sample_dataframe)
backend2 = dframeio.ParquetBackend(str(tempdir))
dataframe_after = backend2.read_to_pandas("data.parquet")
assert_frame_equal(dataframe_after, sample_dataframe)
@pytest.mark.parametrize("old_content", [False, True])
def test_write_replace_df_multifile(sample_dataframe, tmp_path_factory, old_content):
"""Write the dataframe, read it again and check identity"""
tempdir = tmp_path_factory.mktemp("test_write_replace_df")
if old_content:
(tempdir / "data").mkdir()
(tempdir / "data" / "old.parquet").open("w").close()
backend = dframeio.ParquetBackend(str(tempdir), rows_per_file=1000)
backend.write_replace("data", sample_dataframe)
assert sum(1 for _ in (tempdir / "data").glob("*")) == 5, "There should be 5 files"
if old_content:
assert not (tempdir / "data" / "old.parquet").exists()
backend2 = dframeio.ParquetBackend(str(tempdir))
dataframe_after = backend2.read_to_pandas("data")
assert_frame_equal(dataframe_after, sample_dataframe)
@pytest.mark.parametrize("old_content", [False, True])
def test_write_replace_df_partitioned(sample_dataframe, tmp_path_factory, old_content):
"""Write the dataframe, read it again and check identity"""
tempdir = tmp_path_factory.mktemp("test_write_replace_df")
if old_content:
(tempdir / "data").mkdir()
(tempdir / "data" / "old.parquet").open("w").close()
backend = dframeio.ParquetBackend(str(tempdir), partitions=["gender"])
backend.write_replace("data", sample_dataframe)
created_partitions = {f.name for f in (tempdir / "data").glob("*=*")}
assert created_partitions == {"gender=", "gender=Female", "gender=Male"}
if old_content:
assert not (tempdir / "data" / "old.parquet").exists()
backend2 = dframeio.ParquetBackend(str(tempdir))
dataframe_after = backend2.read_to_pandas("data")
# It is o.k. to get the partition keys back as categoricals, because
# that's more efficient. For comparison we make the column string again.
dataframe_after = dataframe_after.assign(gender=dataframe_after["gender"].astype(str))
assert_frame_equal(
dataframe_after,
sample_dataframe,
check_like=True,
)
@pytest.mark.parametrize("partitions", [[5], ["foobar"]])
def test_write_replace_df_invalid_partitions(tmp_path_factory, partitions):
"""Write the dataframe, read it again and check identity"""
tempdir = tmp_path_factory.mktemp("test_write_replace_df")
backend = dframeio.ParquetBackend(str(tempdir), partitions=partitions)
with pytest.raises(ValueError):
backend.write_replace("data.parquet", pd.DataFrame())
@pytest.mark.parametrize("old_content", [False, True])
def test_write_replace_dict(sample_dataframe_dict, tmp_path_factory, old_content):
"""Write the dataframe, read it again and check identity"""
tempdir = tmp_path_factory.mktemp("test_write_replace_df")
if old_content:
(tempdir / "data.parquet").open("w").close()
backend = dframeio.ParquetBackend(str(tempdir))
backend.write_replace("data.parquet", sample_dataframe_dict)
backend2 = dframeio.ParquetBackend(str(tempdir))
dataframe_after = backend2.read_to_dict("data.parquet")
assert dataframe_after == sample_dataframe_dict
@pytest.mark.parametrize("old_content", [False, True])
def test_write_replace_dict_multifile(sample_dataframe_dict, tmp_path_factory, old_content):
"""Write the dataframe, read it again and check identity"""
tempdir = tmp_path_factory.mktemp("test_write_replace_df")
if old_content:
(tempdir / "data").mkdir()
(tempdir / "data" / "old.parquet").open("w").close()
backend = dframeio.ParquetBackend(str(tempdir), rows_per_file=1000)
backend.write_replace("data", sample_dataframe_dict)
assert sum(1 for _ in (tempdir / "data").glob("*")) == 5, "There should be 5 files"
if old_content:
assert not (tempdir / "data" / "old.parquet").exists()
backend2 = dframeio.ParquetBackend(str(tempdir))
dataframe_after = backend2.read_to_dict("data")
assert dataframe_after == sample_dataframe_dict
@pytest.mark.parametrize("old_content", [False, True])
def test_write_replace_dict_partitioned(sample_dataframe_dict, tmp_path_factory, old_content):
"""Write the dataframe, read it again and check identity"""
tempdir = tmp_path_factory.mktemp("test_write_replace_df")
if old_content:
(tempdir / "data").mkdir()
(tempdir / "data" / "old.parquet").open("w").close()
backend = dframeio.ParquetBackend(str(tempdir), partitions=["gender"])
backend.write_replace("data", sample_dataframe_dict)
created_partitions = {f.name for f in (tempdir / "data").glob("*=*")}
assert created_partitions == {"gender=", "gender=Female", "gender=Male"}
if old_content:
assert not (tempdir / "data" / "old.parquet").exists()
backend2 = dframeio.ParquetBackend(str(tempdir))
dataframe_after = backend2.read_to_pandas("data")
# It is o.k. to get the partition keys back as categoricals, because
# that's more efficient. For comparison we make the column string again.
dataframe_after = dataframe_after.assign(gender=dataframe_after["gender"].astype(str))
cols = list(dataframe_after.columns)
assert_frame_equal(
dataframe_after.sort_values(by=cols).reset_index(drop=True),
pd.DataFrame(sample_dataframe_dict).sort_values(by=cols).reset_index(drop=True),
check_like=True,
)
@pytest.mark.parametrize("partitions", [[5], ["foobar"]])
def test_write_replace_dict_invalid_partitions(tmp_path_factory, partitions):
"""Write the dataframe, read it again and check identity"""
tempdir = tmp_path_factory.mktemp("test_write_replace_df")
backend = dframeio.ParquetBackend(str(tempdir), partitions=partitions)
with pytest.raises(ValueError):
backend.write_replace("data.parquet", {})
@pytest.fixture(params=["pandas", "dict"])
def first_chunk(request):
"""First n lines of the sample dataframe"""
if request.param == "pandas":
return read_sample_dataframe().iloc[:100]
return read_sample_dataframe().iloc[:100].to_dict("list")
@pytest.fixture(params=["pandas", "dict"])
def second_chunk(request):
    """Remaining lines of the sample dataframe"""
if request.param == "pandas":
return read_sample_dataframe().iloc[100:]
return read_sample_dataframe().iloc[100:].to_dict("list")
def test_write_append_df(sample_dataframe, first_chunk, second_chunk, tmp_path_factory):
"""Write the dataframe in two pieces, read it again and check identity"""
tempdir = tmp_path_factory.mktemp("test_write_append_df")
# Write first chunk
backend = dframeio.ParquetBackend(str(tempdir))
backend.write_append("data.parquet", first_chunk)
# Write second chunk
backend = dframeio.ParquetBackend(str(tempdir))
backend.write_append("data.parquet", second_chunk)
# Read and compare results
backend = dframeio.ParquetBackend(str(tempdir))
dataframe_after = backend.read_to_pandas("data.parquet")
|
assert_frame_equal(dataframe_after, sample_dataframe)
|
pandas.testing.assert_frame_equal
|
import pandas as pd
import yfinance as yf
import time
table=pd.read_html('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
df = table[0]
sp_ticks = df["Symbol"].to_list()
sp_ticks_forYF = [tick.replace(".","-") for tick in sp_ticks]
closes =
|
pd.DataFrame()
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
from pathlib import Path
from genomic_benchmarks.loc2seq.with_biopython import CACHE_PATH, DATASET_DIR_PATH
from genomic_benchmarks.loc2seq.with_biopython import _guess_location, _check_dataset_existence, _get_dataset_name
# TODO: Many of these functions are not prepared for the case when the folder in DATASET_DIR_PATH is not one benchmark but a set of benchmarks.
def info(interval_list_dataset, version=None):
'''
Print info about the benchmark.
Parameters:
interval_list_dataset (str or Path): Either a path or a name of dataset included in this package.
Returns:
DataFrame with counts of sequences for each class in the training and testing sets.
'''
interval_list_dataset = _guess_location(interval_list_dataset)
metadata = _check_dataset_existence(interval_list_dataset, version)
dataset_name = _get_dataset_name(interval_list_dataset)
dfs = {}
for c in metadata['classes']:
dfs[c] = {}
for t in ['train', 'test']:
dt_filename = Path(interval_list_dataset) / t / (c + '.csv.gz')
dfs[c][t] =
|
pd.read_csv(dt_filename, compression="gzip")
|
pandas.read_csv
|
import streamlit as st
import numpy as np
import pandas as pd
import datetime
import plotly.express as px
HOST = "postgres_streams"
PORT = "5432"
USER = "postgres" #! DO NOT DO THIS IN A PRODUCTION ENVIRONMENT!
PASSWORD = "<PASSWORD>"
DB = "bahn"
conn_string = f'postgresql://{USER}:{PASSWORD}@{HOST}:{PORT}/{DB}'
from sqlalchemy import create_engine
conn = create_engine(conn_string, echo = True).connect()
query = """SELECT * FROM delays;"""
result = pd.read_sql(query, con = conn)
# * Data wrangling on results df
result["n"] = result["n"].astype("int64")
result["delay"] = result["delay"].fillna(0).astype(np.int64)
result.drop(["stop_id", "timestamp", "ct"], axis = 1, inplace = True)
result.set_index("pt", inplace = True, drop = False)
result["minute"] = result.index.minute.fillna(0).astype(np.int16)
result["hour"] = result.index.hour.fillna(0).astype(np.int16)
result["day"] = result.index.day.fillna(0).astype(np.int16)
result["weekday"] = result.index.day_name()
result["month"] = result.index.month_name()
result["year"] = result.index.strftime("%Y").astype(str)
# * Define sidebars
st.set_page_config(layout="wide")
st.sidebar.header("Filter")
# Date
today = datetime.date.today()
date_from = st.sidebar.date_input("Start date", result.index.min())
date_to = st.sidebar.date_input("End date", today)
filtered_days = result.loc[date_from.strftime("%Y-%m-%d"):date_to.strftime("%Y-%m-%d")]
# Journey type
# "f": "string", # filter flags. Siehe 1.2.26. D = external, F = long distance, N = regional, S = SBahn
# journey_type = st.sidebar.multiselect("Filter journey types",
# pd.unique(result["f"]),
# default = pd.unique(result["f"]))
# filtered_journeys = filtered_days[filtered_days["f"].isin(journey_type)]
# Train type
train_type = st.sidebar.multiselect("Filter train types",
pd.unique(filtered_days["c"]),
default =
|
pd.unique(filtered_days["c"])
|
pandas.unique
|
import pandas as pd
def load_data(portfolio_data_absolute_path="/home/chris/Dropbox/Finance/data/portfolio_trades.ods",
stock_data_absolute_path="/home/chris/Dropbox/Finance/data/stock_trades.ods",
income_data_absolute_path="/home/chris/Dropbox/Finance/data/income.ods",
etf_master_data_absolute_path="/home/chris/Dropbox/Finance/data/generated/master_data_stocks.ods",
stock_price_data_absolute_path="/home/chris/Dropbox/Finance/data/generated/stock_prices.ods",
cashflow_path = "/home/chris/Dropbox/Finance/data/data_cashflow/bilanz_full.csv",
crypto_path = "/home/chris/Dropbox/Finance/data/crypto/crypto_trades_manual.ods",
include_speculation=False):
"""
Needs odfpy library to load .ods files!
Loads all necessary data sources of the given portfolio: ETF savings portfolio data, speculation data
(stocks, cryptos, etc).
:param portfolio_data_absolute_path: path to source data for the ETF portfolio (filetype: .ods)
:param etf_master_data_absolute_path: path to master data of ETFs (filetype: .ods)
:param stock_price_data_absolute_path: path to price data of ETFs (filetype: .ods)
:param include_speculation: Whether orders of speculation portfolio should be included in output
:param cashflow_path: csv file of cashflow data
:return: tuple of pd.DataFrames with portfolio transactions and master data
"""
orders_portfolio = pd.read_excel(portfolio_data_absolute_path, engine="odf", sheet_name="Buys")
dividends_portfolio = pd.read_excel(portfolio_data_absolute_path, engine="odf", sheet_name="Dividends")
orders_speculation = pd.read_excel(stock_data_absolute_path, engine="odf", sheet_name="Buys")
income = pd.read_excel(income_data_absolute_path, engine="odf")
stock_prices = pd.read_csv(stock_price_data_absolute_path)
etf_master = pd.read_csv(etf_master_data_absolute_path)
cashflow_init = pd.read_csv(cashflow_path)
df_crypto_deposits = pd.read_excel(crypto_path, engine="odf", sheet_name="Deposits", skiprows=2, usecols="A:G")
df_crypto_trades = pd.read_excel(crypto_path, engine="odf", sheet_name="Trades", skiprows=1)
if include_speculation == True:
return ((etf_master, orders_portfolio, dividends_portfolio, income, stock_prices, cashflow_init,
orders_speculation, df_crypto_deposits, df_crypto_trades))
else:
return ((etf_master, orders_portfolio, dividends_portfolio, income, stock_prices, cashflow_init,
None, df_crypto_deposits, df_crypto_trades))
def cleaning_cashflow(df_input: pd.DataFrame) -> pd.DataFrame:
"""
Data cleaning and preprocessing of cashflow data.
:param df_input: Multiple toshl monthly-exports appended into a single dataframe
:return: preprocessed dataframe
"""
import numpy as np
assert df_input.drop("Description",
axis=1).isna().sum().sum() == 0, \
f"There are NaN values in inputfile: {path_data}{filename_cashflow}"
### Data cleaning
df_init = df_input.copy()
df_init['Date'] = pd.to_datetime(df_init['Date'], format='%m/%d/%y')
df_init.drop(columns=['Account', 'Currency', 'Main currency', 'Description'], inplace=True)
df_init['Expense amount'] = df_init['Expense amount'].str.replace(',', '')
df_init['Income amount'] = df_init['Income amount'].str.replace(',', '').astype(np.float64)
df_init['In main currency'] = df_init['In main currency'].str.replace(',', '')
df_init['Expense amount'] = df_init['Expense amount'].astype(np.float64)
df_init['In main currency'] = df_init['In main currency'].astype(np.float64)
### Preprocessing of cashflow amounts
df_init['Amount'] = pd.Series([-y if x > 0. else y
for x, y in zip(df_init['Expense amount'],
df_init['In main currency']
)
]
)
assert df_init[(~df_init["Income amount"].isin(["0.0", "0"])) &
(df_init["In main currency"] != df_init["Amount"])
].count().sum() == 0, "Income amount does not match with main currency amount!"
assert df_init[(~df_init["Expense amount"].isin(["0.0", "0"])) &
(-df_init["In main currency"] != df_init["Amount"])
].count().sum() == 0, "Expense amount does not match with main currency amount!"
### Remap all tags with category "Urlaub" to "old-tag, Urlaub" and map afterwards all double-tags
### containing "Urlaub" to the Urlaub tag
df_init.loc[df_init["Category"] == "Urlaub", "Tags"] = df_init["Tags"].apply(lambda tag: tag + ", Urlaub")
df_init["split_tags"] = df_init["Tags"].apply(lambda x: x.split(","))
assert df_init[df_init["split_tags"].apply(len) > 1]["split_tags"].apply(lambda x: \
"Urlaub" in [s.strip() for s in x]
).all() == True,\
'Some entries with multiple tags do not contain "Urlaub"! Mapping not possible!'
df_init.loc[df_init["split_tags"].apply(len) > 1, "Tags"] = "Urlaub"
df_init = df_init[["Date", "Category", "Tags", "Amount"]]
return(df_init)
def split_cashflow_data(df_cleaned: pd.DataFrame) -> pd.DataFrame:
"""
Splits whole cashflow data into incomes and expenses and groups it monthly and sums amounts per tag
:param df_cleaned: Cleaned dataframe of cashflow
:return: Tuple of dataframes holding incomes and expenses, each grouped by month
"""
needed_columns = ["Tags", "Date", "Amount"]
assert set(needed_columns).intersection(set(df_cleaned.columns)) == set(needed_columns), \
"Columns missing! Need: {0}, Have: {1}".format(needed_columns, list(df_cleaned.columns))
df_grouped = df_cleaned.groupby([pd.Grouper(key='Date', freq='1M'), 'Tags']).sum()
incomes = df_grouped[df_grouped["Amount"] > 0.].copy()
expenses = df_grouped[df_grouped["Amount"] <= 0.].copy()
return((incomes, expenses))
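# Minimal usage sketch (toy data, illustration only): after cleaning, cashflow
# rows are grouped by month-end and tag and split by the sign of "Amount".
def _example_split_cashflow():
    df_toy = pd.DataFrame({
        "Date": pd.to_datetime(["2021-01-05", "2021-01-20"]),
        "Tags": ["Salary", "groceries"],
        "Amount": [2000.0, -50.0],
    })
    incomes, expenses = split_cashflow_data(df_toy)
    # incomes holds the 2000.0 salary row, expenses the -50.0 groceries row,
    # each indexed by (month-end date, tag).
    return incomes, expenses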
def preprocess_cashflow(df: pd.DataFrame) -> pd.DataFrame:
"""
Remap tags of input data to custom categories, and change the format of the dataframe in order to
easily do computations and plots of the cashflow data.
:param df: Dataframe, holding either incomes or expenses (cleaned) and grouped by month (tags as rows)
:return: dataframe, where each row consists of cashflow data of of a month, each column represents a
custom category
"""
assert isinstance(df.index, pd.core.indexes.multi.MultiIndex) and \
set(df.index.names) == set(["Date", "Tags"]) and \
list(df.columns) == ["Amount"], "Dataframe is not grouped by month!"
### Define custom categories for all tags of Toshl: Make sure category names differ from tag-names,
### otherwise column is dropped and aggregate is wrong
category_dict = {
"home": ['rent', 'insurance', 'Miete'],
"food_healthy": ['restaurants', 'Lebensmittel', 'groceries', 'Restaurants', 'Restaurant Mittag'],
"food_unhealthy": ['Fast Food', 'Süßigkeiten'],
"alcoholic_drinks": ['alcohol', 'Alkohol'],
"non-alcoholic_drinks": ['Kaffee und Tee', 'Erfrischungsgetränke', 'coffee & tea', 'soft drinks'],
"travel_vacation": ['sightseeing', 'Sightseeing', 'Beherbergung', 'accommodation', 'Urlaub'],
"transportation": ['bus', 'Bus', 'taxi', 'Taxi', 'metro', 'Metro', 'Eisenbahn', 'train', 'car',
'Auto', 'parking', 'airplane', 'fuel', 'Flugzeug'],
"sports": ['training', 'Training', 'MoTu', 'Turnier', 'sport equipment', 'Billard', 'Konsum Training'],
"events_leisure_books_abos": ['events', 'Events', 'adult fun', 'Spaß für Erwachsene', 'games', 'sport venues',
'membership fees', 'apps', 'music', 'books'],
"clothes_medicine": ['clothes', 'accessories', 'cosmetics', 'medicine', 'hairdresser',
'medical services', 'medical servies', "shoes"],
"private_devices": ['devices', 'bike', 'bicycle', 'movies & TV', 'mobile phone', 'home improvement',
'internet', 'landline phone', 'furniture'],
"presents": ['birthday', 'X-Mas'],
"other": ['wechsel', 'income tax', 'tuition', 'publications', 'Spende'],
"stocks": ['equity purchase'],
#### Income categories
"compensation_caution": ["Entschädigung"],
"salary": ["Salary", "Gehalt Vorschuss"],
"present": ["Geschenk"],
"tax_compensation": ["Kirchensteuer Erstattung", "Steuerausgleich"],
"investment_profit": ["Investing"]
}
from functools import reduce
category_list = reduce(lambda x, y: x + y, category_dict.values())
### Need another format of the table, fill NaNs with zero and drop level 0 index "Amount"
pivot_init = df.unstack()
pivot_init.fillna(0, inplace=True)
pivot_init.columns = pivot_init.columns.droplevel()
#### Extract expenses and incomes from building-upkeep (caution) when switching flats
if 'building upkeep' in pivot_init.columns:
building_upkeep = pivot_init['building upkeep']
pivot_init.drop(columns=['building upkeep'], inplace=True)
elif 'Wechsel' in pivot_init.columns:
building_upkeep = pivot_init['Wechsel']
pivot_init.drop(columns=['Wechsel'], inplace=True)
else:
building_upkeep = None
### Apply custom category definition to dataframe
not_categorized = [tag for tag in pivot_init.columns if tag not in category_list]
assert len(not_categorized) == 0, "There are some tags, which are not yet categorized: {}".format(not_categorized)
pivot = pivot_init.copy()
for category, tag_list in category_dict.items():
tag_list_in_data = list(set(tag_list).intersection(set(pivot.columns)))
pivot[category] = pivot[tag_list_in_data].sum(axis=1)
pivot.drop(columns=tag_list_in_data, inplace=True)
### Keep only categories with non-zero total amount in dataframe
category_sum = pivot.sum().reset_index()
nonzero_categories = list(category_sum[category_sum[0] != 0.]["Tags"])
pivot = pivot[nonzero_categories]
return((building_upkeep, pivot))
def combine_incomes(toshl_income, excel_income):
"""
Combines two data sources of incomes: toshl incomes and incomes from cashflow excel.
:param toshl_income: Preprocessed dataframe of toshl incomes (after cleaning and splitting)
:param excel_income: Raw excel income data
:return: Total income data
"""
df_in = toshl_income.reset_index().copy()
df_in["Tags"] = df_in["Tags"].apply(lambda x: "Salary" if x in ["Privat", "NHK", "OL"] else x)
df_in2 = excel_income.copy()
df_in2 = df_in2[["Datum", "Art", "Betrag"]].rename(columns={"Datum": "Date",
"Art": "Tags",
"Betrag": "Amount"}).dropna()
df_in2["Date"] = pd.to_datetime(df_in2["Date"], format="%d.%m.%Y")
df_in2["Tags"] = df_in2["Tags"].apply(lambda x: "Salary" if x in ["Gehalt", "Sodexo"] else x)
df_income = pd.concat([df_in, df_in2], ignore_index=True)
assert df_income.count()[0] == df_in.count()[0] + df_in2.count()[0], "Some income rows were lost!"
df_income = df_income.groupby([pd.Grouper(key='Date', freq='1M'), 'Tags']).sum()
return(df_income)
def preprocess_prices(df_prices: pd.DataFrame) -> pd.DataFrame:
"""
Preprocessing of price dataframe. Get latest available price.
:param df_prices: Needed columns: ISIN, Price, Date, Currency
:return: dataframe containing prices of stocks defined by ISIN on latest available date
"""
dfp = df_prices.copy()
assert dfp["Currency"].drop_duplicates().count() == 1, "Multiple currencies used for price data!"
assert dfp["Currency"].iloc[0] == "EUR", "Currency is not Euro!"
dfp["Date"] = pd.to_datetime(dfp["Date"], format="%d.%m.%Y")
latest_date = dfp["Date"].max()
df_current_prices = dfp[dfp["Date"] == latest_date].reset_index(drop=True)
return(df_current_prices)
def preprocess_orders(df_orders: pd.DataFrame) -> pd.DataFrame:
"""
Set datatypes of columns and split input into dividends transactions and savings-plan transactions.
:param df_orders: Includes all transaction data of the portfolio, all columns in list portfolio_columns
need to be present, Kommentar column needs to be either "monatlich" (transaction of the
savings plan, an ETF is bought) or "Dividende" (income)
:return: tuple of orders- and dividend transaction entries
"""
orders_portfolio = df_orders.copy()
portfolio_columns = ["Index", "Datum", "Kurs", "Betrag", "Kosten", "Anbieter", "Name", "ISIN"]
new_portfolio_columns = ["Index", "Date", "Price", "Investment", "Ordercost", "Depotprovider", "Name", "ISIN"]
rename_columns = {key: value for key, value in zip(portfolio_columns, new_portfolio_columns)}
orders_portfolio = orders_portfolio.rename(columns=rename_columns)
assert set(orders_portfolio.columns).intersection(set(new_portfolio_columns)) == set(new_portfolio_columns), \
"Some necessary columns are missing in the input dataframe!"
### Keep only valid entries
orders_portfolio = orders_portfolio[~orders_portfolio["Investment"].isna()]
orders_portfolio = orders_portfolio[orders_portfolio["Art"] == "ETF Sparplan"]
orders_portfolio = orders_portfolio[new_portfolio_columns]
orders_portfolio = orders_portfolio[~orders_portfolio["Date"].isna()]
orders_portfolio["Date"] =
|
pd.to_datetime(orders_portfolio["Date"], format="%d.%m.%Y")
|
pandas.to_datetime
|
import pandas as pd
import copy
import argparse
import helper
env_data = helper.fetch_maze()
def is_move_valid_visited(env_data,visit_map,loc,act):
"""
Judge whether the robot can take action act
at location loc.
Keyword arguments:
env -- list, the environment data
loc -- tuple, the robot's current location
act -- string, the action the robot intends to take
"""
nextloc=list(loc)
if act=='u':
nextloc[0]=nextloc[0]-1
elif act=='d':
nextloc[0]=nextloc[0]+1
elif act=='r':
nextloc[1]=nextloc[1]+1
elif act=='l':
nextloc[1]=nextloc[1]-1
else:
return False
if (nextloc[0] in range(len(env_data))) and (nextloc[1] in range(len(env_data[0]))):
if env_data[nextloc[0]][nextloc[1]]==0 or env_data[nextloc[0]][nextloc[1]]==1 or env_data[nextloc[0]][nextloc[1]]==3:
if visit_map[nextloc[0]][nextloc[1]]==0 or visit_map[nextloc[0]][nextloc[1]]==1 or visit_map[nextloc[0]][nextloc[1]]==3:
return True
else:
return False
else:
return False
else:
return False
def valid_novisit_actions(env_data,visit_map,loc):
valid_action=[]
'''
Follow u,d,r,l direction to move around
'''
for i in ['u','d','r','l']:
if is_move_valid_visited(env_data,visit_map,loc,i):
valid_action.append(i)
return valid_action
def get_valid_neighbor_loc(loc,action_list):
neighbor_list=list()
'''
Follow u,d,r,l direction to move around
'''
for i in action_list:
new_loc=list(loc)
if i=='u':
new_loc[0]=new_loc[0]-1
elif i=='d':
new_loc[0]=new_loc[0]+1
elif i=='r':
new_loc[1]=new_loc[1]+1
elif i=='l':
new_loc[1]=new_loc[1]-1
neighbor_list.append((new_loc[0],new_loc[1]))
return neighbor_list
def move_robot(loc, act):
move_dict ={
'u': (-1,0),
'd': (1,0),
'l': (0,-1),
'r': (0,1)
}
return loc[0] + move_dict[act][0], loc[1] + move_dict[act][1]
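# Illustrative check (hypothetical coordinates, not part of the maze logic):
# move_robot shifts a (row, col) location by the delta move_dict assigns to the action.
def _example_move_robot():
    assert move_robot((2, 3), 'u') == (1, 3)  # 'u' decreases the row index
    assert move_robot((2, 3), 'r') == (2, 4)  # 'r' increases the column index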
def bfs_move_robot(env_data,visit_map,loc,act_list,route_table):
#algorithm reference: https://blog.csdn.net/raphealguo/article/details/7523411
for act in act_list:
new_loc=list(loc)
if act=='u':
new_loc[0]=new_loc[0]-1
elif act=='d':
new_loc[0]=new_loc[0]+1
elif act=='r':
new_loc[1]=new_loc[1]+1
elif act=='l':
new_loc[1]=new_loc[1]-1
mark_visit(visit_map,(new_loc[0],new_loc[1]),'gray')
route_table=route_table.append(pd.DataFrame(data={'source_loc':[(list(loc)[0],list(loc)[1])],'move_direct':act,'next_loc':[(new_loc[0],new_loc[1])],'route_type':'forward'}),ignore_index=True)
if env_data[new_loc[0]][new_loc[1]]==3:
return route_table
else:
Source_loc=new_loc
new_loc=move_back_robot(new_loc,act)
act=roll_back_direction(act)
route_table=route_table.append(pd.DataFrame(data={'source_loc':[(Source_loc[0],Source_loc[1])],'move_direct':act,'next_loc':[(new_loc[0],new_loc[1])],'route_type':'backward'}),ignore_index=True)
continue
return route_table
def move_back_robot(loc,act):
'''Rollback need not check visit_map'''
new_loc=list(loc)
if act=='u':
new_loc[0]=new_loc[0]+1
elif act=='d':
new_loc[0]=new_loc[0]-1
elif act=='r':
new_loc[1]=new_loc[1]-1
elif act=='l':
new_loc[1]=new_loc[1]+1
return (new_loc[0],new_loc[1])
def roll_back_direction(act):
'''Rollback need not check visit_map'''
if act=='u':
new_act='d'
elif act=='d':
new_act='u'
elif act=='l':
new_act='r'
elif act=='r':
new_act='l'
return new_act
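# Illustrative check (hypothetical location): roll_back_direction returns the
# opposite action, and move_back_robot undoes the corresponding move.
def _example_rollback():
    start = (2, 3)
    moved = move_robot(start, 'u')            # -> (1, 3)
    assert roll_back_direction('u') == 'd'
    assert move_back_robot(moved, 'u') == start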
def mark_visit(visit_map,loc,color):
new_loc=list(loc)
if color=='dark':
visit_map[new_loc[0]][new_loc[1]]=4
elif color=='gray':
visit_map[new_loc[0]][new_loc[1]]=5
else:
print('Only accept color:dark or gray!')
def trace_route(route_table,initial_loc,from_loc,to_loc):
back_route=pd.DataFrame(columns=['source_loc','move_direct','next_loc','route_type'])
forward_route=
|
pd.DataFrame(columns=['source_loc','move_direct','next_loc','route_type'])
|
pandas.DataFrame
|
# -*- coding: UTF-8 -*-
import pandas as pd
import config
from config import engine
# Users registered today
def registUser(self, files):
f1 = pd.DataFrame(pd.read_csv(files, sep='\t', header=None, names=config.name))
# Truncate timestamps to year-month-day
f1['daytime'] = pd.to_datetime(f1['daytime']).dt.normalize()
# register = f1.drop_duplicates('user_id', 'first')['event_id'].count()
a = f1['user_id'].unique()
for i in a:
if i not in self.registuser:
self.registuser.append(i)
self.register += 1
# Daily active users
def startUser(self, files):
f1 = pd.DataFrame(pd.read_csv(files, sep='\t', header=None, names=config.name))
# start = f1.drop_duplicates('user_id', 'first')['event_id'].count()
user = f1['user_id'].unique()
for i in user:
if i not in self.liveuser:
self.liveuser.append(i)
self.live += 1
# Retention rate
def retentRate(self, file):
f1 = pd.DataFrame(
|
pd.read_csv(file, sep='\t', header=None, names=config.name)
|
pandas.read_csv
|
import numpy as np
import pandas as pd
# Create and populate a 5x2 NumPy array.
my_data = np.array([[0, 3], [10, 7], [20, 9], [30, 14], [40, 15]])
# Create a Python list that holds the names of the two columns.
my_column_names = ['temperature', 'activity']
# Create a DataFrame.
my_dataframe =
|
pd.DataFrame(data=my_data, columns=my_column_names)
|
pandas.DataFrame
|
import sys
import unittest
import pandas as pd
from src.preprocessing import format_ocean_proximity
class FormattingTestCase(unittest.TestCase):
def setUp(self):
self.ref_df = pd.read_csv("housing.csv")
def test_format_ocean_proximity(self):
ref_output = format_ocean_proximity(
|
pd.DataFrame(self.ref_df)
|
pandas.DataFrame
|
"""Predictor.."""
import os
import shutil
import json
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
from aircraft_detector.utils.utils import (
retrieve_files,
get_feature_directory_name,
refresh_directory,
print_verbose,
load_spectrum_settings,
load_state_settings,
)
import aircraft_detector.utils.feature_helper as fh
import aircraft_detector.utils.pytorch_earlystopping as es
from aircraft_detector.utils.dynamic_net import Net
import aircraft_detector.utils.plot_helper as ph
from aircraft_detector.utils.dynamic_net import (
set_net_configuration,
train_network,
test_network,
_create_network,
)
class EgoNoisePredictor:
def __init__(
self, root_directory, spectrum_settings=None, states_settings=None,
):
# set root directory
self._dir_root = root_directory
# set the missing feature settings to their defaults
if spectrum_settings is None:
spectrum_settings = {}
self._spectrum = load_spectrum_settings(spectrum_settings)
# set the missing states settings to their defaults
if states_settings is None:
states_settings = {}
self._states = load_state_settings(states_settings)
# derive root input directory (feature dataset) from parameters
self._dir_root_set = os.path.join(
self._dir_root,
"Ego-Noise Prediction",
"Parameter Sets",
get_feature_directory_name(self._spectrum),
)
# verbosity
self.verbose = True
self.super_verbose = False
# parts of the dataset (initialized in load_datasets)
self._train_set = None
self._val_set = None
self._test_set = None
# network configuration (initialized in set_net_configuration)
self._net_config = None
# set loss to MSE loss
self._loss_fn = nn.MSELoss()
# train settings (supplied in train_model)
self._train_settings = None
def load_datasets(self):
# load training, validation, test data
self._train_set = self._load_data(
os.path.join(self._dir_root_set, "Dataset", "Train")
)
self._val_set = self._load_data(
os.path.join(self._dir_root_set, "Dataset", "Val")
)
self._test_set = self._load_data(
os.path.join(self._dir_root_set, "Dataset", "Test")
)
def _load_data(self, dir_split):
# load N files
files_X = retrieve_files(os.path.join(dir_split, "States")) # input
files_Y = retrieve_files(os.path.join(dir_split, "Spectra")) # output
# load states: NxTxS
data_X = [pd.read_csv(f, header=None).to_numpy().transpose() for f in files_X]
# extract only relevant states
data_X = [
fh.extract_relevant_states(data, self._states["states"]) for data in data_X
]
# load spectra: NxTxF
data_Y = [pd.read_csv(f, header=None).to_numpy().transpose() for f in files_Y]
if self._states["context_frames"] > 0:
# add context to the dataset: (NxTxS, NxTxF) -> (NxT-CxCxS, NxT-CxCxF)
data_X, data_Y = list(
zip(*[self._add_context(dX, dY) for dX, dY in zip(data_X, data_Y)])
)
else:
# add placeholder dim. for X: NxTxS -> NxTx1xS
data_X = [np.expand_dims(X, 1) for X in data_X]
# concatenate N and T axes to get 3D set
data_X = np.concatenate(data_X, axis=0)
data_Y = np.concatenate(data_Y, axis=0)
# convert to torch dataset
X = torch.from_numpy(data_X).float()
Y = torch.from_numpy(data_Y).float()
dataset = torch.utils.data.TensorDataset(X, Y)
return dataset
def _add_context(self, states, spectra=None):
# 3D copy of 'original' state data: TxS -> Tx1xS
states_extended = np.expand_dims(states, 1)
# shift states in time dim., then add to extended array
n = 0
while n < self._states["context_frames"]:
# get states at previous time index
states_prev = np.roll(states, n + 1, axis=0)
# add prev. states to 2nd axis of extended states
states_extended = np.concatenate(
(np.expand_dims(states_prev, 1), states_extended), axis=1
)
n += 1
# remove first C time indices for causality: T-CxCxS
states_extended = states_extended[self._states["context_frames"] :]
if spectra is None:
return states_extended
else:
# remove first C time indices to match length of states_extended
spectra_modified = spectra[self._states["context_frames"] :]
return states_extended, spectra_modified
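    # Shape sketch (toy numbers, for illustration): with T=5 time steps, S=2 states
    # and context_frames C=2, _add_context turns a (5, 2) state array into a
    # (3, 3, 2) array: each remaining time step carries the current frame plus its
    # two predecessors, and the first C steps are dropped so samples only look backwards in time.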
def set_net_configuration(self, layers):
assert (
self._test_set is not None
), "Please load the data via load_datasets before setting a network configuration."
self._net_config = set_net_configuration(layers, self._test_set)
def tune_hyperparameters(self, parameterization_dict, training_settings=None):
"""Use for Bayesian Optimization.
parameterization_dict: contains ax ranges..
"""
# convert the parameterization to the class config representation
new_config = _convert_parameterization_to_config(parameterization_dict)
self.set_net_configuration(new_config)
# train, evaluate model
_, losses, _ = self.train_network(training_settings)
val_loss = losses[1]
return val_loss
def train_network(self, train_settings):
# verify that network config has been set
assert (
self._net_config is not None
), "Please set a network configuration via set_net_configuration before training the network."
# store train settings
self._train_settings = train_settings
# train network
network, loss, loss_history = train_network(
train_settings,
self._train_set,
self._val_set,
self._net_config,
self._loss_fn,
self.verbose,
self.super_verbose,
)
return network, loss, loss_history
def test_network(self, network):
loss = test_network(
network, self._test_set, self._net_config["device"], self._loss_fn
)
return loss
def save_network(self, network, loss, overwrite=False):
# generate output filename and directory for model and config
network_id = "%.6f_c%d" % (loss, self._states["context_frames"])
dir_model = os.path.join(self._dir_root_set, "Models", network_id)
fn_model = "enp_model.pt"
fn_config = "enp_config.json"
# create or overwrite directory
if os.path.exists(dir_model) and not overwrite:
print_verbose(self.verbose, "Network already exists.")
return dir_model
refresh_directory(dir_model)
# save network
torch.save(network.state_dict(), os.path.join(dir_model, fn_model))
# save network config and settings
config_file = open(os.path.join(dir_model, fn_config), "w")
json.dump(
[self._net_config, self._spectrum, self._states, self._train_settings],
config_file,
)
config_file.close()
return dir_model
def save_network_output(self, model, dir_model, subset, plot=True):
# refresh the output directories
output_subdirs = ["Original", "Predicted", "Residual"]
for subdir in output_subdirs:
refresh_directory(os.path.join(dir_model, "Output", subset, subdir))
# load the original files (states, spectra) in the subset
dir_states = os.path.join(self._dir_root_set, "Dataset", subset, "States")
files_states = retrieve_files(dir_states)
dir_spectra = os.path.join(self._dir_root_set, "Dataset", subset, "Spectra")
files_spectra = retrieve_files(dir_spectra)
for i in range(len(files_states)):
# load original spectra and cut-off context
original = pd.read_csv(files_spectra[i], header=None).to_numpy()
if self._states["context_frames"] > 0:
original = original[:, self._states["context_frames"] :]
# predict spectra from states file
predicted = self._predict(model, files_states[i], original.shape)
# compute residual
residual = original - predicted
# plot if desired
if plot:
self._plot_model_output(original, predicted, residual)
# save output
fn = os.path.split(files_states[i])[-1] # target filename
output_spectra = [original, predicted, residual]
for spectrum, subdir in zip(output_spectra, output_subdirs):
# save spectrum
dir_out = os.path.join(dir_model, "Output", subset, subdir)
pd.DataFrame(spectrum).to_csv(
os.path.join(dir_out, fn), index=False, header=False
)
def _predict(self, network, file_states, out_shape):
# load states
S =
|
pd.read_csv(file_states, header=None)
|
pandas.read_csv
|
"""Logs what the user is working on and for how long at a time.
On alternate runs it logs the starting time; on the runs in between it logs
the elapsed time and the project being worked on.
"""
import os
import csv
import subprocess
import pandas as pd
import tkinter as tk
from datetime import datetime
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg as agg
import widgets
import config
increment = config.worklog['INCREMENT']
now = datetime.now()
SUM_LAST_MONTH = True
class WorkLog:
"""Contains the module functionality."""
def __init__(self):
self.ts = 'timestamp.csv'
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, tb):
pass
def manual_entry(self):
"""Opens the log file for editing mistakes in logging."""
subprocess.call(f'{os.getcwd()}/worklog.csv',
shell=True)
def check_timestamp(self):
"""Checks the latest timestamp. If empty then it throws
a pd.errors.EmptyDataError, which we then catch, write a
new timestamp, give feedback and take no further action.
"""
try:
df_timestamp = pd.read_csv(self.ts,
header=None)
return df_timestamp
except pd.errors.EmptyDataError:
with open(self.ts,
mode='w') as f:
w = csv.writer(f)
w.writerow([now])
self.feedback()
@staticmethod
def feedback():
"""Feedback Box for the user to know
that the logging has successfully started.
"""
widget = widgets.Widget(config=config.feedback)
root = widget.root_mkr()
widget.frm_mkr()
timestamp = now.strftime(r'%Y-%m-%d %H:%M')
msg = f'Logging\nStarted\n{timestamp}'
widget.lbl_mkr(rel_y=0.3,
txt=msg,
anchor='c')
root.after(3000, root.destroy)
root.mainloop()
@staticmethod
def log_work():
"""Logs the project being worked on."""
project = Setup.entry_project.get()
comment = Setup.entry_comment.get()
t_start = Setup.entry_start.get()
t_end = Setup.entry_end.get()
t_i = datetime.strptime(t_start,
config.worklog['DT_FORMAT'])
t_f = datetime.strptime(t_end,
config.worklog['DT_FORMAT'])
delta = t_f-t_i
# timedelta has no strftime-style formatter, so pad short durations
# with a leading zero manually.
delta = str(delta) if delta.seconds/3600 > 9 else '0'+str(delta)
with open('worklog.csv',
mode='a') as f:
w = csv.writer(f,
delimiter="\t",
lineterminator="\n")
w.writerow([t_start,
t_end,
delta,
project,
comment])
# Rewrite the timestamp file as empty
with open('timestamp.csv', 'w'):
pass
Setup.root.destroy()
def sum_month(self):
"""Logs and plots the total hours worked last month."""
def plot_sum():
"""Bar chart displaying the total hours logged last month."""
root = tk.Tk()
root.title('Work Logger')
df_plt =
|
pd.DataFrame()
|
pandas.DataFrame
|
#!/usr/bin/env python
import pandas as pd
import os
def process_remaining_images(edition_name, existing_rt):
# open directory containing remaining images
dirpath = os.path.join("output", "edition " + str(edition_name), "images")
images = [f for f in os.listdir(dirpath) if not f.startswith('.')]
images = list(map(lambda filename: filename.removesuffix('.png'), images))
images.sort(key=lambda filename: int(filename))
new_rt =
|
pd.DataFrame([], columns=existing_rt.columns)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pyarrow as pa
import pytest
from kartothek.core.cube.conditions import (
C,
Condition,
Conjunction,
EqualityCondition,
GreaterEqualCondition,
GreaterThanCondition,
InequalityCondition,
InIntervalCondition,
IsInCondition,
LessEqualCondition,
LessThanCondition,
)
class TestVirtualColumn:
def test_convert(self):
c = C(b"foo")
assert c.name == "foo"
assert isinstance(c.name, str)
def test_frozen(self):
c = C("foo")
with pytest.raises(AttributeError):
c.name = "bar"
class TestSimpleCondition:
@pytest.mark.parametrize(
"f,t,op,value",
[
(
# f
lambda c: c == 42,
# t
EqualityCondition,
# op
"==",
# value
42,
),
(
# f
lambda c: c != 42,
# t
InequalityCondition,
# op
"!=",
# value
42,
),
(
# f
lambda c: c < 42,
# t
LessThanCondition,
# op
"<",
# value
42,
),
(
# f
lambda c: c <= 42,
# t
LessEqualCondition,
# op
"<=",
# value
42,
),
(
# f
lambda c: c > 42,
# t
GreaterThanCondition,
# op
">",
# value
42,
),
(
# f
lambda c: c >= 42,
# t
GreaterEqualCondition,
# op
">=",
# value
42,
),
(
# f
lambda c: c.isin(42),
# t
IsInCondition,
# op
"in",
# value
(42,),
),
],
)
def test_op(self, f, t, op, value):
c = C("foö")
cond = f(c)
assert isinstance(cond, t)
assert cond.OP == op
assert str(cond) == "foö {} {}".format(op, value)
assert cond.predicate_part == [("foö", op, value)]
assert cond.active
hash(cond)
def test_frozen(self):
cond = C("foö") == 42
with pytest.raises(AttributeError):
cond.column = "bar"
with pytest.raises(AttributeError):
cond.value = 1337
with pytest.raises(AttributeError):
cond.OP = "x"
def test_filter_df(self):
cond = C("foö") == 42
df = pd.DataFrame({"foö": [13, 42, 42, 100], "bar": 0.0})
df_actual = cond.filter_df(df)
df_expected = df.loc[df["foö"] == 42]
pdt.assert_frame_equal(df_actual, df_expected)
def test_fails_null_scalar(self):
with pytest.raises(ValueError) as exc:
C("foö") == None # noqa
assert str(exc.value) == 'Cannot use NULL-value to compare w/ column "foö"'
def test_fails_null_list(self):
with pytest.raises(ValueError) as exc:
C("foö").isin([0, None, 1])
assert str(exc.value) == 'Cannot use NULL-value to compare w/ column "foö"'
def test_fails_colcol_scalar(self):
c1 = C("foö")
c2 = C("bar")
with pytest.raises(TypeError) as exc:
c1 == c2
assert str(exc.value) == "Cannot compare two columns."
def test_fails_colcol_list(self):
c1 = C("foö")
c2 = C("bar")
with pytest.raises(TypeError) as exc:
c1.isin([c2])
assert str(exc.value) == "Cannot compare two columns."
def test_fails_colcond_scalar(self):
c1 = C("foö")
c2 = C("bar")
cond = c2 == 42
with pytest.raises(TypeError) as exc:
c1 == cond
assert str(exc.value) == "Cannot use nested conditions."
def test_fails_colcond_list(self):
c1 = C("foö")
c2 = C("bar")
cond = c2 == 42
with pytest.raises(TypeError) as exc:
c1.isin([cond])
assert str(exc.value) == "Cannot use nested conditions."
def test_fails_colconj_scalar(self):
c1 = C("foö")
c2 = C("bar")
conj = (c2 == 42) & (c2 == 10)
with pytest.raises(TypeError) as exc:
c1 == conj
assert str(exc.value) == "Cannot use nested conditions."
def test_fails_colconj_list(self):
c1 = C("foö")
c2 = C("bar")
conj = (c2 == 42) & (c2 == 10)
with pytest.raises(TypeError) as exc:
c1.isin([conj])
assert str(exc.value) == "Cannot use nested conditions."
def test_fails_doublecompare(self):
with pytest.raises(TypeError) as exc:
1 < C("foö") <= 5
assert str(exc.value).startswith("Cannot check if a condition is non-zero.")
@pytest.mark.parametrize(
"s,expected",
[
("sö == a", C("sö") == "a"),
("sö = a", C("sö") == "a"),
("sö==a", C("sö") == "a"),
("sö=='a b'", C("sö") == "a b"),
("iö != 10", C("iö") != 10),
("iö > 10", C("iö") > 10),
("iö < 10", C("iö") < 10),
("iö >= 10", C("iö") >= 10),
("iö <= 10", C("iö") <= 10),
(" sö == a ", C("sö") == "a"),
("( sö == a )", C("sö") == "a"),
("tö == 2018-01-01", C("tö") == pd.Timestamp("2018-01-01")),
],
)
def test_from_string_ok(self, s, expected):
all_types = {
"sö": pa.string(),
"bö": pa.bool_(),
"iö": pa.int16(),
"tö": pa.timestamp("ns"),
}
actual = Condition.from_string(s, all_types)
assert actual == expected
s2 = str(actual)
actual2 = Condition.from_string(s2, all_types)
assert actual2 == actual
@pytest.mark.parametrize(
"s,expected",
[
("zö == a", 'Unknown column "zö" in condition "zö == a"'),
("sö ==", 'Cannot parse condition "sö =="'),
("== a", 'Cannot parse condition "== a"'),
("sö <=", 'Cannot parse condition "sö <="'),
],
)
def test_from_string_error(self, s, expected):
all_types = {"sö": pa.string(), "bö": pa.bool_(), "iö": pa.int16()}
with pytest.raises(ValueError) as exc:
Condition.from_string(s, all_types)
assert str(exc.value) == expected
class TestInIntervalCondition:
def test_simple(self):
cond = C("foö").in_interval(10, 20)
assert isinstance(cond, InIntervalCondition)
assert str(cond) == "foö.in_interval(10, 20)"
assert cond.predicate_part == [("foö", ">=", 10), ("foö", "<", 20)]
assert cond.active
hash(cond)
def test_begin_null(self):
cond = C("foö").in_interval(stop=20)
assert isinstance(cond, InIntervalCondition)
assert str(cond) == "foö.in_interval(None, 20)"
assert cond.predicate_part == [("foö", "<", 20)]
assert cond.active
def test_end_null(self):
cond = C("foö").in_interval(10)
assert isinstance(cond, InIntervalCondition)
assert str(cond) == "foö.in_interval(10, None)"
assert cond.predicate_part == [("foö", ">=", 10)]
assert cond.active
def test_both_null(self):
cond = C("foö").in_interval()
assert isinstance(cond, InIntervalCondition)
assert str(cond) == "foö.in_interval(None, None)"
assert cond.predicate_part == []
assert not cond.active
def test_fails_null(self):
col1 = C("foö")
with pytest.raises(ValueError) as exc:
col1.in_interval(10, np.nan)
assert str(exc.value) == 'Cannot use NULL-value to compare w/ column "foö"'
def test_fails_colcol(self):
col1 = C("foö")
col2 = C("bar")
with pytest.raises(TypeError) as exc:
col1.in_interval(10, col2)
assert str(exc.value) == "Cannot compare two columns."
def test_fails_colcond(self):
col1 = C("foö")
col2 = C("bar")
cond = col2 == 42
with pytest.raises(TypeError) as exc:
col1.in_interval(10, cond)
assert str(exc.value) == "Cannot use nested conditions."
def test_fails_colconj(self):
col1 = C("foö")
col2 = C("bar")
conj = (col2 == 42) & (col2 == 10)
with pytest.raises(TypeError) as exc:
col1.in_interval(10, conj)
assert str(exc.value) == "Cannot use nested conditions."
class TestConjunction:
def test_simple(self):
col = C("foö")
cond1 = col < 10
cond2 = col > 0
conj = cond1 & cond2
assert isinstance(conj, Conjunction)
assert conj.conditions == (cond1, cond2)
assert str(conj) == "(foö < 10) & (foö > 0)"
assert conj.columns == {"foö"}
assert conj.predicate == [("foö", "<", 10), ("foö", ">", 0)]
assert conj.split_by_column() == {"foö": conj}
def test_nested_conj_cond(self):
col = C("foö")
cond1 = col < 10
cond2 = col > 0
cond3 = col != 10
conj1 = cond1 & cond2
conj2 = conj1 & cond3
assert isinstance(conj2, Conjunction)
assert conj2.conditions == (cond1, cond2, cond3)
assert str(conj2) == "(foö < 10) & (foö > 0) & (foö != 10)"
assert conj2.columns == {"foö"}
assert conj2.predicate == [
("foö", "<", 10),
("foö", ">", 0),
("foö", "!=", 10),
]
assert conj2.split_by_column() == {"foö": conj2}
def test_nested_cond_conj(self):
col = C("foö")
cond1 = col < 10
cond2 = col > 0
cond3 = col != 10
conj1 = cond2 & cond3
conj2 = cond1 & conj1
assert isinstance(conj2, Conjunction)
assert conj2.conditions == (cond1, cond2, cond3)
def test_nested_conj_conj(self):
col = C("foö")
cond1 = col < 10
cond2 = col > 0
cond3 = col != 10
cond4 = col != 11
conj1 = cond1 & cond2
conj2 = cond3 & cond4
conj3 = conj1 & conj2
assert isinstance(conj3, Conjunction)
assert conj3.conditions == (cond1, cond2, cond3, cond4)
def test_fails_nocond(self):
col = C("foö")
cond1 = col < 10
with pytest.raises(TypeError) as exc:
cond1 & col
assert str(exc.value) == "Can only build conjunction out of conditions."
def test_multicol(self):
col1 = C("foö")
col2 = C("bar")
cond1 = col1 < 10
cond2 = col1 > 0
cond3 = col2 != 10
conj1 = cond1 & cond2
conj2 = conj1 & cond3
assert isinstance(conj2, Conjunction)
assert conj2.conditions == (cond1, cond2, cond3)
assert str(conj2) == "(foö < 10) & (foö > 0) & (bar != 10)"
assert conj2.columns == {"foö", "bar"}
assert conj2.predicate == [
("foö", "<", 10),
("foö", ">", 0),
("bar", "!=", 10),
]
assert conj2.split_by_column() == {"foö": conj1, "bar": Conjunction([cond3])}
def test_empty_real(self):
conj = Conjunction([])
assert conj.conditions == ()
assert str(conj) == ""
assert conj.columns == set()
assert conj.predicate is None
assert conj.split_by_column() == {}
def test_empty_pseudo(self):
cond = InIntervalCondition("x")
conj = Conjunction([cond])
assert conj.conditions == (cond,)
assert str(conj) == "(x.in_interval(None, None))"
assert conj.columns == set()
assert conj.predicate is None
assert conj.split_by_column() == {}
def test_filter_df_some(self):
cond = (C("foö") == 42) & (C("bar") == 2)
df = pd.DataFrame({"foö": [13, 42, 42, 100], "bar": [1, 2, 3, 4], "z": 0.0})
df_actual = cond.filter_df(df)
df_expected = df.loc[(df["foö"] == 42) & (df["bar"] == 2)]
pdt.assert_frame_equal(df_actual, df_expected)
def test_filter_df_empty(self):
cond = Conjunction([])
df = pd.DataFrame({"foö": [13, 42, 42, 100], "bar": [1, 2, 3, 4], "z": 0.0})
df_actual = cond.filter_df(df)
pdt.assert_frame_equal(df_actual, df)
def test_filter_df_nulls(self):
cond = (C("foö") != 42.0) & (C("bar") != 2.0)
df = pd.DataFrame(
{"foö": [13, 42, np.nan, np.nan], "bar": [1, 2, 3, np.nan], "z": np.nan}
)
df_actual = cond.filter_df(df)
df_expected = pd.DataFrame({"foö": [13.0], "bar": [1.0], "z": [np.nan]})
|
pdt.assert_frame_equal(df_actual, df_expected)
|
pandas.testing.assert_frame_equal
|
# coding: utf-8
# # Visualize Networks
import pandas as pd
import igraph as ig
from timeUtils import clock, elapsed, getTimeSuffix, getDateTime, addDays, printDateTime, getFirstLastDay
from pandasUtils import castDateTime, castInt64, cutDataFrameByDate, convertToDate, isSeries, isDataFrame, getColData
from network import makeNetworkDir, distHash
#import geohash
import pygeohash as geohash
from haversine import haversine
from vertexData import vertex
from edgeData import edge
from networkCategories import categories
def getLoc(ghash):
loc = geohash.decode_exactly(ghash)[:2]
loc = [round(x, 4) for x in loc]
return loc
def getVertexViews(dn, vtxmetrics, homeMetrics, metric='HubScore'):
from numpy import tanh, amax
from pandas import Series
from seaborn import cubehelix_palette
from seaborn import color_palette, light_palette
g = dn.getNetwork()
if metric == "HubScore":
vertexData = Series(g.hub_score())
elif metric == "Centrality":
vertexData = Series(g.centrality())
elif metric == "Degree":
vertexData = Series(g.degree())
else:
raise ValueError("metric {0} was not recognized".format(metric))
qvals = vertexData.quantile([0, 0.687, 0.955, 0.997, 1])
cols = cubehelix_palette(n_colors=7, start=2.8, rot=.1)
#cols = color_palette("OrRd", 7)
#cols = cubehelix_palette(7)
vcols =
|
Series(vertexData.shape[0]*[0])
|
pandas.Series
|
import datetime
import re
import csv
import numpy as np
import pandas as pd
import sklearn
from sklearn.metrics import accuracy_score, classification_report
from sklearn.metrics import confusion_matrix
from challenge.agoda_cancellation_estimator import AgodaCancellationEstimator
from IMLearn.utils import split_train_test
# from __future__ import annotations
# from typing import NoReturn
from IMLearn.base import BaseEstimator
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
def make_condition_to_sum(cond: str, full_price: float,
night_price: float) -> float:
sum = 0
cond1 = re.split("D", cond)
days_before_checking = int(cond1[0])
if cond1[1].find("P") != -1:
percent = int(re.split("P", cond1[1])[0]) / 100
sum += full_price * percent * days_before_checking
else:
num_nights = int(re.split("N", cond1[1])[0])
sum += night_price * num_nights * days_before_checking
return sum
def f10(cancellation: str, full_price: float, night_price: float) -> (float, float):
if cancellation == "UNKNOWN":
return 0, 0
sum = 0
no_show = 0
cond = re.split("_", cancellation)
if len(cond) == 1:
sum += make_condition_to_sum(cond[0], full_price, night_price)
else:
sum += make_condition_to_sum(cond[0], full_price, night_price)
if cond[1].find("D") != -1:
sum += make_condition_to_sum(cond[1], full_price, night_price)
else:
if cond[1].find("P") != -1:
percent = int(re.split("P", cond[1])[0]) / 100
no_show += full_price * percent
else:
num_nights = int(re.split("N", cond[1])[0])
no_show += night_price * num_nights
return sum, no_show
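# Worked example (toy prices, for illustration) of the policy-code parsing above.
# Under the logic implemented here, "3D100P" contributes full_price * 100% * 3 days,
# and a trailing "_100P" part with no "D" is treated as a no-show charge.
def _example_cancellation_parsing():
    assert make_condition_to_sum("3D100P", 200.0, 50.0) == 600.0  # 200 * 1.00 * 3
    assert make_condition_to_sum("2D1N", 200.0, 50.0) == 100.0    # 50 * 1 night * 2
    assert f10("3D100P_100P", 200.0, 50.0) == (600.0, 200.0)
    assert f10("UNKNOWN", 200.0, 50.0) == (0, 0)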
def get_cancellation(features: pd.DataFrame):
sum = []
no_show = []
for index, row in features.iterrows():
a,b = f10(row.cancellation_policy_code, row.original_selling_amount, row.price_per_night)
sum.append(a)
no_show.append(b)
return sum, no_show
def load_data(filename: str, with_lables = True):
"""
Load Agoda booking cancellation dataset
Parameters
----------
filename: str
Path to the Agoda booking cancellation dataset
Returns
-------
Design matrix and response vector in either of the following formats:
1) Single dataframe with last column representing the response
2) Tuple of pandas.DataFrame and Series
3) Tuple of ndarray of shape (n_samples, n_features) and ndarray of shape (n_samples,)
"""
# TODO - replace below code with any desired preprocessing
full_data = pd.read_csv(filename).drop_duplicates()
features = full_data[["booking_datetime",
"checkin_date",
"checkout_date",
"hotel_city_code",
"hotel_star_rating",
"charge_option",
"accommadation_type_name",
"hotel_star_rating",
"customer_nationality",
"guest_is_not_the_customer",
"cancellation_policy_code",
"is_user_logged_in",
"original_payment_method",
"no_of_adults",
"no_of_children",
"original_selling_amount",
"customer_nationality",
"original_payment_type"]]
features["checkin_date"] = pd.to_datetime(features["checkin_date"])
features["checkout_date"] = pd.to_datetime(features["checkout_date"])
features["booking_datetime"] = pd.to_datetime(features["booking_datetime"])
features["duration"] = (features["checkout_date"] - features["checkin_date"]).dt.days.astype(int)
features['checkin_date_day_of_year'] = (features['checkin_date'].dt.dayofyear).astype(int)
features["booking_hour"] = (pd.DatetimeIndex(features['booking_datetime']).hour).astype(int)
features["price_per_night"] = (features["original_selling_amount"] / features["duration"])
# fixing dummies features
features = pd.get_dummies(features, prefix="hotel_star_rating_", columns=["hotel_star_rating"])
features = pd.get_dummies(features, prefix="accommadation_type_name_", columns=["accommadation_type_name"])
features = pd.get_dummies(features, prefix="charge_option_", columns=["charge_option"])
features = pd.get_dummies(features, prefix="customer_nationality_", columns=["customer_nationality"])
features = pd.get_dummies(features, prefix="no_of_adults_", columns=["no_of_adults"])
features = pd.get_dummies(features, prefix="no_of_children_", columns=["no_of_children"])
features = pd.get_dummies(features, prefix="original_payment_type_", columns=["original_payment_type"])
features = pd.get_dummies(features, prefix="original_payment_method_", columns=["original_payment_method"])
features = pd.get_dummies(features, prefix="hotel_city_code_", columns=["hotel_city_code"])
features[features["is_user_logged_in"] == "FALSE"] = 0
features[features["is_user_logged_in"] == "TRUE"] = 1
features["cancellation_sum"], features["cancellation_no_show"] = get_cancellation(features)
# removing old features
for f in ["checkout_date", "booking_datetime", "checkin_date", "cancellation_policy_code"]:
features.drop(f, axis=1, inplace=True)
labels = None
if with_lables:
# making label_for_regression
labels = full_data["cancellation_datetime"]
labels = pd.to_datetime(labels.fillna(
|
pd.Timestamp('21000101')
|
pandas.Timestamp
|
from pathlib import Path
from random import Random
import pandas as pd
from pyspark import SparkContext
from pyspark.sql import SparkSession
from index import Indexer, compress_group, PAGE_SIZE
DATA_PATH = Path(__file__).parent / 'data' / 'data.warc.gz'
random = Random(1)
def test_indexer(spark_context: SparkContext):
indexer = Indexer()
indexer.init_accumulators(spark_context)
sql_context = SparkSession.builder.getOrCreate()
data = spark_context.parallelize([f'file:{DATA_PATH}'])
processed = indexer.create_index(data, sql_context).collect()
assert len(processed) > 0
def shuffle(s):
l = list(s)
random.shuffle(l)
return ''.join(l)
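# Quick sanity sketch (illustration only): shuffle returns a permutation of the
# input characters, deterministic across runs because of the seeded Random above.
def _example_shuffle():
    assert sorted(shuffle("boring")) == sorted("boring")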
def make_test_data(num_items):
data = {
'term_hash': [37] * num_items,
'term': ['boring'] * num_items,
'uri': [f'https://{shuffle("somethingwebsiteidontknow")}.com' for _ in range(num_items)],
'title': [shuffle('Some Really Long and Boring Title About St.') for _ in range(num_items)],
'extract': [shuffle('Instructors of “Introduction to programming” courses know that '
'students are willing to blame the failures of their programs on '
'anything. Sorting routine discards half of the data? '
'“That might be a Windows virus!” Binary search always fails?')
for _ in range(num_items)],
}
data_frame = pd.DataFrame(data)
return data_frame
def test_compress_group_too_big():
num_items = 100
data_frame = make_test_data(num_items)
compressed = compress_group(data_frame)
data = compressed['data'].iloc[0]
print("Compressed", data)
assert 0 < len(data) < PAGE_SIZE
def test_compress_group_large_item():
num_items = 5
data = {
'term_hash': [37] * num_items,
'term': ['boring'] * num_items,
'uri': [f'https://{shuffle("somethingwebsiteidontknow")}.com' for _ in range(num_items)],
'title': [shuffle('Some Really Long and Boring Title About St.') for _ in range(num_items)],
'extract': [shuffle('Instructors of “Introduction to programming” courses know that '
'students are willing to blame the failures of their programs on '
'anything. Sorting routine discards half of the data? '
'“That might be a Windows virus!” Binary search always fails?'*5000)
for _ in range(num_items)],
}
data_frame =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
import pytest
from rulelist.datastructure.attribute.nominal_attribute import activation_nominal, NominalAttribute
class TestNominalAttribute(object):
def test_normal(self):
dictdata = {"column1" : np.array(["below50" if i < 50 else "above49" for i in range(100)]),
"column2" : np.ones(100)}
test_dataframe = pd.DataFrame(data=dictdata)
input_name = "column1"
input_max_operators = 1
input_minsupp = 0
expected_number_items = 2
expected_cardinality_operator = {1: 2}
output_attribute = NominalAttribute(input_name, test_dataframe[input_name], input_max_operators,input_minsupp)
actual_number_items= len(output_attribute.items)
actual_cardinality_operator = output_attribute.cardinality_operator
pd.testing.assert_series_equal(output_attribute.values, test_dataframe[input_name])
assert expected_number_items == actual_number_items
assert expected_cardinality_operator == actual_cardinality_operator
def test_onlyonevalue(self):
dictdata = {"column1" : np.array(["below100" for i in range(100)]),
"column2" : np.ones(100)}
test_dataframe = pd.DataFrame(data=dictdata)
input_name = "column1"
input_max_operators = 1
input_minsupp = 0
expected_number_items = 1
expected_cardinality_operator = {1: 1}
expected_n_cutpoints = 3
output_attribute = NominalAttribute(input_name, test_dataframe[input_name], input_max_operators,input_minsupp)
actual_number_items= len(output_attribute.items)
actual_cardinality_operator = output_attribute.cardinality_operator
pd.testing.assert_series_equal(output_attribute.values, test_dataframe[input_name])
assert expected_number_items == actual_number_items
assert expected_cardinality_operator == actual_cardinality_operator
class TestActivationNominal(object):
def test_left_interval(self):
dictdata = {"column1" : np.array(["below50" if i < 50 else "above49" for i in range(100)]),
"column2" : np.ones(100)}
test_dataframe = pd.DataFrame(data=dictdata)
input_attribute_name = "column1"
input_category = "below50"
expected_vector = pd.Series(name= "column1", data = [True if i < 50 else False for i in range(100)])
actual_vector = activation_nominal(test_dataframe,input_attribute_name,input_category)
pd.testing.assert_series_equal(actual_vector, expected_vector, check_exact=True)
def test_right_interval(self):
dictdata = {"column1": np.array(["below50" if i < 50 else "above49" for i in range(100)]),
"column2": np.ones(100)}
test_dataframe =
|
pd.DataFrame(data=dictdata)
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
import spacy
from spacy.lang.de.stop_words import STOP_WORDS
from nltk.tokenize import sent_tokenize
from itertools import groupby
import copy
import re
import sys
import textstat
# Method to create a matrix with contains only zeroes and a index starting by 0
def create_matrix_index_zeros(rows, columns):
arr = np.zeros((rows, columns))
for r in range(0, rows):
arr[r, 0] = r
return arr
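# Illustrative check (toy size): the first column carries the row index, the rest stays zero,
# e.g. create_matrix_index_zeros(3, 2) -> [[0., 0.], [1., 0.], [2., 0.]].
def _example_index_zeros():
    expected = np.array([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]])
    assert np.array_equal(create_matrix_index_zeros(3, 2), expected)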
# Method to get all authors with a given number of texts. Used in chapter 5.1 to get a corpus with 100 Texts for 25
# authors
def get_balanced_df_all_authors(par_df, par_num_text):
author_count = par_df["author"].value_counts()
author_list = []
df_balanced_text = pd.DataFrame(columns=['label_encoded', 'author', 'genres', 'release_date', 'text'])
for i in range(0, len(author_count)):
if author_count[i] >= par_num_text and not author_count.index[i] == "Gast-Rezensent":
author_list.append(author_count.index[i])
texts = [par_num_text for i in range(0, len(author_list))]
for index, row in par_df.iterrows():
if row['author'] in author_list:
if texts[author_list.index(row['author'])] != 0:
d = {'author': [row['author']], 'genres': [row['genres']],
'release_date': [row['release_date']], 'text': [row['text']]}
df_balanced_text = df_balanced_text.append(pd.DataFrame.from_dict(d), ignore_index=True)
texts[author_list.index(row['author'])] -= 1
if sum(texts) == 0:
break
# Label encoding and delete author column after
dic_author_mapping = author_encoding(df_balanced_text)
df_balanced_text['label_encoded'] = get_encoded_author_vector(df_balanced_text, dic_author_mapping)[:, 0]
df_balanced_text.drop("author", axis=1, inplace=True)
# Print author mapping in file
original_stdout = sys.stdout
with open('author_mapping.txt', 'w') as f:
sys.stdout = f
print(dic_author_mapping)
sys.stdout = original_stdout
for i in range(0, len(author_list)):
print(f"Autor {i+1}: {par_num_text - texts[i]} Texte")
return df_balanced_text
# Method to get a specific number of authors with a given number of texts. Used later on to get results for different
# combinations of authors and texts
def get_balanced_df_by_texts_authors(par_df, par_num_text, par_num_author):
author_count = par_df["author"].value_counts()
author_list = []
df_balanced_text = pd.DataFrame(columns=['label_encoded', 'author', 'genres', 'release_date', 'text'])
loop_count, loops = 0, par_num_author
while loop_count < loops:
if author_count[loop_count] >= par_num_text and not author_count.index[loop_count] == "Gast-Rezensent":
author_list.append(author_count.index[loop_count])
# Skip the author "Gast-Rezensent" and increase the number of loops by 1 so the requested number of authors is still reached
elif author_count.index[loop_count] == "Gast-Rezensent":
loops += 1
loop_count += 1
texts = [par_num_text for i in range(0, len(author_list))]
for index, row in par_df.iterrows():
if row['author'] in author_list:
if texts[author_list.index(row['author'])] != 0:
d = {'author': [row['author']], 'genres': [row['genres']],
'release_date': [row['release_date']], 'text': [row['text']]}
df_balanced_text = pd.concat([df_balanced_text, pd.DataFrame.from_dict(d)], ignore_index=True)
texts[author_list.index(row['author'])] -= 1
if sum(texts) == 0:
break
# Label encoding and delete author column after
dic_author_mapping = author_encoding(df_balanced_text)
df_balanced_text['label_encoded'] = get_encoded_author_vector(df_balanced_text, dic_author_mapping)[:, 0]
df_balanced_text.drop("author", axis=1, inplace=True)
# Print author mapping in file
original_stdout = sys.stdout
with open('author_mapping.txt', 'w') as f:
sys.stdout = f
print(dic_author_mapping)
sys.stdout = original_stdout
for i in range(0, len(author_list)):
print(f"Autor {i+1}: {par_num_text - texts[i]} Texte")
return df_balanced_text
# Feature extraction of the feature described in chapter 5.6.1
def get_bow_matrix(par_df):
nlp = spacy.load("de_core_news_sm")
d_bow = {}
d_bow_list = []
function_pos = ["ADP", "AUX", "CONJ", "CCONJ", "DET", "PART", "PRON", "SCONJ"]
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
tokens = [word for word in tokens if not word.is_punct and not word.is_space and not
word.is_digit and word.lemma_ not in STOP_WORDS and word.pos_ not in function_pos]
for word in tokens:
try:
d_bow["bow:"+word.lemma_.lower()] += 1
except KeyError:
d_bow["bow:"+word.lemma_.lower()] = 1
d_bow_list.append(copy.deepcopy(d_bow))
d_bow.clear()
return pd.DataFrame(d_bow_list)
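# Hedged usage sketch (assumption, not part of the original script): words that do not occur in a text
# show up as NaN in the dictionary-built frame and are typically filled with 0 before classification, e.g.
# X_bow = get_bow_matrix(df_corpus).fillna(0)   # df_corpus is a hypothetical corpus dataframe with a 'text' column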
# Feature extraction of the feature described in chapter 5.6.2
def get_word_n_grams(par_df, n):
nlp = spacy.load("de_core_news_sm")
d_word_ngram = {}
d_word_ngram_list = []
function_pos = ["ADP", "AUX", "CONJ", "CCONJ", "DET", "PART", "PRON", "SCONJ"]
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
tokens = [word for word in tokens if not word.is_punct and not word.is_space and not
word.is_digit and word.lemma_ not in STOP_WORDS and word.pos_ not in function_pos]
tokens = [token.lemma_.lower() for token in tokens]
for w in range(0, len(tokens)):
if w + n <= len(tokens):
try:
d_word_ngram["w" + str(n) + "g" + ":" + '|'.join(tokens[w:w + n])] += 1
except KeyError:
d_word_ngram["w" + str(n) + "g" + ":" + '|'.join(tokens[w:w + n])] = 1
d_word_ngram_list.append(copy.deepcopy(d_word_ngram))
d_word_ngram.clear()
return pd.DataFrame(d_word_ngram_list)
# Feature extraction of the feature described in chapter 5.6.3
def get_word_count(par_df):
arr_wordcount = np.zeros((len(par_df), 1))
nlp = spacy.load("de_core_news_sm")
only_words = []
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
for t in tokens:
if not t.is_punct and not t.is_space:
only_words.append(t)
arr_wordcount[index] = len(only_words)
only_words.clear()
return pd.DataFrame(data=arr_wordcount, columns=["word_count"])
# Feature extraction of the feature described in chapter 5.6.4 with some variations
# Count all word lengths individually
def get_word_length_matrix(par_df):
nlp = spacy.load("de_core_news_sm")
d_word_len = {}
d_word_len_list = []
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
tokens = [word for word in tokens if not word.is_punct and not word.is_space and not word.is_digit]
for word in tokens:
try:
d_word_len["w_len:"+str(len(word.text))] += 1
except KeyError:
d_word_len["w_len:"+str(len(word.text))] = 1
d_word_len_list.append(copy.deepcopy(d_word_len))
d_word_len.clear()
return pd.DataFrame(d_word_len_list)
# Count word lengths and set 2 intervals
def get_word_length_matrix_with_interval(par_df, border_1, border_2):
arr_wordcount_with_interval = np.zeros((len(par_df), border_1 + 2))
nlp = spacy.load("de_core_news_sm")
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
for word in tokens:
if len(word.text) <= border_1 and not word.is_punct and not word.is_space and not word.is_digit:
arr_wordcount_with_interval[index, len(word.text) - 1] += 1
elif border_1 < len(
word.text) <= border_2 and not word.is_punct and not word.is_space and not word.is_digit:
arr_wordcount_with_interval[index, -2] += 1
elif not word.is_punct and not word.is_space and not word.is_digit:
arr_wordcount_with_interval[index, -1] += 1
word_length_labels = [str(i) for i in range(1, border_1+1)]
word_length_labels.append(f"{border_1+1}-{border_2}")
word_length_labels.append(f">{border_2}")
return pd.DataFrame(data=arr_wordcount_with_interval, columns=word_length_labels)
# Count word lengths and sum all above a defined margin
def get_word_length_matrix_with_margin(par_df, par_margin):
arr_wordcount_with_interval = np.zeros((len(par_df), par_margin + 1))
nlp = spacy.load("de_core_news_sm")
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
for word in tokens:
if len(word.text) <= par_margin and not word.is_punct and not word.is_space and not word.is_digit:
arr_wordcount_with_interval[index, len(word.text) - 1] += 1
elif par_margin < len(word.text) and not word.is_punct and not word.is_space and not word.is_digit:
arr_wordcount_with_interval[index, -1] += 1
word_length_labels = [str(i) for i in range(1, par_margin+1)]
word_length_labels.append(f">{par_margin}")
return pd.DataFrame(data=arr_wordcount_with_interval, columns=word_length_labels)
# Count the average word length of the article
def get_average_word_length(par_df):
arr_avg_word_len_vector = np.zeros((len(par_df), 1))
nlp = spacy.load("de_core_news_sm")
for index, row in par_df.iterrows():
symbol_sum = 0
words = 0
tokens = nlp(row['text'])
for word in tokens:
if not word.is_punct and not word.is_space and not word.is_digit:
symbol_sum += len(word.text)
words += 1
arr_avg_word_len_vector[index, 0] = symbol_sum / words
return pd.DataFrame(data=arr_avg_word_len_vector, columns=["avg_word_length"])
# Feature extraction of the feature described in chapter 5.6.5
def get_yules_k(par_df):
d = {}
nlp = spacy.load("de_core_news_sm")
arr_yulesk = np.zeros((len(par_df), 1))
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
for t in tokens:
if not t.is_punct and not t.is_space and not t.is_digit:
w = t.lemma_.lower()
try:
d[w] += 1
except KeyError:
d[w] = 1
s1 = float(len(d))
s2 = sum([len(list(g)) * (freq ** 2) for freq, g in groupby(sorted(d.values()))])
try:
k = 10000 * (s2 - s1) / (s1 * s1)
arr_yulesk[index] = k
except ZeroDivisionError:
pass
d.clear()
return pd.DataFrame(data=arr_yulesk, columns=["yulesk"])
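# Illustrative sanity check of the statistic as implemented above (toy frequencies, not corpus data):
# for lemma frequencies {2, 1, 1} we get s1 = 3 distinct lemmas, s2 = 2**2 + 1**2 + 1**2 = 6,
# and therefore k = 10000 * (6 - 3) / (3 * 3) ≈ 3333.33.
_toy_freqs = [2, 1, 1]
_s1 = float(len(_toy_freqs))
_s2 = sum(f ** 2 for f in _toy_freqs)
assert round(10000 * (_s2 - _s1) / (_s1 * _s1), 2) == 3333.33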
# Feature extraction of the feature described in chapter 5.6.6
# Get a vector of all special characters
def get_special_char_label_vector(par_df):
nlp = spacy.load("de_core_news_sm")
special_char_label_vector = []
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
for t in tokens:
chars = ' '.join([c for c in t.text])
chars = nlp(chars)
for c in chars:
if c.is_punct and c.text not in special_char_label_vector:
special_char_label_vector.append(c.text)
return special_char_label_vector
# Get a matrix of all special character by a given vector of special chars
def get_special_char_matrix(par_df, par_special_char_label_vector):
nlp = spacy.load("de_core_news_sm")
arr_special_char = np.zeros((len(par_df), len(par_special_char_label_vector)))
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
for t in tokens:
chars = ' '.join([c for c in t.text])
chars = nlp(chars)
for c in chars:
if c.text in par_special_char_label_vector:
arr_special_char[index, par_special_char_label_vector.index(c.text)] += 1
return arr_special_char
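# Hedged usage sketch (assumption): the two helpers above are typically combined into one labelled feature frame, e.g.
# special_chars = get_special_char_label_vector(df_corpus)          # df_corpus is hypothetical
# df_special_char = pd.DataFrame(get_special_char_matrix(df_corpus, special_chars), columns=special_chars)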
# Feature extraction of the feature described in chapter 5.6.7
# Get the char-affix-n-grams by a defined n
def get_char_affix_n_grams(par_df, n):
d_prefix_list, d_suffix_list, d_space_prefix_list, d_space_suffix_list = [], [], [], []
d_prefix, d_suffix, d_space_prefix, d_space_suffix = {}, {}, {}, {}
nlp = spacy.load("de_core_news_sm")
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
for w in range(0, len(tokens)):
# Prefix
if len(tokens[w].text) >= n + 1:
try:
d_prefix["c" + str(n) + "_p: " + tokens[w].text.lower()[0:n]] += 1
except KeyError:
d_prefix["c" + str(n) + "_p: " + tokens[w].text.lower()[0:n]] = 1
# Suffix
if len(tokens[w].text) >= n + 1:
try:
d_suffix["c" + str(n) + "_s: " + tokens[w].text.lower()[-n:]] += 1
except KeyError:
d_suffix["c" + str(n) + "_s: " + tokens[w].text.lower()[-n:]] = 1
d_prefix_list.append(copy.deepcopy(d_prefix))
d_suffix_list.append(copy.deepcopy(d_suffix))
d_prefix.clear()
d_suffix.clear()
for i in range(0, len(row['text'])):
if row['text'][i] == " " and i + n <= len(row['text']) and i - n >= 0:
# Space-prefix
try:
d_space_prefix["c" + str(n) + "_sp: " + row['text'].lower()[i:n + i]] += 1
except KeyError:
d_space_prefix["c" + str(n) + "_sp: " + row['text'].lower()[i:n + i]] = 1
# Space-suffix
try:
d_space_suffix["c" + str(n) + "_ss: " + row['text'].lower()[i - n + 1:i + 1]] += 1
except KeyError:
d_space_suffix["c" + str(n) + "_ss: " + row['text'].lower()[i - n + 1:i + 1]] = 1
d_space_prefix_list.append(copy.deepcopy(d_space_prefix))
d_space_suffix_list.append(copy.deepcopy(d_space_suffix))
d_space_prefix.clear()
d_space_suffix.clear()
df_pre = pd.DataFrame(d_prefix_list)
df_su = pd.DataFrame(d_suffix_list)
df_s_pre = pd.DataFrame(d_space_prefix_list)
df_s_su = pd.DataFrame(d_space_suffix_list)
df_affix = pd.concat([df_pre, df_su, df_s_pre, df_s_su], axis=1)
return df_affix
# Get the char-word-n-grams by a defined n
def get_char_word_n_grams(par_df, n):
d_whole_word_list, d_mid_word_list, d_multi_word_list = [], [], []
d_whole_word, d_mid_word, d_multi_word = {}, {}, {}
match_list = []
nlp = spacy.load("de_core_news_sm")
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
for w in range(0, len(tokens)):
# Whole-word
if len(tokens[w].text) == n:
try:
d_whole_word["c" + str(n) + "_ww: " + tokens[w].text.lower()] += 1
except KeyError:
d_whole_word["c" + str(n) + "_ww: " + tokens[w].text.lower()] = 1
# Mid-word
if len(tokens[w].text) >= n + 2:
for i in range(1, len(tokens[w].text) - n):
try:
d_mid_word["c" + str(n) + "_miw: " + tokens[w].text.lower()[i:i + n]] += 1
except KeyError:
d_mid_word["c" + str(n) + "_miw: " + tokens[w].text.lower()[i:i + n]] = 1
d_whole_word_list.append(copy.deepcopy(d_whole_word))
d_mid_word_list.append(copy.deepcopy(d_mid_word))
d_whole_word.clear()
d_mid_word.clear()
# Multi-word
# ignore special characters
trimmed_text = re.sub(r'[\s]+', ' ', re.sub(r'[^\w ]+', '', row['text']))
match_list.clear()
for i in range(1, n - 1):
regex = r"\w{" + str(i) + r"}\s\w{" + str(n - 1 - i) + r"}"
match_list += re.findall(regex, trimmed_text.lower())
for match in match_list:
try:
d_multi_word["c" + str(n) + "_mw: " + match] += 1
except KeyError:
d_multi_word["c" + str(n) + "_mw: " + match] = 1
d_multi_word_list.append(copy.deepcopy(d_multi_word))
d_multi_word.clear()
df_ww = pd.DataFrame(d_whole_word_list)
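# Assumed completion (not part of the source): mirrors get_char_affix_n_grams and concatenates the three
# partial frames (whole-word, mid-word and multi-word n-grams) into one feature matrix.
df_miw = pd.DataFrame(d_mid_word_list)
df_mw = pd.DataFrame(d_multi_word_list)
df_char_word = pd.concat([df_ww, df_miw, df_mw], axis=1)
return df_char_word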
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 25 17:13:22 2018
@author: kitreatakataglushkoff
Kitrea's hand-written copied/adjusted version of the analyze_massredistribution.py,
which was last significantly edited Thursday July 18.
UPDATE - Oct 9, 2018 - Kitrea double-checked code, added some comments.
last updated Wed Nov 14 - to clean out bad data in the new large dataset.
"""
import pandas as pd
import numpy as np
import os
import glob
import matplotlib.pyplot as plt
import copy
import pygem_input as input
import pygemfxns_modelsetup as modelsetup
# Tips, comments, and old portions of code no longer used have been moved to bottom of file
#%% ===== REGION AND GLACIER FILEPATH OPTIONS =====
# User defines regions of interest
rgi_regionO1 = [13, 14, 15]
#rgi_regionO1 = [15]
search_binnedcsv_fn = (input.main_directory + '/../DEMs/Shean_2018_1109/aster_2000-2018_20181109_bins/*_mb_bins.csv')
#%% ===== PLOT OPTIONS =====
# Option to save figures
option_savefigs = 1
fig_fp = input.main_directory + '/../Output/figures/massredistribution/'
# Plot histogram options
option_plot_histogram = 0
histogram_parameters = ['Area', 'Zmed', 'Slope', 'PercDebris']
#histogram_parameters = ['Area', 'Zmin', 'Zmax', 'Zmed', 'Slope', 'Aspect', 'Lmax', 'PercDebris']
# Plot dhdt of each glacier options
option_plot_eachglacier = 0
# Plot glaciers above and below a given parameter threshold (*MAIN FUNCTION TO RUN)
option_plot_multipleglaciers_single_thresholds = 0
# run for specific parameter or all parameters
option_run_specific_pars = 0
# Plot glaciers above and below a given set of multiple thresholds
option_plot_multipleglaciers_multiplethresholds = 0
# Plot glacier characteristics to see if parameters are related
option_plot_compareparameters = 1
#option_plot_multipleglaciers_binned_parameter = 0 #glaciers within a characteristic's defined range
#option_plot_multipleglaciers_indiv_subdivisions = 0 #glaciers binned into 6 categories. (NOT USED)
#option_plots_threshold = 0 #scatter plots relating glacier stats
# Columns to use for mass balance and dhdt (specify mean or median)
mb_cn = 'mb_bin_med_mwea'
dhdt_cn = 'dhdt_bin_med_ma'
dhdt_max = 2.5
dhdt_min = -4
# Threshold for tossing glaciers with too much missing data
perc_area_valid_threshold = 90
# Switch to use merged data or not (0 = don't use, 1 = use merged data)
option_use_mergedata = 0
# Remove glacier options (surging, all positive dhdt, etc.)
option_remove_surge_glac = 1
option_remove_all_pos_dhdt = 1
option_remove_dhdt_acc = 1
acc_dhdt_threshold = 0.5
# Legend option (switch to show legend on multi-glacier figures or not)
option_show_legend = 0
# Transparency value (between 0 and 1: 0 = fully transparent, 1 = fully opaque)
glacier_plots_transparency = 0.3
#user-defined stored variables for ideal thresholds, for each region and parameter
Area_15_thresholds = list(range(5,40, 5))
Area_13_thresholds = list(range(5, 120, 5))
Area_13_thresholds.extend([150, 200, 250, 300, 350]) #if histogram has 2 separate ranges use .extend
Slope_15_thresholds = list(range(10,26,2))
Slope_13_thresholds = list(range(5, 40, 2))
PercDebris_13_thresholds = list(range(0,65,5))
PercDebris_15_thresholds = list(range(0, 65, 5))
Zmin_13_thresholds = list(range(2600,5800, 200))
Zmin_15_thresholds = list(range(3500, 6500, 500))
Zmed_13_thresholds = list(range(3800, 6600, 200))
Zmed_15_thresholds = list(range(4750, 7000, 500))
Aspect_13_thresholds = list(range(0, 450, 90))
Aspect_15_thresholds = list(range(0, 450, 90))
Zmax_15_thresholds = list(range(6000, 7600, 200))
Zmax_13_thresholds = list(range(4000, 7600, 200))
Lmax_15_thresholds = list(range(4000, 14000, 2000))
Lmax_13_thresholds = list(range(4400, 40000, 2000))
Lmax_13_thresholds.extend([56000, 58000, 60000])
dhdt_13_thresholds = [1]
Area_14_thresholds = list(range(5, 120, 5))
Area_14_thresholds.extend([150, 200, 250, 300, 350])
Zmin_14_thresholds = list(range(2600, 5800, 200))
Zmax_14_thresholds = list(range(5000, 7600, 200))
Zmed_14_thresholds = list(range(3800,6400, 200))
Slope_14_thresholds = list(range(10, 42, 2))
Aspect_14_thresholds = list(range(0,450,90))
Lmax_14_thresholds = list(range(4000, 45000,2000))
PercDebris_14_thresholds = list(range(0, 65,5))
# For plotting one parameter at a time:
# the user defines the parameter for the multi-glacier and histogram runs and sets the threshold list
# equal to one of the threshold lists defined above, depending on the current region and parameter.
# Keep in mind that the subplots examine glaciers >= and < the threshold.
# If you have not yet evaluated the histograms to define the threshold ranges,
# then you must define the following variable manually.
#For plotting multiple parameters in one run
#Create dictionary. key = parameter found in main_glac_rgi, value = thresholds
all_13_pars = {'Area': Area_13_thresholds, 'Zmin': Zmin_13_thresholds ,
'Zmax':Zmax_13_thresholds, 'Zmed': Zmed_13_thresholds,
'Slope': Slope_13_thresholds, 'Aspect': Aspect_13_thresholds,
'Lmax': Lmax_13_thresholds, 'PercDebris': PercDebris_13_thresholds}
all_14_pars = {'Area': Area_14_thresholds, 'Zmin': Zmin_14_thresholds ,
'Zmax':Zmax_14_thresholds, 'Zmed': Zmed_14_thresholds,
'Slope': Slope_14_thresholds, 'Aspect': Aspect_14_thresholds,
'Lmax': Lmax_14_thresholds, 'PercDebris': PercDebris_14_thresholds}
all_15_pars = {'Area': Area_15_thresholds , 'Zmin': Zmin_15_thresholds ,
'Zmax':Zmax_15_thresholds, 'Zmed': Zmed_15_thresholds,
'Slope': Slope_15_thresholds, 'Aspect': Aspect_15_thresholds,
'Lmax': Lmax_15_thresholds, 'PercDebris': PercDebris_15_thresholds}
#If only plotting one parameter in the run, define the parameter of interest
pars_dict = {'PercDebris': PercDebris_13_thresholds}
if option_run_specific_pars == 1:
region_pars = pars_dict
else:
if rgi_regionO1[0] == 13:
region_pars = all_13_pars
elif rgi_regionO1[0] == 14:
region_pars = all_14_pars
elif rgi_regionO1[0] == 15:
region_pars = all_15_pars
else:
print("Please Check Region Specification")
#Binned CSV column name conversion dictionary
# change column names so they are easier to work with (remove spaces, etc.)
sheancoldict = {'# bin_center_elev_m': 'bin_center_elev_m',
' z1_bin_count_valid': 'z1_bin_count_valid',
' z1_bin_area_valid_km2': 'z1_bin_area_valid_km2',
' z1_bin_area_perc': 'z1_bin_area_perc',
' z2_bin_count_valid': 'z2_bin_count_valid',
' z2_bin_area_valid_km2': 'z2_bin_area_valid_km2',
' z2_bin_area_perc': 'z2_bin_area_perc',
' dhdt_bin_count' : 'dhdt_bin_count',
' dhdt_bin_area_valid_km2' : 'dhdt_bin_area_valid_km2',
' dhdt_bin_area_perc' : 'dhdt_bin_area_perc',
' dhdt_bin_med_ma': 'dhdt_bin_med_ma',
' dhdt_bin_mad_ma': 'dhdt_bin_mad_ma',
' dhdt_bin_mean_ma': 'dhdt_bin_mean_ma',
' dhdt_bin_std_ma': 'dhdt_bin_std_ma',
' mb_bin_med_mwea': 'mb_bin_med_mwea',
' mb_bin_mad_mwea': 'mb_bin_mad_mwea',
' mb_bin_mean_mwea': 'mb_bin_mean_mwea',
' mb_bin_std_mwea': 'mb_bin_std_mwea',
' debris_thick_med_m': 'debris_thick_med_m',
' debris_thick_mad_m': 'debris_thick_mad_m',
' perc_debris': 'perc_debris',
' perc_pond': 'perc_pond',
' perc_clean': 'perc_clean',
' dhdt_debris_med' : 'dhdt_debris_med',
' dhdt_pond_med' : 'dhdt_pond_med',
' dhdt_clean_med' : 'dhdt_clean_med',
' vm_med' : 'vm_med',
' vm_mad' : 'vm_mad',
' H_mean' : 'H_mean',
' H_std' : 'H_std'}
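# Hedged usage sketch (assumption): each binned csv is typically loaded and immediately renamed with this
# mapping so the stripped column names can be used further below, e.g.
# binnedcsv = pd.read_csv(binnedcsv_fn).rename(columns=sheancoldict)   # binnedcsv_fn is one entry of binnedcsv_files_all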
#%% Select Files
# Find files for analysis; create list of all binnedcsv filenames (fn)
binnedcsv_files_all = glob.glob(search_binnedcsv_fn)
# Fill in dataframe of glacier names and RGI IDs, of ALL glaciers with binnedcsv, regardless of the region
df_glacnames_all = pd.DataFrame()
'''
Old script for assessing classifier models that were trained and evaluated individually on multiple patient-stay slices. (Deprecated)
'''
import os
import argparse
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import glob
import re
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--clf_performance_dir', default=None, type=str,
help='Directory where classifier performance csvs are saved')
args = parser.parse_args()
models = ['random_forest', 'logistic_regression', 'mews']
#models = ['random_forest']
# Get performance values for classifiers at tstep = 2, 4, 6, 10, 14, -1, etc.; tstep = -1 means full history
final_perf_df_list = list()
for model in models:
model_tstep_folders = glob.glob(os.path.join(args.clf_performance_dir, model, '*'))
for tstep_folder in model_tstep_folders:
#perf_csv = os.path.join(tstep_folder, 'performance_df.csv')
perf_csvs = glob.glob(os.path.join(tstep_folder, 'performance_df_random_seed*.csv'))
for perf_csv in perf_csvs:
if os.path.exists(perf_csv):
perf_df = pd.read_csv(perf_csv)