# gamedayspx / model_day.py
import datetime
import json
import os
from typing import List

import joblib
import lightgbm as lgb
import numpy as np
import pandas as pd
import pandas_datareader as pdr
import requests
import streamlit as st
import xgboost as xgb
import yfinance as yf
from bs4 import BeautifulSoup
from pandas.tseries.offsets import BDay
from sklearn import linear_model
from sklearn.metrics import roc_auc_score, precision_score, recall_score
from tqdm import tqdm


def walk_forward_validation(df, target_column, num_training_rows, num_periods):
    """Expanding-window walk-forward validation with a linear regression model.

    Returns a DataFrame of true vs. predicted values and the last fitted model.
    """
    # model = xgb.XGBRegressor(n_estimators=100, objective='reg:squarederror', random_state=42)
    model = linear_model.LinearRegression()
    overall_results = []

    # Iterate over the rows in the DataFrame, one step at a time
    for i in tqdm(range(num_training_rows, df.shape[0] - num_periods + 1), desc='LR Model'):
        # Split the data into training and test sets
        X_train = df.drop(target_column, axis=1).iloc[:i]
        y_train = df[target_column].iloc[:i]
        X_test = df.drop(target_column, axis=1).iloc[i:i + num_periods]
        y_test = df[target_column].iloc[i:i + num_periods]

        # Fit the model to the training data
        model.fit(X_train, y_train)

        # Make a prediction on the test data
        predictions = model.predict(X_test)

        # Store the true and predicted values
        result_df = pd.DataFrame({'True': y_test, 'Predicted': predictions}, index=y_test.index)
        overall_results.append(result_df)

    df_results = pd.concat(overall_results)
    # model.save_model('model_lr.bin')
    # Return the true and predicted values, and the fitted model
    return df_results, model
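

# A minimal, hypothetical usage sketch for walk_forward_validation (not part of the
# original workflow): the synthetic DataFrame, column names, and window sizes below
# are assumptions chosen only to show the expected shapes. Not called at import time.
def _example_walk_forward():
    rng = np.random.default_rng(42)
    toy = pd.DataFrame({
        'feat1': rng.normal(size=300),
        'feat2': rng.normal(size=300),
    })
    # Hypothetical next-step target derived from feat1
    toy['Target'] = toy['feat1'].shift(-1).fillna(0)
    # Train on an expanding window of at least 100 rows, predicting 1 period ahead
    results, fitted = walk_forward_validation(toy, 'Target', num_training_rows=100, num_periods=1)
    print(results.tail())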


model_cols = [
    'BigNewsDay', 'Quarter', 'Perf5Day', 'Perf5Day_n1',
    'DaysGreen', 'DaysRed', 'CurrentGap',
    'RangePct', 'RangePct_n1', 'RangePct_n2',
    'OHLC4_VIX', 'OHLC4_VIX_n1', 'OHLC4_VIX_n2',
    'VIXOpen', 'VVIXOpen',
    'OpenL1', 'OpenL2', 'OpenH1', 'OpenH2',
    'L1TouchPct', 'L2TouchPct', 'H1TouchPct', 'H2TouchPct',
    'L1BreakPct', 'L2BreakPct', 'H1BreakPct', 'H2BreakPct',
    'H1BreakTouchPct', 'H2BreakTouchPct', 'L1BreakTouchPct', 'L2BreakTouchPct',
]


def walk_forward_validation_seq(df, target_column_clf, target_column_regr, num_training_rows, num_periods):
    """Two-stage walk-forward validation: the regression output feeds a classifier."""
    # Run the regression model to get its target
    res, model1 = walk_forward_validation(df.drop(columns=[target_column_clf]).dropna(), target_column_regr, num_training_rows, num_periods)
    # joblib.dump(model1, 'model1.bin')

    # Merge the result df back onto df for feeding into the classifier
    for_merge = res[['Predicted']].copy()  # copy to avoid SettingWithCopyWarning
    for_merge.columns = ['RegrModelOut']
    for_merge['RegrModelOut'] = for_merge['RegrModelOut'] > 0
    df = df.merge(for_merge, left_index=True, right_index=True)
    df = df.drop(columns=[target_column_regr])
    df = df[model_cols + ['RegrModelOut', target_column_clf]]
    df[target_column_clf] = df[target_column_clf].astype(bool)
    df['RegrModelOut'] = df['RegrModelOut'].astype(bool)

    # Classifier for the second stage
    # model2 = xgb.XGBClassifier(n_estimators=10, random_state=42)
    model2 = lgb.LGBMClassifier(n_estimators=10, random_state=42, verbosity=-1)
    # model2 = linear_model.LogisticRegression(max_iter=1500)
    overall_results = []

    # Iterate over the rows in the DataFrame, one step at a time
    for i in tqdm(range(num_training_rows, df.shape[0] - num_periods + 1), desc='CLF Model'):
        # Split the data into training and test sets
        X_train = df.drop(target_column_clf, axis=1).iloc[:i]
        y_train = df[target_column_clf].iloc[:i]
        X_test = df.drop(target_column_clf, axis=1).iloc[i:i + num_periods]
        y_test = df[target_column_clf].iloc[i:i + num_periods]

        # Fit the model and predict the probability of the positive class
        model2.fit(X_train, y_train)
        predictions = model2.predict_proba(X_test)[:, -1]

        result_df = pd.DataFrame({'True': y_test, 'Predicted': predictions}, index=y_test.index)
        overall_results.append(result_df)

    df_results = pd.concat(overall_results)

    # Calibrate probabilities: bin past predictions and use each bin's observed hit rate
    def get_quantiles(df, col_name, q):
        # Note: pd.cut makes equal-width bins (not true quantiles, despite the name)
        return df.groupby(pd.cut(df[col_name], q))['True'].mean()

    greenprobas = []
    meanprobas = []  # bin midpoints; computed but not currently used downstream
    for i, pct in tqdm(enumerate(df_results['Predicted']), desc='Calibrating Probas'):
        # Reset defaults each iteration so a failed lookup doesn't reuse stale values
        p = None
        c = None
        try:
            df_q = get_quantiles(df_results.iloc[:i], 'Predicted', 7)
            for q in df_q.index:
                if q.left <= pct <= q.right:
                    p = df_q[q]
                    c = (q.left + q.right) / 2
        except Exception:
            pass  # e.g. too little history for binning
        greenprobas.append(p)
        meanprobas.append(c)

    df_results['CalibPredicted'] = greenprobas
    return df_results, model1, model2
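

# Hedged sketch: how the two fitted models might be persisted with the already-imported
# joblib, mirroring the commented-out joblib.dump call above. The filenames are
# hypothetical, not ones this module defines.
def _save_models(model1, model2, reg_path='model1.bin', clf_path='model2.bin'):
    joblib.dump(model1, reg_path)   # linear regression stage
    joblib.dump(model2, clf_path)   # LightGBM classifier stage
    return reg_path, clf_path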


def seq_predict_proba(df, trained_reg_model, trained_clf_model):
    """Run the fitted regression model, then feed its sign into the classifier."""
    regr_pred = trained_reg_model.predict(df)
    regr_pred = regr_pred > 0
    new_df = df.copy()
    new_df['RegrModelOut'] = regr_pred
    clf_pred_proba = trained_clf_model.predict_proba(new_df[model_cols + ['RegrModelOut']])[:, -1]
    return clf_pred_proba


def get_data():
    """Download prices and macro data, engineer features, and return the model frame."""
    # f = open('settings.json')
    # j = json.load(f)
    # API_KEY_FRED = j["API_KEY_FRED"]
    API_KEY_FRED = os.getenv('API_KEY_FRED')

    def parse_release_dates(release_id: str) -> List[str]:
        release_dates_url = f'https://api.stlouisfed.org/fred/release/dates?release_id={release_id}&realtime_start=2015-01-01&include_release_dates_with_no_data=true&api_key={API_KEY_FRED}'
        r = requests.get(release_dates_url)
        text = r.text
        soup = BeautifulSoup(text, 'xml')
        dates = []
        for release_date_tag in soup.find_all('release_date', {'release_id': release_id}):
            dates.append(release_date_tag.text)
        return dates

    def parse_release_dates_obs(series_id: str) -> List[tuple]:
        obs_url = f'https://api.stlouisfed.org/fred/series/observations?series_id={series_id}&realtime_start=2015-01-01&include_release_dates_with_no_data=true&api_key={API_KEY_FRED}'
        r = requests.get(obs_url)
        text = r.text
        soup = BeautifulSoup(text, 'xml')
        observations = []
        for observation_tag in soup.find_all('observation'):
            date = observation_tag.get('date')
            value = observation_tag.get('value')
            observations.append((date, value))
        return observations
    econ_dfs = {}
    econ_tickers = [
        'WALCL',
        'NFCI',
        'WRESBAL'
    ]
    for et in tqdm(econ_tickers, desc='getting econ tickers'):
        # p = parse_release_dates_obs(et)
        # df = pd.DataFrame(columns=['ds', et], data=p)
        df = pdr.get_data_fred(et)
        df.index = df.index.rename('ds')
        econ_dfs[et] = df
    release_ids = [
        "10",   # Consumer Price Index
        "46",   # Producer Price Index
        "50",   # Employment Situation
        "53",   # Gross Domestic Product
        "103",  # Discount Rate Meeting Minutes
        "180",  # Unemployment Insurance Weekly Claims Report
        "194",  # ADP National Employment Report
        "323",  # Trimmed Mean PCE Inflation Rate
    ]
    release_names = [
        "CPI",
        "PPI",
        "NFP",
        "GDP",
        "FOMC",
        "UNEMP",
        "ADP",
        "PCE",
    ]
    releases = {}
    for rid, n in tqdm(zip(release_ids, release_names), total=len(release_ids), desc='Getting release dates'):
        releases[rid] = {}
        releases[rid]['dates'] = parse_release_dates(rid)
        releases[rid]['name'] = n

    # Create a DF per release that flags its dates with 1.
    # Once merged on the main dataframe, days with econ events will be 1 or NaN; fill NaN with 0.
    # This column serves as the true/false indicator of whether economic data was released that day.
    for rid in tqdm(release_ids, desc='Making indicators'):
        releases[rid]['df'] = pd.DataFrame(
            index=releases[rid]['dates'],
            data={releases[rid]['name']: 1}
        )
        releases[rid]['df'].index = pd.DatetimeIndex(releases[rid]['df'].index)
    vix = yf.Ticker('^VIX')
    vvix = yf.Ticker('^VVIX')
    spx = yf.Ticker('^GSPC')
    prices_vix = vix.history(start='2018-07-01', interval='1d')
    prices_spx = spx.history(start='2018-07-01', interval='1d')
    prices_vvix = vvix.history(start='2018-07-01', interval='1d')

    # Normalize the tz-aware yfinance timestamps to plain calendar dates
    for prices in (prices_spx, prices_vix, prices_vvix):
        prices.index = pd.to_datetime([str(x).split()[0] for x in prices.index]).date

    data = prices_spx.merge(prices_vix[['Open', 'High', 'Low', 'Close']], left_index=True, right_index=True, suffixes=['', '_VIX'])
    data = data.merge(prices_vvix[['Open', 'High', 'Low', 'Close']], left_index=True, right_index=True, suffixes=['', '_VVIX'])
    data.index = pd.DatetimeIndex(data.index)
    # Features
    data['PrevClose'] = data['Close'].shift(1)
    data['Perf5Day'] = data['Close'] > data['Close'].shift(5)
    data['Perf5Day_n1'] = data['Perf5Day'].shift(1).astype(bool)
    data['GreenDay'] = (data['Close'] > data['PrevClose']) * 1
    data['RedDay'] = (data['Close'] <= data['PrevClose']) * 1
    data['VIX5Day'] = data['Close_VIX'] > data['Close_VIX'].shift(5)
    data['VIX5Day_n1'] = data['VIX5Day'].shift(1).astype(bool)
    data['VIXOpen'] = (data['Open_VIX'] > data['Close_VIX'].shift(1)).astype(bool)
    data['VVIXOpen'] = (data['Open_VVIX'] > data['Close_VVIX'].shift(1)).astype(bool)
    data['Range'] = data[['Open', 'High']].max(axis=1) - data[['Low', 'Open']].min(axis=1)
    data['RangePct'] = data['Range'] / data['Close']
    data['VIXLevel'] = pd.qcut(data['Close_VIX'], 4)
    data['OHLC4_VIX'] = data[['Open_VIX', 'High_VIX', 'Low_VIX', 'Close_VIX']].mean(axis=1)
    data['OHLC4'] = data[['Open', 'High', 'Low', 'Close']].mean(axis=1)
    data['OHLC4_Trend'] = data['OHLC4'] > data['OHLC4'].shift(1)
    data['OHLC4_Trend_n1'] = data['OHLC4_Trend'].shift(1).astype(float)
    data['OHLC4_Trend_n2'] = data['OHLC4_Trend'].shift(2).astype(float)
    data['RangePct_n1'] = data['RangePct'].shift(1)
    data['RangePct_n2'] = data['RangePct'].shift(2)
    data['OHLC4_VIX_n1'] = data['OHLC4_VIX'].shift(1)
    data['OHLC4_VIX_n2'] = data['OHLC4_VIX'].shift(2)
    # Today's row gets the next session's opening gap (known at prediction time).
    # DayOfWeek and Quarter are derived below, just before the econ merge.
    data['CurrentGap'] = ((data['Open'] - data['PrevClose']) / data['PrevClose']).shift(-1)
    # Rolling intraday extension stats (as % of prior close) used to project H/L levels
    data['up'] = 100 * (data['High'].shift(1) - data['Open'].shift(1)) / data['Close'].shift(1)
    data['upSD'] = data['up'].rolling(30).std(ddof=0)
    data['aveUp'] = data['up'].rolling(30).mean()
    data['H1'] = data['Open'] + (data['aveUp'] / 100) * data['Open']
    data['H2'] = data['Open'] + ((data['aveUp'] + data['upSD']) / 100) * data['Open']
    data['down'] = 100 * (data['Open'].shift(1) - data['Low'].shift(1)) / data['Close'].shift(1)
    data['downSD'] = data['down'].rolling(30).std(ddof=0)
    data['aveDown'] = data['down'].rolling(30).mean()
    data['L1'] = data['Open'] - (data['aveDown'] / 100) * data['Open']
    data['L2'] = data['Open'] - ((data['aveDown'] + data['downSD']) / 100) * data['Open']  # was upSD; presumed typo, L2 mirrors H2
    data['L1Touch'] = data['Low'] < data['L1']
    data['L2Touch'] = data['Low'] < data['L2']
    data['H1Touch'] = data['High'] > data['H1']
    data['H2Touch'] = data['High'] > data['H2']
    data['L1Break'] = data['Close'] < data['L1']
    data['L2Break'] = data['Close'] < data['L2']
    data['H1Break'] = data['Close'] > data['H1']
    data['H2Break'] = data['Close'] > data['H2']
    data['OpenL1'] = data['Open'] / data['L1']
    data['OpenL2'] = data['Open'] / data['L2']
    data['OpenH1'] = data['Open'] / data['H1']
    data['OpenH2'] = data['Open'] / data['H2']
    level_cols = [
        'L1Touch', 'L2Touch', 'H1Touch', 'H2Touch',
        'L1Break', 'L2Break', 'H1Break', 'H2Break'
    ]
    for col in level_cols:
        data[col + 'Pct'] = data[col].rolling(100).mean()

    # Conditional break rates: of the sessions that touched a level, how many closed through it
    data['H1BreakTouchPct'] = data['H1Break'].rolling(100).sum() / data['H1Touch'].rolling(100).sum()
    data['H2BreakTouchPct'] = data['H2Break'].rolling(100).sum() / data['H2Touch'].rolling(100).sum()
    data['L1BreakTouchPct'] = data['L1Break'].rolling(100).sum() / data['L1Touch'].rolling(100).sum()
    data['L2BreakTouchPct'] = data['L2Break'].rolling(100).sum() / data['L2Touch'].rolling(100).sum()
    # Regression target: the next day's OHLC4 return relative to today's close
    data['Target'] = (data['OHLC4'] / data['PrevClose']) - 1
    data['Target'] = data['Target'].shift(-1)
    # data['Target'] = data['RangePct'].shift(-1)

    # Classification target: whether tomorrow will close above today's close
    data['Target_clf'] = data['Close'] > data['PrevClose']
    data['Target_clf'] = data['Target_clf'].shift(-1)

    # Calendar features
    data['DayOfWeek'] = pd.to_datetime(data.index)
    data['Quarter'] = data['DayOfWeek'].dt.quarter
    data['DayOfWeek'] = data['DayOfWeek'].dt.weekday
    for rid in tqdm(release_ids, desc='Merging econ data'):
        # Get the name of the release
        n = releases[rid]['name']
        # Merge the corresponding DF of the release
        data = data.merge(releases[rid]['df'], how='left', left_index=True, right_index=True)
        # Create a column that shifts the value in the merged column up by 1
        data[f'{n}_shift'] = data[n].shift(-1)
        # Fill the rest with zeroes
        data[n] = data[n].fillna(0)
        data[f'{n}_shift'] = data[f'{n}_shift'].fillna(0)

    data['BigNewsDay'] = data[[x for x in data.columns if '_shift' in x]].max(axis=1)

    def cumul_sum(col):
        # Running count of consecutive 1s, reset to 0 whenever a 0 appears
        nums = []
        s = 0
        for x in col:
            if x == 1:
                s += 1
            elif x == 0:
                s = 0
            nums.append(s)
        return nums

    consec_green = cumul_sum(data['GreenDay'].values)
    consec_red = cumul_sum(data['RedDay'].values)
    data['DaysGreen'] = consec_green
    data['DaysRed'] = consec_red
    final_row = data.index[-2]  # last completed session (has a realized target)
    exp_row = data.index[-1]    # current/most recent session; kept in `data` but not returned separately

    # The model frame: the same feature set as model_cols, plus both targets
    df_final = data.loc[:final_row, model_cols + ['Target', 'Target_clf']]
    df_final = df_final.dropna(subset=['Target', 'Target_clf', 'Perf5Day_n1'])
    return data, df_final, final_row
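

# Hedged end-to-end sketch (an assumed workflow, not part of the original file): pull
# the data, run the two-stage walk-forward validation, then score the most recent
# completed row with seq_predict_proba. Window sizes here are illustrative assumptions.
if __name__ == '__main__':
    data, df_final, final_row = get_data()
    res, model1, model2 = walk_forward_validation_seq(
        df_final.dropna(),
        target_column_clf='Target_clf',
        target_column_regr='Target',
        num_training_rows=100,
        num_periods=1,
    )
    print(res[['True', 'Predicted', 'CalibPredicted']].tail())

    # Score the last available feature row (the regression stage sees model_cols only)
    latest = df_final.loc[[final_row], model_cols]
    proba = seq_predict_proba(latest, model1, model2)
    print(f'P(green day) for {final_row}: {proba[0]:.3f}')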