import streamlit as st
import pandas as pd
import pandas_datareader as pdr
import numpy as np
import yfinance as yf
import json
import requests
from bs4 import BeautifulSoup
from typing import List
import xgboost as xgb
from tqdm import tqdm
from sklearn import linear_model
import joblib
import os
from sklearn.metrics import roc_auc_score, precision_score, recall_score
import datetime
from pandas.tseries.offsets import BDay
from datasets import load_dataset
def walk_forward_validation(df, target_column, num_training_rows, num_periods):
    # Use a linear regression model (an XGBRegressor alternative is left commented out)
    # model = xgb.XGBRegressor(n_estimators=100, objective='reg:squarederror', random_state=42)
    model = linear_model.LinearRegression()
    overall_results = []
    # Iterate over the rows in the DataFrame, one step at a time
    for i in tqdm(range(num_training_rows, df.shape[0] - num_periods + 1), desc='LR Model'):
        # Split the data into training and test sets
        X_train = df.drop(target_column, axis=1).iloc[:i]
        y_train = df[target_column].iloc[:i]
        X_test = df.drop(target_column, axis=1).iloc[i:i+num_periods]
        y_test = df[target_column].iloc[i:i+num_periods]
        # Fit the model to the training data
        model.fit(X_train, y_train)
        # Make a prediction on the test data
        predictions = model.predict(X_test)
        # Store the true and predicted values
        result_df = pd.DataFrame({'True': y_test, 'Predicted': predictions}, index=y_test.index)
        overall_results.append(result_df)
    df_results = pd.concat(overall_results)
    # model.save_model('model_lr.bin')
    # Return the true and predicted values, and the fitted model
    return df_results, model
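# Example usage (a minimal sketch; `feats` and its columns are hypothetical --
# any numeric feature frame with the named target column works):
#
#   feats = pd.DataFrame(
#       {'x1': np.random.randn(100), 'x2': np.random.randn(100),
#        'Target': np.random.randn(100)},
#       index=pd.date_range('2023-01-01', periods=100, freq='B'))
#   res, lr_model = walk_forward_validation(feats, 'Target',
#                                           num_training_rows=50, num_periods=1)
#   # res holds one 'True'/'Predicted' pair per walked-forward step.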
def walk_forward_validation_seq(df, target_column_clf, target_column_regr, num_training_rows, num_periods):
    # First, run the regression model to get its predictions as a feature
    res, model1 = walk_forward_validation(df.drop(columns=[target_column_clf]).dropna(), target_column_regr, num_training_rows, num_periods)
    # joblib.dump(model1, 'model1.bin')
    # Merge the regression output back onto df for feeding into the classifier
    for_merge = res[['Predicted']].copy()
    for_merge.columns = ['RegrModelOut']
    for_merge['RegrModelOut'] = for_merge['RegrModelOut'] > 0
    df = df.merge(for_merge, left_index=True, right_index=True)
    df = df.drop(columns=[target_column_regr])
    df = df[[
        'CurrentGap', 'RegrModelOut',
        'CurrentHigh30toClose',
        'CurrentLow30toClose',
        'CurrentClose30toClose',
        'CurrentRange30',
        'GapFill30', target_column_clf
    ]]
    df[target_column_clf] = df[target_column_clf].astype(bool)
    df['RegrModelOut'] = df['RegrModelOut'].astype(bool)
    # Create an XGBClassifier model (a LogisticRegression alternative is left commented out)
    model2 = xgb.XGBClassifier(n_estimators=10, random_state=42)
    # model2 = linear_model.LogisticRegression(max_iter=1500)
    overall_results = []
    # Iterate over the rows in the DataFrame, one step at a time
    for i in tqdm(range(num_training_rows, df.shape[0] - num_periods + 1), desc='CLF Model'):
        # Split the data into training and test sets
        X_train = df.drop(target_column_clf, axis=1).iloc[:i]
        y_train = df[target_column_clf].iloc[:i]
        X_test = df.drop(target_column_clf, axis=1).iloc[i:i+num_periods]
        y_test = df[target_column_clf].iloc[i:i+num_periods]
        # Fit the model to the training data
        model2.fit(X_train, y_train)
        # Predict the probability of the positive class
        predictions = model2.predict_proba(X_test)[:, -1]
        # Store the true and predicted values
        result_df = pd.DataFrame({'True': y_test, 'Predicted': predictions}, index=y_test.index)
        overall_results.append(result_df)
    df_results = pd.concat(overall_results)
    # model1.save_model('model_ensemble.bin')
    # joblib.dump(model2, 'model2.bin')
    # Return the true and predicted values, and both fitted models
    return df_results, model1, model2
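# Example usage (a minimal sketch; assumes `df` already carries the intraday
# feature columns listed above plus both targets, as produced by get_data()):
#
#   res, model1, model2 = walk_forward_validation_seq(
#       df, target_column_clf='Target_clf', target_column_regr='Target',
#       num_training_rows=100, num_periods=1)
#   # res['Predicted'] is the classifier's probability that Target_clf is True.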
def seq_predict_proba(df, trained_reg_model, trained_clf_model):
    # Stage 1: the regression output, binarized, becomes a feature for the classifier
    regr_pred = trained_reg_model.predict(df)
    regr_pred = regr_pred > 0
    new_df = df.copy()
    new_df['RegrModelOut'] = regr_pred
    # Stage 2: classifier probability of the positive class
    clf_pred_proba = trained_clf_model.predict_proba(new_df[['CurrentGap', 'RegrModelOut',
                                                             'CurrentHigh30toClose',
                                                             'CurrentLow30toClose',
                                                             'CurrentClose30toClose',
                                                             'CurrentRange30',
                                                             'GapFill30']])[:, -1]
    return clf_pred_proba
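# Scoring new rows with the fitted pair (a minimal sketch; `latest_features` is
# a hypothetical frame holding the same regression feature columns the models
# were trained on):
#
#   probs = seq_predict_proba(latest_features, model1, model2)
#   # probs[i] is the modeled probability that row i's Target_clf is True.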
def get_data():
    # f = open('settings.json')
    # j = json.load(f)
    # API_KEY_FRED = j["API_KEY_FRED"]
    API_KEY_FRED = os.getenv('API_KEY_FRED')

    def parse_release_dates(release_id: str) -> List[str]:
        release_dates_url = f'https://api.stlouisfed.org/fred/release/dates?release_id={release_id}&realtime_start=2015-01-01&include_release_dates_with_no_data=true&api_key={API_KEY_FRED}'
        r = requests.get(release_dates_url)
        text = r.text
        soup = BeautifulSoup(text, 'xml')
        dates = []
        for release_date_tag in soup.find_all('release_date', {'release_id': release_id}):
            dates.append(release_date_tag.text)
        return dates

    def parse_release_dates_obs(series_id: str) -> List[tuple]:
        obs_url = f'https://api.stlouisfed.org/fred/series/observations?series_id={series_id}&realtime_start=2015-01-01&include_release_dates_with_no_data=true&api_key={API_KEY_FRED}'
        r = requests.get(obs_url)
        text = r.text
        soup = BeautifulSoup(text, 'xml')
        observations = []
        for observation_tag in soup.find_all('observation'):
            date = observation_tag.get('date')
            value = observation_tag.get('value')
            observations.append((date, value))
        return observations
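    # For reference, the release/dates endpoint returns XML shaped roughly like
    # the below (per the public FRED API docs -- verify against the live API):
    #
    #   <release_dates>
    #     <release_date release_id="10">2015-01-16</release_date>
    #     ...
    #   </release_dates>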
    econ_dfs = {}
    econ_tickers = [
        'WALCL',
        'NFCI',
        'WRESBAL'
    ]
    for et in tqdm(econ_tickers, desc='getting econ tickers'):
        # p = parse_release_dates_obs(et)
        # df = pd.DataFrame(columns=['ds', et], data=p)
        df = pdr.get_data_fred(et)
        df.index = df.index.rename('ds')
        # df.index = pd.to_datetime(df.index.rename('ds')).dt.tz_localize(None)
        # df['ds'] = pd.to_datetime(df['ds']).dt.tz_localize(None)
        econ_dfs[et] = df
    # walcl = pd.DataFrame(columns=['ds', 'WALCL'], data=p)
    # walcl['ds'] = pd.to_datetime(walcl['ds']).dt.tz_localize(None)
    # nfci = pd.DataFrame(columns=['ds', 'NFCI'], data=p2)
    # nfci['ds'] = pd.to_datetime(nfci['ds']).dt.tz_localize(None)
    release_ids = [
        "10",   # Consumer Price Index
        "46",   # Producer Price Index
        "50",   # Employment Situation
        "53",   # Gross Domestic Product
        "103",  # Discount Rate Meeting Minutes
        "180",  # Unemployment Insurance Weekly Claims Report
        "194",  # ADP National Employment Report
        "323"   # Trimmed Mean PCE Inflation Rate
    ]
    release_names = [
        "CPI",
        "PPI",
        "NFP",
        "GDP",
        "FOMC",
        "UNEMP",
        "ADP",
        "PCE"
    ]
    releases = {}
    for rid, n in tqdm(zip(release_ids, release_names), total=len(release_ids), desc='Getting release dates'):
        releases[rid] = {}
        releases[rid]['dates'] = parse_release_dates(rid)
        releases[rid]['name'] = n
    # Create a DF per release, indexed by its dates, with the release name as a column of 1s.
    # Once merged onto the main dataframe, days with an econ event will be 1 and all others NaN (filled with 0 later).
    # This column serves as the true/false indicator of whether economic data was released that day.
    for rid in tqdm(release_ids, desc='Making indicators'):
        releases[rid]['df'] = pd.DataFrame(
            index=releases[rid]['dates'],
            data={
                releases[rid]['name']: 1
            })
        releases[rid]['df'].index = pd.DatetimeIndex(releases[rid]['df'].index)
        # releases[rid]['df']['ds'] = pd.to_datetime(releases[rid]['df']['ds']).dt.tz_localize(None)
        # releases[rid]['df'] = releases[rid]['df'].set_index('ds')
    vix = yf.Ticker('^VIX')
    spx = yf.Ticker('^GSPC')
    # Pull in the stored 30-minute SPX history
    data = load_dataset("boomsss/SPX_full_30min", split='train')
    rows = [d['text'] for d in data]
    rows = [x.split(',') for x in rows]
    fr = pd.DataFrame(columns=[
        'Datetime', 'Open', 'High', 'Low', 'Close'
    ], data=rows)
    fr['Datetime'] = pd.to_datetime(fr['Datetime'])
    fr['Datetime'] = fr['Datetime'].dt.tz_localize('America/New_York')
    fr = fr.set_index('Datetime')
    fr['Open'] = pd.to_numeric(fr['Open'])
    fr['High'] = pd.to_numeric(fr['High'])
    fr['Low'] = pd.to_numeric(fr['Low'])
    fr['Close'] = pd.to_numeric(fr['Close'])
    # Get the incremental start date (the day after the last stored bar)
    last_date = fr.index.date[-1]
    last_date = last_date + datetime.timedelta(days=1)
    # Get incremental data
    spx1 = yf.Ticker('^GSPC')
    yfp = spx1.history(start=last_date, interval='30m')
    # Concat stored and incremental data
    df_30m = pd.concat([fr, yfp])
    # Keep the first two 30-minute bars of each day (the first trading hour)
    df_30m = df_30m.reset_index()
    df_30m['Datetime'] = df_30m['Datetime'].dt.date
    df_30m = df_30m.groupby('Datetime').head(2)
    df_30m = df_30m.set_index('Datetime', drop=True)
    df_30m = df_30m[['Open', 'High', 'Low', 'Close']]
    # Aggregate the two bars into a single first-hour bar per day
    opens_1h = df_30m.groupby('Datetime')['Open'].head(1)
    highs_1h = df_30m.groupby('Datetime')['High'].max()
    lows_1h = df_30m.groupby('Datetime')['Low'].min()
    closes_1h = df_30m.groupby('Datetime')['Close'].tail(1)
    df_1h = pd.DataFrame(index=df_30m.index.unique())
    df_1h['Open'] = opens_1h
    df_1h['High'] = highs_1h
    df_1h['Low'] = lows_1h
    df_1h['Close'] = closes_1h
    df_1h.columns = ['Open30', 'High30', 'Low30', 'Close30']
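    # An equivalent first-hour aggregation could be done in one named-aggregation
    # call (a sketch of an alternative, not what runs above):
    #
    #   df_1h = df_30m.groupby('Datetime').agg(
    #       Open30=('Open', 'first'), High30=('High', 'max'),
    #       Low30=('Low', 'min'), Close30=('Close', 'last'))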
    prices_vix = vix.history(start='2018-07-01', interval='1d')
    prices_spx = spx.history(start='2018-07-01', interval='1d')
    # Normalize the tz-aware daily timestamps to naive dates
    prices_spx['index'] = [str(x).split()[0] for x in prices_spx.index]
    prices_spx['index'] = pd.to_datetime(prices_spx['index']).dt.date
    prices_spx.index = prices_spx['index']
    prices_spx = prices_spx.drop(columns='index')
    prices_spx.index = pd.DatetimeIndex(prices_spx.index)
    prices_vix['index'] = [str(x).split()[0] for x in prices_vix.index]
    prices_vix['index'] = pd.to_datetime(prices_vix['index']).dt.date
    prices_vix.index = prices_vix['index']
    prices_vix = prices_vix.drop(columns='index')
    prices_vix.index = pd.DatetimeIndex(prices_vix.index)
    # Join daily SPX, first-hour SPX, and daily VIX on date
    data = prices_spx.merge(df_1h, left_index=True, right_index=True)
    data = data.merge(prices_vix[['Open', 'High', 'Low', 'Close']], left_index=True, right_index=True, suffixes=['', '_VIX'])
    # Features
    data['PrevClose'] = data['Close'].shift(1)
    data['Perf5Day'] = data['Close'] > data['Close'].shift(5)
    data['Perf5Day_n1'] = data['Perf5Day'].shift(1)
    data['Perf5Day_n1'] = data['Perf5Day_n1'].astype(bool)
    data['GreenDay'] = (data['Close'] > data['PrevClose']) * 1
    data['RedDay'] = (data['Close'] <= data['PrevClose']) * 1
    data['VIX5Day'] = data['Close_VIX'] > data['Close_VIX'].shift(5)
    data['VIX5Day_n1'] = data['VIX5Day'].shift(1).astype(bool)
    data['Range'] = data[['Open', 'High']].max(axis=1) - data[['Low', 'Open']].min(axis=1)  # Current day range in points
    data['RangePct'] = data['Range'] / data['Close']
    data['VIXLevel'] = pd.qcut(data['Close_VIX'], 4)
    data['OHLC4_VIX'] = data[['Open_VIX', 'High_VIX', 'Low_VIX', 'Close_VIX']].mean(axis=1)
    data['OHLC4'] = data[['Open', 'High', 'Low', 'Close']].mean(axis=1)
    data['OHLC4_Trend'] = data['OHLC4'] > data['OHLC4'].shift(1)
    data['OHLC4_Trend_n1'] = data['OHLC4_Trend'].shift(1)
    data['OHLC4_Trend_n1'] = data['OHLC4_Trend_n1'].astype(float)
    data['OHLC4_Trend_n2'] = data['OHLC4_Trend'].shift(2)
    data['OHLC4_Trend_n2'] = data['OHLC4_Trend_n2'].astype(float)
    data['RangePct_n1'] = data['RangePct'].shift(1)
    data['RangePct_n2'] = data['RangePct'].shift(2)
    data['OHLC4_VIX_n1'] = data['OHLC4_VIX'].shift(1)
    data['OHLC4_VIX_n2'] = data['OHLC4_VIX'].shift(2)
    # Next day's opening gap, aligned to today's row
    data['CurrentGap'] = (data['Open'] - data['PrevClose']) / data['PrevClose']
    data['CurrentGap'] = data['CurrentGap'].shift(-1)
    data['DayOfWeek'] = pd.to_datetime(data.index)
    data['DayOfWeek'] = data['DayOfWeek'].dt.day  # (overwritten with the weekday further below)
    # Intraday features: next day's first-hour bar, aligned to today's row
    data['CurrentHigh30'] = data['High30'].shift(-1)
    data['CurrentLow30'] = data['Low30'].shift(-1)
    data['CurrentClose30'] = data['Close30'].shift(-1)
    # Next day's first-hour levels relative to today's close
    data['CurrentHigh30toClose'] = (data['CurrentHigh30'] / data['Close']) - 1
    data['CurrentLow30toClose'] = (data['CurrentLow30'] / data['Close']) - 1
    data['CurrentClose30toClose'] = (data['CurrentClose30'] / data['Close']) - 1
    data['CurrentRange30'] = (data['CurrentHigh30'] - data['CurrentLow30']) / data['Close']
    # Whether the next day's first hour trades back to today's close (fills the gap)
    data['GapFill30'] = [low <= prev_close if gap > 0 else high >= prev_close for high, low, prev_close, gap in zip(data['CurrentHigh30'], data['CurrentLow30'], data['Close'], data['CurrentGap'])]
    # Regression target -- the next day's OHLC4 return relative to today's close
    data['Target'] = (data['OHLC4'] / data['PrevClose']) - 1
    data['Target'] = data['Target'].shift(-1)
    # data['Target'] = data['RangePct'].shift(-1)
    # Classification target -- whether tomorrow closes above today's close
    data['Target_clf'] = data['Close'] > data['PrevClose']
    data['Target_clf'] = data['Target_clf'].shift(-1)
    data['DayOfWeek'] = pd.to_datetime(data.index)
    data['Quarter'] = data['DayOfWeek'].dt.quarter
    data['DayOfWeek'] = data['DayOfWeek'].dt.weekday
    for rid in tqdm(release_ids, desc='Merging econ data'):
        # Get the name of the release
        n = releases[rid]['name']
        # Merge the corresponding indicator DF of the release
        data = data.merge(releases[rid]['df'], how='left', left_index=True, right_index=True)
        # Create a column that shifts the indicator up by 1, flagging the day before a release
        data[f'{n}_shift'] = data[n].shift(-1)
        # Fill the rest with zeroes
        data[n] = data[n].fillna(0)
        data[f'{n}_shift'] = data[f'{n}_shift'].fillna(0)
    data['BigNewsDay'] = data[[x for x in data.columns if '_shift' in x]].max(axis=1)

    def cumul_sum(col):
        # Count consecutive 1s, resetting on 0, e.g. [1, 1, 0, 1] -> [1, 2, 0, 1]
        nums = []
        s = 0
        for x in col:
            if x == 1:
                s += 1
            elif x == 0:
                s = 0
            nums.append(s)
        return nums

    consec_green = cumul_sum(data['GreenDay'].values)
    consec_red = cumul_sum(data['RedDay'].values)
    data['DaysGreen'] = consec_green
    data['DaysRed'] = consec_red
    # The last fully labeled row; the final row's targets are still unknown
    final_row = data.index[-2]
    exp_row = data.index[-1]  # the most recent, still-unlabeled row
    df_final = data.loc[:final_row,
        [
            'BigNewsDay',
            'Quarter',
            'Perf5Day',
            'Perf5Day_n1',
            'DaysGreen',
            'DaysRed',
            'CurrentHigh30toClose',
            'CurrentLow30toClose',
            'CurrentClose30toClose',
            'CurrentRange30',
            'GapFill30',
            # 'OHLC4_Trend',
            # 'OHLC4_Trend_n1',
            # 'OHLC4_Trend_n2',
            # 'VIX5Day',
            # 'VIX5Day_n1',
            'CurrentGap',
            'RangePct',
            'RangePct_n1',
            'RangePct_n2',
            'OHLC4_VIX',
            'OHLC4_VIX_n1',
            'OHLC4_VIX_n2',
            'Target',
            'Target_clf'
        ]]
    df_final = df_final.dropna(subset=['Target', 'Target_clf', 'Perf5Day_n1'])
    return data, df_final, final_row
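# End-to-end usage (a minimal sketch; the window sizes are illustrative, not
# tuned values from this app):
#
#   data, df_final, final_row = get_data()
#   res, model1, model2 = walk_forward_validation_seq(
#       df_final, 'Target_clf', 'Target', num_training_rows=100, num_periods=1)
#   # probs = seq_predict_proba(<one-row frame of regression features>, model1, model2)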