James McCool
Refactor data loading logic in Streamlit app by replacing expander with a container for improved layout. Simplify data reset functionality and ensure session state is cleared effectively.
c11ed09
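
# MLB stack-analysis page: pulls player range-of-outcomes projections from
# MongoDB, enumerates hitter stacks of a chosen size for each selected team,
# scores them by summed projection, salary, and ownership, and surfaces the
# ownership-efficient options with a CSV export.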
import numpy as np
import pandas as pd
import streamlit as st
from itertools import combinations
import pymongo

st.set_page_config(layout="wide")

@st.cache_resource
def init_conn():
    # Cached so the Mongo client is built once per session instead of on
    # every rerun (assumed decorator; the reset button below only clears
    # st.cache_data, leaving the connection intact).
    uri = st.secrets['mongo_uri']
    client = pymongo.MongoClient(uri, retryWrites=True, serverSelectionTimeoutMS=500000)
    db = client["MLB_Database"]
    return db

db = init_conn()
game_format = {'Win Percentage': '{:.2%}', 'First Inning Lead Percentage': '{:.2%}',
               'Fifth Inning Lead Percentage': '{:.2%}', '8+ runs': '{:.2%}', 'DK LevX': '{:.2%}', 'FD LevX': '{:.2%}'}

team_roo_format = {'Top Score%': '{:.2%}', '0 Runs': '{:.2%}', '1 Run': '{:.2%}', '2 Runs': '{:.2%}', '3 Runs': '{:.2%}', '4 Runs': '{:.2%}',
                   '5 Runs': '{:.2%}', '6 Runs': '{:.2%}', '7 Runs': '{:.2%}', '8 Runs': '{:.2%}', '9 Runs': '{:.2%}', '10 Runs': '{:.2%}'}

# Team acronym fix-ups (wrong -> right); defined here but not referenced on
# this page.
wrong_acro = ['WSH', 'AZ', 'CHW']
right_acro = ['WAS', 'ARI', 'CWS']
st.markdown("""
    <style>
    /* Tab styling */
    .stTabs [data-baseweb="tab-list"] {
        gap: 8px;
        padding: 4px;
    }

    .stTabs [data-baseweb="tab"] {
        height: 50px;
        white-space: pre-wrap;
        background-color: #FFD700;
        color: white;
        border-radius: 10px;
        gap: 1px;
        padding: 10px 20px;
        font-weight: bold;
        transition: all 0.3s ease;
    }

    .stTabs [aria-selected="true"] {
        background-color: #DAA520;
        color: white;
    }

    .stTabs [data-baseweb="tab"]:hover {
        background-color: #DAA520;
        cursor: pointer;
    }
    </style>""", unsafe_allow_html=True)
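
# Data loading helpers. Projections live in the Player_Range_Of_Outcomes
# collection; each document is a player/site/slate row with salary,
# floor/median/ceiling, and projected ownership.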
@st.cache_data
def init_stat_load():
    # Pull the full projections table once and cache it (assumed decorator;
    # the Load/Reset button calls st.cache_data.clear() to force a re-pull,
    # which is a no-op unless this loader is actually cached).
    collection = db["Player_Range_Of_Outcomes"]
    cursor = collection.find()
    raw_display = pd.DataFrame(list(cursor))
    raw_display = raw_display[['Player', 'Position', 'Team', 'Salary', 'Floor', 'Median', 'Ceiling', 'Own%', 'Site', 'Slate']]
    raw_display = raw_display.rename(columns={'Own%': 'Own'})
    initial_concat = raw_display.sort_values(by='Own', ascending=False)
    return initial_concat

def convert_df_to_csv(df):
    return df.to_csv().encode('utf-8')
proj_raw = init_stat_load()

col1, col2 = st.columns([1, 5])

with col1:
    with st.container():
        if st.button("Load/Reset Data", key='reset1'):
            st.cache_data.clear()
            proj_raw = init_stat_load()
            # Snapshot the keys first; deleting from st.session_state while
            # iterating over it directly raises a RuntimeError.
            for key in list(st.session_state.keys()):
                del st.session_state[key]
        site_var1 = st.radio("What site are you working with?", ('Draftkings', 'Fanduel'), key='site_var1')
        slate_var1 = st.radio("What slate are you working with?", ('Main Slate', 'Secondary Slate'), key='slate_var1')

        # Filter projections to the selected site and slate, highest
        # ownership first.
        raw_baselines = proj_raw[proj_raw['Site'] == site_var1]
        slate_map = {'Main Slate': 'main_slate', 'Secondary Slate': 'secondary_slate'}
        raw_baselines = raw_baselines[raw_baselines['Slate'] == slate_map[slate_var1]]
        raw_baselines = raw_baselines.sort_values(by='Own', ascending=False)
        split_var2 = st.radio("Would you like to run stack analysis for the full slate or individual teams?", ('Full Slate Run', 'Specific Teams'), key='split_var2')
        if split_var2 == 'Specific Teams':
            team_var2 = st.multiselect('Which teams would you like to include in the analysis?', options=raw_baselines['Team'].unique(), key='team_var2')
        elif split_var2 == 'Full Slate Run':
            team_var2 = raw_baselines.Team.unique().tolist()

        pos_split2 = st.radio("Are you viewing all positions, specific groups, or specific positions?", ('All Positions', 'Specific Positions'), key='pos_split2')
        if pos_split2 == 'Specific Positions':
            pos_var2 = st.multiselect('What Positions would you like to view?', options=['SP', 'P', 'C', '1B', '2B', '3B', 'SS', 'OF'])
        elif pos_split2 == 'All Positions':
            pos_var2 = 'All'

        if site_var1 == 'Draftkings':
            max_sal2 = st.number_input('Max Salary', min_value=5000, max_value=50000, value=35000, step=100, key='max_sal2')
        elif site_var1 == 'Fanduel':
            max_sal2 = st.number_input('Max Salary', min_value=5000, max_value=35000, value=25000, step=100, key='max_sal2')

        size_var2 = st.selectbox('What size of stacks are you analyzing?', options=['3-man', '4-man', '5-man'])
        # '3-man' / '4-man' / '5-man' -> 3 / 4 / 5
        stack_size = int(size_var2.split('-')[0])

        # Per-player lookups used to score each stack combination below.
        team_dict = dict(zip(raw_baselines.Player, raw_baselines.Team))
        proj_dict = dict(zip(raw_baselines.Player, raw_baselines.Median))
        own_dict = dict(zip(raw_baselines.Player, raw_baselines.Own))
        cost_dict = dict(zip(raw_baselines.Player, raw_baselines.Salary))
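
# Stack construction: for each selected team, enumerate every combination of
# stack_size hitters and score the stack by summed median projection, salary,
# and ownership.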
with col2:
    stack_hold_container = st.empty()
    comb_list = []
    if pos_split2 != 'All Positions':
        raw_baselines = raw_baselines[raw_baselines['Position'].str.contains('|'.join(pos_var2))]

    for cur_team in team_var2:
        working_baselines = raw_baselines
        working_baselines = working_baselines[working_baselines['Team'] == cur_team]
        # Stacks are hitters only, so exclude both pitcher designations.
        working_baselines = working_baselines[working_baselines['Position'] != 'SP']
        working_baselines = working_baselines[working_baselines['Position'] != 'P']
        order_list = working_baselines['Player']
        comb_list.extend(combinations(order_list, stack_size))

    comb_DF = pd.DataFrame(comb_list)
    # Player names occupy columns 0..stack_size-1; map each column back
    # through the lookup dicts and sum across the stack. The arithmetic is
    # the same for 3-, 4-, and 5-man stacks.
    comb_DF['Team'] = comb_DF[0].map(team_dict)
    comb_DF['Proj'] = sum(comb_DF[i].map(proj_dict) for i in range(stack_size))
    comb_DF['Salary'] = sum(comb_DF[i].map(cost_dict) for i in range(stack_size))
    comb_DF['Own%'] = sum(comb_DF[i].map(own_dict) for i in range(stack_size))
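
    # Ownership-dominance filter: with stacks sorted by projection
    # (descending), walk the table and drop any stack owned more heavily than
    # a higher-projected stack above it. What survives is the frontier where
    # giving up projection at least buys lower ownership.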
    comb_DF = comb_DF.sort_values(by='Proj', ascending=False)
    comb_DF = comb_DF.loc[comb_DF['Salary'] <= max_sal2]
    # Realign labels with positions: the walk below drops rows by label while
    # reading by position, which only agree on a fresh RangeIndex.
    comb_DF = comb_DF.reset_index(drop=True)

    # Column layout is stack_size player columns, then Team, Proj, Salary,
    # Own%, so ownership sits at position stack_size + 3 for every stack size.
    own_col = stack_size + 3
    cut_var = 0
    while cut_var <= len(comb_DF):
        try:
            if cut_var == 0:
                cur_own = float(comb_DF.iat[cut_var, own_col])
            else:
                check_own = float(comb_DF.iat[cut_var, own_col])
                if check_own > cur_own:
                    # Dominated: a higher-projected stack above this row is
                    # also lower owned, so drop it and re-test this position.
                    comb_DF = comb_DF.drop([cut_var]).reset_index(drop=True)
                    cut_var -= 1
                else:
                    cur_own = check_own
            cut_var += 1
        except IndexError:
            # The loop bound allows cut_var == len(comb_DF); the resulting
            # IndexError simply advances past the end.
            cut_var += 1
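
    # Results: render the surviving stacks as a color-graded table alongside
    # a CSV export of the same frame.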
    # st.empty() can hold only a single element, so write the table and the
    # export button into a container placed inside the placeholder (assumed
    # intent: both should render together on each run).
    with stack_hold_container.container():
        st.dataframe(comb_DF.style.background_gradient(cmap='RdYlGn', axis=0).format(precision=2), use_container_width=True)
        st.download_button(
            label="Export Tables",
            data=convert_df_to_csv(comb_DF),
            file_name='MLB_Stack_Options_export.csv',
            mime='text/csv',
        )