# NOTE: git-blame gutter residue (file size, commit hashes, line-number column)
# removed during extraction; it was not part of the Python source.
import streamlit as st
st.set_page_config(layout="wide")
import numpy as np
import pandas as pd
import time
from fuzzywuzzy import process
from collections import Counter
## import global functions
from global_func.load_contest_file import load_contest_file
from global_func.load_file import load_file
from global_func.find_name_mismatches import find_name_mismatches
from global_func.create_player_exposures import create_player_exposures
from global_func.create_stack_exposures import create_stack_exposures
from global_func.create_stack_size_exposures import create_stack_size_exposures
# Percent formatting applied to every exposure column of the player tables.
player_exposure_format = {
    f'Exposure {bucket}': '{:.2%}'
    for bucket in ('Overall', 'Top 1%', 'Top 5%', 'Top 10%', 'Top 20%')
}
# Gate for the heavy per-lineup calculations on the analysis tab; starts off
# until name matching has completed on the Data Load tab.
st.session_state.setdefault('calc_toggle', False)
tab1, tab2 = st.tabs(["Data Load", "Contest Analysis"])
with tab1:
    # Full reset: wipe everything cached and re-disable the analysis gate so
    # tab 2 cannot run against stale data.
    if st.button('Clear data', key='reset1'):
        st.session_state.clear()
        st.session_state['calc_toggle'] = False
    col1, col2 = st.columns(2)
    with col1:
        sport_select = st.selectbox("Select Sport", ['MLB', 'NBA', 'NFL'])
    with col2:
        type_var = st.selectbox("Select Game Type", ['Classic', 'Showdown'])
    # Add file uploaders to your app
    col1, col2 = st.columns(2)
    with col1:
        st.subheader("Contest File")
        st.info("Go ahead and upload a Contest file here. Only include player columns and an optional 'Stack' column if you are playing MLB.")
        Contest_file = st.file_uploader("Upload Contest File (CSV or Excel)", type=['csv', 'xlsx', 'xls'])
        # Drop the previously parsed contest on every rerun so the state always
        # mirrors the currently uploaded file (or its absence).
        if 'Contest' in st.session_state:
            del st.session_state['Contest']
        if Contest_file:
            # load_contest_file returns (lineups frame, ownership frame,
            # actual-points frame, entry-name list) for the chosen sport.
            st.session_state['Contest'], st.session_state['ownership_dict'], st.session_state['actual_dict'], st.session_state['entry_list'] = load_contest_file(Contest_file, sport_select)
            st.session_state['Contest'] = st.session_state['Contest'].dropna(how='all')
            st.session_state['Contest'] = st.session_state['Contest'].reset_index(drop=True)
            if st.session_state['Contest'] is not None:
                st.success('Contest file loaded successfully!')
                st.dataframe(st.session_state['Contest'].head(10))
    with col2:
        st.subheader("Projections File")
        st.info("upload a projections file that has 'player_names', 'salary', 'median', 'ownership', and 'captain ownership' (Needed for Showdown) columns. Note that the salary for showdown needs to be the FLEX salary, not the captain salary.")
        # Create two columns for the uploader and template button
        upload_col, template_col = st.columns([3, 1])
        with upload_col:
            projections_file = st.file_uploader("Upload Projections File (CSV or Excel)", type=['csv', 'xlsx', 'xls'])
            # Same rerun hygiene as the contest frame above.
            if 'projections_df' in st.session_state:
                del st.session_state['projections_df']
        with template_col:
            # Create empty DataFrame with required columns
            template_df = pd.DataFrame(columns=['player_names', 'position', 'team', 'salary', 'median', 'ownership', 'captain ownership'])
            # Add download button for template
            st.download_button(
                label="Template",
                data=template_df.to_csv(index=False),
                file_name="projections_template.csv",
                mime="text/csv"
            )
        if projections_file:
            export_projections, st.session_state['projections_df'] = load_file(projections_file)
            if st.session_state['projections_df'] is not None:
                st.success('Projections file loaded successfully!')
                st.dataframe(st.session_state['projections_df'].head(10))
    if Contest_file and projections_file:
        st.subheader("Name Matching functions")
        # Reconcile name spellings between the contest and projections files;
        # the returned calc_toggle flag gates the analysis tab once resolved.
        st.session_state['Contest'], st.session_state['projections_df'], st.session_state['ownership_dict'], st.session_state['actual_dict'], st.session_state['calc_toggle'] = find_name_mismatches(st.session_state['Contest'], st.session_state['projections_df'], st.session_state['ownership_dict'], st.session_state['actual_dict'], st.session_state['calc_toggle'])
        # Normalize salary strings like "10,500" into plain ints.
        st.session_state['projections_df']['salary'] = (st.session_state['projections_df']['salary'].astype(str).str.replace(',', '').astype(float).astype(int))
        # Collapse the ownership / actual-points frames into name -> value
        # dicts for fast per-player lookups on the analysis tab.  Safe on
        # rerun: the frames are rebuilt above whenever Contest_file is set.
        st.session_state['ownership_dict'] = dict(zip(st.session_state['ownership_dict']['Player'], st.session_state['ownership_dict']['Own']))
        st.session_state['actual_dict'] = dict(zip(st.session_state['actual_dict']['Player'], st.session_state['actual_dict']['FPTS']))
with tab2:
    # Guard: everything below reads st.session_state['Contest'],
    # ['projections_df'] and ['entry_list'].  The original indexed them
    # unconditionally (the existence check came only after map_dict and
    # working_df were built), so opening this tab before loading both files
    # raised a KeyError.
    if 'Contest' in st.session_state and 'projections_df' in st.session_state:
        excluded_cols = ['BaseName', 'EntryCount']
        player_columns = [col for col in st.session_state['Contest'].columns if col not in excluded_cols]
        # The lookup dicts below key on strings, so normalize every cell.
        for col in player_columns:
            st.session_state['Contest'][col] = st.session_state['Contest'][col].astype(str)
        # Create mapping dictionaries (player name -> attribute).
        proj_df = st.session_state['projections_df']
        map_dict = {
            'pos_map': dict(zip(proj_df['player_names'], proj_df['position'])),
            'team_map': dict(zip(proj_df['player_names'], proj_df['team'])),
            'salary_map': dict(zip(proj_df['player_names'], proj_df['salary'])),
            'proj_map': dict(zip(proj_df['player_names'], proj_df['median'])),
            'own_map': dict(zip(proj_df['player_names'], proj_df['ownership'])),
            'own_percent_rank': dict(zip(proj_df['player_names'], proj_df['ownership'].rank(pct=True))),
            # Captain salary equals the FLEX salary per the upload instructions.
            'cpt_salary_map': dict(zip(proj_df['player_names'], proj_df['salary'])),
            # Showdown captains score 1.5x their projection.
            'cpt_proj_map': dict(zip(proj_df['player_names'], proj_df['median'] * 1.5)),
            'cpt_own_map': dict(zip(proj_df['player_names'], proj_df['captain ownership']))
        }
        # Create a copy of the dataframe for calculations.
        working_df = st.session_state['Contest'].copy()
        if st.session_state['calc_toggle']:
            if type_var == 'Classic':
                # Primary stack = most common team across the lineup; its count
                # is the stack size.  row.iloc[4:] skips leading entry-metadata
                # columns (assumes four of them — TODO confirm contest layout).
                # .iloc replaces the deprecated positional row[4:] access.
                working_df['stack'] = working_df.apply(
                    lambda row: Counter(
                        map_dict['team_map'].get(player, '') for player in row.iloc[4:]
                        if map_dict['team_map'].get(player, '') != ''
                    ).most_common(1)[0][0] if any(map_dict['team_map'].get(player, '') for player in row.iloc[4:]) else '',
                    axis=1
                )
                working_df['stack_size'] = working_df.apply(
                    lambda row: Counter(
                        map_dict['team_map'].get(player, '') for player in row.iloc[4:]
                        if map_dict['team_map'].get(player, '') != ''
                    ).most_common(1)[0][1] if any(map_dict['team_map'].get(player, '') for player in row.iloc[4:]) else '',
                    axis=1
                )
                # Non-player cells miss the maps and contribute 0, so summing
                # over the entire row is safe.
                working_df['salary'] = working_df.apply(lambda row: sum(map_dict['salary_map'].get(player, 0) for player in row), axis=1)
                working_df['median'] = working_df.apply(lambda row: sum(map_dict['proj_map'].get(player, 0) for player in row), axis=1)
                working_df['actual_fpts'] = working_df.apply(lambda row: sum(st.session_state['actual_dict'].get(player, 0) for player in row), axis=1)
                working_df['Own'] = working_df.apply(lambda row: sum(map_dict['own_map'].get(player, 0) for player in row), axis=1)
                working_df['actual_own'] = working_df.apply(lambda row: sum(st.session_state['ownership_dict'].get(player, 0) for player in row), axis=1)
                # Order-insensitive lineup key used to count duplicate entries.
                working_df['sorted'] = working_df[player_columns].apply(
                    lambda row: ','.join(sorted(row.values)),
                    axis=1
                )
                # Select the key column before transform so a Series (not a
                # DataFrame) is assigned regardless of pandas version.
                working_df['dupes'] = working_df.groupby('sorted')['sorted'].transform('size')
                working_df = working_df.reset_index()
                # The contest file is assumed to be in finish order, so the
                # original row index doubles as the finish position.
                working_df['percentile_finish'] = working_df['index'].rank(pct=True)
                working_df['finish'] = working_df['index']
                working_df = working_df.drop(['sorted', 'index'], axis=1)
            elif type_var == 'Showdown':
                # Showdown rows are all player columns (captain first); no
                # leading metadata columns to skip.
                working_df['stack'] = working_df.apply(
                    lambda row: Counter(
                        map_dict['team_map'].get(player, '') for player in row
                        if map_dict['team_map'].get(player, '') != ''
                    ).most_common(1)[0][0] if any(map_dict['team_map'].get(player, '') for player in row) else '',
                    axis=1
                )
                working_df['stack_size'] = working_df.apply(
                    lambda row: Counter(
                        map_dict['team_map'].get(player, '') for player in row
                        if map_dict['team_map'].get(player, '') != ''
                    ).most_common(1)[0][1] if any(map_dict['team_map'].get(player, '') for player in row) else '',
                    axis=1
                )
                # Captain (first column) uses the CPT maps; the rest use FLEX.
                working_df['salary'] = working_df.apply(
                    lambda row: map_dict['cpt_salary_map'].get(row.iloc[0], 0) +
                    sum(map_dict['salary_map'].get(player, 0) for player in row.iloc[1:]),
                    axis=1
                )
                working_df['median'] = working_df.apply(
                    lambda row: map_dict['cpt_proj_map'].get(row.iloc[0], 0) +
                    sum(map_dict['proj_map'].get(player, 0) for player in row.iloc[1:]),
                    axis=1
                )
                working_df['Own'] = working_df.apply(
                    lambda row: map_dict['cpt_own_map'].get(row.iloc[0], 0) +
                    sum(map_dict['own_map'].get(player, 0) for player in row.iloc[1:]),
                    axis=1
                )
                # Captain slot is positional, so keep it out of the sorted FLEX
                # key.  .iloc replaces deprecated row[0] / row[1:] access.
                working_df['sorted'] = working_df[player_columns].apply(
                    lambda row: row.iloc[0] + '|' + ','.join(sorted(row.iloc[1:].values)),
                    axis=1
                )
                working_df['dupes'] = working_df.groupby('sorted')['sorted'].transform('size')
                working_df = working_df.reset_index()
                working_df['percentile_finish'] = working_df['index'].rank(pct=True)
                working_df['finish'] = working_df['index']
                working_df = working_df.drop(['sorted', 'index'], axis=1)
            # Whole-field (unfiltered) exposure baselines.
            st.session_state['field_player_frame'] = create_player_exposures(working_df, player_columns)
            st.session_state['field_stack_frame'] = create_stack_exposures(working_df)
            with st.expander("Info and filters"):
                if st.button('Clear data', key='reset3'):
                    st.session_state.clear()
                with st.form(key='filter_form'):
                    entry_parse_var = st.selectbox("Do you want to view a specific player(s) or a group of players?", ['All', 'Specific'])
                    entry_names = st.multiselect("Select players", options=st.session_state['entry_list'], default=[])
                    submitted = st.form_submit_button("Submit")
            if submitted:
                # Invalidate cached exposure tables before refiltering.
                if 'player_frame' in st.session_state:
                    del st.session_state['player_frame']
                if 'stack_frame' in st.session_state:
                    del st.session_state['stack_frame']
                # Apply entry name filter if specific entries are selected.
                if entry_parse_var == 'Specific' and entry_names:
                    working_df = working_df[working_df['BaseName'].isin(entry_names)]
                    st.session_state['calc_toggle'] = True
                elif entry_parse_var == 'All':
                    st.session_state['calc_toggle'] = True
            # Initialize pagination in session state if not exists.
            if 'current_page' not in st.session_state:
                st.session_state.current_page = 1
            # Calculate total pages (ceiling division).
            rows_per_page = 500
            total_rows = len(working_df)
            total_pages = (total_rows + rows_per_page - 1) // rows_per_page
            # Create pagination controls in a single row.
            pagination_cols = st.columns([4, 1, 1, 1, 4])
            with pagination_cols[1]:
                if st.button("Previous Page"):
                    if st.session_state['current_page'] > 1:
                        st.session_state.current_page -= 1
                    else:
                        st.session_state.current_page = 1
                    # A page change invalidates the cached exposure tables.
                    if 'player_frame' in st.session_state:
                        del st.session_state['player_frame']
                    if 'stack_frame' in st.session_state:
                        del st.session_state['stack_frame']
            with pagination_cols[3]:
                if st.button("Next Page"):
                    st.session_state.current_page += 1
                    if 'player_frame' in st.session_state:
                        del st.session_state['player_frame']
                    if 'stack_frame' in st.session_state:
                        del st.session_state['stack_frame']
            # Calculate start and end indices for current page.
            start_idx = (st.session_state.current_page - 1) * rows_per_page
            end_idx = min((st.session_state.current_page) * rows_per_page, total_rows)
            st.dataframe(
                working_df.iloc[start_idx:end_idx].style
                .background_gradient(axis=0)
                .background_gradient(cmap='RdYlGn')
                .format(precision=2),
                height=500,
                use_container_width=True,
                hide_index=True
            )

            def _render_exposure_frame(frame):
                # Shared renderer for the three exposure tables: sort by
                # overall exposure, color-grade, percent-format numerics.
                st.dataframe(frame.
                             sort_values(by='Exposure Overall', ascending=False).
                             style.background_gradient(cmap='RdYlGn').
                             format(formatter='{:.2%}', subset=frame.iloc[:, 1:].select_dtypes(include=['number']).columns),
                             hide_index=True)

            with st.container():
                tab1, tab2, tab3 = st.tabs(['Player Used Info', 'Stack Used Info', 'Stack Size Info'])
                with tab1:
                    if entry_parse_var == 'All':
                        st.session_state['player_frame'] = create_player_exposures(working_df, player_columns)
                    else:
                        st.session_state['player_frame'] = create_player_exposures(working_df, player_columns, entry_names)
                    _render_exposure_frame(st.session_state['player_frame'])
                with tab2:
                    if entry_parse_var == 'All':
                        st.session_state['stack_frame'] = create_stack_exposures(working_df)
                    else:
                        st.session_state['stack_frame'] = create_stack_exposures(working_df, entry_names)
                    _render_exposure_frame(st.session_state['stack_frame'])
                with tab3:
                    if entry_parse_var == 'All':
                        st.session_state['stack_size_frame'] = create_stack_size_exposures(working_df)
                    else:
                        st.session_state['stack_size_frame'] = create_stack_size_exposures(working_df, entry_names)
                    _render_exposure_frame(st.session_state['stack_size_frame'])
    else:
        # Graceful message instead of the original KeyError crash.
        st.info("Load a contest file and a projections file on the Data Load tab first.")