James McCool commited on
Commit
58cea02
·
1 Parent(s): cdad3f2

Initial commit

Browse files
Files changed (3) hide show
  1. app.py +1003 -0
  2. app.yaml +10 -0
  3. requirements.txt +10 -0
app.py ADDED
@@ -0,0 +1,1003 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ st.set_page_config(layout="wide")
3
+ import numpy as np
4
+ import pandas as pd
5
+ import time
6
+ from fuzzywuzzy import process
7
+ import random
8
+
9
+ ## import global functions
10
+ from global_func.clean_player_name import clean_player_name
11
+ from global_func.load_file import load_file
12
+ from global_func.load_ss_file import load_ss_file
13
+ from global_func.find_name_mismatches import find_name_mismatches
14
+ from global_func.predict_dupes import predict_dupes
15
+ from global_func.highlight_rows import highlight_changes, highlight_changes_winners, highlight_changes_losers
16
+ from global_func.load_csv import load_csv
17
+ from global_func.find_csv_mismatches import find_csv_mismatches
18
+
19
+ freq_format = {'Finish_percentile': '{:.2%}', 'Lineup Edge': '{:.2%}', 'Win%': '{:.2%}'}
20
+ player_wrong_names_mlb = ['Enrique Hernandez']
21
+ player_right_names_mlb = ['Kike Hernandez']
22
+
23
+ tab1, tab2, tab3 = st.tabs(["Data Load", "Late Swap", "Manage Portfolio"])
24
+ with tab1:
25
+ if st.button('Clear data', key='reset1'):
26
+ st.session_state.clear()
27
+ # Add file uploaders to your app
28
+ col1, col2, col3 = st.columns(3)
29
+
30
+ with col1:
31
+ st.subheader("Draftkings/Fanduel CSV")
32
+ st.info("Upload the player pricing CSV from the site you are playing on. This is used in late swap exporting and/or with SaberSim portfolios, but is not necessary for the portfolio management functions.")
33
+
34
+ upload_csv_col, csv_template_col = st.columns([3, 1])
35
+ with upload_csv_col:
36
+ csv_file = st.file_uploader("Upload CSV File", type=['csv'])
37
+ if 'csv_file' in st.session_state:
38
+ del st.session_state['csv_file']
39
+ with csv_template_col:
40
+
41
+ csv_template_df = pd.DataFrame(columns=['Name', 'ID', 'Roster Position', 'Salary'])
42
+
43
+ st.download_button(
44
+ label="CSV Template",
45
+ data=csv_template_df.to_csv(index=False),
46
+ file_name="csv_template.csv",
47
+ mime="text/csv"
48
+ )
49
+ st.session_state['csv_file'] = load_csv(csv_file)
50
+ try:
51
+ st.session_state['csv_file']['Salary'] = st.session_state['csv_file']['Salary'].astype(str).str.replace(',', '').astype(int)
52
+ except:
53
+ pass
54
+
55
+ if csv_file:
56
+ st.session_state['csv_file'] = st.session_state['csv_file'].drop_duplicates(subset=['Name'])
57
+ st.success('Projections file loaded successfully!')
58
+ st.dataframe(st.session_state['csv_file'].head(10))
59
+
60
+ with col2:
61
+ st.subheader("Portfolio File")
62
+ st.info("Go ahead and upload a portfolio file here. Only include player columns and an optional 'Stack' column if you are playing MLB.")
63
+ saber_toggle = st.radio("Are you uploading from SaberSim?", options=['No', 'Yes'])
64
+ st.info("If you are uploading from SaberSim, you will need to upload a CSV file for the slate for name matching.")
65
+ if saber_toggle == 'Yes':
66
+ if csv_file is not None:
67
+ portfolio_file = st.file_uploader("Upload Portfolio File (CSV or Excel)", type=['csv', 'xlsx', 'xls'])
68
+ if 'portfolio' in st.session_state:
69
+ del st.session_state['portfolio']
70
+ if 'export_portfolio' in st.session_state:
71
+ del st.session_state['export_portfolio']
72
+
73
+ else:
74
+ portfolio_file = st.file_uploader("Upload Portfolio File (CSV or Excel)", type=['csv', 'xlsx', 'xls'])
75
+ if 'portfolio' in st.session_state:
76
+ del st.session_state['portfolio']
77
+ if 'export_portfolio' in st.session_state:
78
+ del st.session_state['export_portfolio']
79
+
80
+ if portfolio_file:
81
+ if saber_toggle == 'Yes':
82
+ st.session_state['export_portfolio'], st.session_state['portfolio'] = load_ss_file(portfolio_file, st.session_state['csv_file'])
83
+ st.session_state['export_portfolio'] = st.session_state['export_portfolio'].dropna(how='all')
84
+ st.session_state['export_portfolio'] = st.session_state['export_portfolio'].reset_index(drop=True)
85
+ st.session_state['portfolio'] = st.session_state['portfolio'].dropna(how='all')
86
+ st.session_state['portfolio'] = st.session_state['portfolio'].reset_index(drop=True)
87
+ else:
88
+ st.session_state['export_portfolio'], st.session_state['portfolio'] = load_file(portfolio_file)
89
+ st.session_state['export_portfolio'] = st.session_state['export_portfolio'].dropna(how='all')
90
+ st.session_state['export_portfolio'] = st.session_state['export_portfolio'].reset_index(drop=True)
91
+ st.session_state['portfolio'] = st.session_state['portfolio'].dropna(how='all')
92
+ st.session_state['portfolio'] = st.session_state['portfolio'].reset_index(drop=True)
93
+ # Check if Stack column exists in the portfolio
94
+ if 'Stack' in st.session_state['portfolio'].columns:
95
+ # Create dictionary mapping index to Stack values
96
+ stack_dict = dict(zip(st.session_state['portfolio'].index, st.session_state['portfolio']['Stack']))
97
+ st.write(f"Found {len(stack_dict)} stack assignments")
98
+ st.session_state['portfolio'] = st.session_state['portfolio'].drop(columns=['Stack'])
99
+ else:
100
+ stack_dict = None
101
+ st.info("No Stack column found in portfolio")
102
+ if st.session_state['portfolio'] is not None:
103
+ st.success('Portfolio file loaded successfully!')
104
+ st.session_state['portfolio'] = st.session_state['portfolio'].apply(lambda x: x.replace(player_wrong_names_mlb, player_right_names_mlb))
105
+ st.dataframe(st.session_state['portfolio'].head(10))
106
+
107
+ with col3:
108
+ st.subheader("Projections File")
109
+ st.info("upload a projections file that has 'player_names', 'salary', 'median', 'ownership', and 'captain ownership' (Needed for Showdown) columns. Note that the salary for showdown needs to be the FLEX salary, not the captain salary.")
110
+
111
+ # Create two columns for the uploader and template button
112
+ upload_col, template_col = st.columns([3, 1])
113
+
114
+ with upload_col:
115
+ projections_file = st.file_uploader("Upload Projections File (CSV or Excel)", type=['csv', 'xlsx', 'xls'])
116
+ if 'projections_df' in st.session_state:
117
+ del st.session_state['projections_df']
118
+
119
+ with template_col:
120
+ # Create empty DataFrame with required columns
121
+ template_df = pd.DataFrame(columns=['player_names', 'position', 'team', 'salary', 'median', 'ownership', 'captain ownership'])
122
+ # Add download button for template
123
+ st.download_button(
124
+ label="Template",
125
+ data=template_df.to_csv(index=False),
126
+ file_name="projections_template.csv",
127
+ mime="text/csv"
128
+ )
129
+
130
+ if projections_file:
131
+ export_projections, projections = load_file(projections_file)
132
+ if projections is not None:
133
+ st.success('Projections file loaded successfully!')
134
+ projections = projections.apply(lambda x: x.replace(player_wrong_names_mlb, player_right_names_mlb))
135
+ st.dataframe(projections.head(10))
136
+
137
+ if portfolio_file and projections_file:
138
+ if st.session_state['portfolio'] is not None and projections is not None:
139
+ st.subheader("Name Matching Analysis")
140
+ # Initialize projections_df in session state if it doesn't exist
141
+ if 'projections_df' not in st.session_state:
142
+ st.session_state['projections_df'] = projections.copy()
143
+ st.session_state['projections_df']['salary'] = (st.session_state['projections_df']['salary'].astype(str).str.replace(',', '').astype(float).astype(int))
144
+
145
+ # Update projections_df with any new matches
146
+ st.session_state['projections_df'] = find_name_mismatches(st.session_state['portfolio'], st.session_state['projections_df'])
147
+ if csv_file is not None and 'export_dict' not in st.session_state:
148
+ # Create a dictionary of Name to Name+ID from csv_file
149
+ try:
150
+ name_id_map = dict(zip(
151
+ st.session_state['csv_file']['Name'],
152
+ st.session_state['csv_file']['Name + ID']
153
+ ))
154
+ except:
155
+ name_id_map = dict(zip(
156
+ st.session_state['csv_file']['Nickname'],
157
+ st.session_state['csv_file']['Id']
158
+ ))
159
+
160
+ # Function to find best match
161
def find_best_match(name):
    """Resolve a projection player name to the site's export identifier.

    Fuzzy-matches *name* against the keys of the enclosing ``name_id_map``
    (via fuzzywuzzy's ``process.extractOne``). When the best candidate scores
    at least 85, the mapped Name+ID value is returned; otherwise the original
    name is passed through unchanged.
    """
    MATCH_THRESHOLD = 85  # minimum fuzzy-match score to accept a mapping
    candidate = process.extractOne(name, name_id_map.keys())
    if candidate is None:
        return name
    matched_name, score = candidate[0], candidate[1]
    return name_id_map[matched_name] if score >= MATCH_THRESHOLD else name
166
+
167
+ # Apply the matching
168
+ projections['upload_match'] = projections['player_names'].apply(find_best_match)
169
+ st.session_state['export_dict'] = dict(zip(projections['player_names'], projections['upload_match']))
170
+
171
+ with tab2:
172
+ if st.button('Clear data', key='reset2'):
173
+ st.session_state.clear()
174
+
175
+ if 'portfolio' in st.session_state and 'projections_df' in st.session_state:
176
+
177
+ optimized_df = None
178
+
179
+ map_dict = {
180
+ 'pos_map': dict(zip(st.session_state['projections_df']['player_names'],
181
+ st.session_state['projections_df']['position'])),
182
+ 'salary_map': dict(zip(st.session_state['projections_df']['player_names'],
183
+ st.session_state['projections_df']['salary'])),
184
+ 'proj_map': dict(zip(st.session_state['projections_df']['player_names'],
185
+ st.session_state['projections_df']['median'])),
186
+ 'own_map': dict(zip(st.session_state['projections_df']['player_names'],
187
+ st.session_state['projections_df']['ownership'])),
188
+ 'team_map': dict(zip(st.session_state['projections_df']['player_names'],
189
+ st.session_state['projections_df']['team']))
190
+ }
191
+ # Calculate new stats for optimized lineups
192
+ st.session_state['portfolio']['salary'] = st.session_state['portfolio'].apply(
193
+ lambda row: sum(map_dict['salary_map'].get(player, 0) for player in row if player in map_dict['salary_map']), axis=1
194
+ )
195
+ st.session_state['portfolio']['median'] = st.session_state['portfolio'].apply(
196
+ lambda row: sum(map_dict['proj_map'].get(player, 0) for player in row if player in map_dict['proj_map']), axis=1
197
+ )
198
+
199
+ st.session_state['portfolio']['Own'] = st.session_state['portfolio'].apply(
200
+ lambda row: sum(map_dict['own_map'].get(player, 0) for player in row if player in map_dict['own_map']), axis=1
201
+ )
202
+
203
+ options_container = st.container()
204
+ with options_container:
205
+ col1, col2, col3, col4, col5, col6 = st.columns(6)
206
+ with col1:
207
+ curr_site_var = st.selectbox("Select your current site", options=['DraftKings', 'FanDuel'])
208
+ with col2:
209
+ curr_sport_var = st.selectbox("Select your current sport", options=['NBA', 'MLB', 'NFL', 'NHL', 'MMA'])
210
+ with col3:
211
+ swap_var = st.multiselect("Select late swap strategy", options=['Optimize', 'Increase volatility', 'Decrease volatility'])
212
+ with col4:
213
+ remove_teams_var = st.multiselect("What teams have already played?", options=st.session_state['projections_df']['team'].unique())
214
+ with col5:
215
+ winners_var = st.multiselect("Are there any players doing exceptionally well?", options=st.session_state['projections_df']['player_names'].unique(), max_selections=3)
216
+ with col6:
217
+ losers_var = st.multiselect("Are there any players doing exceptionally poorly?", options=st.session_state['projections_df']['player_names'].unique(), max_selections=3)
218
+ if st.button('Clear Late Swap'):
219
+ if 'optimized_df' in st.session_state:
220
+ del st.session_state['optimized_df']
221
+
222
+ map_dict = {
223
+ 'pos_map': dict(zip(st.session_state['projections_df']['player_names'],
224
+ st.session_state['projections_df']['position'])),
225
+ 'salary_map': dict(zip(st.session_state['projections_df']['player_names'],
226
+ st.session_state['projections_df']['salary'])),
227
+ 'proj_map': dict(zip(st.session_state['projections_df']['player_names'],
228
+ st.session_state['projections_df']['median'])),
229
+ 'own_map': dict(zip(st.session_state['projections_df']['player_names'],
230
+ st.session_state['projections_df']['ownership'])),
231
+ 'team_map': dict(zip(st.session_state['projections_df']['player_names'],
232
+ st.session_state['projections_df']['team']))
233
+ }
234
+ # Calculate new stats for optimized lineups
235
+ st.session_state['portfolio']['salary'] = st.session_state['portfolio'].apply(
236
+ lambda row: sum(map_dict['salary_map'].get(player, 0) for player in row if player in map_dict['salary_map']), axis=1
237
+ )
238
+ st.session_state['portfolio']['median'] = st.session_state['portfolio'].apply(
239
+ lambda row: sum(map_dict['proj_map'].get(player, 0) for player in row if player in map_dict['proj_map']), axis=1
240
+ )
241
+ st.session_state['portfolio']['Own'] = st.session_state['portfolio'].apply(
242
+ lambda row: sum(map_dict['own_map'].get(player, 0) for player in row if player in map_dict['own_map']), axis=1
243
+ )
244
+
245
+ if st.button('Run Late Swap'):
246
+ st.session_state['portfolio'] = st.session_state['portfolio'].drop(columns=['salary', 'median', 'Own'])
247
+ if curr_sport_var == 'NBA':
248
+ if curr_site_var == 'DraftKings':
249
+ st.session_state['portfolio'] = st.session_state['portfolio'].set_axis(['PG', 'SG', 'SF', 'PF', 'C', 'G', 'F', 'UTIL'], axis=1)
250
+ else:
251
+ st.session_state['portfolio'] = st.session_state['portfolio'].set_axis(['PG', 'PG', 'SG', 'SG', 'SF', 'SF', 'PF', 'PF', 'C'], axis=1)
252
+
253
+ # Define roster position rules
254
+ if curr_site_var == 'DraftKings':
255
+ position_rules = {
256
+ 'PG': ['PG'],
257
+ 'SG': ['SG'],
258
+ 'SF': ['SF'],
259
+ 'PF': ['PF'],
260
+ 'C': ['C'],
261
+ 'G': ['PG', 'SG'],
262
+ 'F': ['SF', 'PF'],
263
+ 'UTIL': ['PG', 'SG', 'SF', 'PF', 'C']
264
+ }
265
+ else:
266
+ position_rules = {
267
+ 'PG': ['PG'],
268
+ 'SG': ['SG'],
269
+ 'SF': ['SF'],
270
+ 'PF': ['PF'],
271
+ 'C': ['C'],
272
+ }
273
+ # Create position groups from projections data
274
+ position_groups = {}
275
+ for _, player in st.session_state['projections_df'].iterrows():
276
+ positions = player['position'].split('/')
277
+ for pos in positions:
278
+ if pos not in position_groups:
279
+ position_groups[pos] = []
280
+ position_groups[pos].append({
281
+ 'player_names': player['player_names'],
282
+ 'salary': player['salary'],
283
+ 'median': player['median'],
284
+ 'ownership': player['ownership'],
285
+ 'positions': positions # Store all eligible positions
286
+ })
287
+
288
def optimize_lineup(row):
    """Greedily upgrade one lineup row toward a higher projected median.

    Each roster slot is visited in random order; the current player is swapped
    for the highest-median eligible replacement that (a) fits under the site's
    salary cap, (b) is not already used in this lineup, (c) is eligible for the
    slot per ``position_rules``, and (d) is not on a team in
    ``remove_teams_var``. Players already on removed (locked) teams are never
    swapped out.

    Closes over ``curr_site_var``, ``remove_teams_var``, ``map_dict``,
    ``position_rules`` and ``position_groups`` from the enclosing tab-2 script.

    Returns:
        list[str]: optimized player names, in the row's original column order
        (stat columns such as 'salary'/'median'/'Own' are excluded).
    """
    current_lineup = []  # NOTE(review): never used — kept byte-identical
    total_salary = 0
    # Salary cap depends on the site selected in the UI.
    if curr_site_var == 'DraftKings':
        salary_cap = 50000
    else:
        salary_cap = 60000
    used_players = set()

    # Convert row to dictionary with roster positions, skipping stat columns.
    roster = {}
    for col, player in zip(row.index, row):
        if col not in ['salary', 'median', 'Own', 'Finish_percentile', 'Dupes', 'Lineup Edge']:
            roster[col] = {
                'name': player,
                'position': map_dict['pos_map'].get(player, '').split('/'),
                'team': map_dict['team_map'].get(player, ''),
                'salary': map_dict['salary_map'].get(player, 0),
                'median': map_dict['proj_map'].get(player, 0),
                'ownership': map_dict['own_map'].get(player, 0)
            }
            total_salary += roster[col]['salary']
            used_players.add(player)

    # Optimize each roster position in random order so repeated passes can
    # explore different swap sequences.
    roster_positions = list(roster.items())
    random.shuffle(roster_positions)

    for roster_pos, current in roster_positions:
        # Skip optimization for players from removed (already played) teams.
        if current['team'] in remove_teams_var:
            continue

        valid_positions = position_rules[roster_pos]
        better_options = []

        # Find valid replacements for this roster position.
        for pos in valid_positions:
            if pos in position_groups:
                pos_options = [
                    p for p in position_groups[pos]
                    if p['median'] > current['median']
                    and (total_salary - current['salary'] + p['salary']) <= salary_cap
                    and p['player_names'] not in used_players
                    and any(valid_pos in p['positions'] for valid_pos in valid_positions)
                    and map_dict['team_map'].get(p['player_names']) not in remove_teams_var  # Check team restriction
                ]
                better_options.extend(pos_options)

        if better_options:
            # Remove duplicates (same player can appear under several positions).
            better_options = {opt['player_names']: opt for opt in better_options}.values()

            # Take the single best option by median projection.
            best_replacement = max(better_options, key=lambda x: x['median'])

            # Update the lineup and the running salary / used-player trackers.
            used_players.remove(current['name'])
            used_players.add(best_replacement['player_names'])
            total_salary = total_salary - current['salary'] + best_replacement['salary']
            roster[roster_pos] = {
                'name': best_replacement['player_names'],
                'position': map_dict['pos_map'][best_replacement['player_names']].split('/'),
                'team': map_dict['team_map'][best_replacement['player_names']],
                'salary': best_replacement['salary'],
                'median': best_replacement['median'],
                'ownership': best_replacement['ownership']
            }

    # Return optimized lineup maintaining original column order.
    return [roster[pos]['name'] for pos in row.index if pos in roster]
359
+
360
def optimize_lineup_winners(row):
    """Reduce lineup volatility by swapping toward higher-owned players.

    If the lineup contains any players from ``winners_var`` (players doing
    exceptionally well), up to that many swaps (capped at 3) are made: each
    replaces a non-winner with a higher-ownership player whose median is at
    most 3 points lower, keeping total salary within 1000 of the cap. Winners
    themselves and players on removed teams are never swapped out.

    Closes over ``curr_site_var``, ``remove_teams_var``, ``winners_var``,
    ``map_dict``, ``position_rules`` and ``position_groups``.

    Returns:
        list[str]: adjusted player names in the row's original column order.
    """
    current_lineup = []  # NOTE(review): never used — kept byte-identical
    total_salary = 0
    # Salary cap depends on the site selected in the UI.
    if curr_site_var == 'DraftKings':
        salary_cap = 50000
    else:
        salary_cap = 60000
    used_players = set()

    # Check if any winners are in the lineup; budget at most 3 swaps.
    winners_in_lineup = sum(1 for player in row if player in winners_var)
    changes_needed = min(winners_in_lineup, 3) if winners_in_lineup > 0 else 0
    changes_made = 0

    # Convert row to dictionary with roster positions, skipping stat columns.
    roster = {}
    for col, player in zip(row.index, row):
        if col not in ['salary', 'median', 'Own', 'Finish_percentile', 'Dupes', 'Lineup Edge']:
            roster[col] = {
                'name': player,
                'position': map_dict['pos_map'].get(player, '').split('/'),
                'team': map_dict['team_map'].get(player, ''),
                'salary': map_dict['salary_map'].get(player, 0),
                'median': map_dict['proj_map'].get(player, 0),
                'ownership': map_dict['own_map'].get(player, 0)
            }
            total_salary += roster[col]['salary']
            used_players.add(player)

    # Only proceed with ownership-based optimization if we have winners in the lineup.
    if changes_needed > 0:
        # Randomize the order of positions to optimize.
        roster_positions = list(roster.items())
        random.shuffle(roster_positions)

        for roster_pos, current in roster_positions:
            # Stop if we've made enough changes.
            if changes_made >= changes_needed:
                break

            # Skip players from removed teams and the winners themselves.
            if current['team'] in remove_teams_var or current['name'] in winners_var:
                continue

            valid_positions = list(position_rules[roster_pos])
            random.shuffle(valid_positions)
            better_options = []

            # Find valid replacements with higher ownership; median may drop by
            # at most 3, and salary must stay within 1000 of the cap.
            for pos in valid_positions:
                if pos in position_groups:
                    pos_options = [
                        p for p in position_groups[pos]
                        if p['ownership'] > current['ownership']
                        and p['median'] >= current['median'] - 3
                        and (total_salary - current['salary'] + p['salary']) <= salary_cap
                        and (total_salary - current['salary'] + p['salary']) >= salary_cap - 1000
                        and p['player_names'] not in used_players
                        and any(valid_pos in p['positions'] for valid_pos in valid_positions)
                        and map_dict['team_map'].get(p['player_names']) not in remove_teams_var
                    ]
                    better_options.extend(pos_options)

            if better_options:
                # Remove duplicates (same player under several positions).
                better_options = {opt['player_names']: opt for opt in better_options}.values()

                # Take the highest-owned option.
                best_replacement = max(better_options, key=lambda x: x['ownership'])

                # Update the lineup and the running salary / used-player trackers.
                used_players.remove(current['name'])
                used_players.add(best_replacement['player_names'])
                total_salary = total_salary - current['salary'] + best_replacement['salary']
                roster[roster_pos] = {
                    'name': best_replacement['player_names'],
                    'position': map_dict['pos_map'][best_replacement['player_names']].split('/'),
                    'team': map_dict['team_map'][best_replacement['player_names']],
                    'salary': best_replacement['salary'],
                    'median': best_replacement['median'],
                    'ownership': best_replacement['ownership']
                }
                changes_made += 1

    # Return optimized lineup maintaining original column order.
    return [roster[pos]['name'] for pos in row.index if pos in roster]
446
+
447
def optimize_lineup_losers(row):
    """Increase lineup volatility by swapping toward lower-owned players.

    Mirror of ``optimize_lineup_winners``: if the lineup contains players from
    ``losers_var`` (players doing exceptionally poorly), up to that many swaps
    (capped at 3) replace a non-loser with a LOWER-ownership player whose
    median is at most 3 points lower, keeping total salary within 1000 of the
    cap. Losers themselves and players on removed teams are never swapped out.

    Closes over ``curr_site_var``, ``remove_teams_var``, ``losers_var``,
    ``map_dict``, ``position_rules`` and ``position_groups``.

    Returns:
        list[str]: adjusted player names in the row's original column order.
    """
    current_lineup = []  # NOTE(review): never used — kept byte-identical
    total_salary = 0
    # Salary cap depends on the site selected in the UI.
    if curr_site_var == 'DraftKings':
        salary_cap = 50000
    else:
        salary_cap = 60000
    used_players = set()

    # Count losers in the lineup; budget at most 3 swaps.
    losers_in_lineup = sum(1 for player in row if player in losers_var)
    changes_needed = min(losers_in_lineup, 3) if losers_in_lineup > 0 else 0
    changes_made = 0

    # Convert row to dictionary with roster positions, skipping stat columns.
    roster = {}
    for col, player in zip(row.index, row):
        if col not in ['salary', 'median', 'Own', 'Finish_percentile', 'Dupes', 'Lineup Edge']:
            roster[col] = {
                'name': player,
                'position': map_dict['pos_map'].get(player, '').split('/'),
                'team': map_dict['team_map'].get(player, ''),
                'salary': map_dict['salary_map'].get(player, 0),
                'median': map_dict['proj_map'].get(player, 0),
                'ownership': map_dict['own_map'].get(player, 0)
            }
            total_salary += roster[col]['salary']
            used_players.add(player)

    # Only proceed with ownership-based optimization if we have losers in the lineup.
    if changes_needed > 0:
        # Randomize the order of positions to optimize.
        roster_positions = list(roster.items())
        random.shuffle(roster_positions)

        for roster_pos, current in roster_positions:
            # Stop if we've made enough changes.
            if changes_made >= changes_needed:
                break

            # Skip players from removed teams and the losers themselves.
            if current['team'] in remove_teams_var or current['name'] in losers_var:
                continue

            valid_positions = list(position_rules[roster_pos])
            random.shuffle(valid_positions)
            better_options = []

            # Find valid replacements with LOWER ownership; median may drop by
            # at most 3, and salary must stay within 1000 of the cap.
            for pos in valid_positions:
                if pos in position_groups:
                    pos_options = [
                        p for p in position_groups[pos]
                        if p['ownership'] < current['ownership']
                        and p['median'] >= current['median'] - 3
                        and (total_salary - current['salary'] + p['salary']) <= salary_cap
                        and (total_salary - current['salary'] + p['salary']) >= salary_cap - 1000
                        and p['player_names'] not in used_players
                        and any(valid_pos in p['positions'] for valid_pos in valid_positions)
                        and map_dict['team_map'].get(p['player_names']) not in remove_teams_var
                    ]
                    better_options.extend(pos_options)

            if better_options:
                # Remove duplicates (same player under several positions).
                better_options = {opt['player_names']: opt for opt in better_options}.values()

                # NOTE(review): takes the MAX ownership among the lower-owned
                # candidates, i.e. the smallest ownership decrease — confirm
                # this is intended rather than min().
                best_replacement = max(better_options, key=lambda x: x['ownership'])

                # Update the lineup and the running salary / used-player trackers.
                used_players.remove(current['name'])
                used_players.add(best_replacement['player_names'])
                total_salary = total_salary - current['salary'] + best_replacement['salary']
                roster[roster_pos] = {
                    'name': best_replacement['player_names'],
                    'position': map_dict['pos_map'][best_replacement['player_names']].split('/'),
                    'team': map_dict['team_map'][best_replacement['player_names']],
                    'salary': best_replacement['salary'],
                    'median': best_replacement['median'],
                    'ownership': best_replacement['ownership']
                }
                changes_made += 1

    # Return optimized lineup maintaining original column order.
    return [roster[pos]['name'] for pos in row.index if pos in roster]
533
+
534
+ # Create a progress bar
535
+ progress_bar = st.progress(0)
536
+ status_text = st.empty()
537
+
538
+ # Process each lineup
539
+ optimized_lineups = []
540
+ total_lineups = len(st.session_state['portfolio'])
541
+
542
+ for idx, row in st.session_state['portfolio'].iterrows():
543
+ # First optimization pass
544
+ first_pass = optimize_lineup(row)
545
+ first_pass_series = pd.Series(first_pass, index=row.index)
546
+
547
+ second_pass = optimize_lineup(first_pass_series)
548
+ second_pass_series = pd.Series(second_pass, index=row.index)
549
+
550
+ third_pass = optimize_lineup(second_pass_series)
551
+ third_pass_series = pd.Series(third_pass, index=row.index)
552
+
553
+ fourth_pass = optimize_lineup(third_pass_series)
554
+ fourth_pass_series = pd.Series(fourth_pass, index=row.index)
555
+
556
+ fifth_pass = optimize_lineup(fourth_pass_series)
557
+ fifth_pass_series = pd.Series(fifth_pass, index=row.index)
558
+
559
+ # Second optimization pass
560
+ final_lineup = optimize_lineup(fifth_pass_series)
561
+ optimized_lineups.append(final_lineup)
562
+
563
+ if 'Optimize' in swap_var:
564
+ progress = (idx + 1) / total_lineups
565
+ progress_bar.progress(progress)
566
+ status_text.text(f'Optimizing Lineups {idx + 1} of {total_lineups}')
567
+ else:
568
+ pass
569
+
570
+ # Create new dataframe with optimized lineups
571
+ if 'Optimize' in swap_var:
572
+ st.session_state['optimized_df_medians'] = pd.DataFrame(optimized_lineups, columns=st.session_state['portfolio'].columns)
573
+ else:
574
+ st.session_state['optimized_df_medians'] = st.session_state['portfolio']
575
+
576
+ # Create a progress bar
577
+ progress_bar_winners = st.progress(0)
578
+ status_text_winners = st.empty()
579
+
580
+ # Process each lineup
581
+ optimized_lineups_winners = []
582
+ total_lineups = len(st.session_state['optimized_df_medians'])
583
+
584
+ for idx, row in st.session_state['optimized_df_medians'].iterrows():
585
+
586
+ final_lineup = optimize_lineup_winners(row)
587
+ optimized_lineups_winners.append(final_lineup)
588
+
589
+ if 'Decrease volatility' in swap_var:
590
+ progress_winners = (idx + 1) / total_lineups
591
+ progress_bar_winners.progress(progress_winners)
592
+ status_text_winners.text(f'Lowering Volatility around Winners {idx + 1} of {total_lineups}')
593
+ else:
594
+ pass
595
+
596
+ # Create new dataframe with optimized lineups
597
+ if 'Decrease volatility' in swap_var:
598
+ st.session_state['optimized_df_winners'] = pd.DataFrame(optimized_lineups_winners, columns=st.session_state['optimized_df_medians'].columns)
599
+ else:
600
+ st.session_state['optimized_df_winners'] = st.session_state['optimized_df_medians']
601
+
602
+ # Create a progress bar
603
+ progress_bar_losers = st.progress(0)
604
+ status_text_losers = st.empty()
605
+
606
+ # Process each lineup
607
+ optimized_lineups_losers = []
608
+ total_lineups = len(st.session_state['optimized_df_winners'])
609
+
610
+ for idx, row in st.session_state['optimized_df_winners'].iterrows():
611
+
612
+ final_lineup = optimize_lineup_losers(row)
613
+ optimized_lineups_losers.append(final_lineup)
614
+
615
+ if 'Increase volatility' in swap_var:
616
+ progress_losers = (idx + 1) / total_lineups
617
+ progress_bar_losers.progress(progress_losers)
618
+ status_text_losers.text(f'Increasing Volatility around Losers {idx + 1} of {total_lineups}')
619
+ else:
620
+ pass
621
+
622
+ # Create new dataframe with optimized lineups
623
+ if 'Increase volatility' in swap_var:
624
+ st.session_state['optimized_df'] = pd.DataFrame(optimized_lineups_losers, columns=st.session_state['optimized_df_winners'].columns)
625
+ else:
626
+ st.session_state['optimized_df'] = st.session_state['optimized_df_winners']
627
+
628
+ # Calculate new stats for optimized lineups
629
+ st.session_state['optimized_df']['salary'] = st.session_state['optimized_df'].apply(
630
+ lambda row: sum(map_dict['salary_map'].get(player, 0) for player in row if player in map_dict['salary_map']), axis=1
631
+ )
632
+ st.session_state['optimized_df']['median'] = st.session_state['optimized_df'].apply(
633
+ lambda row: sum(map_dict['proj_map'].get(player, 0) for player in row if player in map_dict['proj_map']), axis=1
634
+ )
635
+ st.session_state['optimized_df']['Own'] = st.session_state['optimized_df'].apply(
636
+ lambda row: sum(map_dict['own_map'].get(player, 0) for player in row if player in map_dict['own_map']), axis=1
637
+ )
638
+
639
+ # Display results
640
+ st.success('Optimization complete!')
641
+
642
+ if 'optimized_df' in st.session_state:
643
+ st.write("Increase in median highlighted in yellow, descrease in volatility highlighted in blue, increase in volatility highlighted in red:")
644
+ st.dataframe(
645
+ st.session_state['optimized_df'].style
646
+ .apply(highlight_changes, axis=1)
647
+ .apply(highlight_changes_winners, axis=1)
648
+ .apply(highlight_changes_losers, axis=1)
649
+ .background_gradient(axis=0)
650
+ .background_gradient(cmap='RdYlGn')
651
+ .format(precision=2),
652
+ height=1000,
653
+ use_container_width=True
654
+ )
655
+
656
+ # Option to download optimized lineups
657
+ if st.button('Prepare Late Swap Export'):
658
+ export_df = st.session_state['optimized_df'].copy()
659
+
660
+ # Map player names to their export IDs for all player columns
661
+ for col in export_df.columns:
662
+ if col not in ['salary', 'median', 'Own']:
663
+ export_df[col] = export_df[col].map(st.session_state['export_dict'])
664
+
665
+ csv = export_df.to_csv(index=False)
666
+ st.download_button(
667
+ label="Download CSV",
668
+ data=csv,
669
+ file_name="optimized_lineups.csv",
670
+ mime="text/csv"
671
+ )
672
+ else:
673
+ st.write("Current Portfolio")
674
+ st.dataframe(
675
+ st.session_state['portfolio'].style
676
+ .background_gradient(axis=0)
677
+ .background_gradient(cmap='RdYlGn')
678
+ .format(precision=2),
679
+ height=1000,
680
+ use_container_width=True
681
+ )
682
+
683
+ with tab3:
684
+ if st.button('Clear data', key='reset3'):
685
+ st.session_state.clear()
686
+ if 'portfolio' in st.session_state and 'projections_df' in st.session_state:
687
+ col1, col2, col3 = st.columns([1, 8, 1])
688
+ excluded_cols = ['salary', 'median', 'Own', 'Finish_percentile', 'Dupes', 'Stack', 'Win%', 'Lineup Edge']
689
+ with col1:
690
+ site_var = st.selectbox("Select Site", ['Draftkings', 'Fanduel'])
691
+ sport_var = st.selectbox("Select Sport", ['NFL', 'MLB', 'NBA', 'NHL', 'MMA'])
692
+ st.info("It currently does not matter what sport you select, it may matter in the future.")
693
+ type_var = st.selectbox("Select Game Type", ['Classic', 'Showdown'])
694
+ Contest_Size = st.number_input("Enter Contest Size", value=25000, min_value=1, step=1)
695
+ strength_var = st.selectbox("Select field strength", ['Average', 'Sharp', 'Weak'])
696
+ if site_var == 'Draftkings':
697
+ if type_var == 'Classic':
698
+ map_dict = {
699
+ 'pos_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['position'])),
700
+ 'team_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['team'])),
701
+ 'salary_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['salary'])),
702
+ 'proj_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['median'])),
703
+ 'own_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['ownership'])),
704
+ 'own_percent_rank':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['ownership'].rank(pct=True))),
705
+ 'cpt_salary_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['salary'])),
706
+ 'cpt_proj_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['median'] * 1.5)),
707
+ 'cpt_own_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['captain ownership']))
708
+ }
709
+ elif type_var == 'Showdown':
710
+ if sport_var == 'NFL':
711
+ map_dict = {
712
+ 'pos_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['position'])),
713
+ 'team_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['team'])),
714
+ 'salary_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['salary'])),
715
+ 'proj_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['median'])),
716
+ 'own_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['ownership'])),
717
+ 'own_percent_rank':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['ownership'].rank(pct=True))),
718
+ 'cpt_salary_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['salary'] * 1.5)),
719
+ 'cpt_proj_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['median'] * 1.5)),
720
+ 'cpt_own_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['captain ownership']))
721
+ }
722
+ elif sport_var != 'NFL':
723
+ map_dict = {
724
+ 'pos_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['position'])),
725
+ 'team_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['team'])),
726
+ 'salary_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['salary'] / 1.5)),
727
+ 'proj_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['median'])),
728
+ 'own_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['ownership'])),
729
+ 'own_percent_rank':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['ownership'].rank(pct=True))),
730
+ 'cpt_salary_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['salary'])),
731
+ 'cpt_proj_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['median'] * 1.5)),
732
+ 'cpt_own_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['captain ownership']))
733
+ }
734
+ elif site_var == 'Fanduel':
735
+ map_dict = {
736
+ 'pos_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['position'])),
737
+ 'team_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['team'])),
738
+ 'salary_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['salary'])),
739
+ 'proj_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['median'])),
740
+ 'own_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['ownership'])),
741
+ 'own_percent_rank':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['ownership'].rank(pct=True))),
742
+ 'cpt_salary_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['salary'])),
743
+ 'cpt_proj_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['median'] * 1.5)),
744
+ 'cpt_own_map':dict(zip(st.session_state['projections_df']['player_names'], st.session_state['projections_df']['captain ownership']))
745
+ }
746
+
747
+ if type_var == 'Classic':
748
+ st.session_state['portfolio']['salary'] = st.session_state['portfolio'].apply(lambda row: sum(map_dict['salary_map'].get(player, 0) for player in row), axis=1)
749
+ st.session_state['portfolio']['median'] = st.session_state['portfolio'].apply(lambda row: sum(map_dict['proj_map'].get(player, 0) for player in row), axis=1)
750
+ st.session_state['portfolio']['Own'] = st.session_state['portfolio'].apply(lambda row: sum(map_dict['own_map'].get(player, 0) for player in row), axis=1)
751
+ if stack_dict is not None:
752
+ st.session_state['portfolio']['Stack'] = st.session_state['portfolio'].index.map(stack_dict)
753
+ elif type_var == 'Showdown':
754
+ # Calculate salary (CPT uses cpt_salary_map, others use salary_map)
755
+ st.session_state['portfolio']['salary'] = st.session_state['portfolio'].apply(
756
+ lambda row: map_dict['cpt_salary_map'].get(row.iloc[0], 0) +
757
+ sum(map_dict['salary_map'].get(player, 0) for player in row.iloc[1:]),
758
+ axis=1
759
+ )
760
+
761
+ # Calculate median (CPT uses cpt_proj_map, others use proj_map)
762
+ st.session_state['portfolio']['median'] = st.session_state['portfolio'].apply(
763
+ lambda row: map_dict['cpt_proj_map'].get(row.iloc[0], 0) +
764
+ sum(map_dict['proj_map'].get(player, 0) for player in row.iloc[1:]),
765
+ axis=1
766
+ )
767
+
768
+ # Calculate ownership (CPT uses cpt_own_map, others use own_map)
769
+ st.session_state['portfolio']['Own'] = st.session_state['portfolio'].apply(
770
+ lambda row: map_dict['cpt_own_map'].get(row.iloc[0], 0) +
771
+ sum(map_dict['own_map'].get(player, 0) for player in row.iloc[1:]),
772
+ axis=1
773
+ )
774
+ with col3:
775
+ with st.form(key='filter_form'):
776
+ max_dupes = st.number_input("Max acceptable dupes?", value=1000, min_value=1, step=1)
777
+ min_salary = st.number_input("Min acceptable salary?", value=1000, min_value=1000, step=100)
778
+ max_salary = st.number_input("Max acceptable salary?", value=60000, min_value=1000, step=100)
779
+ max_finish_percentile = st.number_input("Max acceptable finish percentile?", value=.50, min_value=0.005, step=.001)
780
+ player_names = set()
781
+ for col in st.session_state['portfolio'].columns:
782
+ if col not in excluded_cols:
783
+ player_names.update(st.session_state['portfolio'][col].unique())
784
+ player_lock = st.multiselect("Lock players?", options=sorted(list(player_names)), default=[])
785
+ player_remove = st.multiselect("Remove players?", options=sorted(list(player_names)), default=[])
786
+ if stack_dict is not None:
787
+ stack_toggle = st.selectbox("Include specific stacks?", options=['All Stacks', 'Specific Stacks'], index=0)
788
+ stack_selections = st.multiselect("If Specific Stacks, Which to include?", options=sorted(list(set(stack_dict.values()))), default=[])
789
+ stack_remove = st.multiselect("If Specific Stacks, Which to remove?", options=sorted(list(set(stack_dict.values()))), default=[])
790
+
791
+ submitted = st.form_submit_button("Submit")
792
+
793
+ with col2:
794
+ st.session_state['portfolio'] = predict_dupes(st.session_state['portfolio'], map_dict, site_var, type_var, Contest_Size, strength_var)
795
+ st.session_state['portfolio'] = st.session_state['portfolio'][st.session_state['portfolio']['Dupes'] <= max_dupes]
796
+ st.session_state['portfolio'] = st.session_state['portfolio'][st.session_state['portfolio']['salary'] >= min_salary]
797
+ st.session_state['portfolio'] = st.session_state['portfolio'][st.session_state['portfolio']['salary'] <= max_salary]
798
+ st.session_state['portfolio'] = st.session_state['portfolio'][st.session_state['portfolio']['Finish_percentile'] <= max_finish_percentile]
799
+ if stack_dict is not None:
800
+ if stack_toggle == 'All Stacks':
801
+ st.session_state['portfolio'] = st.session_state['portfolio']
802
+ st.session_state['portfolio'] = st.session_state['portfolio'][~st.session_state['portfolio']['Stack'].isin(stack_remove)]
803
+ else:
804
+ st.session_state['portfolio'] = st.session_state['portfolio'][st.session_state['portfolio']['Stack'].isin(stack_selections)]
805
+ st.session_state['portfolio'] = st.session_state['portfolio'][~st.session_state['portfolio']['Stack'].isin(stack_remove)]
806
+ if player_remove:
807
+ # Create mask for lineups that contain any of the removed players
808
+ player_columns = [col for col in st.session_state['portfolio'].columns if col not in excluded_cols]
809
+ remove_mask = st.session_state['portfolio'][player_columns].apply(
810
+ lambda row: not any(player in list(row) for player in player_remove), axis=1
811
+ )
812
+ st.session_state['portfolio'] = st.session_state['portfolio'][remove_mask]
813
+
814
+ if player_lock:
815
+ # Create mask for lineups that contain all locked players
816
+ player_columns = [col for col in st.session_state['portfolio'].columns if col not in excluded_cols]
817
+
818
+ lock_mask = st.session_state['portfolio'][player_columns].apply(
819
+ lambda row: all(player in list(row) for player in player_lock), axis=1
820
+ )
821
+ st.session_state['portfolio'] = st.session_state['portfolio'][lock_mask]
822
+ export_file = st.session_state['portfolio'].copy()
823
+ st.session_state['portfolio'] = st.session_state['portfolio'].sort_values(by='median', ascending=False)
824
+ if csv_file is not None:
825
+ player_columns = [col for col in st.session_state['portfolio'].columns if col not in excluded_cols]
826
+ for col in player_columns:
827
+ export_file[col] = export_file[col].map(st.session_state['export_dict'])
828
+ with st.expander("Download options"):
829
+ if stack_dict is not None:
830
+ with st.form(key='stack_form'):
831
+ st.subheader("Stack Count Adjustments")
832
+ st.info("This allows you to fine tune the stacks that you wish to export. If you want to make sure you don't export any of a specific stack you can 0 it out.")
833
+ # Create a container for stack value inputs
834
+ sort_container = st.container()
835
+ with sort_container:
836
+ sort_var = st.selectbox("Sort export portfolio by:", options=['median', 'Lineup Edge', 'Own'])
837
+
838
+ # Get unique stack values
839
+ unique_stacks = sorted(list(set(stack_dict.values())))
840
+
841
+ # Create a dictionary to store stack multipliers
842
+ if 'stack_multipliers' not in st.session_state:
843
+ st.session_state.stack_multipliers = {stack: 0.0 for stack in unique_stacks}
844
+
845
+ # Create columns for the stack inputs
846
+ num_cols = 6 # Number of columns to display
847
+ for i in range(0, len(unique_stacks), num_cols):
848
+ cols = st.columns(num_cols)
849
+ for j, stack in enumerate(unique_stacks[i:i+num_cols]):
850
+ with cols[j]:
851
+ # Create a unique key for each number input
852
+ key = f"stack_count_{stack}"
853
+ # Get the current count of this stack in the portfolio
854
+ current_stack_count = len(st.session_state['portfolio'][st.session_state['portfolio']['Stack'] == stack])
855
+ # Create number input with current value and max value based on actual count
856
+ st.session_state.stack_multipliers[stack] = st.number_input(
857
+ f"{stack} count",
858
+ min_value=0.0,
859
+ max_value=float(current_stack_count),
860
+ value=float(current_stack_count),
861
+ step=1.0,
862
+ key=key
863
+ )
864
+
865
+ # Create a copy of the portfolio
866
+ portfolio_copy = st.session_state['portfolio'].copy()
867
+
868
+ # Create a list to store selected rows
869
+ selected_rows = []
870
+
871
+ # For each stack, select the top N rows based on the count value
872
+ for stack in unique_stacks:
873
+ if stack in st.session_state.stack_multipliers:
874
+ count = int(st.session_state.stack_multipliers[stack])
875
+ # Get rows for this stack
876
+ stack_rows = portfolio_copy[portfolio_copy['Stack'] == stack]
877
+ # Sort by median and take top N rows
878
+ top_rows = stack_rows.nlargest(count, sort_var)
879
+ selected_rows.append(top_rows)
880
+
881
+ # Combine all selected rows
882
+ portfolio_copy = pd.concat(selected_rows)
883
+
884
+ # Update export_file with filtered data
885
+ export_file = portfolio_copy.copy()
886
+
887
+ submitted = st.form_submit_button("Submit")
888
+ if submitted:
889
+ st.write('Export portfolio updated!')
890
+
891
+ st.download_button(label="Download Portfolio", data=export_file.to_csv(index=False), file_name="portfolio.csv", mime="text/csv")
892
+ # Display the paginated dataframe first
893
+ st.dataframe(
894
+ st.session_state['portfolio'].style
895
+ .background_gradient(axis=0)
896
+ .background_gradient(cmap='RdYlGn')
897
+ .background_gradient(cmap='RdYlGn_r', subset=['Finish_percentile', 'Own', 'Dupes'])
898
+ .format(freq_format, precision=2),
899
+ height=1000,
900
+ use_container_width=True
901
+ )
902
+
903
+ # Add pagination controls below the dataframe
904
+ total_rows = len(st.session_state['portfolio'])
905
+ rows_per_page = 500
906
+ total_pages = (total_rows + rows_per_page - 1) // rows_per_page # Ceiling division
907
+
908
+ # Initialize page number in session state if not exists
909
+ if 'current_page' not in st.session_state:
910
+ st.session_state.current_page = 1
911
+
912
+ # Display current page range info and pagination control in a single line
913
+ st.write(
914
+ f"Showing rows {(st.session_state.current_page - 1) * rows_per_page + 1} "
915
+ f"to {min(st.session_state.current_page * rows_per_page, total_rows)} of {total_rows}"
916
+ )
917
+
918
+ # Add page number input
919
+ st.session_state.current_page = st.number_input(
920
+ f"Page (1-{total_pages})",
921
+ min_value=1,
922
+ max_value=total_pages,
923
+ value=st.session_state.current_page
924
+ )
925
+
926
+ # Calculate start and end indices for current page
927
+ start_idx = (st.session_state.current_page - 1) * rows_per_page
928
+ end_idx = min(start_idx + rows_per_page, total_rows)
929
+
930
+ # Get the subset of data for the current page
931
+ current_page_data = st.session_state['portfolio'].iloc[start_idx:end_idx]
932
+
933
+ # Create player summary dataframe
934
+ player_stats = []
935
+ player_columns = [col for col in st.session_state['portfolio'].columns if col not in excluded_cols]
936
+
937
+ if type_var == 'Showdown':
938
+ # Handle Captain positions
939
+ for player in player_names:
940
+ # Create mask for lineups where this player is Captain (first column)
941
+ cpt_mask = st.session_state['portfolio'][player_columns[0]] == player
942
+
943
+ if cpt_mask.any():
944
+ player_stats.append({
945
+ 'Player': f"{player} (CPT)",
946
+ 'Lineup Count': cpt_mask.sum(),
947
+ 'Avg Median': st.session_state['portfolio'][cpt_mask]['median'].mean(),
948
+ 'Avg Own': st.session_state['portfolio'][cpt_mask]['Own'].mean(),
949
+ 'Avg Dupes': st.session_state['portfolio'][cpt_mask]['Dupes'].mean(),
950
+ 'Avg Finish %': st.session_state['portfolio'][cpt_mask]['Finish_percentile'].mean(),
951
+ 'Avg Lineup Edge': st.session_state['portfolio'][cpt_mask]['Lineup Edge'].mean(),
952
+ })
953
+
954
+ # Create mask for lineups where this player is FLEX (other columns)
955
+ flex_mask = st.session_state['portfolio'][player_columns[1:]].apply(
956
+ lambda row: player in list(row), axis=1
957
+ )
958
+
959
+ if flex_mask.any():
960
+ player_stats.append({
961
+ 'Player': f"{player} (FLEX)",
962
+ 'Lineup Count': flex_mask.sum(),
963
+ 'Avg Median': st.session_state['portfolio'][flex_mask]['median'].mean(),
964
+ 'Avg Own': st.session_state['portfolio'][flex_mask]['Own'].mean(),
965
+ 'Avg Dupes': st.session_state['portfolio'][flex_mask]['Dupes'].mean(),
966
+ 'Avg Finish %': st.session_state['portfolio'][flex_mask]['Finish_percentile'].mean(),
967
+ 'Avg Lineup Edge': st.session_state['portfolio'][flex_mask]['Lineup Edge'].mean(),
968
+ })
969
+ else:
970
+ # Original Classic format processing
971
+ for player in player_names:
972
+ player_mask = st.session_state['portfolio'][player_columns].apply(
973
+ lambda row: player in list(row), axis=1
974
+ )
975
+
976
+ if player_mask.any():
977
+ player_stats.append({
978
+ 'Player': player,
979
+ 'Lineup Count': player_mask.sum(),
980
+ 'Avg Median': st.session_state['portfolio'][player_mask]['median'].mean(),
981
+ 'Avg Own': st.session_state['portfolio'][player_mask]['Own'].mean(),
982
+ 'Avg Dupes': st.session_state['portfolio'][player_mask]['Dupes'].mean(),
983
+ 'Avg Finish %': st.session_state['portfolio'][player_mask]['Finish_percentile'].mean(),
984
+ 'Avg Lineup Edge': st.session_state['portfolio'][player_mask]['Lineup Edge'].mean(),
985
+ })
986
+
987
+ player_summary = pd.DataFrame(player_stats)
988
+ player_summary = player_summary.sort_values('Lineup Count', ascending=False)
989
+
990
+ st.subheader("Player Summary")
991
+ st.dataframe(
992
+ player_summary.style
993
+ .background_gradient(axis=0).background_gradient(cmap='RdYlGn').background_gradient(cmap='RdYlGn_r', subset=['Avg Finish %', 'Avg Own', 'Avg Dupes'])
994
+ .format({
995
+ 'Avg Median': '{:.2f}',
996
+ 'Avg Own': '{:.2f}',
997
+ 'Avg Dupes': '{:.2f}',
998
+ 'Avg Finish %': '{:.2%}',
999
+ 'Avg Lineup Edge': '{:.2%}'
1000
+ }),
1001
+ height=400,
1002
+ use_container_width=True
1003
+ )
app.yaml ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ runtime: python
2
+ env: flex
3
+
4
+ runtime_config:
5
+ python_version: 3
6
+
7
+ entrypoint: streamlit run app.py --server.port $PORT
8
+
9
+ automatic_scaling:
10
+ max_num_instances: 200
requirements.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ streamlit
2
+ gspread
3
+ openpyxl
4
+ matplotlib
5
+ fuzzywuzzy
6
+ pulp
7
+ docker
8
+ plotly
9
+ scipy
10
+ pymongo