Multichem committed on
Commit
858bb40
·
1 Parent(s): 23cfc60

Update app.py

Files changed (1)
  1. app.py +236 -420
app.py CHANGED
@@ -9,6 +9,8 @@ import numpy as np
9
  import pandas as pd
10
  import streamlit as st
11
  import gspread
 
 
12
 
13
  @st.cache_resource
14
  def init_conn():
@@ -28,22 +30,17 @@ def init_conn():
28
  "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/gspread-connection%40sheets-api-connect-378620.iam.gserviceaccount.com"
29
  }
30
 
31
- gc = gspread.service_account_from_dict(credentials)
32
- return gc
33
-
34
- gc = init_conn()
35
-
36
- game_format = {'Win Percentage': '{:.2%}','First Inning Lead Percentage': '{:.2%}',
37
- 'Fifth Inning Lead Percentage': '{:.2%}', '8+ runs': '{:.2%}', 'DK LevX': '{:.2%}', 'FD LevX': '{:.2%}'}
38
 
39
- player_roo_format = {'Top_finish': '{:.2%}','Top_5_finish': '{:.2%}', 'Top_10_finish': '{:.2%}', '20+%': '{:.2%}', '2x%': '{:.2%}', '3x%': '{:.2%}',
40
- '4x%': '{:.2%}','GPP%': '{:.2%}'}
41
 
42
  freq_format = {'Proj Own': '{:.2%}', 'Exposure': '{:.2%}', 'Edge': '{:.2%}'}
43
 
44
  @st.cache_resource(ttl=600)
45
  def load_dk_player_projections():
46
- sh = gc.open_by_url('https://docs.google.com/spreadsheets/d/1I_1Ve3F4tftgfLQQoRKOJ351XfEG48s36OxXUKxmgS8/edit#gid=1391856348')
47
  worksheet = sh.worksheet('SD_Projections')
48
  load_display = pd.DataFrame(worksheet.get_all_records())
49
  load_display.rename(columns={"PPR": "Median", "name": "Player"}, inplace = True)
@@ -51,13 +48,12 @@ def load_dk_player_projections():
51
  load_display['Ceiling'] = load_display['Median'] + (load_display['Median'] * .75)
52
  load_display.replace('', np.nan, inplace=True)
53
  raw_display = load_display.dropna(subset=['Median'])
54
- del load_display
55
 
56
  return raw_display
57
 
58
  @st.cache_resource(ttl=600)
59
  def load_fd_player_projections():
60
- sh = gc.open_by_url('https://docs.google.com/spreadsheets/d/1I_1Ve3F4tftgfLQQoRKOJ351XfEG48s36OxXUKxmgS8/edit#gid=1391856348')
61
  worksheet = sh.worksheet('FD_SD_Projections')
62
  load_display = pd.DataFrame(worksheet.get_all_records())
63
  load_display.rename(columns={"Half_PPR": "Median", "name": "Player"}, inplace = True)
@@ -65,13 +61,12 @@ def load_fd_player_projections():
65
  load_display['Ceiling'] = load_display['Median'] + (load_display['Median'] * .75)
66
  load_display.replace('', np.nan, inplace=True)
67
  raw_display = load_display.dropna(subset=['Median'])
68
- del load_display
69
 
70
  return raw_display
71
 
72
  @st.cache_resource(ttl=600)
73
  def load_dk_player_projections_2():
74
- sh = gc.open_by_url('https://docs.google.com/spreadsheets/d/1I_1Ve3F4tftgfLQQoRKOJ351XfEG48s36OxXUKxmgS8/edit#gid=1391856348')
75
  worksheet = sh.worksheet('SD_Projections_2')
76
  load_display = pd.DataFrame(worksheet.get_all_records())
77
  load_display.rename(columns={"PPR": "Median", "name": "Player"}, inplace = True)
@@ -79,13 +74,12 @@ def load_dk_player_projections_2():
79
  load_display['Ceiling'] = load_display['Median'] + (load_display['Median'] * .75)
80
  load_display.replace('', np.nan, inplace=True)
81
  raw_display = load_display.dropna(subset=['Median'])
82
- del load_display
83
 
84
  return raw_display
85
 
86
  @st.cache_resource(ttl=600)
87
  def load_fd_player_projections_2():
88
- sh = gc.open_by_url('https://docs.google.com/spreadsheets/d/1I_1Ve3F4tftgfLQQoRKOJ351XfEG48s36OxXUKxmgS8/edit#gid=1391856348')
89
  worksheet = sh.worksheet('FD_SD_Projections_2')
90
  load_display = pd.DataFrame(worksheet.get_all_records())
91
  load_display.rename(columns={"Half_PPR": "Median", "name": "Player"}, inplace = True)
@@ -93,60 +87,106 @@ def load_fd_player_projections_2():
93
  load_display['Ceiling'] = load_display['Median'] + (load_display['Median'] * .75)
94
  load_display.replace('', np.nan, inplace=True)
95
  raw_display = load_display.dropna(subset=['Median'])
96
- del load_display
97
 
98
  return raw_display
99
 
100
- @st.cache_data
101
- def convert_df_to_csv(df):
102
- return df.to_csv().encode('utf-8')
 
103
 
104
- def run_seed_frame(seed_depth1, Strength_var, strength_grow, Teams_used, Total_Runs):
 
 
105
  RunsVar = 1
106
  seed_depth_def = seed_depth1
107
  Strength_var_def = Strength_var
108
  strength_grow_def = strength_grow
109
  Teams_used_def = Teams_used
110
  Total_Runs_def = Total_Runs
 
 
 
111
  while RunsVar <= seed_depth_def:
112
  if RunsVar <= 3:
113
  FieldStrength = Strength_var_def
114
- RandomPortfolio, maps_dict = get_correlated_portfolio_for_sim(Total_Runs_def * .1)
115
- FinalPortfolio = RandomPortfolio
116
- FinalPortfolio2, maps_dict2 = get_uncorrelated_portfolio_for_sim(Total_Runs_def * .1)
117
- FinalPortfolio = pd.concat([FinalPortfolio, FinalPortfolio2], axis=0)
118
  maps_dict.update(maps_dict2)
119
- del FinalPortfolio2
120
- del maps_dict2
121
  elif RunsVar > 3 and RunsVar <= 4:
122
  FieldStrength += (strength_grow_def + ((30 - len(Teams_used_def)) * .001))
123
- FinalPortfolio3, maps_dict3 = get_correlated_portfolio_for_sim(Total_Runs_def * .1)
124
- FinalPortfolio4, maps_dict4 = get_uncorrelated_portfolio_for_sim(Total_Runs_def * .1)
125
- FinalPortfolio = pd.concat([FinalPortfolio, FinalPortfolio3], axis=0)
126
- FinalPortfolio = pd.concat([FinalPortfolio, FinalPortfolio4], axis=0)
127
- FinalPortfolio = FinalPortfolio.drop_duplicates(subset = ['Projection', 'Own'],keep = 'last').reset_index(drop = True)
128
  maps_dict.update(maps_dict3)
129
  maps_dict.update(maps_dict4)
130
- del FinalPortfolio3
131
- del maps_dict3
132
- del FinalPortfolio4
133
- del maps_dict4
134
  elif RunsVar > 4:
135
  FieldStrength = 1
136
- FinalPortfolio3, maps_dict3 = get_correlated_portfolio_for_sim(Total_Runs_def * .1)
137
- FinalPortfolio4, maps_dict4 = get_uncorrelated_portfolio_for_sim(Total_Runs_def * .1)
138
- FinalPortfolio = pd.concat([FinalPortfolio, FinalPortfolio3], axis=0)
139
- FinalPortfolio = pd.concat([FinalPortfolio, FinalPortfolio4], axis=0)
140
- FinalPortfolio = FinalPortfolio.drop_duplicates(subset = ['Projection', 'Own'],keep = 'last').reset_index(drop = True)
141
- maps_dict.update(maps_dict3)
142
- maps_dict.update(maps_dict4)
143
- del FinalPortfolio3
144
- del maps_dict3
145
- del FinalPortfolio4
146
- del maps_dict4
147
  RunsVar += 1
148
-
149
- return FinalPortfolio, maps_dict
150
 
151
  def create_overall_dfs(pos_players, table_name, dict_name, pos):
152
  pos_players = pos_players.sort_values(by='Value', ascending=False)
@@ -155,9 +195,6 @@ def create_overall_dfs(pos_players, table_name, dict_name, pos):
155
  overall_table_name = overall_table_name.assign(Var = range(0,len(overall_table_name)))
156
  overall_dict_name = pd.Series(overall_table_name.Player.values, index=overall_table_name.Var).to_dict()
157
 
158
- del pos_players
159
- del table_name_raw
160
-
161
  return overall_table_name, overall_dict_name
162
 
163
 
@@ -176,7 +213,7 @@ def get_overall_merged_df():
176
 
177
  return df_out, ref_dict
178
 
179
- def create_random_portfolio(Total_Sample_Size):
180
 
181
  O_merge, full_pos_player_dict = get_overall_merged_df()
182
  Overall_Merge = O_merge[['Var', 'Player', 'Team', 'Salary', 'Median', 'Own']].copy()
@@ -201,11 +238,11 @@ def create_random_portfolio(Total_Sample_Size):
201
 
202
  return RandomPortfolio, maps_dict, ranges_dict, full_pos_player_dict
203
 
204
- def get_correlated_portfolio_for_sim(Total_Sample_Size):
205
 
206
- sizesplit = round(Total_Sample_Size * .50)
207
 
208
- RandomPortfolio, maps_dict, ranges_dict, full_pos_player_dict = create_random_portfolio(sizesplit)
209
 
210
  RandomPortfolio['CPT'] = pd.Series(list(RandomPortfolio['CPT'].map(full_pos_player_dict['pos_dicts'][0])), dtype="string[pyarrow]")
211
  RandomPortfolio['FLEX1'] = pd.Series(list(RandomPortfolio['FLEX1'].map(full_pos_player_dict['pos_dicts'][0])), dtype="string[pyarrow]")
@@ -218,10 +255,6 @@ def get_correlated_portfolio_for_sim(Total_Sample_Size):
218
  RandomPortfolio = RandomPortfolio[RandomPortfolio['plyr_count'] == 7].drop(columns=['plyr_list','plyr_count']).\
219
  reset_index(drop=True)
220
 
221
- del sizesplit
222
- del full_pos_player_dict
223
- del ranges_dict
224
-
225
  RandomPortfolio['CPTs'] = RandomPortfolio['CPT'].map(maps_dict['Salary_map']).astype(np.int32) * 1.5
226
  RandomPortfolio['FLEX1s'] = RandomPortfolio['FLEX1'].map(maps_dict['Salary_map']).astype(np.int32)
227
  RandomPortfolio['FLEX2s'] = RandomPortfolio['FLEX2'].map(maps_dict['Salary_map']).astype(np.int32)
@@ -249,7 +282,6 @@ def get_correlated_portfolio_for_sim(Total_Sample_Size):
249
  portHeaderList.append('Own')
250
 
251
  RandomPortArray = RandomPortfolio.to_numpy()
252
- del RandomPortfolio
253
 
254
  RandomPortArray = np.c_[RandomPortArray, np.einsum('ij->i',RandomPortArray[:,7:13].astype(int))]
255
  RandomPortArray = np.c_[RandomPortArray, np.einsum('ij->i',RandomPortArray[:,13:19].astype(np.double))]
@@ -258,9 +290,6 @@ def get_correlated_portfolio_for_sim(Total_Sample_Size):
258
  RandomPortArrayOut = np.delete(RandomPortArray, np.s_[7:25], axis=1)
259
  RandomPortfolioDF = pd.DataFrame(RandomPortArrayOut, columns = ['CPT', 'FLEX1', 'FLEX2', 'FLEX3', 'FLEX4', 'FLEX5', 'User/Field', 'Salary', 'Projection', 'Own'])
260
  RandomPortfolioDF = RandomPortfolioDF.sort_values(by=Sim_function, ascending=False)
261
- del RandomPortArray
262
- del RandomPortArrayOut
263
- # st.table(RandomPortfolioDF.head(50))
264
 
265
  if insert_port == 1:
266
  CleanPortfolio['Salary'] = sum([CleanPortfolio['CPT'].map(up_dict['Salary_map']) * 1.5,
@@ -300,11 +329,11 @@ def get_correlated_portfolio_for_sim(Total_Sample_Size):
300
 
301
  return RandomPortfolio, maps_dict
302
 
303
- def get_uncorrelated_portfolio_for_sim(Total_Sample_Size):
304
 
305
- sizesplit = round(Total_Sample_Size * .50)
306
 
307
- RandomPortfolio, maps_dict, ranges_dict, full_pos_player_dict = create_random_portfolio(sizesplit)
308
 
309
  RandomPortfolio['CPT'] = pd.Series(list(RandomPortfolio['CPT'].map(full_pos_player_dict['pos_dicts'][0])), dtype="string[pyarrow]")
310
  RandomPortfolio['FLEX1'] = pd.Series(list(RandomPortfolio['FLEX1'].map(full_pos_player_dict['pos_dicts'][0])), dtype="string[pyarrow]")
@@ -317,10 +346,6 @@ def get_uncorrelated_portfolio_for_sim(Total_Sample_Size):
317
  RandomPortfolio = RandomPortfolio[RandomPortfolio['plyr_count'] == 7].drop(columns=['plyr_list','plyr_count']).\
318
  reset_index(drop=True)
319
 
320
- del sizesplit
321
- del full_pos_player_dict
322
- del ranges_dict
323
-
324
  RandomPortfolio['CPTs'] = RandomPortfolio['CPT'].map(maps_dict['Salary_map']).astype(np.int32) * 1.5
325
  RandomPortfolio['FLEX1s'] = RandomPortfolio['FLEX1'].map(maps_dict['Salary_map']).astype(np.int32)
326
  RandomPortfolio['FLEX2s'] = RandomPortfolio['FLEX2'].map(maps_dict['Salary_map']).astype(np.int32)
@@ -348,7 +373,6 @@ def get_uncorrelated_portfolio_for_sim(Total_Sample_Size):
348
  portHeaderList.append('Own')
349
 
350
  RandomPortArray = RandomPortfolio.to_numpy()
351
- del RandomPortfolio
352
 
353
  RandomPortArray = np.c_[RandomPortArray, np.einsum('ij->i',RandomPortArray[:,7:13].astype(int))]
354
  RandomPortArray = np.c_[RandomPortArray, np.einsum('ij->i',RandomPortArray[:,13:19].astype(np.double))]
@@ -357,9 +381,6 @@ def get_uncorrelated_portfolio_for_sim(Total_Sample_Size):
357
  RandomPortArrayOut = np.delete(RandomPortArray, np.s_[7:25], axis=1)
358
  RandomPortfolioDF = pd.DataFrame(RandomPortArrayOut, columns = ['CPT', 'FLEX1', 'FLEX2', 'FLEX3', 'FLEX4', 'FLEX5', 'User/Field', 'Salary', 'Projection', 'Own'])
359
  RandomPortfolioDF = RandomPortfolioDF.sort_values(by=Sim_function, ascending=False)
360
- del RandomPortArray
361
- del RandomPortArrayOut
362
- # st.table(RandomPortfolioDF.head(50))
363
 
364
  if insert_port == 1:
365
  CleanPortfolio['Salary'] = sum([CleanPortfolio['CPT'].map(up_dict['Salary_map']) * 1.5,
@@ -399,14 +420,6 @@ def get_uncorrelated_portfolio_for_sim(Total_Sample_Size):
399
 
400
  return RandomPortfolio, maps_dict
401
 
402
- dk_roo_raw = load_dk_player_projections()
403
- dk_roo_raw_2 = load_dk_player_projections_2()
404
- fd_roo_raw = load_fd_player_projections()
405
- fd_roo_raw_2 = load_fd_player_projections_2()
406
-
407
- static_exposure = pd.DataFrame(columns=['Player', 'count'])
408
- overall_exposure = pd.DataFrame(columns=['Player', 'count'])
409
-
410
  tab1, tab2 = st.tabs(['Uploads', 'Contest Sim'])
411
 
412
  with tab1:
@@ -470,8 +483,6 @@ with tab1:
470
  split_portfolio['FLEX4'].map(player_salary_dict),
471
  split_portfolio['FLEX5'].map(player_salary_dict)])
472
 
473
- del player_salary_dict
474
-
475
  split_portfolio['Projection'] = sum([split_portfolio['CPT'].map(player_proj_dict) * 1.5,
476
  split_portfolio['FLEX1'].map(player_proj_dict),
477
  split_portfolio['FLEX2'].map(player_proj_dict),
@@ -479,8 +490,6 @@ with tab1:
479
  split_portfolio['FLEX4'].map(player_proj_dict),
480
  split_portfolio['FLEX5'].map(player_proj_dict)])
481
 
482
- del player_proj_dict
483
-
484
  split_portfolio['Ownership'] = sum([split_portfolio['CPT'].map(player_own_dict) / 4,
485
  split_portfolio['FLEX1'].map(player_own_dict),
486
  split_portfolio['FLEX2'].map(player_own_dict),
@@ -488,21 +497,6 @@ with tab1:
488
  split_portfolio['FLEX4'].map(player_own_dict),
489
  split_portfolio['FLEX5'].map(player_own_dict)])
490
 
491
- del player_own_dict
492
-
493
- split_portfolio['CPT_team'] = split_portfolio['CPT'].map(player_team_dict)
494
- split_portfolio['FLEX1_team'] = split_portfolio['FLEX1'].map(player_team_dict)
495
- split_portfolio['FLEX2_team'] = split_portfolio['FLEX2'].map(player_team_dict)
496
- split_portfolio['FLEX3_team'] = split_portfolio['FLEX3'].map(player_team_dict)
497
- split_portfolio['FLEX4_team'] = split_portfolio['FLEX4'].map(player_team_dict)
498
- split_portfolio['FLEX5_team'] = split_portfolio['FLEX5'].map(player_team_dict)
499
-
500
- split_portfolio = split_portfolio[['CPT', 'FLEX1', 'FLEX2', 'FLEX3', 'FLEX4', 'FLEX5', 'Salary', 'Projection', 'Ownership', 'CPT_team',
501
- 'FLEX1_team', 'FLEX2_team', 'FLEX3_team', 'FLEX4_team', 'FLEX5_team']]
502
-
503
- split_portfolio['Main_Stack'] = 0
504
- split_portfolio['Main_Stack_Size'] = 0
505
- split_portfolio['Main_Stack_Size'] = 0
506
  except:
507
  portfolio_dataframe.columns=["CPT", "FLEX1", "FLEX2", "FLEX3", "FLEX4", "FLEX5"]
508
  split_portfolio = portfolio_dataframe
@@ -534,8 +528,6 @@ with tab1:
534
  split_portfolio['FLEX4'].map(player_salary_dict),
535
  split_portfolio['FLEX5'].map(player_salary_dict)])
536
 
537
- del player_salary_dict
538
-
539
  split_portfolio['Projection'] = sum([split_portfolio['CPT'].map(player_proj_dict) * 1.5,
540
  split_portfolio['FLEX1'].map(player_proj_dict),
541
  split_portfolio['FLEX2'].map(player_proj_dict),
@@ -543,8 +535,6 @@ with tab1:
543
  split_portfolio['FLEX4'].map(player_proj_dict),
544
  split_portfolio['FLEX5'].map(player_proj_dict)])
545
 
546
- del player_proj_dict
547
-
548
  split_portfolio['Ownership'] = sum([split_portfolio['CPT'].map(player_own_dict) / 4,
549
  split_portfolio['FLEX1'].map(player_own_dict),
550
  split_portfolio['FLEX2'].map(player_own_dict),
@@ -552,21 +542,6 @@ with tab1:
552
  split_portfolio['FLEX4'].map(player_own_dict),
553
  split_portfolio['FLEX5'].map(player_own_dict)])
554
 
555
- del player_own_dict
556
-
557
- split_portfolio['CPT_team'] = split_portfolio['CPT'].map(player_team_dict)
558
- split_portfolio['FLEX1_team'] = split_portfolio['FLEX1'].map(player_team_dict)
559
- split_portfolio['FLEX2_team'] = split_portfolio['FLEX2'].map(player_team_dict)
560
- split_portfolio['FLEX3_team'] = split_portfolio['FLEX3'].map(player_team_dict)
561
- split_portfolio['FLEX4_team'] = split_portfolio['FLEX4'].map(player_team_dict)
562
- split_portfolio['FLEX5_team'] = split_portfolio['FLEX5'].map(player_team_dict)
563
-
564
- split_portfolio = split_portfolio[['CPT', 'FLEX1', 'FLEX2', 'FLEX3', 'FLEX4', 'FLEX5', 'Salary', 'Projection', 'Ownership', 'CPT_team',
565
- 'FLEX1_team', 'FLEX2_team', 'FLEX3_team', 'FLEX4_team', 'FLEX5_team']]
566
-
567
- split_portfolio['Main_Stack'] = 0
568
- split_portfolio['Main_Stack_Size'] = 0
569
- split_portfolio['Main_Stack_Size'] = 0
570
  except:
571
  split_portfolio = portfolio_dataframe
572
 
@@ -591,8 +566,6 @@ with tab1:
591
  split_portfolio['FLEX4'].map(player_salary_dict),
592
  split_portfolio['FLEX5'].map(player_salary_dict)])
593
 
594
- del player_salary_dict
595
-
596
  split_portfolio['Projection'] = sum([split_portfolio['CPT'].map(player_proj_dict) * 1.5,
597
  split_portfolio['FLEX1'].map(player_proj_dict),
598
  split_portfolio['FLEX2'].map(player_proj_dict),
@@ -600,8 +573,6 @@ with tab1:
600
  split_portfolio['FLEX4'].map(player_proj_dict),
601
  split_portfolio['FLEX5'].map(player_proj_dict)])
602
 
603
- del player_proj_dict
604
-
605
  split_portfolio['Ownership'] = sum([split_portfolio['CPT'].map(player_own_dict) / 4,
606
  split_portfolio['FLEX1'].map(player_own_dict),
607
  split_portfolio['FLEX2'].map(player_own_dict),
@@ -609,96 +580,16 @@ with tab1:
609
  split_portfolio['FLEX4'].map(player_own_dict),
610
  split_portfolio['FLEX5'].map(player_own_dict)])
611
 
612
- del player_own_dict
613
-
614
- split_portfolio['CPT_team'] = split_portfolio['CPT'].map(player_team_dict)
615
- split_portfolio['FLEX1_team'] = split_portfolio['FLEX1'].map(player_team_dict)
616
- split_portfolio['FLEX2_team'] = split_portfolio['FLEX2'].map(player_team_dict)
617
- split_portfolio['FLEX3_team'] = split_portfolio['FLEX3'].map(player_team_dict)
618
- split_portfolio['FLEX4_team'] = split_portfolio['FLEX4'].map(player_team_dict)
619
- split_portfolio['FLEX5_team'] = split_portfolio['FLEX5'].map(player_team_dict)
620
-
621
- split_portfolio = split_portfolio[['CPT', 'FLEX1', 'FLEX2', 'FLEX3', 'FLEX4', 'FLEX5', 'Salary', 'Projection', 'Ownership', 'CPT_team',
622
- 'FLEX1_team', 'FLEX2_team', 'FLEX3_team', 'FLEX4_team', 'FLEX5_team']]
623
-
624
- split_portfolio['Main_Stack'] = 0
625
- split_portfolio['Main_Stack_Size'] = 0
626
- split_portfolio['Main_Stack_Size'] = 0
627
 
628
- for player_cols in split_portfolio.iloc[:, 0:6]:
629
- static_col_raw = split_portfolio[player_cols].value_counts()
630
- static_col = static_col_raw.to_frame()
631
- static_col.reset_index(inplace=True)
632
- static_col.columns = ['Player', 'count']
633
- static_exposure = pd.concat([static_exposure, static_col], ignore_index=True)
634
- static_exposure['Exposure'] = static_exposure['count'] / len(split_portfolio)
635
- static_exposure = static_exposure[['Player', 'Exposure']]
636
 
637
- del static_col_raw
638
- del static_col
639
- with st.container():
640
- col1, col2 = st.columns([3, 3])
641
-
642
- if portfolio_file is not None:
643
- with col1:
644
- st.write(len(portfolio_dataframe))
645
- team_split_var1 = st.radio("Are you wanting to isolate any lineups with specific main stacks?", ('Full Portfolio', 'Specific Stacks'))
646
- if team_split_var1 == 'Specific Stacks':
647
- team_var1 = st.multiselect('Which main stacks would you like to include in the Portfolio?', options = split_portfolio['Main_Stack'].unique())
648
- elif team_split_var1 == 'Full Portfolio':
649
- team_var1 = split_portfolio.Main_Stack.values.tolist()
650
- with col2:
651
- player_split_var1 = st.radio("Are you wanting to isolate any lineups with specific players?", ('Full Players', 'Specific Players'))
652
- if player_split_var1 == 'Specific Players':
653
- find_var1 = st.multiselect('Which players must be included in the lineups?', options = static_exposure['Player'].unique())
654
- elif player_split_var1 == 'Full Players':
655
- find_var1 = static_exposure.Player.values.tolist()
656
-
657
- split_portfolio = split_portfolio[split_portfolio['Main_Stack'].isin(team_var1)]
658
- if player_split_var1 == 'Specific Players':
659
- split_portfolio = split_portfolio[np.equal.outer(split_portfolio.to_numpy(copy=False), find_var1).any(axis=1).all(axis=1)]
660
- elif player_split_var1 == 'Full Players':
661
- split_portfolio = split_portfolio
662
-
663
- for player_cols in split_portfolio.iloc[:, 0:6]:
664
- exposure_col_raw = split_portfolio[player_cols].value_counts()
665
- exposure_col = exposure_col_raw.to_frame()
666
- exposure_col.reset_index(inplace=True)
667
- exposure_col.columns = ['Player', 'count']
668
- overall_exposure = pd.concat([overall_exposure, exposure_col], ignore_index=True)
669
- overall_exposure['Exposure'] = overall_exposure['count'] / len(split_portfolio)
670
- overall_exposure = overall_exposure.groupby('Player').sum()
671
- overall_exposure.reset_index(inplace=True)
672
- overall_exposure = overall_exposure[['Player', 'Exposure']]
673
- overall_exposure = overall_exposure.set_index('Player')
674
- overall_exposure = overall_exposure.sort_values(by='Exposure', ascending=False)
675
- overall_exposure['Exposure'] = overall_exposure['Exposure'].astype(float).map(lambda n: '{:.2%}'.format(n))
676
-
677
- with st.container():
678
- col1, col2 = st.columns([1, 6])
679
-
680
- with col1:
681
- if portfolio_file is not None:
682
- st.header('Exposure View')
683
- st.dataframe(overall_exposure)
684
-
685
- with col2:
686
- if portfolio_file is not None:
687
- st.header('Portfolio View')
688
- split_portfolio = split_portfolio.reset_index()
689
- split_portfolio['Lineup'] = split_portfolio['index'] + 1
690
- display_portfolio = split_portfolio[['Lineup', 'CPT', 'FLEX1', 'FLEX2', 'FLEX3', 'FLEX4', 'FLEX5', 'Salary', 'Main_Stack', 'Main_Stack_Size', 'Projection', 'Ownership']]
691
- hold_display = display_portfolio
692
- display_portfolio = display_portfolio.set_index('Lineup')
693
- st.dataframe(display_portfolio.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').background_gradient(cmap='RdYlGn_r', subset=['Ownership']).format(precision=2))
694
- del split_portfolio
695
- del exposure_col_raw
696
- del exposure_col
697
  with tab2:
698
- col1, col2 = st.columns([1, 5])
699
  with col1:
700
  if st.button("Load/Reset Data", key='reset1'):
701
  st.cache_data.clear()
 
 
702
  dk_roo_raw = load_dk_player_projections()
703
  dk_roo_raw_2 = load_dk_player_projections_2()
704
  fd_roo_raw = load_fd_player_projections()
@@ -720,10 +611,7 @@ with tab2:
720
  raw_baselines = dk_roo_raw
721
  elif slate_var1 == 'Paydirt (Secondary)':
722
  raw_baselines = dk_roo_raw_2
723
- del dk_roo_raw
724
- del dk_roo_raw_2
725
- del fd_roo_raw
726
- del fd_roo_raw_2
727
  st.info("If you are uploading a portfolio, note that there are adjustments to projections and deviation mapping to prevent 'Projection Bias' and create a fair simulation")
728
  insert_port1 = st.selectbox("Are you uploading a portfolio?", ('No', 'Yes'))
729
  if insert_port1 == 'Yes':
@@ -736,103 +624,92 @@ with tab2:
736
  elif contest_var1 == 'Medium':
737
  Contest_Size = 2500
738
  elif contest_var1 == 'Large':
739
- Contest_Size = 10000
740
- linenum_var1 = 1000
741
  strength_var1 = st.selectbox("How sharp is the field in the contest?", ('Not Very', 'Average', 'Very'))
742
  if strength_var1 == 'Not Very':
743
- Strength_var = 1
 
744
  scaling_var = 5
745
  elif strength_var1 == 'Average':
746
- Strength_var = .75
 
747
  scaling_var = 10
748
  elif strength_var1 == 'Very':
749
- Strength_var = .5
 
750
  scaling_var = 15
 
 
751
 
752
  with col2:
753
  with st.container():
754
  if st.button("Simulate Contest", key='sim1'):
755
- try:
756
- del dst_freq
757
- del flex_freq
758
- del te_freq
759
- del wr_freq
760
- del rb_freq
761
- del qb_freq
762
- del player_freq
763
- del Sim_Winner_Export
764
- del Sim_Winner_Frame
765
- except:
766
- pass
767
  with st.container():
768
- st.write('Contest Simulation Starting')
769
- Total_Runs = 1000000
770
- seed_depth1 = 5
771
- Total_Runs = 2500000
772
- if Contest_Size <= 1000:
773
- strength_grow = .01
774
- elif Contest_Size > 1000 and Contest_Size <= 2500:
775
- strength_grow = .025
776
- elif Contest_Size > 2500 and Contest_Size <= 5000:
777
- strength_grow = .05
778
- elif Contest_Size > 5000 and Contest_Size <= 20000:
779
- strength_grow = .075
780
- elif Contest_Size > 20000:
781
- strength_grow = .1
782
-
783
- field_growth = 100 * strength_grow
784
-
785
- Sort_function = 'Median'
786
- if Sort_function == 'Median':
787
- Sim_function = 'Projection'
788
- elif Sort_function == 'Own':
789
- Sim_function = 'Own'
790
 
791
  if slate_var1 == 'User':
792
- OwnFrame = proj_dataframe
793
- if contest_var1 == 'Large':
794
- OwnFrame['Own%'] = np.where((OwnFrame['Position'] == 'QB') & (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] == 'QB', 'Own'].mean() >= 0), OwnFrame['Own'] * (10 * (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] == 'QB', 'Own'].mean())/100) + OwnFrame.loc[OwnFrame['Position'] == 'QB', 'Own'].mean(), OwnFrame['Own'])
795
- OwnFrame['Own%'] = np.where((OwnFrame['Position'] != 'QB') & (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] != 'QB', 'Own'].mean() >= 0), OwnFrame['Own'] * (5 * (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] != 'QB', 'Own'].mean())/100) + OwnFrame.loc[OwnFrame['Position'] != 'QB', 'Own'].mean(), OwnFrame['Own%'])
796
- OwnFrame['Own%'] = np.where(OwnFrame['Own%'] > 75, 75, OwnFrame['Own%'])
797
- OwnFrame['Own'] = OwnFrame['Own%'] * (500 / OwnFrame['Own%'].sum())
798
- if contest_var1 == 'Medium':
799
- OwnFrame['Own%'] = np.where((OwnFrame['Position'] == 'QB') & (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] == 'QB', 'Own'].mean() >= 0), OwnFrame['Own'] * (6 * (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] == 'QB', 'Own'].mean())/100) + OwnFrame.loc[OwnFrame['Position'] == 'QB', 'Own'].mean(), OwnFrame['Own'])
800
- OwnFrame['Own%'] = np.where((OwnFrame['Position'] != 'QB') & (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] != 'QB', 'Own'].mean() >= 0), OwnFrame['Own'] * (3 * (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] != 'QB', 'Own'].mean())/100) + OwnFrame.loc[OwnFrame['Position'] != 'QB', 'Own'].mean(), OwnFrame['Own%'])
801
- OwnFrame['Own%'] = np.where(OwnFrame['Own%'] > 75, 75, OwnFrame['Own%'])
802
- OwnFrame['Own'] = OwnFrame['Own%'] * (500 / OwnFrame['Own%'].sum())
803
- if contest_var1 == 'Small':
804
- OwnFrame['Own%'] = np.where((OwnFrame['Position'] == 'QB') & (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] == 'QB', 'Own'].mean() >= 0), OwnFrame['Own'] * (3 * (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] == 'QB', 'Own'].mean())/100) + OwnFrame.loc[OwnFrame['Position'] == 'QB', 'Own'].mean(), OwnFrame['Own'])
805
- OwnFrame['Own%'] = np.where((OwnFrame['Position'] != 'QB') & (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] != 'QB', 'Own'].mean() >= 0), OwnFrame['Own'] * (1.5 * (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] != 'QB', 'Own'].mean())/100) + OwnFrame.loc[OwnFrame['Position'] != 'QB', 'Own'].mean(), OwnFrame['Own%'])
806
- OwnFrame['Own%'] = np.where(OwnFrame['Own%'] > 75, 75, OwnFrame['Own%'])
807
- OwnFrame['Own'] = OwnFrame['Own%'] * (500 / OwnFrame['Own%'].sum())
808
- Overall_Proj = OwnFrame[['Player', 'Team', 'Position', 'Median', 'Own', 'Salary']]
 
 
809
 
810
- del OwnFrame
 
811
 
812
  elif slate_var1 != 'User':
813
- initial_proj = raw_baselines
814
- drop_frame = initial_proj.drop_duplicates(subset = 'Player',keep = 'first')
815
- OwnFrame = drop_frame[['Player', 'Team', 'Position', 'Median', 'Own', 'Floor', 'Ceiling', 'Salary']]
816
- if contest_var1 == 'Large':
817
- OwnFrame['Own%'] = np.where((OwnFrame['Position'] == 'QB') & (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] == 'QB', 'Own'].mean() >= 0), OwnFrame['Own'] * (10 * (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] == 'QB', 'Own'].mean())/100) + OwnFrame.loc[OwnFrame['Position'] == 'QB', 'Own'].mean(), OwnFrame['Own'])
818
- OwnFrame['Own%'] = np.where((OwnFrame['Position'] != 'QB') & (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] != 'QB', 'Own'].mean() >= 0), OwnFrame['Own'] * (5 * (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] != 'QB', 'Own'].mean())/100) + OwnFrame.loc[OwnFrame['Position'] != 'QB', 'Own'].mean(), OwnFrame['Own%'])
819
- OwnFrame['Own%'] = np.where(OwnFrame['Own%'] > 75, 75, OwnFrame['Own%'])
820
- OwnFrame['Own'] = OwnFrame['Own%'] * (500 / OwnFrame['Own%'].sum())
821
- if contest_var1 == 'Medium':
822
- OwnFrame['Own%'] = np.where((OwnFrame['Position'] == 'QB') & (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] == 'QB', 'Own'].mean() >= 0), OwnFrame['Own'] * (6 * (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] == 'QB', 'Own'].mean())/100) + OwnFrame.loc[OwnFrame['Position'] == 'QB', 'Own'].mean(), OwnFrame['Own'])
823
- OwnFrame['Own%'] = np.where((OwnFrame['Position'] != 'QB') & (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] != 'QB', 'Own'].mean() >= 0), OwnFrame['Own'] * (3 * (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] != 'QB', 'Own'].mean())/100) + OwnFrame.loc[OwnFrame['Position'] != 'QB', 'Own'].mean(), OwnFrame['Own%'])
824
- OwnFrame['Own%'] = np.where(OwnFrame['Own%'] > 75, 75, OwnFrame['Own%'])
825
- OwnFrame['Own'] = OwnFrame['Own%'] * (500 / OwnFrame['Own%'].sum())
826
- if contest_var1 == 'Small':
827
- OwnFrame['Own%'] = np.where((OwnFrame['Position'] == 'QB') & (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] == 'QB', 'Own'].mean() >= 0), OwnFrame['Own'] * (3 * (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] == 'QB', 'Own'].mean())/100) + OwnFrame.loc[OwnFrame['Position'] == 'QB', 'Own'].mean(), OwnFrame['Own'])
828
- OwnFrame['Own%'] = np.where((OwnFrame['Position'] != 'QB') & (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] != 'QB', 'Own'].mean() >= 0), OwnFrame['Own'] * (1.5 * (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] != 'QB', 'Own'].mean())/100) + OwnFrame.loc[OwnFrame['Position'] != 'QB', 'Own'].mean(), OwnFrame['Own%'])
829
- OwnFrame['Own%'] = np.where(OwnFrame['Own%'] > 75, 75, OwnFrame['Own%'])
830
- OwnFrame['Own'] = OwnFrame['Own%'] * (500 / OwnFrame['Own%'].sum())
831
- Overall_Proj = OwnFrame[['Player', 'Team', 'Position', 'Median', 'Own', 'Floor', 'Ceiling', 'Salary']]
832
 
833
- del initial_proj
834
- del drop_frame
835
- del OwnFrame
 
 
836
 
837
  if insert_port == 1:
838
  UserPortfolio = portfolio_dataframe[['CPT', 'FLEX1', 'FLEX2', 'FLEX3', 'FLEX4', 'FLEX5']]
@@ -856,9 +733,6 @@ with tab2:
856
  Teams_used['team_item'] = Teams_used['index'] + 1
857
  Teams_used = Teams_used.drop(columns=['index'])
858
  Teams_used_dictraw = Teams_used.drop(columns=['team_item'])
859
- Teams_used_dict = Teams_used_dictraw.to_dict()
860
-
861
- del Teams_used_dictraw
862
 
863
  team_list = Teams_used['Team'].to_list()
864
  item_list = Teams_used['team_item'].to_list()
@@ -866,8 +740,6 @@ with tab2:
866
  FieldStrength_raw = Strength_var + ((30 - len(Teams_used)) * .01)
867
  FieldStrength = FieldStrength_raw - (FieldStrength_raw * (20000 / Contest_Size))
868
 
869
- del FieldStrength_raw
870
-
871
  if FieldStrength < 0:
872
  FieldStrength = Strength_var
873
  field_split = Strength_var
@@ -883,8 +755,6 @@ with tab2:
883
  pos_players = flex_raw
884
  pos_players.dropna(subset=['Median']).reset_index(drop=True)
885
  pos_players = pos_players.reset_index(drop=True)
886
-
887
- del flex_raw
888
 
889
  if insert_port == 1:
890
  try:
@@ -916,7 +786,7 @@ with tab2:
916
 
917
  # Merge and update nerf_frame DataFrame
918
  nerf_frame = pd.merge(cleaport_players, Overall_Proj, on='Player', how='left')
919
- nerf_frame[['Median', 'Floor', 'Ceiling', 'STDev']] *= 1
920
 
921
  del Raw_Portfolio
922
  except:
@@ -932,7 +802,7 @@ with tab2:
932
 
933
  # Merge and update nerf_frame DataFrame
934
  nerf_frame = pd.merge(cleaport_players, Overall_Proj, on='Player', how='left')
935
- nerf_frame[['Median', 'Floor', 'Ceiling', 'STDev']] *= 1
936
 
937
  st.table(nerf_frame)
938
 
@@ -973,133 +843,71 @@ with tab2:
973
  'team_check_map':dict(zip(cleaport_players.Player,nerf_frame.Team))
974
  }
975
 
976
- del Overall_Proj
977
- del nerf_frame
978
 
979
- RunsVar = 1
980
- st.write('Seed frame creation')
981
- FinalPortfolio, maps_dict = run_seed_frame(seed_depth1, Strength_var, strength_grow, Teams_used, Total_Runs)
982
 
983
- Sim_size = linenum_var1
984
- SimVar = 1
985
- Sim_Winners = []
986
- fp_array = FinalPortfolio.values
 
987
 
988
- if insert_port == 1:
989
- up_array = CleanPortfolio.values
 
990
 
991
- # Pre-vectorize functions
992
- vec_projection_map = np.vectorize(maps_dict['Projection_map'].__getitem__)
993
- vec_stdev_map = np.vectorize(maps_dict['STDev_map'].__getitem__)
994
 
995
- if insert_port == 1:
996
- vec_up_projection_map = np.vectorize(up_dict['Projection_map'].__getitem__)
997
- vec_up_stdev_map = np.vectorize(up_dict['STDev_map'].__getitem__)
998
- st.write('Simulating contest on frames')
999
- while SimVar <= Sim_size:
1000
-
1001
- if insert_port == 1:
1002
- fp_random = fp_array[np.random.choice(fp_array.shape[0], Contest_Size-len(CleanPortfolio))]
1003
- elif insert_port == 0:
1004
- fp_random = fp_array[np.random.choice(fp_array.shape[0], Contest_Size)]
1005
-
1006
- sample_arrays1 = np.c_[
1007
- fp_random,
1008
- np.sum(np.random.normal(
1009
- loc=vec_projection_map(fp_random[:, :-5]),
1010
- scale=vec_stdev_map(fp_random[:, :-5])),
1011
- axis=1)
1012
- ]
1013
-
1014
- if insert_port == 1:
1015
- sample_arrays2 = np.c_[
1016
- up_array,
1017
- np.sum(np.random.normal(
1018
- loc=vec_up_projection_map(up_array[:, :-5]),
1019
- scale=vec_up_stdev_map(up_array[:, :-5])),
1020
- axis=1)
1021
- ]
1022
- sample_arrays = np.vstack((sample_arrays1, sample_arrays2))
1023
- else:
1024
- sample_arrays = sample_arrays1
1025
-
1026
- final_array = sample_arrays[sample_arrays[:, 10].argsort()[::-1]]
1027
- best_lineup = final_array[final_array[:, -1].argsort(kind='stable')[::-1][:1]]
1028
- Sim_Winners.append(best_lineup)
1029
- SimVar += 1
1030
- st.write('Contest simulation complete')
1031
-
1032
- Sim_Winner_Frame = pd.DataFrame(np.concatenate(Sim_Winners), columns=FinalPortfolio.columns.tolist() + ['Fantasy'])
1033
- Sim_Winner_Frame['GPP_Proj'] = (Sim_Winner_Frame['Projection'] + Sim_Winner_Frame['Fantasy']) / 2
1034
- Sim_Winner_Frame['Salary'] = Sim_Winner_Frame['Salary'].astype(int)
1035
- Sim_Winner_Frame['Projection'] = Sim_Winner_Frame['Projection'].astype(np.float16)
1036
- Sim_Winner_Frame['Fantasy'] = Sim_Winner_Frame['Fantasy'].astype(np.float16)
1037
- Sim_Winner_Frame['GPP_Proj'] = Sim_Winner_Frame['GPP_Proj'].astype(np.float16)
1038
- st.session_state.Sim_Winner_Frame = Sim_Winner_Frame.sort_values(by='GPP_Proj', ascending=False)
1039
  st.session_state.Sim_Winner_Export = Sim_Winner_Frame.copy()
1040
 
1041
- del Sim_Winner_Frame
 
1042
 
1043
- player_freq = pd.DataFrame(np.column_stack(np.unique(st.session_state.Sim_Winner_Frame.iloc[:,0:6].values, return_counts=True)),
 
 
1044
  columns=['Player','Freq']).sort_values('Freq', ascending=False).reset_index(drop=True)
1045
- player_freq['Freq'] = player_freq['Freq'].astype(int)
1046
- player_freq['Position'] = player_freq['Player'].map(maps_dict['Pos_map'])
1047
- player_freq['Salary'] = player_freq['Player'].map(maps_dict['Salary_map'])
1048
- player_freq['Proj Own'] = (player_freq['Player'].map(maps_dict['Own_map']) / 100)
1049
- player_freq['Exposure'] = player_freq['Freq']/(Sim_size)
1050
- player_freq['Edge'] = player_freq['Exposure'] - player_freq['Proj Own']
1051
- player_freq['Team'] = player_freq['Player'].map(maps_dict['Team_map'])
1052
  for checkVar in range(len(team_list)):
1053
- player_freq['Team'] = player_freq['Team'].replace(item_list, team_list)
1054
-
1055
- st.session_state.player_freq = player_freq[['Player', 'Position', 'Team', 'Salary', 'Proj Own', 'Exposure', 'Edge']]
1056
 
1057
- cpt_freq = pd.DataFrame(np.column_stack(np.unique(st.session_state.Sim_Winner_Frame.iloc[:,0:1].values, return_counts=True)),
1058
  columns=['Player','Freq']).sort_values('Freq', ascending=False).reset_index(drop=True)
1059
- cpt_freq['Freq'] = cpt_freq['Freq'].astype(int)
1060
- cpt_freq['Position'] = cpt_freq['Player'].map(maps_dict['Pos_map'])
1061
- cpt_freq['Salary'] = cpt_freq['Player'].map(maps_dict['Salary_map'])
1062
- cpt_freq['Proj Own'] = (cpt_freq['Player'].map(maps_dict['Own_map']) / 4) / 100
1063
- cpt_freq['Exposure'] = cpt_freq['Freq']/(Sim_size)
1064
- cpt_freq['Edge'] = cpt_freq['Exposure'] - cpt_freq['Proj Own']
1065
- cpt_freq['Team'] = cpt_freq['Player'].map(maps_dict['Team_map'])
1066
  for checkVar in range(len(team_list)):
1067
- cpt_freq['Team'] = cpt_freq['Team'].replace(item_list, team_list)
1068
-
1069
- st.session_state.cpt_freq = cpt_freq[['Player', 'Position', 'Team', 'Salary', 'Proj Own', 'Exposure', 'Edge']]
1070
 
1071
- flex_freq = pd.DataFrame(np.column_stack(np.unique(st.session_state.Sim_Winner_Frame.iloc[:,[1, 2, 3, 4, 5]].values, return_counts=True)),
1072
  columns=['Player','Freq']).sort_values('Freq', ascending=False).reset_index(drop=True)
1073
- flex_freq['Freq'] = flex_freq['Freq'].astype(int)
1074
- flex_freq['Position'] = flex_freq['Player'].map(maps_dict['Pos_map'])
1075
- flex_freq['Salary'] = flex_freq['Player'].map(maps_dict['Salary_map'])
1076
- flex_freq['Proj Own'] = (flex_freq['Player'].map(maps_dict['Own_map']) / 100) - ((flex_freq['Player'].map(maps_dict['Own_map']) / 4) / 100)
1077
- flex_freq['Exposure'] = flex_freq['Freq']/(Sim_size)
1078
- flex_freq['Edge'] = flex_freq['Exposure'] - flex_freq['Proj Own']
1079
- flex_freq['Team'] = flex_freq['Player'].map(maps_dict['Team_map'])
1080
  for checkVar in range(len(team_list)):
1081
- flex_freq['Team'] = flex_freq['Team'].replace(item_list, team_list)
1082
-
1083
- st.session_state.flex_freq = flex_freq[['Player', 'Position', 'Team', 'Salary', 'Proj Own', 'Exposure', 'Edge']]
1084
-
1085
- del fp_random
1086
- del sample_arrays
1087
- del final_array
1088
- del fp_array
1089
- try:
1090
- del up_array
1091
- except:
1092
- pass
1093
- del best_lineup
1094
- del CleanPortfolio
1095
- del FinalPortfolio
1096
- del maps_dict
1097
- del team_list
1098
- del item_list
1099
- del Sim_size
1100
 
1101
  with st.container():
1102
- simulate_container = st.empty()
1103
  if 'player_freq' in st.session_state:
1104
  player_split_var2 = st.radio("Are you wanting to isolate any lineups with specific players?", ('Full Players', 'Specific Players'), key='player_split_var2')
1105
  if player_split_var2 == 'Specific Players':
@@ -1114,12 +922,12 @@ with tab2:
1114
  if 'Sim_Winner_Display' in st.session_state:
1115
  st.dataframe(st.session_state.Sim_Winner_Display.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').background_gradient(cmap='RdYlGn_r', subset=['Own']).format(precision=2), use_container_width = True)
1116
  if 'Sim_Winner_Export' in st.session_state:
1117
- st.download_button(
1118
- label="Export Tables",
1119
- data=convert_df_to_csv(st.session_state.Sim_Winner_Export),
1120
- file_name='NFL_consim_export.csv',
1121
- mime='text/csv',
1122
- )
1123
 
1124
  with st.container():
1125
  tab1, tab2, tab3 = st.tabs(['Overall Exposures', 'CPT Exposures', 'FLEX Exposures'])
@@ -1128,7 +936,7 @@ with tab2:
1128
  st.dataframe(st.session_state.player_freq.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').format(freq_format, precision=2), use_container_width = True)
1129
  st.download_button(
1130
  label="Export Exposures",
1131
- data=convert_df_to_csv(st.session_state.player_freq),
1132
  file_name='player_freq_export.csv',
1133
  mime='text/csv',
1134
  )
@@ -1137,7 +945,7 @@ with tab2:
1137
  st.dataframe(st.session_state.cpt_freq.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').format(freq_format, precision=2), use_container_width = True)
1138
  st.download_button(
1139
  label="Export Exposures",
1140
- data=convert_df_to_csv(st.session_state.cpt_freq),
1141
  file_name='cpt_freq_export.csv',
1142
  mime='text/csv',
1143
  )
@@ -1146,7 +954,15 @@ with tab2:
1146
  st.dataframe(st.session_state.flex_freq.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').format(freq_format, precision=2), use_container_width = True)
1147
  st.download_button(
1148
  label="Export Exposures",
1149
- data=convert_df_to_csv(st.session_state.flex_freq),
1150
  file_name='flex_freq_export.csv',
1151
  mime='text/csv',
1152
- )
 
 
9
  import pandas as pd
10
  import streamlit as st
11
  import gspread
12
+ import random
13
+ import gc
14
 
15
  @st.cache_resource
16
  def init_conn():
 
30
  "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/gspread-connection%40sheets-api-connect-378620.iam.gserviceaccount.com"
31
  }
32
 
33
+ gc_con = gspread.service_account_from_dict(credentials)
34
+
35
+ return gc_con
 
 
 
 
36
 
37
+ gcservice_account = init_conn()
 
38
 
39
  freq_format = {'Proj Own': '{:.2%}', 'Exposure': '{:.2%}', 'Edge': '{:.2%}'}
40
 
41
  @st.cache_resource(ttl=600)
42
  def load_dk_player_projections():
43
+ sh = gcservice_account.open_by_url('https://docs.google.com/spreadsheets/d/1I_1Ve3F4tftgfLQQoRKOJ351XfEG48s36OxXUKxmgS8/edit#gid=1391856348')
44
  worksheet = sh.worksheet('SD_Projections')
45
  load_display = pd.DataFrame(worksheet.get_all_records())
46
  load_display.rename(columns={"PPR": "Median", "name": "Player"}, inplace = True)
 
48
  load_display['Ceiling'] = load_display['Median'] + (load_display['Median'] * .75)
49
  load_display.replace('', np.nan, inplace=True)
50
  raw_display = load_display.dropna(subset=['Median'])
 
51
 
52
  return raw_display
53
 
54
  @st.cache_resource(ttl=600)
55
  def load_fd_player_projections():
56
+ sh = gcservice_account.open_by_url('https://docs.google.com/spreadsheets/d/1I_1Ve3F4tftgfLQQoRKOJ351XfEG48s36OxXUKxmgS8/edit#gid=1391856348')
57
  worksheet = sh.worksheet('FD_SD_Projections')
58
  load_display = pd.DataFrame(worksheet.get_all_records())
59
  load_display.rename(columns={"Half_PPR": "Median", "name": "Player"}, inplace = True)
 
61
  load_display['Ceiling'] = load_display['Median'] + (load_display['Median'] * .75)
62
  load_display.replace('', np.nan, inplace=True)
63
  raw_display = load_display.dropna(subset=['Median'])
 
64
 
65
  return raw_display
66
 
67
  @st.cache_resource(ttl=600)
68
  def load_dk_player_projections_2():
69
+ sh = gcservice_account.open_by_url('https://docs.google.com/spreadsheets/d/1I_1Ve3F4tftgfLQQoRKOJ351XfEG48s36OxXUKxmgS8/edit#gid=1391856348')
70
  worksheet = sh.worksheet('SD_Projections_2')
71
  load_display = pd.DataFrame(worksheet.get_all_records())
72
  load_display.rename(columns={"PPR": "Median", "name": "Player"}, inplace = True)
 
74
  load_display['Ceiling'] = load_display['Median'] + (load_display['Median'] * .75)
75
  load_display.replace('', np.nan, inplace=True)
76
  raw_display = load_display.dropna(subset=['Median'])
 
77
 
78
  return raw_display
79
 
80
  @st.cache_resource(ttl=600)
81
  def load_fd_player_projections_2():
82
+ sh = gcservice_account.open_by_url('https://docs.google.com/spreadsheets/d/1I_1Ve3F4tftgfLQQoRKOJ351XfEG48s36OxXUKxmgS8/edit#gid=1391856348')
83
  worksheet = sh.worksheet('FD_SD_Projections_2')
84
  load_display = pd.DataFrame(worksheet.get_all_records())
85
  load_display.rename(columns={"Half_PPR": "Median", "name": "Player"}, inplace = True)
 
87
  load_display['Ceiling'] = load_display['Median'] + (load_display['Median'] * .75)
88
  load_display.replace('', np.nan, inplace=True)
89
  raw_display = load_display.dropna(subset=['Median'])
 
90
 
91
  return raw_display
92
 
93
+ dk_roo_raw = load_dk_player_projections()
94
+ dk_roo_raw_2 = load_dk_player_projections_2()
95
+ fd_roo_raw = load_fd_player_projections()
96
+ fd_roo_raw_2 = load_fd_player_projections_2()
97
 
98
+ static_exposure = pd.DataFrame(columns=['Player', 'count'])
99
+ overall_exposure = pd.DataFrame(columns=['Player', 'count'])
100
+
101
+ def sim_contest(Sim_size, FinalPortfolio, CleanPortfolio, maps_dict, up_dict, insert_port):
102
+ SimVar = 1
103
+ Sim_Winners = []
104
+ fp_array = FinalPortfolio.values
105
+
106
+ if insert_port == 1:
107
+ up_array = CleanPortfolio.values
108
+
109
+ # Pre-vectorize functions
110
+ vec_projection_map = np.vectorize(maps_dict['Projection_map'].__getitem__)
111
+ vec_stdev_map = np.vectorize(maps_dict['STDev_map'].__getitem__)
112
+
113
+ if insert_port == 1:
114
+ vec_up_projection_map = np.vectorize(up_dict['Projection_map'].__getitem__)
115
+ vec_up_stdev_map = np.vectorize(up_dict['STDev_map'].__getitem__)
116
+
117
+ st.write('Simulating contest on frames')
118
+
119
+ while SimVar <= Sim_size:
120
+ if insert_port == 1:
121
+ fp_random = fp_array[np.random.choice(fp_array.shape[0], Contest_Size-len(CleanPortfolio))]
122
+ elif insert_port == 0:
123
+ fp_random = fp_array[np.random.choice(fp_array.shape[0], Contest_Size)]
124
+
125
+ sample_arrays1 = np.c_[
126
+ fp_random,
127
+ np.sum(np.random.normal(
128
+ loc=vec_projection_map(fp_random[:, :-5]),
129
+ scale=vec_stdev_map(fp_random[:, :-5])),
130
+ axis=1)
131
+ ]
132
+
133
+ if insert_port == 1:
134
+ sample_arrays2 = np.c_[
135
+ up_array,
136
+ np.sum(np.random.normal(
137
+ loc=vec_up_projection_map(up_array[:, :-5]),
138
+ scale=vec_up_stdev_map(up_array[:, :-5])),
139
+ axis=1)
140
+ ]
141
+ sample_arrays = np.vstack((sample_arrays1, sample_arrays2))
142
+ else:
143
+ sample_arrays = sample_arrays1
144
+
145
+ final_array = sample_arrays[sample_arrays[:, 10].argsort()[::-1]]
146
+ best_lineup = final_array[final_array[:, -1].argsort(kind='stable')[::-1][:1]]
147
+ Sim_Winners.append(best_lineup)
148
+ SimVar += 1
149
+
150
+ return Sim_Winners
151
+
152
+ def run_seed_frame(seed_depth1, Strength_var, strength_grow, Teams_used, Total_Runs, field_growth):
153
  RunsVar = 1
154
  seed_depth_def = seed_depth1
155
  Strength_var_def = Strength_var
156
  strength_grow_def = strength_grow
157
  Teams_used_def = Teams_used
158
  Total_Runs_def = Total_Runs
159
+
160
+ st.write('Creating Seed Frames')
161
+
162
  while RunsVar <= seed_depth_def:
163
  if RunsVar <= 3:
164
  FieldStrength = Strength_var_def
165
+ FinalPortfolio, maps_dict = get_correlated_portfolio_for_sim(Total_Runs_def * .25, sharp_split, field_growth)
166
+ FinalPortfolio2, maps_dict2 = get_uncorrelated_portfolio_for_sim(Total_Runs_def * .25, sharp_split, field_growth)
167
+ FinalPortfolio_init = pd.concat([FinalPortfolio, FinalPortfolio2], axis=0)
 
168
  maps_dict.update(maps_dict2)
 
 
169
  elif RunsVar > 3 and RunsVar <= 4:
170
  FieldStrength += (strength_grow_def + ((30 - len(Teams_used_def)) * .001))
171
+ FinalPortfolio3, maps_dict3 = get_correlated_portfolio_for_sim(Total_Runs_def * .25, sharp_split, field_growth)
172
+ FinalPortfolio4, maps_dict4 = get_uncorrelated_portfolio_for_sim(Total_Runs_def * .25, sharp_split, field_growth)
173
+ FinalPortfolio_merge_3 = pd.concat([FinalPortfolio_init, FinalPortfolio3], axis=0)
174
+ FinalPortfolio_merge_4 = pd.concat([FinalPortfolio_merge_3, FinalPortfolio4], axis=0)
175
+ FinalPortfolio_step_2 = FinalPortfolio_merge_4.drop_duplicates(subset = ['Projection', 'Own'],keep = 'last').reset_index(drop = True)
176
  maps_dict.update(maps_dict3)
177
  maps_dict.update(maps_dict4)
 
 
 
 
178
  elif RunsVar > 4:
179
  FieldStrength = 1
180
+ FinalPortfolio5, maps_dict5 = get_correlated_portfolio_for_sim(Total_Runs_def * .25, sharp_split, field_growth)
181
+ FinalPortfolio6, maps_dict6 = get_uncorrelated_portfolio_for_sim(Total_Runs_def * .25, sharp_split, field_growth)
182
+ FinalPortfolio_merge_5 = pd.concat([FinalPortfolio_step_2, FinalPortfolio5], axis=0)
183
+ FinalPortfolio_merge_6 = pd.concat([FinalPortfolio_merge_5, FinalPortfolio6], axis=0)
184
+ FinalPortfolio_export = FinalPortfolio_merge_6.drop_duplicates(subset = ['Projection', 'Own'],keep = 'last').reset_index(drop = True)
185
+ maps_dict.update(maps_dict5)
186
+ maps_dict.update(maps_dict6)
 
 
 
 
187
  RunsVar += 1
188
+
189
+ return FinalPortfolio_export, maps_dict
190
 
191
  def create_overall_dfs(pos_players, table_name, dict_name, pos):
192
  pos_players = pos_players.sort_values(by='Value', ascending=False)
 
195
  overall_table_name = overall_table_name.assign(Var = range(0,len(overall_table_name)))
196
  overall_dict_name = pd.Series(overall_table_name.Player.values, index=overall_table_name.Var).to_dict()
197
 
 
 
 
198
  return overall_table_name, overall_dict_name
199
 
200
 
 
213
 
214
  return df_out, ref_dict
215
 
216
+ def create_random_portfolio(Total_Sample_Size, raw_baselines, field_growth):
217
 
218
  O_merge, full_pos_player_dict = get_overall_merged_df()
219
  Overall_Merge = O_merge[['Var', 'Player', 'Team', 'Salary', 'Median', 'Own']].copy()
 
238
 
239
  return RandomPortfolio, maps_dict, ranges_dict, full_pos_player_dict
240
 
241
+ def get_correlated_portfolio_for_sim(Total_Sample_Size, sharp_split, field_growth):
242
 
243
+ sizesplit = round(Total_Sample_Size * sharp_split)
244
 
245
+ RandomPortfolio, maps_dict, ranges_dict, full_pos_player_dict = create_random_portfolio(sizesplit, raw_baselines, field_growth)
246
 
247
  RandomPortfolio['CPT'] = pd.Series(list(RandomPortfolio['CPT'].map(full_pos_player_dict['pos_dicts'][0])), dtype="string[pyarrow]")
248
  RandomPortfolio['FLEX1'] = pd.Series(list(RandomPortfolio['FLEX1'].map(full_pos_player_dict['pos_dicts'][0])), dtype="string[pyarrow]")
 
255
  RandomPortfolio = RandomPortfolio[RandomPortfolio['plyr_count'] == 7].drop(columns=['plyr_list','plyr_count']).\
256
  reset_index(drop=True)
257
 
 
 
 
 
258
  RandomPortfolio['CPTs'] = RandomPortfolio['CPT'].map(maps_dict['Salary_map']).astype(np.int32) * 1.5
259
  RandomPortfolio['FLEX1s'] = RandomPortfolio['FLEX1'].map(maps_dict['Salary_map']).astype(np.int32)
260
  RandomPortfolio['FLEX2s'] = RandomPortfolio['FLEX2'].map(maps_dict['Salary_map']).astype(np.int32)
 
282
  portHeaderList.append('Own')
283
 
284
  RandomPortArray = RandomPortfolio.to_numpy()
 
285
 
286
  RandomPortArray = np.c_[RandomPortArray, np.einsum('ij->i',RandomPortArray[:,7:13].astype(int))]
287
  RandomPortArray = np.c_[RandomPortArray, np.einsum('ij->i',RandomPortArray[:,13:19].astype(np.double))]
 
290
  RandomPortArrayOut = np.delete(RandomPortArray, np.s_[7:25], axis=1)
291
  RandomPortfolioDF = pd.DataFrame(RandomPortArrayOut, columns = ['CPT', 'FLEX1', 'FLEX2', 'FLEX3', 'FLEX4', 'FLEX5', 'User/Field', 'Salary', 'Projection', 'Own'])
292
  RandomPortfolioDF = RandomPortfolioDF.sort_values(by=Sim_function, ascending=False)
 
 
 
293
 
294
  if insert_port == 1:
295
  CleanPortfolio['Salary'] = sum([CleanPortfolio['CPT'].map(up_dict['Salary_map']) * 1.5,
 
329
 
330
  return RandomPortfolio, maps_dict
331
 
332
+ def get_uncorrelated_portfolio_for_sim(Total_Sample_Size, sharp_split, field_growth):
333
 
334
+ sizesplit = round(Total_Sample_Size * (1-sharp_split))
335
 
336
+ RandomPortfolio, maps_dict, ranges_dict, full_pos_player_dict = create_random_portfolio(sizesplit, raw_baselines, field_growth)
337
 
338
  RandomPortfolio['CPT'] = pd.Series(list(RandomPortfolio['CPT'].map(full_pos_player_dict['pos_dicts'][0])), dtype="string[pyarrow]")
339
  RandomPortfolio['FLEX1'] = pd.Series(list(RandomPortfolio['FLEX1'].map(full_pos_player_dict['pos_dicts'][0])), dtype="string[pyarrow]")
 
346
  RandomPortfolio = RandomPortfolio[RandomPortfolio['plyr_count'] == 7].drop(columns=['plyr_list','plyr_count']).\
347
  reset_index(drop=True)
348
 
 
 
 
 
349
  RandomPortfolio['CPTs'] = RandomPortfolio['CPT'].map(maps_dict['Salary_map']).astype(np.int32) * 1.5
350
  RandomPortfolio['FLEX1s'] = RandomPortfolio['FLEX1'].map(maps_dict['Salary_map']).astype(np.int32)
351
  RandomPortfolio['FLEX2s'] = RandomPortfolio['FLEX2'].map(maps_dict['Salary_map']).astype(np.int32)
 
373
  portHeaderList.append('Own')
374
 
375
  RandomPortArray = RandomPortfolio.to_numpy()
 
376
 
377
  RandomPortArray = np.c_[RandomPortArray, np.einsum('ij->i',RandomPortArray[:,7:13].astype(int))]
378
  RandomPortArray = np.c_[RandomPortArray, np.einsum('ij->i',RandomPortArray[:,13:19].astype(np.double))]
 
381
  RandomPortArrayOut = np.delete(RandomPortArray, np.s_[7:25], axis=1)
382
  RandomPortfolioDF = pd.DataFrame(RandomPortArrayOut, columns = ['CPT', 'FLEX1', 'FLEX2', 'FLEX3', 'FLEX4', 'FLEX5', 'User/Field', 'Salary', 'Projection', 'Own'])
383
  RandomPortfolioDF = RandomPortfolioDF.sort_values(by=Sim_function, ascending=False)
 
 
 
384
 
385
  if insert_port == 1:
386
  CleanPortfolio['Salary'] = sum([CleanPortfolio['CPT'].map(up_dict['Salary_map']) * 1.5,
 
420
 
421
  return RandomPortfolio, maps_dict
422
 
 
 
423
  tab1, tab2 = st.tabs(['Uploads', 'Contest Sim'])
424
 
425
  with tab1:
 
483
  split_portfolio['FLEX4'].map(player_salary_dict),
484
  split_portfolio['FLEX5'].map(player_salary_dict)])
485
 
 
 
486
  split_portfolio['Projection'] = sum([split_portfolio['CPT'].map(player_proj_dict) * 1.5,
487
  split_portfolio['FLEX1'].map(player_proj_dict),
488
  split_portfolio['FLEX2'].map(player_proj_dict),
 
490
  split_portfolio['FLEX4'].map(player_proj_dict),
491
  split_portfolio['FLEX5'].map(player_proj_dict)])
492
 
 
 
493
  split_portfolio['Ownership'] = sum([split_portfolio['CPT'].map(player_own_dict) / 4,
494
  split_portfolio['FLEX1'].map(player_own_dict),
495
  split_portfolio['FLEX2'].map(player_own_dict),
 
497
  split_portfolio['FLEX4'].map(player_own_dict),
498
  split_portfolio['FLEX5'].map(player_own_dict)])
499
 
 
500
  except:
501
  portfolio_dataframe.columns=["CPT", "FLEX1", "FLEX2", "FLEX3", "FLEX4", "FLEX5"]
502
  split_portfolio = portfolio_dataframe
 
528
  split_portfolio['FLEX4'].map(player_salary_dict),
529
  split_portfolio['FLEX5'].map(player_salary_dict)])
530
 
 
 
531
  split_portfolio['Projection'] = sum([split_portfolio['CPT'].map(player_proj_dict) * 1.5,
532
  split_portfolio['FLEX1'].map(player_proj_dict),
533
  split_portfolio['FLEX2'].map(player_proj_dict),
 
535
  split_portfolio['FLEX4'].map(player_proj_dict),
536
  split_portfolio['FLEX5'].map(player_proj_dict)])
537
 
 
 
538
  split_portfolio['Ownership'] = sum([split_portfolio['CPT'].map(player_own_dict) / 4,
539
  split_portfolio['FLEX1'].map(player_own_dict),
540
  split_portfolio['FLEX2'].map(player_own_dict),
 
542
  split_portfolio['FLEX4'].map(player_own_dict),
543
  split_portfolio['FLEX5'].map(player_own_dict)])
544
 
 
545
  except:
546
  split_portfolio = portfolio_dataframe
547
 
 
566
  split_portfolio['FLEX4'].map(player_salary_dict),
567
  split_portfolio['FLEX5'].map(player_salary_dict)])
568
 
 
 
569
  split_portfolio['Projection'] = sum([split_portfolio['CPT'].map(player_proj_dict) * 1.5,
570
  split_portfolio['FLEX1'].map(player_proj_dict),
571
  split_portfolio['FLEX2'].map(player_proj_dict),
 
573
  split_portfolio['FLEX4'].map(player_proj_dict),
574
  split_portfolio['FLEX5'].map(player_proj_dict)])
575
 
 
 
576
  split_portfolio['Ownership'] = sum([split_portfolio['CPT'].map(player_own_dict) / 4,
577
  split_portfolio['FLEX1'].map(player_own_dict),
578
  split_portfolio['FLEX2'].map(player_own_dict),
 
580
  split_portfolio['FLEX4'].map(player_own_dict),
581
  split_portfolio['FLEX5'].map(player_own_dict)])
582
 
 
583
 
584
+ gc.collect()
 
 
585
 
 
586
  with tab2:
587
+ col1, col2 = st.columns([1, 7])
588
  with col1:
589
  if st.button("Load/Reset Data", key='reset1'):
590
  st.cache_data.clear()
591
+ for key in st.session_state.keys():
592
+ del st.session_state[key]
593
  dk_roo_raw = load_dk_player_projections()
594
  dk_roo_raw_2 = load_dk_player_projections_2()
595
  fd_roo_raw = load_fd_player_projections()
 
611
  raw_baselines = dk_roo_raw
612
  elif slate_var1 == 'Paydirt (Secondary)':
613
  raw_baselines = dk_roo_raw_2
614
+
 
 
 
615
  st.info("If you are uploading a portfolio, note that there are adjustments to projections and deviation mapping to prevent 'Projection Bias' and create a fair simulation")
  insert_port1 = st.selectbox("Are you uploading a portfolio?", ('No', 'Yes'))
  if insert_port1 == 'Yes':

  elif contest_var1 == 'Medium':
  Contest_Size = 2500
  elif contest_var1 == 'Large':
+ Contest_Size = 5000

  strength_var1 = st.selectbox("How sharp is the field in the contest?", ('Not Very', 'Average', 'Very'))
  if strength_var1 == 'Not Very':
+ sharp_split = .33
+ Strength_var = .50
  scaling_var = 5
  elif strength_var1 == 'Average':
+ sharp_split = .50
+ Strength_var = .25
  scaling_var = 10
  elif strength_var1 == 'Very':
+ sharp_split = .75
+ Strength_var = .01
  scaling_var = 15
+
+ Sort_function = 'Median'
+ Sim_function = 'Projection'
+
+ if Contest_Size <= 1000:
+ strength_grow = .01
+ elif Contest_Size > 1000 and Contest_Size <= 2500:
+ strength_grow = .025
+ elif Contest_Size > 2500 and Contest_Size <= 5000:
+ strength_grow = .05
+ elif Contest_Size > 5000 and Contest_Size <= 20000:
+ strength_grow = .075
+ elif Contest_Size > 20000:
+ strength_grow = .1
+
+ field_growth = 100 * strength_grow
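+ # strength_grow rises with Contest_Size; field_growth (100 * strength_grow) is one of the inputs to run_seed_frame when the simulated field is built below
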
  with col2:
  with st.container():
  if st.button("Simulate Contest", key='sim1'):

  with st.container():
+ for key in list(st.session_state.keys()):
+ del st.session_state[key]

  if slate_var1 == 'User':
+ initial_proj = proj_dataframe[['Player', 'Team', 'Position', 'Median', 'Own', 'Floor', 'Ceiling', 'Salary']]
+
+ # Define the calculation to be applied
+ def calculate_own(position, own, mean_own, factor, max_own=75):
+ return np.where((position == 'QB') & (own - mean_own >= 0),
+ own * (factor * (own - mean_own) / 100) + mean_own,
+ own)
+
+ # Set the factors based on the contest_var1
+ factor_qb, factor_other = {
+ 'Small': (10, 5),
+ 'Medium': (6, 3),
+ 'Large': (3, 1.5),
+ }[contest_var1]
+
+ # Apply the calculation to the DataFrame
+ initial_proj['Own%'] = initial_proj.apply(lambda row: calculate_own(row['Position'], row['Own'], initial_proj.loc[initial_proj['Position'] == row['Position'], 'Own'].mean(), factor_qb if row['Position'] == 'QB' else factor_other), axis=1)
+ initial_proj['Own%'] = initial_proj['Own%'].clip(upper=75)
+ initial_proj['Own'] = initial_proj['Own%'] * (500 / initial_proj['Own%'].sum())

+ # Drop unnecessary columns and create the final DataFrame
+ Overall_Proj = initial_proj[['Player', 'Team', 'Position', 'Median', 'Own', 'Salary']]
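+ # Net effect of the block above: QBs owned at or above the QB positional mean are scaled up by factor_qb,
+ # everything else keeps its raw Own; Own% is capped at 75 and Own is renormalized so the column sums to 500
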
  elif slate_var1 != 'User':
+ # Copy only the necessary columns
+ initial_proj = raw_baselines[['Player', 'Team', 'Position', 'Median', 'Own', 'Floor', 'Ceiling', 'Salary']]

+ # Define the calculation to be applied
+ def calculate_own(position, own, mean_own, factor, max_own=75):
+ return np.where((position == 'QB') & (own - mean_own >= 0),
+ own * (factor * (own - mean_own) / 100) + mean_own,
+ own)
+
+ # Set the factors based on the contest_var1
+ factor_qb, factor_other = {
+ 'Small': (10, 5),
+ 'Medium': (6, 3),
+ 'Large': (3, 1.5),
+ }[contest_var1]
+
+ # Apply the calculation to the DataFrame
+ initial_proj['Own%'] = initial_proj.apply(lambda row: calculate_own(row['Position'], row['Own'], initial_proj.loc[initial_proj['Position'] == row['Position'], 'Own'].mean(), factor_qb if row['Position'] == 'QB' else factor_other), axis=1)
+ initial_proj['Own%'] = initial_proj['Own%'].clip(upper=75)
+ initial_proj['Own'] = initial_proj['Own%'] * (500 / initial_proj['Own%'].sum())
+
+ # Drop unnecessary columns and create the final DataFrame
+ Overall_Proj = initial_proj[['Player', 'Team', 'Position', 'Median', 'Own', 'Salary']]
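+ # Same ownership adjustment as the 'User' branch above, applied to the selected site baselines (raw_baselines)
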
  if insert_port == 1:
  UserPortfolio = portfolio_dataframe[['CPT', 'FLEX1', 'FLEX2', 'FLEX3', 'FLEX4', 'FLEX5']]

  Teams_used['team_item'] = Teams_used['index'] + 1
  Teams_used = Teams_used.drop(columns=['index'])
  Teams_used_dictraw = Teams_used.drop(columns=['team_item'])

  team_list = Teams_used['Team'].to_list()
  item_list = Teams_used['team_item'].to_list()

  FieldStrength_raw = Strength_var + ((30 - len(Teams_used)) * .01)
  FieldStrength = FieldStrength_raw - (FieldStrength_raw * (20000 / Contest_Size))
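+ # FieldStrength starts from Strength_var, rises when fewer teams are on the slate, and is pulled down for smaller contests;
+ # the check below falls back to Strength_var whenever the net adjustment goes negative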
  if FieldStrength < 0:
  FieldStrength = Strength_var
  field_split = Strength_var

  pos_players = flex_raw
  pos_players = pos_players.dropna(subset=['Median']).reset_index(drop=True)
  pos_players = pos_players.reset_index(drop=True)

  if insert_port == 1:
  try:

  # Merge and update nerf_frame DataFrame
  nerf_frame = pd.merge(cleaport_players, Overall_Proj, on='Player', how='left')
+ nerf_frame[['Median', 'Floor', 'Ceiling', 'STDev']] *= .9
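+ # Uploaded-portfolio players have Median/Floor/Ceiling/STDev reduced by 10%, the projection and deviation adjustment referenced in the upload notice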
 
  del Raw_Portfolio
  except:

  # Merge and update nerf_frame DataFrame
  nerf_frame = pd.merge(cleaport_players, Overall_Proj, on='Player', how='left')
+ nerf_frame[['Median', 'Floor', 'Ceiling', 'STDev']] *= .9

  st.table(nerf_frame)

  'team_check_map':dict(zip(cleaport_players.Player,nerf_frame.Team))
  }

+ FinalPortfolio, maps_dict = run_seed_frame(5, Strength_var, strength_grow, Teams_used, 1000000, field_growth)

+ Sim_Winners = sim_contest(5000, FinalPortfolio, CleanPortfolio, maps_dict, up_dict, insert_port)
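+ # 5,000 contests are simulated here; the exposure tables below divide each player's winning-lineup count by that same 5,000
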
+ # Initial setup
+ Sim_Winner_Frame = pd.DataFrame(np.concatenate(Sim_Winners), columns=FinalPortfolio.columns.tolist() + ['Fantasy'])
+ Sim_Winner_Frame['GPP_Proj'] = (Sim_Winner_Frame['Projection'] + Sim_Winner_Frame['Fantasy']) / 2
+ Sim_Winner_Frame['unique_id'] = Sim_Winner_Frame['Projection'].astype(str) + Sim_Winner_Frame['Salary'].astype(str) + Sim_Winner_Frame['Own'].astype(str)
+ Sim_Winner_Frame = Sim_Winner_Frame.assign(win_count=Sim_Winner_Frame['unique_id'].map(Sim_Winner_Frame['unique_id'].value_counts()))
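+ # unique_id fingerprints a lineup by its Projection, Salary and Own values; win_count records how many simulated contests
+ # each distinct lineup won, and duplicates are later dropped on unique_id when the top lineups are sorted
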
+ # Type Casting
+ type_cast_dict = {'Salary': int, 'Projection': np.float16, 'Fantasy': np.float16, 'GPP_Proj': np.float32}
+ Sim_Winner_Frame = Sim_Winner_Frame.astype(type_cast_dict)
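+ # Downcasting the numeric columns keeps the winners frame small before copies are stored in session state
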
+ del FinalPortfolio, insert_port, type_cast_dict
 
 
+ # Sorting
+ st.session_state.Sim_Winner_Frame = Sim_Winner_Frame.sort_values(by=['win_count', 'GPP_Proj'], ascending= [False, False]).copy().drop_duplicates(subset='unique_id').head(100)
+ st.session_state.Sim_Winner_Frame.drop(columns='unique_id', inplace=True)
+
+ # Data Copying
  st.session_state.Sim_Winner_Export = Sim_Winner_Frame.copy()

+ # Data Copying
+ st.session_state.Sim_Winner_Display = Sim_Winner_Frame.copy()

+ del Sim_Winner_Frame, Sim_Winners
+
+ st.session_state.player_freq = pd.DataFrame(np.column_stack(np.unique(st.session_state.Sim_Winner_Display.iloc[:,0:6].values, return_counts=True)),
  columns=['Player','Freq']).sort_values('Freq', ascending=False).reset_index(drop=True)
+ st.session_state.player_freq['Freq'] = st.session_state.player_freq['Freq'].astype(int)
+ st.session_state.player_freq['Position'] = st.session_state.player_freq['Player'].map(maps_dict['Pos_map'])
+ st.session_state.player_freq['Salary'] = st.session_state.player_freq['Player'].map(maps_dict['Salary_map'])
+ st.session_state.player_freq['Proj Own'] = st.session_state.player_freq['Player'].map(maps_dict['Own_map']) / 100
+ st.session_state.player_freq['Exposure'] = st.session_state.player_freq['Freq']/(5000)
+ st.session_state.player_freq['Edge'] = st.session_state.player_freq['Exposure'] - st.session_state.player_freq['Proj Own']
+ st.session_state.player_freq['Team'] = st.session_state.player_freq['Player'].map(maps_dict['Team_map'])
  for checkVar in range(len(team_list)):
+ st.session_state.player_freq['Team'] = st.session_state.player_freq['Team'].replace(item_list, team_list)
 
 
+ st.session_state.cpt_freq = pd.DataFrame(np.column_stack(np.unique(st.session_state.Sim_Winner_Display.iloc[:,0:1].values, return_counts=True)),
  columns=['Player','Freq']).sort_values('Freq', ascending=False).reset_index(drop=True)
+ st.session_state.cpt_freq['Freq'] = st.session_state.cpt_freq['Freq'].astype(int)
+ st.session_state.cpt_freq['Position'] = st.session_state.cpt_freq['Player'].map(maps_dict['Pos_map'])
+ st.session_state.cpt_freq['Salary'] = st.session_state.cpt_freq['Player'].map(maps_dict['Salary_map'])
+ st.session_state.cpt_freq['Proj Own'] = (st.session_state.cpt_freq['Player'].map(maps_dict['Own_map']) / 4) / 100
+ st.session_state.cpt_freq['Exposure'] = st.session_state.cpt_freq['Freq']/5000
+ st.session_state.cpt_freq['Edge'] = st.session_state.cpt_freq['Exposure'] - st.session_state.cpt_freq['Proj Own']
+ st.session_state.cpt_freq['Team'] = st.session_state.cpt_freq['Player'].map(maps_dict['Team_map'])
  for checkVar in range(len(team_list)):
+ st.session_state.cpt_freq['Team'] = st.session_state.cpt_freq['Team'].replace(item_list, team_list)
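+ # The first column of the winners frame is the CPT slot; CPT projected ownership is taken as one quarter of a player's total projected ownership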
 
 
+ st.session_state.flex_freq = pd.DataFrame(np.column_stack(np.unique(st.session_state.Sim_Winner_Display.iloc[:,[1, 2, 3, 4, 5]].values, return_counts=True)),
  columns=['Player','Freq']).sort_values('Freq', ascending=False).reset_index(drop=True)
+ st.session_state.flex_freq['Freq'] = st.session_state.flex_freq['Freq'].astype(int)
+ st.session_state.flex_freq['Position'] = st.session_state.flex_freq['Player'].map(maps_dict['Pos_map'])
+ st.session_state.flex_freq['Salary'] = st.session_state.flex_freq['Player'].map(maps_dict['Salary_map'])
+ st.session_state.flex_freq['Proj Own'] = (st.session_state.flex_freq['Player'].map(maps_dict['Own_map']) / 100) - ((st.session_state.flex_freq['Player'].map(maps_dict['Own_map']) / 4) / 100)
+ st.session_state.flex_freq['Exposure'] = st.session_state.flex_freq['Freq']/5000
+ st.session_state.flex_freq['Edge'] = st.session_state.flex_freq['Exposure'] - st.session_state.flex_freq['Proj Own']
+ st.session_state.flex_freq['Team'] = st.session_state.flex_freq['Player'].map(maps_dict['Team_map'])
  for checkVar in range(len(team_list)):
+ st.session_state.flex_freq['Team'] = st.session_state.flex_freq['Team'].replace(item_list, team_list)
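+ # Columns 1-5 of the winners frame are the FLEX slots; FLEX projected ownership is total projected ownership minus the quarter assigned to CPT
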
  with st.container():
 
  if 'player_freq' in st.session_state:
  player_split_var2 = st.radio("Do you want to isolate lineups with specific players?", ('Full Players', 'Specific Players'), key='player_split_var2')
  if player_split_var2 == 'Specific Players':

  if 'Sim_Winner_Display' in st.session_state:
  st.dataframe(st.session_state.Sim_Winner_Display.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').background_gradient(cmap='RdYlGn_r', subset=['Own']).format(precision=2), use_container_width = True)
  if 'Sim_Winner_Export' in st.session_state:
+ st.download_button(
+ label="Export Full Frame",
+ data=st.session_state.Sim_Winner_Export.to_csv().encode('utf-8'),
+ file_name='NFL_consim_export.csv',
+ mime='text/csv',
+ )

  with st.container():
  tab1, tab2, tab3 = st.tabs(['Overall Exposures', 'CPT Exposures', 'FLEX Exposures'])

  st.dataframe(st.session_state.player_freq.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').format(freq_format, precision=2), use_container_width = True)
  st.download_button(
  label="Export Exposures",
+ data=st.session_state.player_freq.to_csv().encode('utf-8'),
  file_name='player_freq_export.csv',
  mime='text/csv',
  )

  st.dataframe(st.session_state.cpt_freq.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').format(freq_format, precision=2), use_container_width = True)
  st.download_button(
  label="Export Exposures",
+ data=st.session_state.cpt_freq.to_csv().encode('utf-8'),
  file_name='cpt_freq_export.csv',
  mime='text/csv',
  )

  st.dataframe(st.session_state.flex_freq.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').format(freq_format, precision=2), use_container_width = True)
  st.download_button(
  label="Export Exposures",
+ data=st.session_state.flex_freq.to_csv().encode('utf-8'),
  file_name='flex_freq_export.csv',
  mime='text/csv',
+ )
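+ # Drop the remaining large intermediates and force a garbage-collection pass so memory is released once the results live in session state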
+ del gcservice_account
+ del dk_roo_raw, dk_roo_raw_2, fd_roo_raw, fd_roo_raw_2
+ del static_exposure, overall_exposure
+ del insert_port1, Contest_Size, sharp_split, Strength_var, scaling_var, Sort_function, Sim_function, strength_grow, field_growth
+ del raw_baselines
+ del freq_format
+
+ gc.collect()