Update app.py
app.py CHANGED
@@ -43,7 +43,6 @@ def load_dk_player_projections():
 load_display = pd.DataFrame(worksheet.get_all_records())
 load_display.replace('', np.nan, inplace=True)
 raw_display = load_display.dropna(subset=['Median'])
-del load_display

 return raw_display

@@ -54,7 +53,6 @@ def load_fd_player_projections():
 load_display = pd.DataFrame(worksheet.get_all_records())
 load_display.replace('', np.nan, inplace=True)
 raw_display = load_display.dropna(subset=['Median'])
-del load_display

 return raw_display

@@ -72,9 +70,6 @@ def set_export_ids():
 load_display.replace('', np.nan, inplace=True)
 raw_display = load_display.dropna(subset=['Median'])
 fd_ids = dict(zip(raw_display['Player'], raw_display['player_id']))
-
-del load_display
-del raw_display

 return dk_ids, fd_ids

@@ -96,8 +91,6 @@ def run_seed_frame(seed_depth1, Strength_var, strength_grow, Teams_used, Total_R
 FinalPortfolio2, maps_dict2 = get_uncorrelated_portfolio_for_sim(Total_Runs_def * .1, sharp_split)
 FinalPortfolio = pd.concat([FinalPortfolio, FinalPortfolio2], axis=0)
 maps_dict.update(maps_dict2)
-del FinalPortfolio2
-del maps_dict2
 elif RunsVar > 3 and RunsVar <= 4:
 FieldStrength += (strength_grow_def + ((30 - len(Teams_used_def)) * .001))
 FinalPortfolio3, maps_dict3 = get_correlated_portfolio_for_sim(Total_Runs_def * .1, sharp_split)
@@ -107,10 +100,6 @@ def run_seed_frame(seed_depth1, Strength_var, strength_grow, Teams_used, Total_R
 FinalPortfolio = FinalPortfolio.drop_duplicates(subset = ['Projection', 'Own'],keep = 'last').reset_index(drop = True)
 maps_dict.update(maps_dict3)
 maps_dict.update(maps_dict4)
-del FinalPortfolio3
-del maps_dict3
-del FinalPortfolio4
-del maps_dict4
 elif RunsVar > 4:
 FieldStrength = 1
 FinalPortfolio3, maps_dict3 = get_correlated_portfolio_for_sim(Total_Runs_def * .1, sharp_split)
@@ -120,12 +109,8 @@ def run_seed_frame(seed_depth1, Strength_var, strength_grow, Teams_used, Total_R
 FinalPortfolio = FinalPortfolio.drop_duplicates(subset = ['Projection', 'Own'],keep = 'last').reset_index(drop = True)
 maps_dict.update(maps_dict3)
 maps_dict.update(maps_dict4)
-del FinalPortfolio3
-del maps_dict3
-del FinalPortfolio4
-del maps_dict4
 RunsVar += 1
-
+
 return FinalPortfolio, maps_dict

 def create_stack_options(player_data, wr_var):
@@ -142,9 +127,6 @@ def create_stack_options(player_data, wr_var):
 merged_frame = merged_frame.reset_index()
 correl_dict = dict(zip(merged_frame.QB, merged_frame.Player))

-del merged_frame
-del data_raw
-
 return correl_dict

 def create_overall_dfs(pos_players, table_name, dict_name, pos):
@@ -154,17 +136,11 @@ def create_overall_dfs(pos_players, table_name, dict_name, pos):
 overall_table_name = table_name_raw.head(round(len(table_name_raw)))
 overall_table_name = overall_table_name.assign(Var = range(0,len(overall_table_name)))
 overall_dict_name = pd.Series(overall_table_name.Player.values, index=overall_table_name.Var).to_dict()
-
-del pos_players
-del table_name_raw
 elif pos != "FLEX":
 table_name_raw = pos_players[pos_players['Position'].str.contains(pos)].reset_index(drop=True)
 overall_table_name = table_name_raw.head(round(len(table_name_raw)))
 overall_table_name = overall_table_name.assign(Var = range(0,len(overall_table_name)))
 overall_dict_name = pd.Series(overall_table_name.Player.values, index=overall_table_name.Var).to_dict()
-
-del pos_players
-del table_name_raw

 return overall_table_name, overall_dict_name

@@ -188,6 +164,7 @@ def calculate_range_var(count, min_val, FieldStrength, field_growth):
 var = round(len(count[0]) * FieldStrength)
 var = max(var, min_val)
 var += round(field_growth)
+
 return min(var, len(count[0]))

 def create_random_portfolio(Total_Sample_Size, raw_baselines):
@@ -211,9 +188,6 @@ def create_random_portfolio(Total_Sample_Size, raw_baselines):
 elif max_var > 16:
 ranges_dict['qb_range'] = round(max_var / 2)
 ranges_dict['dst_range'] = round(max_var)
-# Generate unique ranges
-# for key, value in ranges_dict.items():
-# ranges_dict[f"{key}_Uniques"] = list(range(0, value, 1))

 # Generate random portfolios
 rng = np.random.default_rng()
@@ -223,11 +197,6 @@ def create_random_portfolio(Total_Sample_Size, raw_baselines):
 all_choices = [rng.choice(ranges_dict[f"{key}_range"], size=(Total_Sample_Size, elem)) for key, elem in zip(keys, total_elements)]
 RandomPortfolio = pd.DataFrame(np.hstack(all_choices), columns=['QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'TE', 'FLEX', 'DST'])
 RandomPortfolio['User/Field'] = 0
-
-del rng
-del total_elements
-del all_choices
-del O_merge

 return RandomPortfolio, maps_dict, ranges_dict, full_pos_player_dict

@@ -253,12 +222,6 @@ def get_correlated_portfolio_for_sim(Total_Sample_Size, sharp_split):
 RandomPortfolio = RandomPortfolio[RandomPortfolio['plyr_count'] == 10].drop(columns=['plyr_list','plyr_count']).\
 reset_index(drop=True)

-del sizesplit
-del full_pos_player_dict
-del ranges_dict
-del stack_num
-del stacking_dict
-
 RandomPortfolio['QBs'] = RandomPortfolio['QB'].map(maps_dict['Salary_map']).astype(np.int32)
 RandomPortfolio['RB1s'] = RandomPortfolio['RB1'].map(maps_dict['Salary_map']).astype(np.int32)
 RandomPortfolio['RB2s'] = RandomPortfolio['RB2'].map(maps_dict['Salary_map']).astype(np.int32)
@@ -290,7 +253,6 @@ def get_correlated_portfolio_for_sim(Total_Sample_Size, sharp_split):
 RandomPortfolio['DSTo'] = RandomPortfolio['DST'].map(maps_dict['Own_map']).astype(np.float16)

 RandomPortArray = RandomPortfolio.to_numpy()
-del RandomPortfolio

 RandomPortArray = np.c_[RandomPortArray, np.einsum('ij->i',RandomPortArray[:,10:19].astype(int))]
 RandomPortArray = np.c_[RandomPortArray, np.einsum('ij->i',RandomPortArray[:,19:28].astype(np.double))]
@@ -299,8 +261,6 @@ def get_correlated_portfolio_for_sim(Total_Sample_Size, sharp_split):
 RandomPortArrayOut = np.delete(RandomPortArray, np.s_[10:37], axis=1)
 RandomPortfolioDF = pd.DataFrame(RandomPortArrayOut, columns = ['QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'TE', 'FLEX', 'DST', 'User/Field', 'Salary', 'Projection', 'Own'])
 RandomPortfolioDF = RandomPortfolioDF.sort_values(by=Sim_function, ascending=False)
-del RandomPortArray
-del RandomPortArrayOut

 if insert_port == 1:
 CleanPortfolio['Salary'] = sum([CleanPortfolio['QB'].map(maps_dict['Salary_map']),
@@ -345,8 +305,6 @@ def get_correlated_portfolio_for_sim(Total_Sample_Size, sharp_split):

 RandomPortfolio = RandomPortfolioDF.sort_values(by=Sim_function, ascending=False)

-del RandomPortfolioDF
-
 RandomPortfolio = RandomPortfolio[['QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'TE', 'FLEX', 'DST', 'User/Field', 'Salary', 'Projection', 'Own']]

 return RandomPortfolio, maps_dict
@@ -371,10 +329,6 @@ def get_uncorrelated_portfolio_for_sim(Total_Sample_Size, sharp_split):
 RandomPortfolio = RandomPortfolio[RandomPortfolio['plyr_count'] == 10].drop(columns=['plyr_list','plyr_count']).\
 reset_index(drop=True)

-del sizesplit
-del full_pos_player_dict
-del ranges_dict
-
 RandomPortfolio['QBs'] = RandomPortfolio['QB'].map(maps_dict['Salary_map']).astype(np.int32)
 RandomPortfolio['RB1s'] = RandomPortfolio['RB1'].map(maps_dict['Salary_map']).astype(np.int32)
 RandomPortfolio['RB2s'] = RandomPortfolio['RB2'].map(maps_dict['Salary_map']).astype(np.int32)
@@ -406,7 +360,6 @@ def get_uncorrelated_portfolio_for_sim(Total_Sample_Size, sharp_split):
 RandomPortfolio['DSTo'] = RandomPortfolio['DST'].map(maps_dict['Own_map']).astype(np.float16)

 RandomPortArray = RandomPortfolio.to_numpy()
-del RandomPortfolio

 RandomPortArray = np.c_[RandomPortArray, np.einsum('ij->i',RandomPortArray[:,10:19].astype(int))]
 RandomPortArray = np.c_[RandomPortArray, np.einsum('ij->i',RandomPortArray[:,19:28].astype(np.double))]
@@ -415,9 +368,6 @@ def get_uncorrelated_portfolio_for_sim(Total_Sample_Size, sharp_split):
 RandomPortArrayOut = np.delete(RandomPortArray, np.s_[10:37], axis=1)
 RandomPortfolioDF = pd.DataFrame(RandomPortArrayOut, columns = ['QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'TE', 'FLEX', 'DST', 'User/Field', 'Salary', 'Projection', 'Own'])
 RandomPortfolioDF = RandomPortfolioDF.sort_values(by=Sim_function, ascending=False)
-del RandomPortArray
-del RandomPortArrayOut
-# st.table(RandomPortfolioDF.head(50))

 if insert_port == 1:
 CleanPortfolio['Salary'] = sum([CleanPortfolio['QB'].map(maps_dict['Salary_map']),
@@ -464,8 +414,6 @@ def get_uncorrelated_portfolio_for_sim(Total_Sample_Size, sharp_split):

 RandomPortfolio = RandomPortfolio[['QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'TE', 'FLEX', 'DST', 'User/Field', 'Salary', 'Projection', 'Own']]

-del RandomPortfolioDF
-
 return RandomPortfolio, maps_dict


@@ -727,12 +675,6 @@ with tab1:
 static_exposure['Exposure'] = static_exposure['count'] / len(split_portfolio)
 static_exposure = static_exposure[['Player', 'Exposure']]

-del player_salary_dict
-del player_proj_dict
-del player_own_dict
-del player_team_dict
-del static_col_raw
-del static_col
 with st.container():
 col1, col2 = st.columns([3, 3])

@@ -770,8 +712,6 @@ with tab1:
 overall_exposure = overall_exposure.sort_values(by='Exposure', ascending=False)
 overall_exposure['Exposure'] = overall_exposure['Exposure'].astype(float).map(lambda n: '{:.2%}'.format(n))

-del static_exposure
-
 with st.container():
 col1, col2 = st.columns([1, 6])

@@ -788,9 +728,7 @@ with tab1:
 display_portfolio = split_portfolio[['Lineup', 'QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'TE', 'FLEX', 'DST', 'Salary', 'Main_Stack', 'Main_Stack_Size', 'Projection', 'Ownership']]
 display_portfolio = display_portfolio.set_index('Lineup')
 st.dataframe(display_portfolio.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').background_gradient(cmap='RdYlGn_r', subset=['Ownership']).format(precision=2))
-
-del exposure_col_raw
-del exposure_col
+
 with tab2:
 col1, col2 = st.columns([1, 7])
 with col1:
@@ -808,7 +746,7 @@ with tab2:
 site_var1 = st.radio("What site are you working with?", ('Draftkings', 'Fanduel'))
 if site_var1 == 'Draftkings':
 if slate_var1 == 'User':
-raw_baselines = proj_dataframe[['Player', 'Salary', 'Position', 'Team', 'Opp', 'Median', 'Own']]
+raw_baselines = proj_dataframe[['Player', 'Salary', 'Position', 'Team', 'Opp', 'Median', 'Own']].copy()
 elif slate_var1 != 'User':
 raw_baselines = dk_roo_raw[dk_roo_raw['slate'] == str(slate_var1)]
 raw_baselines = raw_baselines[raw_baselines['version'] == 'overall']
@@ -818,8 +756,7 @@ with tab2:
 elif slate_var1 != 'User':
 raw_baselines = fd_roo_raw[fd_roo_raw['slate'] == str(slate_var1)]
 raw_baselines = raw_baselines[raw_baselines['version'] == 'overall']
-
-del fd_roo_raw
+
 st.info("If you are uploading a portfolio, note that there is an adjustments to projections and deviation mapping to prevent 'Projection Bias' and create a fair simulation")
 insert_port1 = st.selectbox("Are you uploading a portfolio?", ('No', 'Yes'), key='insert_port1')
 if insert_port1 == 'Yes':
@@ -877,51 +814,53 @@ with tab2:
 Sim_function = 'Own'

 if slate_var1 == 'User':
-[... removed OwnFrame construction lines truncated in the diff view ...]
-OwnFrame['Own%'] = np.where(OwnFrame['Own%'] > 75, 75, OwnFrame['Own%'])
-OwnFrame['Own'] = OwnFrame['Own%'] * (900 / OwnFrame['Own%'].sum())
-Overall_Proj = OwnFrame[['Player', 'Team', 'Position', 'Median', 'Own', 'Salary']]
+initial_proj = proj_dataframe[['Player', 'Team', 'Position', 'Median', 'Own', 'Floor', 'Ceiling', 'Salary']].copy()
+
+# Define the calculation to be applied
+def calculate_own(position, own, mean_own, factor, max_own=75):
+    return np.where((position == 'QB') & (own - mean_own >= 0),
+                    own * (factor * (own - mean_own) / 100) + mean_own,
+                    own)
+
+# Set the factors based on the contest_var1
+factor_qb, factor_other = {
+    'Small': (10, 5),
+    'Medium': (6, 3),
+    'Large': (3, 1.5),
+}[contest_var1]

-
-
+# Apply the calculation to the DataFrame
+initial_proj['Own%'] = initial_proj.apply(lambda row: calculate_own(row['Position'], row['Own'], initial_proj.loc[initial_proj['Position'] == row['Position'], 'Own'].mean(), factor_qb if row['Position'] == 'QB' else factor_other), axis=1)
+initial_proj['Own%'] = initial_proj['Own%'].clip(upper=75)
+initial_proj['Own'] = initial_proj['Own%'] * (900 / initial_proj['Own%'].sum())
+
+# Drop unnecessary columns and create the final DataFrame
+Overall_Proj = initial_proj[['Player', 'Team', 'Position', 'Median', 'Own', 'Salary']]

 elif slate_var1 != 'User':
-[... removed OwnFrame construction lines truncated in the diff view ...]
-OwnFrame['Own%'] = np.where((OwnFrame['Position'] != 'QB') & (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] != 'QB', 'Own'].mean() >= 0), OwnFrame['Own'] * (1.5 * (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] != 'QB', 'Own'].mean())/100) + OwnFrame.loc[OwnFrame['Position'] != 'QB', 'Own'].mean(), OwnFrame['Own%'])
-OwnFrame['Own%'] = np.where(OwnFrame['Own%'] > 75, 75, OwnFrame['Own%'])
-OwnFrame['Own'] = OwnFrame['Own%'] * (900 / OwnFrame['Own%'].sum())
-Overall_Proj = OwnFrame[['Player', 'Team', 'Position', 'Median', 'Own', 'Salary']]
+# Copy only the necessary columns
+initial_proj = raw_baselines[['Player', 'Team', 'Position', 'Median', 'Own', 'Floor', 'Ceiling', 'Salary']].copy()
+
+# Define the calculation to be applied
+def calculate_own(position, own, mean_own, factor, max_own=75):
+    return np.where((position == 'QB') & (own - mean_own >= 0),
+                    own * (factor * (own - mean_own) / 100) + mean_own,
+                    own)
+
+# Set the factors based on the contest_var1
+factor_qb, factor_other = {
+    'Small': (10, 5),
+    'Medium': (6, 3),
+    'Large': (3, 1.5),
+}[contest_var1]

-
-
-
+# Apply the calculation to the DataFrame
+initial_proj['Own%'] = initial_proj.apply(lambda row: calculate_own(row['Position'], row['Own'], initial_proj.loc[initial_proj['Position'] == row['Position'], 'Own'].mean(), factor_qb if row['Position'] == 'QB' else factor_other), axis=1)
+initial_proj['Own%'] = initial_proj['Own%'].clip(upper=75)
+initial_proj['Own'] = initial_proj['Own%'] * (900 / initial_proj['Own%'].sum())
+
+# Drop unnecessary columns and create the final DataFrame
+Overall_Proj = initial_proj[['Player', 'Team', 'Position', 'Median', 'Own', 'Salary']]

 if insert_port == 1:
 UserPortfolio = portfolio_dataframe[['QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'TE', 'FLEX', 'DST']]
@@ -945,9 +884,6 @@ with tab2:
 Teams_used['team_item'] = Teams_used['index'] + 1
 Teams_used = Teams_used.drop(columns=['index'])
 Teams_used_dictraw = Teams_used.drop(columns=['team_item'])
-# Teams_used_dict = Teams_used_dictraw.to_dict()
-
-del Teams_used_dictraw

 team_list = Teams_used['Team'].to_list()
 item_list = Teams_used['team_item'].to_list()
@@ -955,8 +891,6 @@ with tab2:
 FieldStrength_raw = Strength_var + ((30 - len(Teams_used)) * .01)
 FieldStrength = FieldStrength_raw - (FieldStrength_raw * (20000 / Contest_Size))

-del FieldStrength_raw
-
 if FieldStrength < 0:
 FieldStrength = Strength_var
 field_split = Strength_var
@@ -1000,12 +934,6 @@ with tab2:
 pos_players = pd.concat([rbs_raw, wrs_raw, tes_raw])
 pos_players.dropna(subset=['Median']).reset_index(drop=True)
 pos_players = pos_players.reset_index(drop=True)
-
-del qbs_raw
-del defs_raw
-del rbs_raw
-del wrs_raw
-del tes_raw

 if insert_port == 1:
 try:
@@ -1025,8 +953,6 @@ with tab2:
 CleanPortfolio['User/Field'] = CleanPortfolio['index'] + 1
 CleanPortfolio.drop(columns=['index'], inplace=True)

-del positions
-
 CleanPortfolio.replace('', np.nan, inplace=True)
 CleanPortfolio.dropna(subset=['QB'], inplace=True)

@@ -1041,7 +967,6 @@ with tab2:
 nerf_frame = pd.merge(cleaport_players, Overall_Proj, on='Player', how='left')
 for col in ['Median', 'Floor', 'Ceiling', 'STDev']:
 nerf_frame[col] *= 0.90
-del Raw_Portfolio
 except:
 CleanPortfolio = UserPortfolio.reset_index()
 CleanPortfolio['User/Field'] = CleanPortfolio['index'] + 1
@@ -1100,10 +1025,6 @@ with tab2:
 'team_check_map':dict(zip(cleaport_players.Player,nerf_frame.Team))
 }

-del cleaport_players
-del Overall_Proj
-del nerf_frame
-
 st.write('Seed frame creation')
 FinalPortfolio, maps_dict = run_seed_frame(seed_depth1, Strength_var, strength_grow, Teams_used, Total_Runs)

@@ -1155,28 +1076,12 @@ with tab2:
 best_lineup = final_array[final_array[:, -1].argsort(kind='stable')[::-1][:1]]
 Sim_Winners.append(best_lineup)
 SimVar += 1
-
-del SimVar
-del ref_dict, up_dict
-del linenum_var1, UserPortfolio
-try:
-    del up_array
-except:
-    pass
-del CleanPortfolio
-del vec_projection_map
-del vec_stdev_map
-del sample_arrays
-del final_array
-del fp_array
-del fp_random
+
 st.write('Contest simulation complete')
 # Initial setup
 Sim_Winner_Frame = pd.DataFrame(np.concatenate(Sim_Winners), columns=FinalPortfolio.columns.tolist() + ['Fantasy'])
 Sim_Winner_Frame['GPP_Proj'] = (Sim_Winner_Frame['Projection'] + Sim_Winner_Frame['Fantasy']) / 2

-del FinalPortfolio
-
 # Type Casting
 type_cast_dict = {'Salary': int, 'Projection': np.float16, 'Fantasy': np.float16, 'GPP_Proj': np.float16}
 Sim_Winner_Frame = Sim_Winner_Frame.astype(type_cast_dict)
@@ -1187,8 +1092,6 @@ with tab2:
 # Data Copying
 st.session_state.Sim_Winner_Export = Sim_Winner_Frame.copy()

-del Sim_Winner_Frame
-
 # Conditional Replacement
 columns_to_replace = ['QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'TE', 'FLEX', 'DST']

@@ -1197,9 +1100,6 @@ with tab2:
 elif site_var1 == 'Fanduel':
 replace_dict = fdid_dict

-del dkid_dict
-del fdid_dict
-
 for col in columns_to_replace:
 st.session_state.Sim_Winner_Export[col].replace(replace_dict, inplace=True)

@@ -1216,7 +1116,6 @@ with tab2:
 player_freq['Team'] = player_freq['Team'].replace(item_list, team_list)

 st.session_state.player_freq = player_freq[['Player', 'Position', 'Team', 'Salary', 'Proj Own', 'Exposure', 'Edge']]
-del player_freq

 qb_freq = pd.DataFrame(np.column_stack(np.unique(st.session_state.Sim_Winner_Frame.iloc[:,0:1].values, return_counts=True)),
 columns=['Player','Freq']).sort_values('Freq', ascending=False).reset_index(drop=True)
@@ -1231,7 +1130,6 @@ with tab2:
 qb_freq['Team'] = qb_freq['Team'].replace(item_list, team_list)

 st.session_state.qb_freq = qb_freq[['Player', 'Team', 'Position', 'Salary', 'Proj Own', 'Exposure', 'Edge']]
-del qb_freq

 rb_freq = pd.DataFrame(np.column_stack(np.unique(st.session_state.Sim_Winner_Frame.iloc[:,[1, 2]].values, return_counts=True)),
 columns=['Player','Freq']).sort_values('Freq', ascending=False).reset_index(drop=True)
@@ -1246,7 +1144,6 @@ with tab2:
 rb_freq['Team'] = rb_freq['Team'].replace(item_list, team_list)

 st.session_state.rb_freq = rb_freq[['Player', 'Team', 'Position', 'Salary', 'Proj Own', 'Exposure', 'Edge']]
-del rb_freq

 wr_freq = pd.DataFrame(np.column_stack(np.unique(st.session_state.Sim_Winner_Frame.iloc[:,[3, 4, 5]].values, return_counts=True)),
 columns=['Player','Freq']).sort_values('Freq', ascending=False).reset_index(drop=True)
@@ -1261,7 +1158,6 @@ with tab2:
 wr_freq['Team'] = wr_freq['Team'].replace(item_list, team_list)

 st.session_state.wr_freq = wr_freq[['Player', 'Team', 'Position', 'Salary', 'Proj Own', 'Exposure', 'Edge']]
-del wr_freq

 te_freq = pd.DataFrame(np.column_stack(np.unique(st.session_state.Sim_Winner_Frame.iloc[:,[6]].values, return_counts=True)),
 columns=['Player','Freq']).sort_values('Freq', ascending=False).reset_index(drop=True)
@@ -1276,7 +1172,6 @@ with tab2:
 te_freq['Team'] = te_freq['Team'].replace(item_list, team_list)

 st.session_state.te_freq = te_freq[['Player', 'Team', 'Position', 'Salary', 'Proj Own', 'Exposure', 'Edge']]
-del te_freq

 flex_freq = pd.DataFrame(np.column_stack(np.unique(st.session_state.Sim_Winner_Frame.iloc[:,[7]].values, return_counts=True)),
 columns=['Player','Freq']).sort_values('Freq', ascending=False).reset_index(drop=True)
@@ -1291,7 +1186,6 @@ with tab2:
 flex_freq['Team'] = flex_freq['Team'].replace(item_list, team_list)

 st.session_state.flex_freq = flex_freq[['Player', 'Team', 'Position', 'Salary', 'Proj Own', 'Exposure', 'Edge']]
-del flex_freq

 dst_freq = pd.DataFrame(np.column_stack(np.unique(st.session_state.Sim_Winner_Frame.iloc[:,8:9].values, return_counts=True)),
 columns=['Player','Freq']).sort_values('Freq', ascending=False).reset_index(drop=True)
@@ -1306,12 +1200,6 @@ with tab2:
 dst_freq['Team'] = dst_freq['Team'].replace(item_list, team_list)

 st.session_state.dst_freq = dst_freq[['Player', 'Team', 'Position', 'Salary', 'Proj Own', 'Exposure', 'Edge']]
-del dst_freq
-
-del Sim_size
-del maps_dict
-del team_list
-del item_list

 with st.container():
 simulate_container = st.empty()
@@ -1401,4 +1289,11 @@ with tab2:
 data=convert_df_to_csv(st.session_state.dst_freq),
 file_name='dst_freq_export.csv',
 mime='text/csv',
-)
+)
+
+del gc
+del dk_roo_raw, fd_roo_raw
+del t_stamp
+del dkid_dict, fdid_dict
+for key in st.session_state.keys():
+    del st.session_state[key]
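
Note on the new ownership block in the User/non-User branches above: the commit swaps the old OwnFrame adjustments for a calculate_own helper that re-maps QB ownership sitting at or above the QB average using a contest-size factor, caps any single player at 75%, and rescales the slate so total ownership sums to 900% (presumably nine roster slots at 100% each). The snippet below is a minimal standalone sketch of that math on invented toy data, not code from the repo; the column names and the Small/Medium/Large factor table come from the diff, while the toy frame and variable names (toy, qb_mean) are hypothetical, and the commit's row-wise apply is replaced with an equivalent vectorized np.where since calculate_own only ever alters rows whose Position is 'QB'.

# Sketch of the ownership re-curve introduced by the commit, on invented toy data.
import numpy as np
import pandas as pd

toy = pd.DataFrame({
    'Player':   ['QB A', 'QB B', 'WR A', 'WR B', 'DST A'],
    'Position': ['QB',   'QB',   'WR',   'WR',   'DST'],
    'Own':      [25.0,   10.0,   30.0,   5.0,    8.0],
})

# Contest-size factors used by the commit; factor_other is effectively unused because
# calculate_own leaves non-QB rows unchanged.
factor_qb, factor_other = {'Small': (10, 5), 'Medium': (6, 3), 'Large': (3, 1.5)}['Small']

qb = toy['Position'] == 'QB'
qb_mean = toy.loc[qb, 'Own'].mean()

# Re-map QBs already at or above the QB average, leave everyone else as-is.
toy['Own%'] = np.where(
    qb & (toy['Own'] >= qb_mean),
    toy['Own'] * (factor_qb * (toy['Own'] - qb_mean) / 100) + qb_mean,
    toy['Own'],
)

toy['Own%'] = toy['Own%'].clip(upper=75)               # cap a single player at 75%
toy['Own'] = toy['Own%'] * (900 / toy['Own%'].sum())   # rescale so ownership sums to 900%

print(toy[['Player', 'Position', 'Own']].round(2))

On a full slate the 900 total corresponds to a complete lineup's worth of ownership; with only five toy rows the final rescale is purely illustrative.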