diff --git "a/app.py" "b/app.py" --- "a/app.py" +++ "b/app.py" @@ -30,59 +30,23 @@ def init_conn(): "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/gspread-connection%40sheets-api-connect-378620.iam.gserviceaccount.com" } - gc_con = gspread.service_account_from_dict(credentials) - - return gc_con - -gcservice_account = init_conn() - -freq_format = {'Proj Own': '{:.2%}', 'Exposure': '{:.2%}', 'Edge': '{:.2%}'} - -@st.cache_resource(ttl = 300) -def load_dk_player_projections(): - sh = gcservice_account.open_by_url('https://docs.google.com/spreadsheets/d/1I_1Ve3F4tftgfLQQoRKOJ351XfEG48s36OxXUKxmgS8/edit#gid=1391856348') - worksheet = sh.worksheet('DK_ROO') - load_display = pd.DataFrame(worksheet.get_all_records()) - load_display.replace('', np.nan, inplace=True) - raw_display = load_display.dropna(subset=['Median']) + gc = gspread.service_account_from_dict(credentials) + return gc - return raw_display +gcservice_account = init_conn() -@st.cache_resource(ttl = 300) -def load_fd_player_projections(): - sh = gcservice_account.open_by_url('https://docs.google.com/spreadsheets/d/1I_1Ve3F4tftgfLQQoRKOJ351XfEG48s36OxXUKxmgS8/edit#gid=1391856348') - worksheet = sh.worksheet('FD_ROO') - load_display = pd.DataFrame(worksheet.get_all_records()) - load_display.replace('', np.nan, inplace=True) - raw_display = load_display.dropna(subset=['Median']) +game_format = {'Win Percentage': '{:.2%}','First Inning Lead Percentage': '{:.2%}', + 'Fifth Inning Lead Percentage': '{:.2%}', '8+ runs': '{:.2%}', 'DK LevX': '{:.2%}', 'FD LevX': '{:.2%}'} - return raw_display +player_roo_format = {'Top_finish': '{:.2%}','Top_5_finish': '{:.2%}', 'Top_10_finish': '{:.2%}', '20+%': '{:.2%}', '2x%': '{:.2%}', '3x%': '{:.2%}', + '4x%': '{:.2%}','GPP%': '{:.2%}'} -@st.cache_resource(ttl = 300) -def set_export_ids(): - sh = gcservice_account.open_by_url('https://docs.google.com/spreadsheets/d/1I_1Ve3F4tftgfLQQoRKOJ351XfEG48s36OxXUKxmgS8/edit#gid=1391856348') - worksheet = sh.worksheet('DK_ROO') - load_display = pd.DataFrame(worksheet.get_all_records()) - load_display.replace('', np.nan, inplace=True) - raw_display = load_display.dropna(subset=['Median']) - dk_ids = dict(zip(raw_display['Player'], raw_display['player_id'])) - - worksheet = sh.worksheet('FD_ROO') - load_display = pd.DataFrame(worksheet.get_all_records()) - load_display.replace('', np.nan, inplace=True) - raw_display = load_display.dropna(subset=['Median']) - fd_ids = dict(zip(raw_display['Player'], raw_display['player_id'])) - - return dk_ids, fd_ids +freq_format = {'Proj Own': '{:.2%}', 'Exposure': '{:.2%}', 'Edge': '{:.2%}'} -dk_roo_raw = load_dk_player_projections() -fd_roo_raw = load_fd_player_projections() -t_stamp = f"Last Update: " + str(dk_roo_raw['timestamp'][0]) + f" CST" -dkid_dict, fdid_dict = set_export_ids() +@st.cache_data +def convert_df_to_csv(df): + return df.to_csv().encode('utf-8') -static_exposure = pd.DataFrame(columns=['Player', 'count']) -overall_exposure = pd.DataFrame(columns=['Player', 'count']) - def sim_contest(Sim_size, FinalPortfolio, CleanPortfolio, maps_dict, up_dict, insert_port): SimVar = 1 Sim_Winners = [] @@ -134,44 +98,52 @@ def sim_contest(Sim_size, FinalPortfolio, CleanPortfolio, maps_dict, up_dict, in return Sim_Winners -def run_seed_frame(seed_depth1, Strength_var, strength_grow, Teams_used, Total_Runs, field_growth): +def run_seed_frame(seed_depth1, Strength_var, strength_grow, Teams_used, Total_Runs): RunsVar = 1 seed_depth_def = seed_depth1 Strength_var_def = Strength_var strength_grow_def = 
strength_grow Teams_used_def = Teams_used Total_Runs_def = Total_Runs - - st.write('Creating Seed Frames') - while RunsVar <= seed_depth_def: if RunsVar <= 3: FieldStrength = Strength_var_def - FinalPortfolio, maps_dict = get_correlated_portfolio_for_sim(Total_Runs_def * .25, sharp_split, field_growth) - FinalPortfolio2, maps_dict2 = get_uncorrelated_portfolio_for_sim(Total_Runs_def * .25, sharp_split, field_growth) - FinalPortfolio_init = pd.concat([FinalPortfolio, FinalPortfolio2], axis=0) + RandomPortfolio, maps_dict = get_correlated_portfolio_for_sim(Total_Runs_def * .1) + FinalPortfolio = RandomPortfolio + FinalPortfolio2, maps_dict2 = get_uncorrelated_portfolio_for_sim(Total_Runs_def * .1) + FinalPortfolio = pd.concat([FinalPortfolio, FinalPortfolio2], axis=0) maps_dict.update(maps_dict2) + del FinalPortfolio2 + del maps_dict2 elif RunsVar > 3 and RunsVar <= 4: FieldStrength += (strength_grow_def + ((30 - len(Teams_used_def)) * .001)) - FinalPortfolio3, maps_dict3 = get_correlated_portfolio_for_sim(Total_Runs_def * .25, sharp_split, field_growth) - FinalPortfolio4, maps_dict4 = get_uncorrelated_portfolio_for_sim(Total_Runs_def * .25, sharp_split, field_growth) - FinalPortfolio_merge_3 = pd.concat([FinalPortfolio_init, FinalPortfolio3], axis=0) - FinalPortfolio_merge_4 = pd.concat([FinalPortfolio_merge_3, FinalPortfolio4], axis=0) - FinalPortfolio_step_2 = FinalPortfolio_merge_4.drop_duplicates(subset = ['Projection', 'Own'],keep = 'last').reset_index(drop = True) + FinalPortfolio3, maps_dict3 = get_correlated_portfolio_for_sim(Total_Runs_def * .1) + FinalPortfolio4, maps_dict4 = get_uncorrelated_portfolio_for_sim(Total_Runs_def * .1) + FinalPortfolio = pd.concat([FinalPortfolio, FinalPortfolio3], axis=0) + FinalPortfolio = pd.concat([FinalPortfolio, FinalPortfolio4], axis=0) + FinalPortfolio = FinalPortfolio.drop_duplicates(subset = ['Projection', 'Own'],keep = 'last').reset_index(drop = True) maps_dict.update(maps_dict3) maps_dict.update(maps_dict4) + del FinalPortfolio3 + del maps_dict3 + del FinalPortfolio4 + del maps_dict4 elif RunsVar > 4: FieldStrength = 1 - FinalPortfolio5, maps_dict5 = get_correlated_portfolio_for_sim(Total_Runs_def * .25, sharp_split, field_growth) - FinalPortfolio6, maps_dict6 = get_uncorrelated_portfolio_for_sim(Total_Runs_def * .25, sharp_split, field_growth) - FinalPortfolio_merge_5 = pd.concat([FinalPortfolio_step_2, FinalPortfolio5], axis=0) - FinalPortfolio_merge_6 = pd.concat([FinalPortfolio_merge_5, FinalPortfolio6], axis=0) - FinalPortfolio_export = FinalPortfolio_merge_6.drop_duplicates(subset = ['Projection', 'Own'],keep = 'last').reset_index(drop = True) - maps_dict.update(maps_dict5) - maps_dict.update(maps_dict6) + FinalPortfolio3, maps_dict3 = get_correlated_portfolio_for_sim(Total_Runs_def * .1) + FinalPortfolio4, maps_dict4 = get_uncorrelated_portfolio_for_sim(Total_Runs_def * .1) + FinalPortfolio = pd.concat([FinalPortfolio, FinalPortfolio3], axis=0) + FinalPortfolio = pd.concat([FinalPortfolio, FinalPortfolio4], axis=0) + FinalPortfolio = FinalPortfolio.drop_duplicates(subset = ['Projection', 'Own'],keep = 'last').reset_index(drop = True) + maps_dict.update(maps_dict3) + maps_dict.update(maps_dict4) + del FinalPortfolio3 + del maps_dict3 + del FinalPortfolio4 + del maps_dict4 RunsVar += 1 - - return FinalPortfolio_export, maps_dict + + return FinalPortfolio, maps_dict def create_stack_options(player_data, wr_var): merged_frame = pd.DataFrame(columns = ['QB', 'Player']) @@ -187,86 +159,104 @@ def create_stack_options(player_data, wr_var): 
merged_frame = merged_frame.reset_index() correl_dict = dict(zip(merged_frame.QB, merged_frame.Player)) + del merged_frame + del data_raw + return correl_dict -def create_overall_dfs(pos_players, table_name, dict_name, pos): - if pos == "FLEX": +def create_overall_dfs(s_pos_players, pos_players, table_name, dict_name, pos): + if pos == "S_FLEX": + table_name_raw = s_pos_players.reset_index(drop=True) + overall_table_name = table_name_raw.head(round(len(table_name_raw))) + overall_table_name = overall_table_name.assign(Var = range(0,len(overall_table_name))) + overall_dict_name = pd.Series(overall_table_name.Player.values, index=overall_table_name.Var).to_dict() + + del pos_players + del table_name_raw + + elif pos == "FLEX": pos_players = pos_players.sort_values(by='Value', ascending=False) table_name_raw = pos_players.reset_index(drop=True) overall_table_name = table_name_raw.head(round(len(table_name_raw))) overall_table_name = overall_table_name.assign(Var = range(0,len(overall_table_name))) overall_dict_name = pd.Series(overall_table_name.Player.values, index=overall_table_name.Var).to_dict() + + del pos_players + del table_name_raw + elif pos != "FLEX": table_name_raw = pos_players[pos_players['Position'].str.contains(pos)].reset_index(drop=True) overall_table_name = table_name_raw.head(round(len(table_name_raw))) overall_table_name = overall_table_name.assign(Var = range(0,len(overall_table_name))) overall_dict_name = pd.Series(overall_table_name.Player.values, index=overall_table_name.Var).to_dict() + + del pos_players + del table_name_raw return overall_table_name, overall_dict_name def get_overall_merged_df(): ref_dict = { - 'pos':['RB', 'WR', 'TE', 'FLEX'], - 'pos_dfs':['RB_Table', 'WR_Table', 'TE_Table', 'FLEX_Table'], - 'pos_dicts':['rb_dict', 'wr_dict', 'te_dict', 'flex_dict'] + 'pos':['RB', 'WR', 'FLEX', 'S_FLEX'], + 'pos_dfs':['RB_Table', 'WR_Table', 'FLEX_Table', 'S_FLEX_Table'], + 'pos_dicts':['rb_dict', 'wr_dict', 'flex_dict', 's_flex_dict'] } for i in range(0,4): ref_dict['pos_dfs'][i], ref_dict['pos_dicts'][i] =\ - create_overall_dfs(pos_players, ref_dict['pos_dfs'][i], ref_dict['pos_dicts'][i], ref_dict['pos'][i]) + create_overall_dfs(s_pos_players, pos_players, ref_dict['pos_dfs'][i], ref_dict['pos_dicts'][i], ref_dict['pos'][i]) df_out = pd.concat(ref_dict['pos_dfs'], ignore_index=True) - return ref_dict + return df_out, ref_dict def calculate_range_var(count, min_val, FieldStrength, field_growth): var = round(len(count[0]) * FieldStrength) var = max(var, min_val) var += round(field_growth) - return min(var, len(count[0])) -def create_random_portfolio(Total_Sample_Size, raw_baselines, field_growth): +def create_random_portfolio(Total_Sample_Size, raw_baselines): - full_pos_player_dict = get_overall_merged_df() - qb_baselines = raw_baselines[raw_baselines['Position'] == 'QB'] - qb_baselines = qb_baselines.drop_duplicates(subset='Team') - max_var = len(qb_baselines[qb_baselines['Position'] == 'QB']) + O_merge, full_pos_player_dict = get_overall_merged_df() + max_var = len(raw_baselines[raw_baselines['Position'] == 'QB']) field_growth_rounded = round(field_growth) ranges_dict = {} # Calculate ranges - for df, dict_val, min_val, key in zip(ref_dict['pos_dfs'], ref_dict['pos_dicts'], [10, 20, 10, 30], ['RB', 'WR', 'TE', 'FLEX']): - count = create_overall_dfs(pos_players, df, dict_val, key) + for df, dict_val, min_val, key in zip(ref_dict['pos_dfs'], ref_dict['pos_dicts'], [10, 20, 30, 30], ['RB', 'WR', 'FLEX', 'S_FLEX']): + count = create_overall_dfs(s_pos_players, 
pos_players, df, dict_val, key) ranges_dict[f"{key.lower()}_range"] = calculate_range_var(count, min_val, FieldStrength, field_growth_rounded) if max_var <= 10: ranges_dict['qb_range'] = round(max_var) - ranges_dict['dst_range'] = round(max_var) elif max_var > 10 and max_var <= 16: ranges_dict['qb_range'] = round(max_var / 1.5) - ranges_dict['dst_range'] = round(max_var) elif max_var > 16: ranges_dict['qb_range'] = round(max_var / 2) - ranges_dict['dst_range'] = round(max_var) + # Generate unique ranges + # for key, value in ranges_dict.items(): + # ranges_dict[f"{key}_Uniques"] = list(range(0, value, 1)) # Generate random portfolios rng = np.random.default_rng() - total_elements = [1, 2, 3, 1, 1, 1] - keys = ['qb', 'rb', 'wr', 'te', 'flex', 'dst'] + total_elements = [1, 2, 3, 1, 1] + keys = ['qb', 'rb', 'wr', 'flex', 's_flex'] all_choices = [rng.choice(ranges_dict[f"{key}_range"], size=(Total_Sample_Size, elem)) for key, elem in zip(keys, total_elements)] - RandomPortfolio = pd.DataFrame(np.hstack(all_choices), columns=['QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'TE', 'FLEX', 'DST']) + RandomPortfolio = pd.DataFrame(np.hstack(all_choices), columns=['QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'FLEX', 'S_FLEX']) RandomPortfolio['User/Field'] = 0 + + del O_merge return RandomPortfolio, maps_dict, ranges_dict, full_pos_player_dict -def get_correlated_portfolio_for_sim(Total_Sample_Size, sharp_split, field_growth): +def get_correlated_portfolio_for_sim(Total_Sample_Size): sizesplit = round(Total_Sample_Size * sharp_split) - RandomPortfolio, maps_dict, ranges_dict, full_pos_player_dict = create_random_portfolio(sizesplit, raw_baselines, field_growth) + RandomPortfolio, maps_dict, ranges_dict, full_pos_player_dict = create_random_portfolio(sizesplit, raw_baselines) stack_num = random.randint(1, 3) stacking_dict = create_stack_options(raw_baselines, stack_num) @@ -276,23 +266,27 @@ def get_correlated_portfolio_for_sim(Total_Sample_Size, sharp_split, field_growt RandomPortfolio['WR1'] = pd.Series(list(RandomPortfolio['QB'].map(stacking_dict)), dtype="string[pyarrow]") RandomPortfolio['WR2'] = pd.Series(list(RandomPortfolio['WR2'].map(full_pos_player_dict['pos_dicts'][1])), dtype="string[pyarrow]") RandomPortfolio['WR3'] = pd.Series(list(RandomPortfolio['WR3'].map(full_pos_player_dict['pos_dicts'][1])), dtype="string[pyarrow]") - RandomPortfolio['TE'] = pd.Series(list(RandomPortfolio['TE'].map(full_pos_player_dict['pos_dicts'][2])), dtype="string[pyarrow]") - RandomPortfolio['FLEX'] = pd.Series(list(RandomPortfolio['FLEX'].map(full_pos_player_dict['pos_dicts'][3])), dtype="string[pyarrow]") - RandomPortfolio['DST'] = pd.Series(list(RandomPortfolio['DST'].map(def_dict)), dtype="string[pyarrow]") + RandomPortfolio['FLEX'] = pd.Series(list(RandomPortfolio['FLEX'].map(full_pos_player_dict['pos_dicts'][2])), dtype="string[pyarrow]") + RandomPortfolio['S_FLEX'] = pd.Series(list(RandomPortfolio['S_FLEX'].map(full_pos_player_dict['pos_dicts'][3])), dtype="string[pyarrow]") RandomPortfolio['plyr_list'] = RandomPortfolio[RandomPortfolio.columns.values.tolist()].values.tolist() RandomPortfolio['plyr_count'] = RandomPortfolio['plyr_list'].apply(lambda x: len(set(x))) - RandomPortfolio = RandomPortfolio[RandomPortfolio['plyr_count'] == 10].drop(columns=['plyr_list','plyr_count']).\ + RandomPortfolio = RandomPortfolio[RandomPortfolio['plyr_count'] == 9].drop(columns=['plyr_list','plyr_count']).\ reset_index(drop=True) + del sizesplit + del full_pos_player_dict + del ranges_dict + del stack_num + del stacking_dict + 
RandomPortfolio['QBs'] = RandomPortfolio['QB'].map(maps_dict['Salary_map']).astype(np.int32) RandomPortfolio['RB1s'] = RandomPortfolio['RB1'].map(maps_dict['Salary_map']).astype(np.int32) RandomPortfolio['RB2s'] = RandomPortfolio['RB2'].map(maps_dict['Salary_map']).astype(np.int32) RandomPortfolio['WR1s'] = RandomPortfolio['WR1'].map(maps_dict['Salary_map']).astype(np.int32) RandomPortfolio['WR2s'] = RandomPortfolio['WR2'].map(maps_dict['Salary_map']).astype(np.int32) RandomPortfolio['WR3s'] = RandomPortfolio['WR3'].map(maps_dict['Salary_map']).astype(np.int32) - RandomPortfolio['TEs'] = RandomPortfolio['TE'].map(maps_dict['Salary_map']).astype(np.int32) RandomPortfolio['FLEXs'] = RandomPortfolio['FLEX'].map(maps_dict['Salary_map']).astype(np.int32) - RandomPortfolio['DSTs'] = RandomPortfolio['DST'].map(maps_dict['Salary_map']).astype(np.int32) + RandomPortfolio['S_FLEXs'] = RandomPortfolio['S_FLEX'].map(maps_dict['Salary_map']).astype(np.int32) RandomPortfolio['QBp'] = RandomPortfolio['QB'].map(maps_dict['Projection_map']).astype(np.float16) RandomPortfolio['RB1p'] = RandomPortfolio['RB1'].map(maps_dict['Projection_map']).astype(np.float16) @@ -300,9 +294,8 @@ def get_correlated_portfolio_for_sim(Total_Sample_Size, sharp_split, field_growt RandomPortfolio['WR1p'] = RandomPortfolio['WR1'].map(maps_dict['Projection_map']).astype(np.float16) RandomPortfolio['WR2p'] = RandomPortfolio['WR2'].map(maps_dict['Projection_map']).astype(np.float16) RandomPortfolio['WR3p'] = RandomPortfolio['WR3'].map(maps_dict['Projection_map']).astype(np.float16) - RandomPortfolio['TEp'] = RandomPortfolio['TE'].map(maps_dict['Projection_map']).astype(np.float16) RandomPortfolio['FLEXp'] = RandomPortfolio['FLEX'].map(maps_dict['Projection_map']).astype(np.float16) - RandomPortfolio['DSTp'] = RandomPortfolio['DST'].map(maps_dict['Projection_map']).astype(np.float16) + RandomPortfolio['S_FLEXp'] = RandomPortfolio['S_FLEX'].map(maps_dict['Projection_map']).astype(np.float16) RandomPortfolio['QBo'] = RandomPortfolio['QB'].map(maps_dict['Own_map']).astype(np.float16) RandomPortfolio['RB1o'] = RandomPortfolio['RB1'].map(maps_dict['Own_map']).astype(np.float16) @@ -310,19 +303,22 @@ def get_correlated_portfolio_for_sim(Total_Sample_Size, sharp_split, field_growt RandomPortfolio['WR1o'] = RandomPortfolio['WR1'].map(maps_dict['Own_map']).astype(np.float16) RandomPortfolio['WR2o'] = RandomPortfolio['WR2'].map(maps_dict['Own_map']).astype(np.float16) RandomPortfolio['WR3o'] = RandomPortfolio['WR3'].map(maps_dict['Own_map']).astype(np.float16) - RandomPortfolio['TEo'] = RandomPortfolio['TE'].map(maps_dict['Own_map']).astype(np.float16) RandomPortfolio['FLEXo'] = RandomPortfolio['FLEX'].map(maps_dict['Own_map']).astype(np.float16) - RandomPortfolio['DSTo'] = RandomPortfolio['DST'].map(maps_dict['Own_map']).astype(np.float16) + RandomPortfolio['S_FLEXo'] = RandomPortfolio['S_FLEX'].map(maps_dict['Own_map']).astype(np.float16) RandomPortArray = RandomPortfolio.to_numpy() + del RandomPortfolio - RandomPortArray = np.c_[RandomPortArray, np.einsum('ij->i',RandomPortArray[:,10:19].astype(int))] - RandomPortArray = np.c_[RandomPortArray, np.einsum('ij->i',RandomPortArray[:,19:28].astype(np.double))] - RandomPortArray = np.c_[RandomPortArray, np.einsum('ij->i',RandomPortArray[:,28:37].astype(np.double))] - - RandomPortArrayOut = np.delete(RandomPortArray, np.s_[10:37], axis=1) - RandomPortfolioDF = pd.DataFrame(RandomPortArrayOut, columns = ['QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'TE', 'FLEX', 'DST', 'User/Field', 'Salary', 
'Projection', 'Own']) + RandomPortArray = np.c_[RandomPortArray, np.einsum('ij->i',RandomPortArray[:,9:17].astype(int))] + RandomPortArray = np.c_[RandomPortArray, np.einsum('ij->i',RandomPortArray[:,17:25].astype(np.double))] + RandomPortArray = np.c_[RandomPortArray, np.einsum('ij->i',RandomPortArray[:,25:33].astype(np.double))] + # st.write(RandomPortArray[:,:100]) + RandomPortArrayOut = np.delete(RandomPortArray, np.s_[9:33], axis=1) + # st.write(RandomPortArrayOut[:,:100]) + RandomPortfolioDF = pd.DataFrame(RandomPortArrayOut, columns = ['QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'FLEX', 'S_FLEX', 'User/Field', 'Salary', 'Projection', 'Own']) RandomPortfolioDF = RandomPortfolioDF.sort_values(by=Sim_function, ascending=False) + del RandomPortArray + del RandomPortArrayOut if insert_port == 1: CleanPortfolio['Salary'] = sum([CleanPortfolio['QB'].map(maps_dict['Salary_map']), @@ -331,9 +327,8 @@ def get_correlated_portfolio_for_sim(Total_Sample_Size, sharp_split, field_growt CleanPortfolio['WR1'].map(maps_dict['Salary_map']), CleanPortfolio['WR2'].map(maps_dict['Salary_map']), CleanPortfolio['WR3'].map(maps_dict['Salary_map']), - CleanPortfolio['TE'].map(maps_dict['Salary_map']), CleanPortfolio['FLEX'].map(maps_dict['Salary_map']), - CleanPortfolio['DST'].map(maps_dict['Salary_map']) + CleanPortfolio['S_FLEX'].map(maps_dict['Salary_map']) ]).astype(np.int16) if insert_port == 1: CleanPortfolio['Projection'] = sum([CleanPortfolio['QB'].map(up_dict['Projection_map']), @@ -342,9 +337,8 @@ def get_correlated_portfolio_for_sim(Total_Sample_Size, sharp_split, field_growt CleanPortfolio['WR1'].map(up_dict['Projection_map']), CleanPortfolio['WR2'].map(up_dict['Projection_map']), CleanPortfolio['WR3'].map(up_dict['Projection_map']), - CleanPortfolio['TE'].map(up_dict['Projection_map']), CleanPortfolio['FLEX'].map(up_dict['Projection_map']), - CleanPortfolio['DST'].map(up_dict['Projection_map']) + CleanPortfolio['S_FLEX'].map(up_dict['Projection_map']) ]).astype(np.float16) if insert_port == 1: CleanPortfolio['Own'] = sum([CleanPortfolio['QB'].map(maps_dict['Own_map']), @@ -353,29 +347,25 @@ def get_correlated_portfolio_for_sim(Total_Sample_Size, sharp_split, field_growt CleanPortfolio['WR1'].map(maps_dict['Own_map']), CleanPortfolio['WR2'].map(maps_dict['Own_map']), CleanPortfolio['WR3'].map(maps_dict['Own_map']), - CleanPortfolio['TE'].map(maps_dict['Own_map']), CleanPortfolio['FLEX'].map(maps_dict['Own_map']), - CleanPortfolio['DST'].map(maps_dict['Own_map']) + CleanPortfolio['S_FLEX'].map(maps_dict['Own_map']) ]).astype(np.float16) if site_var1 == 'Draftkings': RandomPortfolioDF = RandomPortfolioDF[RandomPortfolioDF['Salary'] <= 50000].reset_index(drop=True) RandomPortfolioDF = RandomPortfolioDF[RandomPortfolioDF['Salary'] >= (49500 - (5000 * (1 - (len(Teams_used) / 32)))) - (FieldStrength * 1000)].reset_index(drop=True) - elif site_var1 == 'Fanduel': - RandomPortfolioDF = RandomPortfolioDF[RandomPortfolioDF['Salary'] <= 60000].reset_index(drop=True) - RandomPortfolioDF = RandomPortfolioDF[RandomPortfolioDF['Salary'] >= (59500 - (5000 * (1 - (len(Teams_used) / 32)))) - (FieldStrength * 1000)].reset_index(drop=True) RandomPortfolio = RandomPortfolioDF.sort_values(by=Sim_function, ascending=False) - RandomPortfolio = RandomPortfolio[['QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'TE', 'FLEX', 'DST', 'User/Field', 'Salary', 'Projection', 'Own']] + RandomPortfolio = RandomPortfolio[['QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'FLEX', 'S_FLEX', 'User/Field', 'Salary', 'Projection', 'Own']] return 
RandomPortfolio, maps_dict -def get_uncorrelated_portfolio_for_sim(Total_Sample_Size, sharp_split, field_growth): +def get_uncorrelated_portfolio_for_sim(Total_Sample_Size): sizesplit = round(Total_Sample_Size * (1-sharp_split)) - RandomPortfolio, maps_dict, ranges_dict, full_pos_player_dict = create_random_portfolio(sizesplit, raw_baselines, field_growth) + RandomPortfolio, maps_dict, ranges_dict, full_pos_player_dict = create_random_portfolio(sizesplit, raw_baselines) RandomPortfolio['QB'] = pd.Series(list(RandomPortfolio['QB'].map(qb_dict)), dtype="string[pyarrow]") RandomPortfolio['RB1'] = pd.Series(list(RandomPortfolio['RB1'].map(full_pos_player_dict['pos_dicts'][0])), dtype="string[pyarrow]") @@ -383,23 +373,25 @@ def get_uncorrelated_portfolio_for_sim(Total_Sample_Size, sharp_split, field_gro RandomPortfolio['WR1'] = pd.Series(list(RandomPortfolio['WR1'].map(full_pos_player_dict['pos_dicts'][1])), dtype="string[pyarrow]") RandomPortfolio['WR2'] = pd.Series(list(RandomPortfolio['WR2'].map(full_pos_player_dict['pos_dicts'][1])), dtype="string[pyarrow]") RandomPortfolio['WR3'] = pd.Series(list(RandomPortfolio['WR3'].map(full_pos_player_dict['pos_dicts'][1])), dtype="string[pyarrow]") - RandomPortfolio['TE'] = pd.Series(list(RandomPortfolio['TE'].map(full_pos_player_dict['pos_dicts'][2])), dtype="string[pyarrow]") - RandomPortfolio['FLEX'] = pd.Series(list(RandomPortfolio['FLEX'].map(full_pos_player_dict['pos_dicts'][3])), dtype="string[pyarrow]") - RandomPortfolio['DST'] = pd.Series(list(RandomPortfolio['DST'].map(def_dict)), dtype="string[pyarrow]") + RandomPortfolio['FLEX'] = pd.Series(list(RandomPortfolio['FLEX'].map(full_pos_player_dict['pos_dicts'][2])), dtype="string[pyarrow]") + RandomPortfolio['S_FLEX'] = pd.Series(list(RandomPortfolio['S_FLEX'].map(full_pos_player_dict['pos_dicts'][3])), dtype="string[pyarrow]") RandomPortfolio['plyr_list'] = RandomPortfolio[RandomPortfolio.columns.values.tolist()].values.tolist() RandomPortfolio['plyr_count'] = RandomPortfolio['plyr_list'].apply(lambda x: len(set(x))) - RandomPortfolio = RandomPortfolio[RandomPortfolio['plyr_count'] == 10].drop(columns=['plyr_list','plyr_count']).\ + RandomPortfolio = RandomPortfolio[RandomPortfolio['plyr_count'] == 9].drop(columns=['plyr_list','plyr_count']).\ reset_index(drop=True) + del sizesplit + del full_pos_player_dict + del ranges_dict + RandomPortfolio['QBs'] = RandomPortfolio['QB'].map(maps_dict['Salary_map']).astype(np.int32) RandomPortfolio['RB1s'] = RandomPortfolio['RB1'].map(maps_dict['Salary_map']).astype(np.int32) RandomPortfolio['RB2s'] = RandomPortfolio['RB2'].map(maps_dict['Salary_map']).astype(np.int32) RandomPortfolio['WR1s'] = RandomPortfolio['WR1'].map(maps_dict['Salary_map']).astype(np.int32) RandomPortfolio['WR2s'] = RandomPortfolio['WR2'].map(maps_dict['Salary_map']).astype(np.int32) RandomPortfolio['WR3s'] = RandomPortfolio['WR3'].map(maps_dict['Salary_map']).astype(np.int32) - RandomPortfolio['TEs'] = RandomPortfolio['TE'].map(maps_dict['Salary_map']).astype(np.int32) RandomPortfolio['FLEXs'] = RandomPortfolio['FLEX'].map(maps_dict['Salary_map']).astype(np.int32) - RandomPortfolio['DSTs'] = RandomPortfolio['DST'].map(maps_dict['Salary_map']).astype(np.int32) + RandomPortfolio['S_FLEXs'] = RandomPortfolio['S_FLEX'].map(maps_dict['Salary_map']).astype(np.int32) RandomPortfolio['QBp'] = RandomPortfolio['QB'].map(maps_dict['Projection_map']).astype(np.float16) RandomPortfolio['RB1p'] = RandomPortfolio['RB1'].map(maps_dict['Projection_map']).astype(np.float16) @@ -407,9 +399,8 @@ def 
get_uncorrelated_portfolio_for_sim(Total_Sample_Size, sharp_split, field_gro RandomPortfolio['WR1p'] = RandomPortfolio['WR1'].map(maps_dict['Projection_map']).astype(np.float16) RandomPortfolio['WR2p'] = RandomPortfolio['WR2'].map(maps_dict['Projection_map']).astype(np.float16) RandomPortfolio['WR3p'] = RandomPortfolio['WR3'].map(maps_dict['Projection_map']).astype(np.float16) - RandomPortfolio['TEp'] = RandomPortfolio['TE'].map(maps_dict['Projection_map']).astype(np.float16) RandomPortfolio['FLEXp'] = RandomPortfolio['FLEX'].map(maps_dict['Projection_map']).astype(np.float16) - RandomPortfolio['DSTp'] = RandomPortfolio['DST'].map(maps_dict['Projection_map']).astype(np.float16) + RandomPortfolio['S_FLEXp'] = RandomPortfolio['S_FLEX'].map(maps_dict['Projection_map']).astype(np.float16) RandomPortfolio['QBo'] = RandomPortfolio['QB'].map(maps_dict['Own_map']).astype(np.float16) RandomPortfolio['RB1o'] = RandomPortfolio['RB1'].map(maps_dict['Own_map']).astype(np.float16) @@ -417,20 +408,23 @@ def get_uncorrelated_portfolio_for_sim(Total_Sample_Size, sharp_split, field_gro RandomPortfolio['WR1o'] = RandomPortfolio['WR1'].map(maps_dict['Own_map']).astype(np.float16) RandomPortfolio['WR2o'] = RandomPortfolio['WR2'].map(maps_dict['Own_map']).astype(np.float16) RandomPortfolio['WR3o'] = RandomPortfolio['WR3'].map(maps_dict['Own_map']).astype(np.float16) - RandomPortfolio['TEo'] = RandomPortfolio['TE'].map(maps_dict['Own_map']).astype(np.float16) RandomPortfolio['FLEXo'] = RandomPortfolio['FLEX'].map(maps_dict['Own_map']).astype(np.float16) - RandomPortfolio['DSTo'] = RandomPortfolio['DST'].map(maps_dict['Own_map']).astype(np.float16) + RandomPortfolio['S_FLEXo'] = RandomPortfolio['S_FLEX'].map(maps_dict['Own_map']).astype(np.float16) RandomPortArray = RandomPortfolio.to_numpy() + del RandomPortfolio - RandomPortArray = np.c_[RandomPortArray, np.einsum('ij->i',RandomPortArray[:,10:19].astype(int))] - RandomPortArray = np.c_[RandomPortArray, np.einsum('ij->i',RandomPortArray[:,19:28].astype(np.double))] - RandomPortArray = np.c_[RandomPortArray, np.einsum('ij->i',RandomPortArray[:,28:37].astype(np.double))] - - RandomPortArrayOut = np.delete(RandomPortArray, np.s_[10:37], axis=1) - RandomPortfolioDF = pd.DataFrame(RandomPortArrayOut, columns = ['QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'TE', 'FLEX', 'DST', 'User/Field', 'Salary', 'Projection', 'Own']) + RandomPortArray = np.c_[RandomPortArray, np.einsum('ij->i',RandomPortArray[:,9:17].astype(int))] + RandomPortArray = np.c_[RandomPortArray, np.einsum('ij->i',RandomPortArray[:,17:25].astype(np.double))] + RandomPortArray = np.c_[RandomPortArray, np.einsum('ij->i',RandomPortArray[:,25:33].astype(np.double))] + # st.write(RandomPortArray[:,:100]) + RandomPortArrayOut = np.delete(RandomPortArray, np.s_[9:33], axis=1) + # st.write(RandomPortArrayOut[:,:100]) + RandomPortfolioDF = pd.DataFrame(RandomPortArrayOut, columns = ['QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'FLEX', 'S_FLEX', 'User/Field', 'Salary', 'Projection', 'Own']) RandomPortfolioDF = RandomPortfolioDF.sort_values(by=Sim_function, ascending=False) - + del RandomPortArray + del RandomPortArrayOut + if insert_port == 1: CleanPortfolio['Salary'] = sum([CleanPortfolio['QB'].map(maps_dict['Salary_map']), CleanPortfolio['RB1'].map(maps_dict['Salary_map']), @@ -438,9 +432,8 @@ def get_uncorrelated_portfolio_for_sim(Total_Sample_Size, sharp_split, field_gro CleanPortfolio['WR1'].map(maps_dict['Salary_map']), CleanPortfolio['WR2'].map(maps_dict['Salary_map']), 
CleanPortfolio['WR3'].map(maps_dict['Salary_map']), - CleanPortfolio['TE'].map(maps_dict['Salary_map']), CleanPortfolio['FLEX'].map(maps_dict['Salary_map']), - CleanPortfolio['DST'].map(maps_dict['Salary_map']) + CleanPortfolio['S_FLEX'].map(maps_dict['Salary_map']) ]).astype(np.int16) if insert_port == 1: CleanPortfolio['Projection'] = sum([CleanPortfolio['QB'].map(up_dict['Projection_map']), @@ -449,9 +442,8 @@ def get_uncorrelated_portfolio_for_sim(Total_Sample_Size, sharp_split, field_gro CleanPortfolio['WR1'].map(up_dict['Projection_map']), CleanPortfolio['WR2'].map(up_dict['Projection_map']), CleanPortfolio['WR3'].map(up_dict['Projection_map']), - CleanPortfolio['TE'].map(up_dict['Projection_map']), CleanPortfolio['FLEX'].map(up_dict['Projection_map']), - CleanPortfolio['DST'].map(up_dict['Projection_map']) + CleanPortfolio['S_FLEX'].map(up_dict['Projection_map']) ]).astype(np.float16) if insert_port == 1: CleanPortfolio['Own'] = sum([CleanPortfolio['QB'].map(maps_dict['Own_map']), @@ -460,28 +452,28 @@ def get_uncorrelated_portfolio_for_sim(Total_Sample_Size, sharp_split, field_gro CleanPortfolio['WR1'].map(maps_dict['Own_map']), CleanPortfolio['WR2'].map(maps_dict['Own_map']), CleanPortfolio['WR3'].map(maps_dict['Own_map']), - CleanPortfolio['TE'].map(maps_dict['Own_map']), CleanPortfolio['FLEX'].map(maps_dict['Own_map']), - CleanPortfolio['DST'].map(maps_dict['Own_map']) + CleanPortfolio['S_FLEX'].map(maps_dict['Own_map']) ]).astype(np.float16) if site_var1 == 'Draftkings': RandomPortfolioDF = RandomPortfolioDF[RandomPortfolioDF['Salary'] <= 50000].reset_index(drop=True) RandomPortfolioDF = RandomPortfolioDF[RandomPortfolioDF['Salary'] >= (49500 - (5000 * (1 - (len(Teams_used) / 32)))) - (FieldStrength * 1000)].reset_index(drop=True) - elif site_var1 == 'Fanduel': - RandomPortfolioDF = RandomPortfolioDF[RandomPortfolioDF['Salary'] <= 60000].reset_index(drop=True) - RandomPortfolioDF = RandomPortfolioDF[RandomPortfolioDF['Salary'] >= (59500 - (5000 * (1 - (len(Teams_used) / 32)))) - (FieldStrength * 1000)].reset_index(drop=True) RandomPortfolio = RandomPortfolioDF.sort_values(by=Sim_function, ascending=False) - RandomPortfolio = RandomPortfolio[['QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'TE', 'FLEX', 'DST', 'User/Field', 'Salary', 'Projection', 'Own']] + RandomPortfolio = RandomPortfolio[['QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'FLEX', 'S_FLEX', 'User/Field', 'Salary', 'Projection', 'Own']] return RandomPortfolio, maps_dict +static_exposure = pd.DataFrame(columns=['Player', 'count']) +overall_exposure = pd.DataFrame(columns=['Player', 'count']) + tab1, tab2 = st.tabs(['Uploads', 'Contest Sim']) with tab1: - with st.container(): + with st.container(): + st.warning("Unlike many of the other sports, Paydirt does not have NCAA projections. In order to use this tool, you'll need to upload from another source. You don't need to upload a portfolio, but you do need to upload projections!", icon="⚠️") col1, col2 = st.columns([3, 3]) with col1: @@ -510,9 +502,10 @@ with tab1: player_salary_dict = dict(zip(proj_dataframe.Player, proj_dataframe.Salary)) player_proj_dict = dict(zip(proj_dataframe.Player, proj_dataframe.Median)) player_own_dict = dict(zip(proj_dataframe.Player, proj_dataframe.Own)) + player_team_dict = dict(zip(proj_dataframe.Player, proj_dataframe.Team)) with col2: - st.info("The Portfolio file must contain only columns in order and explicitly named: 'QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'TE', 'FLEX', and 'DST'. 
Upload your projections first to avoid an error message.") + st.info("The Portfolio file must contain only columns in order and explicitly named: 'QB', 'RB1', RB2, 'WR1', 'WR2', 'WR3', 'FLEX', and 'S_FLEX'. Upload your projections first to avoid an error message.") portfolio_file = st.file_uploader("Upload Portfolio File", key = 'portfolio_uploader') if portfolio_file is not None: @@ -524,7 +517,7 @@ with tab1: try: try: - portfolio_dataframe.columns=["QB", "RB1", "RB2", "WR1", "WR2", "WR3", "TE", "FLEX", "DST"] + portfolio_dataframe.columns=["QB", "RB1", "RB2", "WR1", "WR2", "WR3", "FLEX", "S_FLEX"] split_portfolio = portfolio_dataframe split_portfolio[['QB', 'QB_ID']] = split_portfolio.QB.str.split("(", n=1, expand = True) split_portfolio[['RB1', 'RB1_ID']] = split_portfolio.RB1.str.split("(", n=1, expand = True) @@ -532,9 +525,8 @@ with tab1: split_portfolio[['WR1', 'WR1_ID']] = split_portfolio.WR1.str.split("(", n=1, expand = True) split_portfolio[['WR2', 'WR2_ID']] = split_portfolio.WR2.str.split("(", n=1, expand = True) split_portfolio[['WR3', 'WR3_ID']] = split_portfolio.WR3.str.split("(", n=1, expand = True) - split_portfolio[['TE', 'TE_ID']] = split_portfolio.TE.str.split("(", n=1, expand = True) split_portfolio[['FLEX', 'FLEX_ID']] = split_portfolio.FLEX.str.split("(", n=1, expand = True) - split_portfolio[['DST', 'DST_ID']] = split_portfolio.DST.str.split("(", n=1, expand = True) + split_portfolio[['S_FLEX', 'S_FLEX_ID']] = split_portfolio.S_FLEX.str.split("(", n=1, expand = True) split_portfolio['QB'] = split_portfolio['QB'].str.strip() split_portfolio['RB1'] = split_portfolio['RB1'].str.strip() @@ -542,9 +534,8 @@ with tab1: split_portfolio['WR1'] = split_portfolio['WR1'].str.strip() split_portfolio['WR2'] = split_portfolio['WR2'].str.strip() split_portfolio['WR3'] = split_portfolio['WR3'].str.strip() - split_portfolio['TE'] = split_portfolio['TE'].str.strip() split_portfolio['FLEX'] = split_portfolio['FLEX'].str.strip() - split_portfolio['DST'] = split_portfolio['DST'].str.strip() + split_portfolio['S_FLEX'] = split_portfolio['S_FLEX'].str.strip() st.table(split_portfolio.head(10)) @@ -554,9 +545,8 @@ with tab1: split_portfolio['WR1'].map(player_salary_dict), split_portfolio['WR2'].map(player_salary_dict), split_portfolio['WR3'].map(player_salary_dict), - split_portfolio['TE'].map(player_salary_dict), split_portfolio['FLEX'].map(player_salary_dict), - split_portfolio['DST'].map(player_salary_dict)]) + split_portfolio['S_FLEX'].map(player_salary_dict)]) split_portfolio['Projection'] = sum([split_portfolio['QB'].map(player_proj_dict), split_portfolio['RB1'].map(player_proj_dict), @@ -564,9 +554,8 @@ with tab1: split_portfolio['WR1'].map(player_proj_dict), split_portfolio['WR2'].map(player_proj_dict), split_portfolio['WR3'].map(player_proj_dict), - split_portfolio['TE'].map(player_proj_dict), split_portfolio['FLEX'].map(player_proj_dict), - split_portfolio['DST'].map(player_proj_dict)]) + split_portfolio['S_FLEX'].map(player_proj_dict)]) split_portfolio['Ownership'] = sum([split_portfolio['QB'].map(player_own_dict), split_portfolio['RB1'].map(player_own_dict), @@ -574,13 +563,24 @@ with tab1: split_portfolio['WR1'].map(player_own_dict), split_portfolio['WR2'].map(player_own_dict), split_portfolio['WR3'].map(player_own_dict), - split_portfolio['TE'].map(player_own_dict), split_portfolio['FLEX'].map(player_own_dict), - split_portfolio['DST'].map(player_own_dict)]) + split_portfolio['S_FLEX'].map(player_own_dict)]) + + split_portfolio['QB_team'] = 
split_portfolio['QB'].map(player_team_dict) + split_portfolio['RB1_team'] = split_portfolio['RB1'].map(player_team_dict) + split_portfolio['RB2_team'] = split_portfolio['RB2'].map(player_team_dict) + split_portfolio['WR1_team'] = split_portfolio['WR1'].map(player_team_dict) + split_portfolio['WR2_team'] = split_portfolio['WR2'].map(player_team_dict) + split_portfolio['WR3_team'] = split_portfolio['WR3'].map(player_team_dict) + split_portfolio['FLEX_team'] = split_portfolio['FLEX'].map(player_team_dict) + split_portfolio['S_FLEX_team'] = split_portfolio['S_FLEX'].map(player_team_dict) + + split_portfolio = split_portfolio[['QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'FLEX', 'S_FLEX', 'Salary', 'Projection', 'Ownership', 'QB_team', + 'RB1_team', 'RB2_team', 'WR1_team', 'WR2_team', 'WR3_team', 'FLEX_team', 'S_FLEX_team']] except: - portfolio_dataframe.columns=["QB", "RB1", "RB2", "WR1", "WR2", "WR3", "TE", "FLEX", "DST"] + portfolio_dataframe.columns=["QB", "RB1", "RB2", "WR1", "WR2", "WR3", "FLEX", "S_FLEX"] split_portfolio = portfolio_dataframe split_portfolio[['QB_ID', 'QB']] = split_portfolio.QB.str.split(":", n=1, expand = True) @@ -589,9 +589,8 @@ with tab1: split_portfolio[['WR1_ID', 'WR1']] = split_portfolio.WR1.str.split(":", n=1, expand = True) split_portfolio[['WR2_ID', 'WR2']] = split_portfolio.WR2.str.split(":", n=1, expand = True) split_portfolio[['WR3_ID', 'WR3']] = split_portfolio.WR3.str.split(":", n=1, expand = True) - split_portfolio[['TE_ID', 'TE']] = split_portfolio.TE.str.split(":", n=1, expand = True) split_portfolio[['FLEX_ID', 'FLEX']] = split_portfolio.FLEX.str.split(":", n=1, expand = True) - split_portfolio[['DST_ID', 'DST']] = split_portfolio.DST.str.split(":", n=1, expand = True) + split_portfolio[['S_FLEX_ID', 'S_FLEX']] = split_portfolio.S_FLEX.str.split(":", n=1, expand = True) split_portfolio['QB'] = split_portfolio['QB'].str.strip() split_portfolio['RB1'] = split_portfolio['RB1'].str.strip() @@ -599,9 +598,8 @@ with tab1: split_portfolio['WR1'] = split_portfolio['WR1'].str.strip() split_portfolio['WR2'] = split_portfolio['WR2'].str.strip() split_portfolio['WR3'] = split_portfolio['WR3'].str.strip() - split_portfolio['TE'] = split_portfolio['TE'].str.strip() split_portfolio['FLEX'] = split_portfolio['FLEX'].str.strip() - split_portfolio['DST'] = split_portfolio['DST'].str.strip() + split_portfolio['S_FLEX'] = split_portfolio['S_FLEX'].str.strip() split_portfolio['Salary'] = sum([split_portfolio['QB'].map(player_salary_dict), split_portfolio['RB1'].map(player_salary_dict), @@ -609,9 +607,8 @@ with tab1: split_portfolio['WR1'].map(player_salary_dict), split_portfolio['WR2'].map(player_salary_dict), split_portfolio['WR3'].map(player_salary_dict), - split_portfolio['TE'].map(player_salary_dict), split_portfolio['FLEX'].map(player_salary_dict), - split_portfolio['DST'].map(player_salary_dict)]) + split_portfolio['S_FLEX'].map(player_salary_dict)]) split_portfolio['Projection'] = sum([split_portfolio['QB'].map(player_proj_dict), split_portfolio['RB1'].map(player_proj_dict), @@ -619,9 +616,8 @@ with tab1: split_portfolio['WR1'].map(player_proj_dict), split_portfolio['WR2'].map(player_proj_dict), split_portfolio['WR3'].map(player_proj_dict), - split_portfolio['TE'].map(player_proj_dict), split_portfolio['FLEX'].map(player_proj_dict), - split_portfolio['DST'].map(player_proj_dict)]) + split_portfolio['S_FLEX'].map(player_proj_dict)]) st.table(split_portfolio.head(10)) split_portfolio['Ownership'] = sum([split_portfolio['QB'].map(player_own_dict), @@ -630,9 +626,20 @@ 
with tab1: split_portfolio['WR1'].map(player_own_dict), split_portfolio['WR2'].map(player_own_dict), split_portfolio['WR3'].map(player_own_dict), - split_portfolio['TE'].map(player_own_dict), split_portfolio['FLEX'].map(player_own_dict), - split_portfolio['DST'].map(player_own_dict)]) + split_portfolio['S_FLEX'].map(player_own_dict)]) + + split_portfolio['QB_team'] = split_portfolio['QB'].map(player_team_dict) + split_portfolio['RB1_team'] = split_portfolio['RB1'].map(player_team_dict) + split_portfolio['RB2_team'] = split_portfolio['RB2'].map(player_team_dict) + split_portfolio['WR1_team'] = split_portfolio['WR1'].map(player_team_dict) + split_portfolio['WR2_team'] = split_portfolio['WR2'].map(player_team_dict) + split_portfolio['WR3_team'] = split_portfolio['WR3'].map(player_team_dict) + split_portfolio['FLEX_team'] = split_portfolio['FLEX'].map(player_team_dict) + split_portfolio['S_FLEX_team'] = split_portfolio['S_FLEX'].map(player_team_dict) + + split_portfolio = split_portfolio[['QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'FLEX', 'S_FLEX', 'Salary', 'Projection', 'Ownership', 'QB_team', + 'RB1_team', 'RB2_team', 'WR1_team', 'WR2_team', 'WR3_team', 'FLEX_team', 'S_FLEX_team']] except: split_portfolio = portfolio_dataframe @@ -643,9 +650,8 @@ with tab1: split_portfolio['WR1'].map(player_salary_dict), split_portfolio['WR2'].map(player_salary_dict), split_portfolio['WR3'].map(player_salary_dict), - split_portfolio['TE'].map(player_salary_dict), split_portfolio['FLEX'].map(player_salary_dict), - split_portfolio['DST'].map(player_salary_dict)]) + split_portfolio['S_FLEX'].map(player_salary_dict)]) split_portfolio['Projection'] = sum([split_portfolio['QB'].map(player_proj_dict), split_portfolio['RB1'].map(player_proj_dict), @@ -653,9 +659,8 @@ with tab1: split_portfolio['WR1'].map(player_proj_dict), split_portfolio['WR2'].map(player_proj_dict), split_portfolio['WR3'].map(player_proj_dict), - split_portfolio['TE'].map(player_proj_dict), split_portfolio['FLEX'].map(player_proj_dict), - split_portfolio['DST'].map(player_proj_dict)]) + split_portfolio['S_FLEX'].map(player_proj_dict)]) split_portfolio['Ownership'] = sum([split_portfolio['QB'].map(player_own_dict), split_portfolio['RB1'].map(player_own_dict), @@ -663,40 +668,78 @@ with tab1: split_portfolio['WR1'].map(player_own_dict), split_portfolio['WR2'].map(player_own_dict), split_portfolio['WR3'].map(player_own_dict), - split_portfolio['TE'].map(player_own_dict), split_portfolio['FLEX'].map(player_own_dict), - split_portfolio['DST'].map(player_own_dict)]) - - gc.collect() + split_portfolio['S_FLEX'].map(player_own_dict)]) + + split_portfolio['QB_team'] = split_portfolio['QB'].map(player_team_dict) + split_portfolio['RB1_team'] = split_portfolio['RB1'].map(player_team_dict) + split_portfolio['RB2_team'] = split_portfolio['RB2'].map(player_team_dict) + split_portfolio['WR1_team'] = split_portfolio['WR1'].map(player_team_dict) + split_portfolio['WR2_team'] = split_portfolio['WR2'].map(player_team_dict) + split_portfolio['WR3_team'] = split_portfolio['WR3'].map(player_team_dict) + split_portfolio['FLEX_team'] = split_portfolio['FLEX'].map(player_team_dict) + split_portfolio['S_FLEX_team'] = split_portfolio['S_FLEX'].map(player_team_dict) + + split_portfolio = split_portfolio[['QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'FLEX', 'S_FLEX', 'Salary', 'Projection', 'Ownership', 'QB_team', + 'RB1_team', 'RB2_team', 'WR1_team', 'WR2_team', 'WR3_team', 'FLEX_team', 'S_FLEX_team']] + + for player_cols in split_portfolio.iloc[:, :8]: + static_col_raw = 
split_portfolio[player_cols].value_counts() + static_col = static_col_raw.to_frame() + static_col.reset_index(inplace=True) + static_col.columns = ['Player', 'count'] + static_exposure = pd.concat([static_exposure, static_col], ignore_index=True) + static_exposure['Exposure'] = static_exposure['count'] / len(split_portfolio) + static_exposure = static_exposure[['Player', 'Exposure']] + with st.container(): + col1, col2 = st.columns([3, 3]) + + if portfolio_file is not None: + split_portfolio = split_portfolio + + for player_cols in split_portfolio.iloc[:, :8]: + exposure_col_raw = split_portfolio[player_cols].value_counts() + exposure_col = exposure_col_raw.to_frame() + exposure_col.reset_index(inplace=True) + exposure_col.columns = ['Player', 'count'] + overall_exposure = pd.concat([overall_exposure, exposure_col], ignore_index=True) + overall_exposure['Exposure'] = overall_exposure['count'] / len(split_portfolio) + overall_exposure = overall_exposure.groupby('Player').sum() + overall_exposure.reset_index(inplace=True) + overall_exposure = overall_exposure[['Player', 'Exposure']] + overall_exposure = overall_exposure.set_index('Player') + overall_exposure = overall_exposure.sort_values(by='Exposure', ascending=False) + overall_exposure['Exposure'] = overall_exposure['Exposure'].astype(float).map(lambda n: '{:.2%}'.format(n)) + + with st.container(): + col1, col2 = st.columns([1, 6]) + + with col1: + if portfolio_file is not None: + st.header('Exposure View') + st.dataframe(overall_exposure) + + with col2: + if portfolio_file is not None: + st.header('Portfolio View') + split_portfolio = split_portfolio.reset_index() + split_portfolio['Lineup'] = split_portfolio['index'] + 1 + display_portfolio = split_portfolio[['Lineup', 'QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'FLEX', 'S_FLEX', 'Salary', 'Projection', 'Ownership']] + display_portfolio = display_portfolio.set_index('Lineup') + st.dataframe(display_portfolio.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').background_gradient(cmap='RdYlGn_r', subset=['Ownership']).format(precision=2)) + with tab2: col1, col2 = st.columns([1, 7]) with col1: - st.info(t_stamp) if st.button("Load/Reset Data", key='reset1'): - st.cache_data.clear() - for key in st.session_state.keys(): - del st.session_state[key] - dk_roo_raw = load_dk_player_projections() - fd_roo_raw = load_fd_player_projections() - t_stamp = f"Last Update: " + str(dk_roo_raw['timestamp'][0]) + f" CST" - dkid_dict, fdid_dict = set_export_ids() - - slate_var1 = st.radio("Which data are you loading?", ('Main Slate', 'Secondary Slate', 'Thurs-Mon Slate', 'User')) - site_var1 = st.radio("What site are you working with?", ('Draftkings', 'Fanduel')) - if site_var1 == 'Draftkings': - if slate_var1 == 'User': - raw_baselines = proj_dataframe[['Player', 'Salary', 'Position', 'Team', 'Opp', 'Median', 'Own']] - elif slate_var1 != 'User': - raw_baselines = dk_roo_raw[dk_roo_raw['slate'] == str(slate_var1)] - raw_baselines = raw_baselines[raw_baselines['version'] == 'overall'] - elif site_var1 == 'Fanduel': - if slate_var1 == 'User': - raw_baselines = proj_dataframe - elif slate_var1 != 'User': - raw_baselines = fd_roo_raw[fd_roo_raw['slate'] == str(slate_var1)] - raw_baselines = raw_baselines[raw_baselines['version'] == 'overall'] - + st.cache_data.clear() + for key in st.session_state.keys(): + del st.session_state[key] + slate_var1 = 'User' + site_var1 = 'Draftkings' + if proj_file is not None: + raw_baselines = proj_dataframe[['Player', 'Salary', 'Position', 'Team', 'Opp', 
'Median', 'Own']] st.info("If you are uploading a portfolio, note that there is an adjustments to projections and deviation mapping to prevent 'Projection Bias' and create a fair simulation") insert_port1 = st.selectbox("Are you uploading a portfolio?", ('No', 'Yes'), key='insert_port1') if insert_port1 == 'Yes': @@ -710,6 +753,7 @@ with tab2: Contest_Size = 5000 elif contest_var1 == 'Large': Contest_Size = 10000 + linenum_var1 = 1000 strength_var1 = st.selectbox("How sharp is the field in the contest?", ('Not Very', 'Average', 'Very')) if strength_var1 == 'Not Very': sharp_split = .33 @@ -723,460 +767,386 @@ with tab2: sharp_split = .75 Strength_var = .01 scaling_var = 15 - - Sort_function = 'Median' - Sim_function = 'Projection' - - if Contest_Size <= 1000: - strength_grow = .01 - elif Contest_Size > 1000 and Contest_Size <= 2500: - strength_grow = .025 - elif Contest_Size > 2500 and Contest_Size <= 5000: - strength_grow = .05 - elif Contest_Size > 5000 and Contest_Size <= 20000: - strength_grow = .075 - elif Contest_Size > 20000: - strength_grow = .1 - field_growth = 100 * strength_grow - with col2: - with st.container(): - if st.button("Simulate Contest"): - with st.container(): - for key in st.session_state.keys(): - del st.session_state[key] + if st.button("Simulate Contest"): + with st.container(): + st.write('Contest Simulation Starting') + seed_depth1 = 10 + Total_Runs = 1000000 + if Contest_Size <= 1000: + strength_grow = .01 + elif Contest_Size > 1000 and Contest_Size <= 2500: + strength_grow = .025 + elif Contest_Size > 2500 and Contest_Size <= 5000: + strength_grow = .05 + elif Contest_Size > 5000 and Contest_Size <= 20000: + strength_grow = .075 + elif Contest_Size > 20000: + strength_grow = .1 - if slate_var1 == 'User': - initial_proj = proj_dataframe[['Player', 'Team', 'Position', 'Median', 'Own', 'Salary']] - - # Define the calculation to be applied - def calculate_own(position, own, mean_own, factor, max_own=75): - return np.where((position == 'QB') & (own - mean_own >= 0), - own * (factor * (own - mean_own) / 100) + mean_own, - own) - - # Set the factors based on the contest_var1 - factor_qb, factor_other = { - 'Small': (10, 5), - 'Medium': (6, 3), - 'Large': (3, 1.5), - }[contest_var1] + field_growth = 100 * strength_grow + + Sort_function = 'Median' + if Sort_function == 'Median': + Sim_function = 'Projection' + elif Sort_function == 'Own': + Sim_function = 'Own' + + if slate_var1 == 'User': + OwnFrame = proj_dataframe + if contest_var1 == 'Small': + OwnFrame['Own%'] = np.where((OwnFrame['Position'] == 'QB') & (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] == 'QB', 'Own'].mean() >= 0), OwnFrame['Own'] * (10 * (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] == 'QB', 'Own'].mean())/100) + OwnFrame.loc[OwnFrame['Position'] == 'QB', 'Own'].mean(), OwnFrame['Own']) + OwnFrame['Own%'] = np.where((OwnFrame['Position'] != 'QB') & (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] != 'QB', 'Own'].mean() >= 0), OwnFrame['Own'] * (5 * (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] != 'QB', 'Own'].mean())/100) + OwnFrame.loc[OwnFrame['Position'] != 'QB', 'Own'].mean(), OwnFrame['Own%']) + OwnFrame['Own%'] = np.where(OwnFrame['Own%'] > 75, 75, OwnFrame['Own%']) + OwnFrame['Own'] = OwnFrame['Own%'] * (800 / OwnFrame['Own%'].sum()) + if contest_var1 == 'Medium': + OwnFrame['Own%'] = np.where((OwnFrame['Position'] == 'QB') & (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] == 'QB', 'Own'].mean() >= 0), OwnFrame['Own'] * (6 * (OwnFrame['Own'] - 
OwnFrame.loc[OwnFrame['Position'] == 'QB', 'Own'].mean())/100) + OwnFrame.loc[OwnFrame['Position'] == 'QB', 'Own'].mean(), OwnFrame['Own']) + OwnFrame['Own%'] = np.where((OwnFrame['Position'] != 'QB') & (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] != 'QB', 'Own'].mean() >= 0), OwnFrame['Own'] * (3 * (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] != 'QB', 'Own'].mean())/100) + OwnFrame.loc[OwnFrame['Position'] != 'QB', 'Own'].mean(), OwnFrame['Own%']) + OwnFrame['Own%'] = np.where(OwnFrame['Own%'] > 75, 75, OwnFrame['Own%']) + OwnFrame['Own'] = OwnFrame['Own%'] * (800 / OwnFrame['Own%'].sum()) + if contest_var1 == 'Large': + OwnFrame['Own%'] = np.where((OwnFrame['Position'] == 'QB') & (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] == 'QB', 'Own'].mean() >= 0), OwnFrame['Own'] * (3 * (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] == 'QB', 'Own'].mean())/100) + OwnFrame.loc[OwnFrame['Position'] == 'QB', 'Own'].mean(), OwnFrame['Own']) + OwnFrame['Own%'] = np.where((OwnFrame['Position'] != 'QB') & (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] != 'QB', 'Own'].mean() >= 0), OwnFrame['Own'] * (1.5 * (OwnFrame['Own'] - OwnFrame.loc[OwnFrame['Position'] != 'QB', 'Own'].mean())/100) + OwnFrame.loc[OwnFrame['Position'] != 'QB', 'Own'].mean(), OwnFrame['Own%']) + OwnFrame['Own%'] = np.where(OwnFrame['Own%'] > 75, 75, OwnFrame['Own%']) + OwnFrame['Own'] = OwnFrame['Own%'] * (800 / OwnFrame['Own%'].sum()) + Overall_Proj = OwnFrame[['Player', 'Team', 'Position', 'Median', 'Own', 'Salary']] + + if insert_port == 1: + UserPortfolio = portfolio_dataframe[['QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'FLEX', 'S_FLEX']] + elif insert_port == 0: + UserPortfolio = pd.DataFrame(columns = ['QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'FLEX', 'S_FLEX']) + + Overall_Proj.replace('', np.nan, inplace=True) + Overall_Proj = Overall_Proj.dropna(subset=['Median']) + Overall_Proj = Overall_Proj.assign(Value=lambda x: (x.Median / (x.Salary / 1000))) + Overall_Proj['Sort_var'] = (Overall_Proj['Median'].rank(ascending=False) + Overall_Proj['Value'].rank(ascending=False)) / 2 + Overall_Proj = Overall_Proj.sort_values(by='Sort_var', ascending=False) + Overall_Proj['Own'] = np.where((Overall_Proj['Median'] > 0) & (Overall_Proj['Own'] == 0), 1, Overall_Proj['Own']) + Overall_Proj = Overall_Proj.loc[Overall_Proj['Own'] > 0] + + Overall_Proj['Floor'] = np.where(Overall_Proj['Position'] == 'QB', Overall_Proj['Median'] * .5, Overall_Proj['Median'] * .25) + Overall_Proj['Ceiling'] = np.where(Overall_Proj['Position'] == 'WR', Overall_Proj['Median'] + Overall_Proj['Median'], Overall_Proj['Median'] + Overall_Proj['Floor']) + Overall_Proj['STDev'] = Overall_Proj['Median'] / 4 + + Teams_used = Overall_Proj['Team'].drop_duplicates().reset_index(drop=True) + Teams_used = Teams_used.reset_index() + Teams_used['team_item'] = Teams_used['index'] + 1 + Teams_used = Teams_used.drop(columns=['index']) + Teams_used_dictraw = Teams_used.drop(columns=['team_item']) + Teams_used_dict = Teams_used_dictraw.to_dict() + + team_list = Teams_used['Team'].to_list() + item_list = Teams_used['team_item'].to_list() + + FieldStrength_raw = Strength_var + ((30 - len(Teams_used)) * .01) + FieldStrength = FieldStrength_raw - (FieldStrength_raw * (20000 / Contest_Size)) + + if FieldStrength < 0: + FieldStrength = Strength_var + field_split = Strength_var + + for checkVar in range(len(team_list)): + Overall_Proj['Team'] = Overall_Proj['Team'].replace(team_list, item_list) + + qbs_raw = Overall_Proj[Overall_Proj.Position == 'QB'] + 
qbs_raw.dropna(subset=['Median']).reset_index(drop=True) + qbs_raw = qbs_raw.reset_index(drop=True) + qbs_raw = qbs_raw.sort_values(by=['Median'], ascending=False) + + qbs = qbs_raw.head(round(len(qbs_raw))) + qbs = qbs.assign(Var = range(0,len(qbs))) + qb_dict = pd.Series(qbs.Player.values, index=qbs.Var).to_dict() + + defs_raw = Overall_Proj[Overall_Proj.Position.str.contains("D")] + defs_raw.dropna(subset=['Median']).reset_index(drop=True) + defs_raw = defs_raw.reset_index(drop=True) + defs_raw = defs_raw.sort_values(by=['Own', 'Value'], ascending=False) + + defs = defs_raw.head(round(len(defs_raw))) + defs = defs.assign(Var = range(0,len(defs))) + def_dict = pd.Series(defs.Player.values, index=defs.Var).to_dict() + + rbs_raw = Overall_Proj[Overall_Proj.Position == 'RB'] + rbs_raw.dropna(subset=['Median']).reset_index(drop=True) + rbs_raw = rbs_raw.reset_index(drop=True) + rbs_raw = rbs_raw.sort_values(by=['Own', 'Value'], ascending=False) + + wrs_raw = Overall_Proj[Overall_Proj.Position == 'WR'] + wrs_raw.dropna(subset=['Median']).reset_index(drop=True) + wrs_raw = wrs_raw.reset_index(drop=True) + wrs_raw = wrs_raw.sort_values(by=['Own', 'Median'], ascending=False) + + pos_players = pd.concat([rbs_raw, wrs_raw]) + pos_players.dropna(subset=['Median']).reset_index(drop=True) + pos_players = pos_players.reset_index(drop=True) + + s_pos_players = pd.concat([qbs_raw, rbs_raw, wrs_raw]) + pos_players.dropna(subset=['Median']).reset_index(drop=True) + pos_players = pos_players.reset_index(drop=True) + + if insert_port == 1: + try: + # Initialize an empty DataFrame for Raw Portfolio + Raw_Portfolio = pd.DataFrame() - # Apply the calculation to the DataFrame - initial_proj['Own%'] = initial_proj.apply(lambda row: calculate_own(row['Position'], row['Own'], initial_proj.loc[initial_proj['Position'] == row['Position'], 'Own'].mean(), factor_qb if row['Position'] == 'QB' else factor_other), axis=1) - initial_proj['Own%'] = initial_proj['Own%'].clip(upper=75) - initial_proj['Own'] = initial_proj['Own%'] * (900 / initial_proj['Own%'].sum()) + # Loop through each position and split the data accordingly + positions = ['QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'FLEX', 'S_FLEX'] + for pos in positions: + temp_df = UserPortfolio[pos].str.split("(", n=1, expand=True) + temp_df.columns = [pos, 'Drop'] + Raw_Portfolio = pd.concat([Raw_Portfolio, temp_df], axis=1) - # Drop unnecessary columns and create the final DataFrame - Overall_Proj = initial_proj[['Player', 'Team', 'Position', 'Median', 'Own', 'Salary']] + # Select only necessary columns and strip white spaces + CleanPortfolio = Raw_Portfolio[positions].apply(lambda x: x.str.strip()) + CleanPortfolio.reset_index(inplace=True) + CleanPortfolio['User/Field'] = CleanPortfolio['index'] + 1 + CleanPortfolio.drop(columns=['index'], inplace=True) - elif slate_var1 != 'User': - # Copy only the necessary columns - initial_proj = raw_baselines[['Player', 'Team', 'Position', 'Median', 'Own', 'Salary']] + CleanPortfolio.replace('', np.nan, inplace=True) + CleanPortfolio.dropna(subset=['QB'], inplace=True) - # Define the calculation to be applied - def calculate_own(position, own, mean_own, factor, max_own=75): - return np.where((position == 'QB') & (own - mean_own >= 0), - own * (factor * (own - mean_own) / 100) + mean_own, - own) + # Create frequency table for players + cleaport_players = pd.DataFrame( + np.column_stack(np.unique(CleanPortfolio.iloc[:, 0:8].values, return_counts=True)), + columns=['Player', 'Freq'] + ).sort_values('Freq', 
ascending=False).reset_index(drop=True) + cleaport_players['Freq'] = cleaport_players['Freq'].astype(int) - # Set the factors based on the contest_var1 - factor_qb, factor_other = { - 'Small': (10, 5), - 'Medium': (6, 3), - 'Large': (3, 1.5), - }[contest_var1] + # Merge and update nerf_frame + nerf_frame = pd.merge(cleaport_players, Overall_Proj, on='Player', how='left') + for col in ['Median', 'Floor', 'Ceiling', 'STDev']: + nerf_frame[col] *= 0.90 + del Raw_Portfolio + except: + CleanPortfolio = UserPortfolio.reset_index() + CleanPortfolio['User/Field'] = CleanPortfolio['index'] + 1 + CleanPortfolio.drop(columns=['index'], inplace=True) - # Apply the calculation to the DataFrame - initial_proj['Own%'] = initial_proj.apply(lambda row: calculate_own(row['Position'], row['Own'], initial_proj.loc[initial_proj['Position'] == row['Position'], 'Own'].mean(), factor_qb if row['Position'] == 'QB' else factor_other), axis=1) - initial_proj['Own%'] = initial_proj['Own%'].clip(upper=75) - initial_proj['Own'] = initial_proj['Own%'] * (900 / initial_proj['Own%'].sum()) + # Replace empty strings and drop rows with NaN in 'QB' column + CleanPortfolio.replace('', np.nan, inplace=True) + CleanPortfolio.dropna(subset=['QB'], inplace=True) - # Drop unnecessary columns and create the final DataFrame - Overall_Proj = initial_proj[['Player', 'Team', 'Position', 'Median', 'Own', 'Salary']] - - if insert_port == 1: - UserPortfolio = portfolio_dataframe[['QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'TE', 'FLEX', 'DST']] - elif insert_port == 0: - UserPortfolio = pd.DataFrame(columns = ['QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'TE', 'FLEX', 'DST']) - - Overall_Proj.replace('', np.nan, inplace=True) - Overall_Proj = Overall_Proj.dropna(subset=['Median']) - Overall_Proj = Overall_Proj.assign(Value=lambda x: (x.Median / (x.Salary / 1000))) - Overall_Proj['Sort_var'] = (Overall_Proj['Median'].rank(ascending=False) + Overall_Proj['Value'].rank(ascending=False)) / 2 - Overall_Proj = Overall_Proj.sort_values(by='Sort_var', ascending=False) - Overall_Proj['Own'] = np.where((Overall_Proj['Median'] > 0) & (Overall_Proj['Own'] == 0), 1, Overall_Proj['Own']) - Overall_Proj = Overall_Proj.loc[Overall_Proj['Own'] > 0] - - Overall_Proj['Floor'] = np.where(Overall_Proj['Position'] == 'QB', Overall_Proj['Median'] * .5, Overall_Proj['Median'] * .25) - Overall_Proj['Ceiling'] = np.where(Overall_Proj['Position'] == 'WR', Overall_Proj['Median'] + Overall_Proj['Median'], Overall_Proj['Median'] + Overall_Proj['Floor']) - Overall_Proj['STDev'] = Overall_Proj['Median'] / 4 - - Teams_used = Overall_Proj['Team'].drop_duplicates().reset_index(drop=True) - Teams_used = Teams_used.reset_index() - Teams_used['team_item'] = Teams_used['index'] + 1 - Teams_used = Teams_used.drop(columns=['index']) - Teams_used_dictraw = Teams_used.drop(columns=['team_item']) - - team_list = Teams_used['Team'].to_list() - item_list = Teams_used['team_item'].to_list() - - FieldStrength_raw = Strength_var + ((30 - len(Teams_used)) * .01) - FieldStrength = FieldStrength_raw - (FieldStrength_raw * (20000 / Contest_Size)) - - if FieldStrength < 0: - FieldStrength = Strength_var - field_split = Strength_var - - for checkVar in range(len(team_list)): - Overall_Proj['Team'] = Overall_Proj['Team'].replace(team_list, item_list) - - qbs_raw = Overall_Proj[Overall_Proj.Position == 'QB'] - qbs_raw.dropna(subset=['Median']).reset_index(drop=True) - qbs_raw = qbs_raw.reset_index(drop=True) - qbs_raw = qbs_raw.sort_values(by=['Median'], ascending=False) - - qbs = 
qbs_raw.head(round(len(qbs_raw))) - qbs = qbs.assign(Var = range(0,len(qbs))) - qb_dict = pd.Series(qbs.Player.values, index=qbs.Var).to_dict() - - defs_raw = Overall_Proj[Overall_Proj.Position.str.contains("D")] - defs_raw.dropna(subset=['Median']).reset_index(drop=True) - defs_raw = defs_raw.reset_index(drop=True) - defs_raw = defs_raw.sort_values(by=['Own', 'Value'], ascending=False) - - defs = defs_raw.head(round(len(defs_raw))) - defs = defs.assign(Var = range(0,len(defs))) - def_dict = pd.Series(defs.Player.values, index=defs.Var).to_dict() - - rbs_raw = Overall_Proj[Overall_Proj.Position == 'RB'] - rbs_raw.dropna(subset=['Median']).reset_index(drop=True) - rbs_raw = rbs_raw.reset_index(drop=True) - rbs_raw = rbs_raw.sort_values(by=['Own', 'Value'], ascending=False) - - wrs_raw = Overall_Proj[Overall_Proj.Position == 'WR'] - wrs_raw.dropna(subset=['Median']).reset_index(drop=True) - wrs_raw = wrs_raw.reset_index(drop=True) - wrs_raw = wrs_raw.sort_values(by=['Own', 'Median'], ascending=False) - - tes_raw = Overall_Proj[Overall_Proj.Position == 'TE'] - tes_raw.dropna(subset=['Median']).reset_index(drop=True) - tes_raw = tes_raw.reset_index(drop=True) - tes_raw = tes_raw.sort_values(by=['Own', 'Value'], ascending=False) - - pos_players = pd.concat([rbs_raw, wrs_raw, tes_raw]) - pos_players.dropna(subset=['Median']).reset_index(drop=True) - pos_players = pos_players.reset_index(drop=True) - - if insert_port == 1: - try: - # Initialize an empty DataFrame for Raw Portfolio - Raw_Portfolio = pd.DataFrame() - - # Loop through each position and split the data accordingly - positions = ['QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'TE', 'FLEX', 'DST'] - for pos in positions: - temp_df = UserPortfolio[pos].str.split("(", n=1, expand=True) - temp_df.columns = [pos, 'Drop'] - Raw_Portfolio = pd.concat([Raw_Portfolio, temp_df], axis=1) - - # Select only necessary columns and strip white spaces - CleanPortfolio = Raw_Portfolio[positions].apply(lambda x: x.str.strip()) - CleanPortfolio.reset_index(inplace=True) - CleanPortfolio['User/Field'] = CleanPortfolio['index'] + 1 - CleanPortfolio.drop(columns=['index'], inplace=True) - - CleanPortfolio.replace('', np.nan, inplace=True) - CleanPortfolio.dropna(subset=['QB'], inplace=True) - - # Create frequency table for players - cleaport_players = pd.DataFrame( - np.column_stack(np.unique(CleanPortfolio.iloc[:, 0:9].values, return_counts=True)), - columns=['Player', 'Freq'] - ).sort_values('Freq', ascending=False).reset_index(drop=True) - cleaport_players['Freq'] = cleaport_players['Freq'].astype(int) - - # Merge and update nerf_frame - nerf_frame = pd.merge(cleaport_players, Overall_Proj, on='Player', how='left') - for col in ['Median', 'Floor', 'Ceiling', 'STDev']: - nerf_frame[col] *= 0.90 - except: - CleanPortfolio = UserPortfolio.reset_index() - CleanPortfolio['User/Field'] = CleanPortfolio['index'] + 1 - CleanPortfolio.drop(columns=['index'], inplace=True) - - # Replace empty strings and drop rows with NaN in 'QB' column - CleanPortfolio.replace('', np.nan, inplace=True) - CleanPortfolio.dropna(subset=['QB'], inplace=True) - - # Create frequency table for players - cleaport_players = pd.DataFrame( - np.column_stack(np.unique(CleanPortfolio.iloc[:, 0:9].values, return_counts=True)), - columns=['Player', 'Freq'] - ).sort_values('Freq', ascending=False).reset_index(drop=True) - cleaport_players['Freq'] = cleaport_players['Freq'].astype(int) - - # Merge and update nerf_frame - nerf_frame = pd.merge(cleaport_players, Overall_Proj, on='Player', how='left') - 
for col in ['Median', 'Floor', 'Ceiling', 'STDev']: - nerf_frame[col] *= 0.90 - - elif insert_port == 0: - CleanPortfolio = UserPortfolio - cleaport_players = pd.DataFrame(np.column_stack(np.unique(CleanPortfolio.iloc[:,0:9].values, return_counts=True)), - columns=['Player','Freq']).sort_values('Freq', ascending=False).reset_index(drop=True) + # Create frequency table for players + cleaport_players = pd.DataFrame( + np.column_stack(np.unique(CleanPortfolio.iloc[:, 0:8].values, return_counts=True)), + columns=['Player', 'Freq'] + ).sort_values('Freq', ascending=False).reset_index(drop=True) cleaport_players['Freq'] = cleaport_players['Freq'].astype(int) - nerf_frame = Overall_Proj - - ref_dict = { - 'pos':['RB', 'WR', 'TE', 'FLEX'], - 'pos_dfs':['RB_Table', 'WR_Table', 'TE_Table', 'FLEX_Table'], - 'pos_dicts':['rb_dict', 'wr_dict', 'te_dict', 'flex_dict'] - } - - maps_dict = { - 'Floor_map':dict(zip(Overall_Proj.Player,Overall_Proj.Floor)), - 'Projection_map':dict(zip(Overall_Proj.Player,Overall_Proj.Median)), - 'Ceiling_map':dict(zip(Overall_Proj.Player,Overall_Proj.Ceiling)), - 'Salary_map':dict(zip(Overall_Proj.Player,Overall_Proj.Salary)), - 'Pos_map':dict(zip(Overall_Proj.Player,Overall_Proj.Position)), - 'Own_map':dict(zip(Overall_Proj.Player,Overall_Proj.Own)), - 'Team_map':dict(zip(Overall_Proj.Player,Overall_Proj.Team)), - 'STDev_map':dict(zip(Overall_Proj.Player,Overall_Proj.STDev)), - 'team_check_map':dict(zip(Overall_Proj.Player,Overall_Proj.Team)) - } - - up_dict = { - 'Floor_map':dict(zip(cleaport_players.Player,nerf_frame.Floor)), - 'Projection_map':dict(zip(cleaport_players.Player,nerf_frame.Median)), - 'Ceiling_map':dict(zip(cleaport_players.Player,nerf_frame.Ceiling)), - 'Salary_map':dict(zip(cleaport_players.Player,nerf_frame.Salary)), - 'Pos_map':dict(zip(cleaport_players.Player,nerf_frame.Position)), - 'Own_map':dict(zip(cleaport_players.Player,nerf_frame.Own)), - 'Team_map':dict(zip(cleaport_players.Player,nerf_frame.Team)), - 'STDev_map':dict(zip(cleaport_players.Player,nerf_frame.STDev)), - 'team_check_map':dict(zip(cleaport_players.Player,nerf_frame.Team)) - } - - FinalPortfolio, maps_dict = run_seed_frame(5, Strength_var, strength_grow, Teams_used, 1000000, field_growth) - - Sim_Winners = sim_contest(2500, FinalPortfolio, CleanPortfolio, maps_dict, up_dict, insert_port) - - # Initial setup - Sim_Winner_Frame = pd.DataFrame(np.concatenate(Sim_Winners), columns=FinalPortfolio.columns.tolist() + ['Fantasy']) - Sim_Winner_Frame['GPP_Proj'] = (Sim_Winner_Frame['Projection'] + Sim_Winner_Frame['Fantasy']) / 2 - Sim_Winner_Frame['unique_id'] = Sim_Winner_Frame['Projection'].astype(str) + Sim_Winner_Frame['Salary'].astype(str) + Sim_Winner_Frame['Own'].astype(str) - Sim_Winner_Frame = Sim_Winner_Frame.assign(win_count=Sim_Winner_Frame['unique_id'].map(Sim_Winner_Frame['unique_id'].value_counts())) - - # Type Casting - type_cast_dict = {'Salary': int, 'Projection': np.float16, 'Fantasy': np.float16, 'GPP_Proj': np.float32} - Sim_Winner_Frame = Sim_Winner_Frame.astype(type_cast_dict) - - del FinalPortfolio, insert_port, type_cast_dict - - # Sorting - st.session_state.Sim_Winner_Frame = Sim_Winner_Frame.sort_values(by=['win_count', 'GPP_Proj'], ascending= [False, False]).copy().drop_duplicates(subset='unique_id').head(100) - st.session_state.Sim_Winner_Frame.drop(columns='unique_id', inplace=True) - - # Data Copying - st.session_state.Sim_Winner_Export = Sim_Winner_Frame.copy() - - # Data Copying - st.session_state.Sim_Winner_Display = Sim_Winner_Frame.copy() - - # 
Conditional Replacement - columns_to_replace = ['QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'TE', 'FLEX', 'DST'] - - if site_var1 == 'Draftkings': - replace_dict = dkid_dict - elif site_var1 == 'Fanduel': - replace_dict = fdid_dict - - for col in columns_to_replace: - st.session_state.Sim_Winner_Export[col].replace(replace_dict, inplace=True) - - del replace_dict, Sim_Winner_Frame, Sim_Winners - - st.session_state.player_freq = pd.DataFrame(np.column_stack(np.unique(st.session_state.Sim_Winner_Display.iloc[:,0:9].values, return_counts=True)), - columns=['Player','Freq']).sort_values('Freq', ascending=False).reset_index(drop=True) - st.session_state.player_freq['Freq'] = st.session_state.player_freq['Freq'].astype(int) - st.session_state.player_freq['Position'] = st.session_state.player_freq['Player'].map(maps_dict['Pos_map']) - st.session_state.player_freq['Salary'] = st.session_state.player_freq['Player'].map(maps_dict['Salary_map']) - st.session_state.player_freq['Proj Own'] = st.session_state.player_freq['Player'].map(maps_dict['Own_map']) / 100 - st.session_state.player_freq['Exposure'] = st.session_state.player_freq['Freq']/(2500) - st.session_state.player_freq['Edge'] = st.session_state.player_freq['Exposure'] - st.session_state.player_freq['Proj Own'] - st.session_state.player_freq['Team'] = st.session_state.player_freq['Player'].map(maps_dict['Team_map']) - for checkVar in range(len(team_list)): - st.session_state.player_freq['Team'] = st.session_state.player_freq['Team'].replace(item_list, team_list) - - st.session_state.qb_freq = pd.DataFrame(np.column_stack(np.unique(st.session_state.Sim_Winner_Display.iloc[:,0:1].values, return_counts=True)), - columns=['Player','Freq']).sort_values('Freq', ascending=False).reset_index(drop=True) - st.session_state.qb_freq['Freq'] = st.session_state.qb_freq['Freq'].astype(int) - st.session_state.qb_freq['Position'] = st.session_state.qb_freq['Player'].map(maps_dict['Pos_map']) - st.session_state.qb_freq['Salary'] = st.session_state.qb_freq['Player'].map(maps_dict['Salary_map']) - st.session_state.qb_freq['Proj Own'] = st.session_state.qb_freq['Player'].map(maps_dict['Own_map']) / 100 - st.session_state.qb_freq['Exposure'] = st.session_state.qb_freq['Freq']/(2500) - st.session_state.qb_freq['Edge'] = st.session_state.qb_freq['Exposure'] - st.session_state.qb_freq['Proj Own'] - st.session_state.qb_freq['Team'] = st.session_state.qb_freq['Player'].map(maps_dict['Team_map']) - for checkVar in range(len(team_list)): - st.session_state.qb_freq['Team'] = st.session_state.qb_freq['Team'].replace(item_list, team_list) - - st.session_state.rb_freq = pd.DataFrame(np.column_stack(np.unique(st.session_state.Sim_Winner_Display.iloc[:,[1, 2]].values, return_counts=True)), - columns=['Player','Freq']).sort_values('Freq', ascending=False).reset_index(drop=True) - st.session_state.rb_freq['Freq'] = st.session_state.rb_freq['Freq'].astype(int) - st.session_state.rb_freq['Position'] = st.session_state.rb_freq['Player'].map(maps_dict['Pos_map']) - st.session_state.rb_freq['Salary'] = st.session_state.rb_freq['Player'].map(maps_dict['Salary_map']) - st.session_state.rb_freq['Proj Own'] = st.session_state.rb_freq['Player'].map(maps_dict['Own_map']) / 100 - st.session_state.rb_freq['Exposure'] = st.session_state.rb_freq['Freq']/2500 - st.session_state.rb_freq['Edge'] = st.session_state.rb_freq['Exposure'] - st.session_state.rb_freq['Proj Own'] - st.session_state.rb_freq['Team'] = st.session_state.rb_freq['Player'].map(maps_dict['Team_map']) - for checkVar in 
range(len(team_list)): - st.session_state.rb_freq['Team'] = st.session_state.rb_freq['Team'].replace(item_list, team_list) - - st.session_state.wr_freq = pd.DataFrame(np.column_stack(np.unique(st.session_state.Sim_Winner_Display.iloc[:,[3, 4, 5]].values, return_counts=True)), - columns=['Player','Freq']).sort_values('Freq', ascending=False).reset_index(drop=True) - st.session_state.wr_freq['Freq'] = st.session_state.wr_freq['Freq'].astype(int) - st.session_state.wr_freq['Position'] = st.session_state.wr_freq['Player'].map(maps_dict['Pos_map']) - st.session_state.wr_freq['Salary'] = st.session_state.wr_freq['Player'].map(maps_dict['Salary_map']) - st.session_state.wr_freq['Proj Own'] = st.session_state.wr_freq['Player'].map(maps_dict['Own_map']) / 100 - st.session_state.wr_freq['Exposure'] = st.session_state.wr_freq['Freq']/2500 - st.session_state.wr_freq['Edge'] = st.session_state.wr_freq['Exposure'] - st.session_state.wr_freq['Proj Own'] - st.session_state.wr_freq['Team'] = st.session_state.wr_freq['Player'].map(maps_dict['Team_map']) - for checkVar in range(len(team_list)): - st.session_state.wr_freq['Team'] = st.session_state.wr_freq['Team'].replace(item_list, team_list) - - st.session_state.te_freq = pd.DataFrame(np.column_stack(np.unique(st.session_state.Sim_Winner_Display.iloc[:,[6]].values, return_counts=True)), + + # Merge and update nerf_frame + nerf_frame = pd.merge(cleaport_players, Overall_Proj, on='Player', how='left') + for col in ['Median', 'Floor', 'Ceiling', 'STDev']: + nerf_frame[col] *= 0.90 + + elif insert_port == 0: + CleanPortfolio = UserPortfolio + cleaport_players = pd.DataFrame(np.column_stack(np.unique(CleanPortfolio.iloc[:,0:8].values, return_counts=True)), columns=['Player','Freq']).sort_values('Freq', ascending=False).reset_index(drop=True) - st.session_state.te_freq['Freq'] = st.session_state.te_freq['Freq'].astype(int) - st.session_state.te_freq['Position'] = st.session_state.te_freq['Player'].map(maps_dict['Pos_map']) - st.session_state.te_freq['Salary'] = st.session_state.te_freq['Player'].map(maps_dict['Salary_map']) - st.session_state.te_freq['Proj Own'] = st.session_state.te_freq['Player'].map(maps_dict['Own_map']) / 100 - st.session_state.te_freq['Exposure'] = st.session_state.te_freq['Freq']/2500 - st.session_state.te_freq['Edge'] = st.session_state.te_freq['Exposure'] - st.session_state.te_freq['Proj Own'] - st.session_state.te_freq['Team'] = st.session_state.te_freq['Player'].map(maps_dict['Team_map']) - for checkVar in range(len(team_list)): - st.session_state.te_freq['Team'] = st.session_state.te_freq['Team'].replace(item_list, team_list) + cleaport_players['Freq'] = cleaport_players['Freq'].astype(int) + nerf_frame = Overall_Proj - st.session_state.flex_freq = pd.DataFrame(np.column_stack(np.unique(st.session_state.Sim_Winner_Display.iloc[:,[7]].values, return_counts=True)), - columns=['Player','Freq']).sort_values('Freq', ascending=False).reset_index(drop=True) - st.session_state.flex_freq['Freq'] = st.session_state.flex_freq['Freq'].astype(int) - st.session_state.flex_freq['Position'] = st.session_state.flex_freq['Player'].map(maps_dict['Pos_map']) - st.session_state.flex_freq['Salary'] = st.session_state.flex_freq['Player'].map(maps_dict['Salary_map']) - st.session_state.flex_freq['Proj Own'] = st.session_state.flex_freq['Player'].map(maps_dict['Own_map']) / 100 - st.session_state.flex_freq['Exposure'] = st.session_state.flex_freq['Freq']/2500 - st.session_state.flex_freq['Edge'] = st.session_state.flex_freq['Exposure'] - 
st.session_state.flex_freq['Proj Own']
-    st.session_state.flex_freq['Team'] = st.session_state.flex_freq['Player'].map(maps_dict['Team_map'])
-    for checkVar in range(len(team_list)):
-        st.session_state.flex_freq['Team'] = st.session_state.flex_freq['Team'].replace(item_list, team_list)
+        ref_dict = {
+            'pos':['RB', 'WR', 'FLEX', 'S_FLEX'],
+            'pos_dfs':['RB_Table', 'WR_Table', 'FLEX_Table', 'S_FLEX_Table'],
+            'pos_dicts':['rb_dict', 'wr_dict', 'flex_dict', 's_flex_dict']
+        }
+        
+        maps_dict = {
+            'Floor_map':dict(zip(Overall_Proj.Player,Overall_Proj.Floor)),
+            'Projection_map':dict(zip(Overall_Proj.Player,Overall_Proj.Median)),
+            'Ceiling_map':dict(zip(Overall_Proj.Player,Overall_Proj.Ceiling)),
+            'Salary_map':dict(zip(Overall_Proj.Player,Overall_Proj.Salary)),
+            'Pos_map':dict(zip(Overall_Proj.Player,Overall_Proj.Position)),
+            'Own_map':dict(zip(Overall_Proj.Player,Overall_Proj.Own)),
+            'Team_map':dict(zip(Overall_Proj.Player,Overall_Proj.Team)),
+            'STDev_map':dict(zip(Overall_Proj.Player,Overall_Proj.STDev)),
+            'team_check_map':dict(zip(Overall_Proj.Player,Overall_Proj.Team))
+        }
+        
+        up_dict = {
+            'Floor_map':dict(zip(cleaport_players.Player,nerf_frame.Floor)),
+            'Projection_map':dict(zip(cleaport_players.Player,nerf_frame.Median)),
+            'Ceiling_map':dict(zip(cleaport_players.Player,nerf_frame.Ceiling)),
+            'Salary_map':dict(zip(cleaport_players.Player,nerf_frame.Salary)),
+            'Pos_map':dict(zip(cleaport_players.Player,nerf_frame.Position)),
+            'Own_map':dict(zip(cleaport_players.Player,nerf_frame.Own)),
+            'Team_map':dict(zip(cleaport_players.Player,nerf_frame.Team)),
+            'STDev_map':dict(zip(cleaport_players.Player,nerf_frame.STDev)),
+            'team_check_map':dict(zip(cleaport_players.Player,nerf_frame.Team))
+        }
+        
+        FinalPortfolio, maps_dict = run_seed_frame(5, Strength_var, strength_grow, Teams_used, 1000000)
-    st.session_state.dst_freq = pd.DataFrame(np.column_stack(np.unique(st.session_state.Sim_Winner_Display.iloc[:,8:9].values, return_counts=True)),
-                          columns=['Player','Freq']).sort_values('Freq', ascending=False).reset_index(drop=True)
-    st.session_state.dst_freq['Freq'] = st.session_state.dst_freq['Freq'].astype(int)
-    st.session_state.dst_freq['Position'] = st.session_state.dst_freq['Player'].map(maps_dict['Pos_map'])
-    st.session_state.dst_freq['Salary'] = st.session_state.dst_freq['Player'].map(maps_dict['Salary_map'])
-    st.session_state.dst_freq['Proj Own'] = st.session_state.dst_freq['Player'].map(maps_dict['Own_map']) / 100
-    st.session_state.dst_freq['Exposure'] = st.session_state.dst_freq['Freq']/2500
-    st.session_state.dst_freq['Edge'] = st.session_state.dst_freq['Exposure'] - st.session_state.dst_freq['Proj Own']
-    st.session_state.dst_freq['Team'] = st.session_state.dst_freq['Player'].map(maps_dict['Team_map'])
-    for checkVar in range(len(team_list)):
-        st.session_state.dst_freq['Team'] = st.session_state.dst_freq['Team'].replace(item_list, team_list)
+        Sim_Winners = sim_contest(5000, FinalPortfolio, CleanPortfolio, maps_dict, up_dict, insert_port)
+        # Initial setup
+        Sim_Winner_Frame = pd.DataFrame(np.concatenate(Sim_Winners), columns=FinalPortfolio.columns.tolist() + ['Fantasy'])
+        Sim_Winner_Frame['GPP_Proj'] = (Sim_Winner_Frame['Projection'] + Sim_Winner_Frame['Fantasy']) / 2
+        Sim_Winner_Frame['unique_id'] = Sim_Winner_Frame['Projection'].astype(str) + Sim_Winner_Frame['Salary'].astype(str) + Sim_Winner_Frame['Own'].astype(str)
+        Sim_Winner_Frame = Sim_Winner_Frame.assign(win_count=Sim_Winner_Frame['unique_id'].map(Sim_Winner_Frame['unique_id'].value_counts()))
+        
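# Illustrative sketch (toy numbers, not simulation output) of the unique_id /
# win_count bookkeeping above: identical Projection/Salary/Own rows share a
# unique_id, and win_count is simply how many times that id appears among the
# simulated winners before drop_duplicates keeps one copy per lineup.
import pandas as pd

toy_winners = pd.DataFrame({
    'Projection': [152.4, 152.4, 148.9],
    'Salary':     [49800, 49800, 50000],
    'Own':        [118.2, 118.2, 131.5],
})
toy_winners['unique_id'] = (toy_winners['Projection'].astype(str)
                            + toy_winners['Salary'].astype(str)
                            + toy_winners['Own'].astype(str))
toy_winners = toy_winners.assign(
    win_count=toy_winners['unique_id'].map(toy_winners['unique_id'].value_counts())
)
# The first two rows are the same lineup, so both carry win_count == 2;
# drop_duplicates(subset='unique_id') later keeps a single row for that lineup.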
+        # Type Casting
+        type_cast_dict = {'Salary': int, 'Projection': np.float16, 'Fantasy': np.float16, 'GPP_Proj': np.float32}
+        Sim_Winner_Frame = Sim_Winner_Frame.astype(type_cast_dict)
+        
+        del FinalPortfolio, insert_port, type_cast_dict
+        
+        # Sorting
+        st.session_state.Sim_Winner_Frame = Sim_Winner_Frame.sort_values(by=['win_count', 'GPP_Proj'], ascending= [False, False]).copy().drop_duplicates(subset='unique_id').head(100)
+        st.session_state.Sim_Winner_Frame.drop(columns='unique_id', inplace=True)
+        
+        # Data Copying
+        st.session_state.Sim_Winner_Export = Sim_Winner_Frame.copy()
+        
+        # Data Copying
+        st.session_state.Sim_Winner_Display = Sim_Winner_Frame.copy()
+        
+        # Conditional Replacement
+        columns_to_replace = ['QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'FLEX', 'S_FLEX']
-    with st.container():
-        if 'player_freq' in st.session_state:
-            player_split_var2 = st.radio("Are you wanting to isolate any lineups with specific players?", ('Full Players', 'Specific Players'), key='player_split_var2')
-            if player_split_var2 == 'Specific Players':
-                find_var2 = st.multiselect('Which players must be included in the lineups?', options = st.session_state.player_freq['Player'].unique())
-            elif player_split_var2 == 'Full Players':
-                find_var2 = st.session_state.player_freq.Player.values.tolist()
-            
-            if player_split_var2 == 'Specific Players':
-                st.session_state.Sim_Winner_Display = st.session_state.Sim_Winner_Frame[np.equal.outer(st.session_state.Sim_Winner_Frame.to_numpy(), find_var2).any(axis=1).all(axis=1)]
-            if player_split_var2 == 'Full Players':
-                st.session_state.Sim_Winner_Display = st.session_state.Sim_Winner_Frame
-            if 'Sim_Winner_Display' in st.session_state:
-                st.dataframe(st.session_state.Sim_Winner_Display.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').background_gradient(cmap='RdYlGn_r', subset=['Own']).format(precision=2), use_container_width = True)
-            if 'Sim_Winner_Export' in st.session_state:
-                st.download_button(
-                    label="Export Full Frame",
-                    data=st.session_state.Sim_Winner_Export.to_csv().encode('utf-8'),
-                    file_name='NFL_consim_export.csv',
-                    mime='text/csv',
-                )
+        del Sim_Winner_Frame, Sim_Winners
+        
+        st.session_state.player_freq = pd.DataFrame(np.column_stack(np.unique(st.session_state.Sim_Winner_Display.iloc[:,[0, 1, 2, 3, 4, 5, 6, 7]].values, return_counts=True)),
+                          columns=['Player','Freq']).sort_values('Freq', ascending=False).reset_index(drop=True)
+        st.session_state.player_freq['Freq'] = st.session_state.player_freq['Freq'].astype(int)
+        st.session_state.player_freq['Position'] = st.session_state.player_freq['Player'].map(maps_dict['Pos_map'])
+        st.session_state.player_freq['Salary'] = st.session_state.player_freq['Player'].map(maps_dict['Salary_map'])
+        st.session_state.player_freq['Proj Own'] = st.session_state.player_freq['Player'].map(maps_dict['Own_map']) / 100
+        st.session_state.player_freq['Exposure'] = st.session_state.player_freq['Freq']/(5000)
+        st.session_state.player_freq['Edge'] = st.session_state.player_freq['Exposure'] - st.session_state.player_freq['Proj Own']
+        st.session_state.player_freq['Team'] = st.session_state.player_freq['Player'].map(maps_dict['Team_map'])
+        for checkVar in range(len(team_list)):
+            st.session_state.player_freq['Team'] = st.session_state.player_freq['Team'].replace(item_list, team_list)
+        
+        st.session_state.qb_freq = pd.DataFrame(np.column_stack(np.unique(st.session_state.Sim_Winner_Display.iloc[:,[0]].values, return_counts=True)),
+                          columns=['Player','Freq']).sort_values('Freq',
ascending=False).reset_index(drop=True) + st.session_state.qb_freq['Freq'] = st.session_state.qb_freq['Freq'].astype(int) + st.session_state.qb_freq['Position'] = st.session_state.qb_freq['Player'].map(maps_dict['Pos_map']) + st.session_state.qb_freq['Salary'] = st.session_state.qb_freq['Player'].map(maps_dict['Salary_map']) + st.session_state.qb_freq['Proj Own'] = st.session_state.qb_freq['Player'].map(maps_dict['Own_map']) / 100 + st.session_state.qb_freq['Exposure'] = st.session_state.qb_freq['Freq']/(5000) + st.session_state.qb_freq['Edge'] = st.session_state.qb_freq['Exposure'] - st.session_state.qb_freq['Proj Own'] + st.session_state.qb_freq['Team'] = st.session_state.qb_freq['Player'].map(maps_dict['Team_map']) + for checkVar in range(len(team_list)): + st.session_state.qb_freq['Team'] = st.session_state.qb_freq['Team'].replace(item_list, team_list) + + st.session_state.rb_freq = pd.DataFrame(np.column_stack(np.unique(st.session_state.Sim_Winner_Display.iloc[:,[1, 2]].values, return_counts=True)), + columns=['Player','Freq']).sort_values('Freq', ascending=False).reset_index(drop=True) + st.session_state.rb_freq['Freq'] = st.session_state.rb_freq['Freq'].astype(int) + st.session_state.rb_freq['Position'] = st.session_state.rb_freq['Player'].map(maps_dict['Pos_map']) + st.session_state.rb_freq['Salary'] = st.session_state.rb_freq['Player'].map(maps_dict['Salary_map']) + st.session_state.rb_freq['Proj Own'] = st.session_state.rb_freq['Player'].map(maps_dict['Own_map']) / 100 + st.session_state.rb_freq['Exposure'] = st.session_state.rb_freq['Freq']/5000 + st.session_state.rb_freq['Edge'] = st.session_state.rb_freq['Exposure'] - st.session_state.rb_freq['Proj Own'] + st.session_state.rb_freq['Team'] = st.session_state.rb_freq['Player'].map(maps_dict['Team_map']) + for checkVar in range(len(team_list)): + st.session_state.rb_freq['Team'] = st.session_state.rb_freq['Team'].replace(item_list, team_list) + + st.session_state.wr_freq = pd.DataFrame(np.column_stack(np.unique(st.session_state.Sim_Winner_Display.iloc[:,[3, 4, 5]].values, return_counts=True)), + columns=['Player','Freq']).sort_values('Freq', ascending=False).reset_index(drop=True) + st.session_state.wr_freq['Freq'] = st.session_state.wr_freq['Freq'].astype(int) + st.session_state.wr_freq['Position'] = st.session_state.wr_freq['Player'].map(maps_dict['Pos_map']) + st.session_state.wr_freq['Salary'] = st.session_state.wr_freq['Player'].map(maps_dict['Salary_map']) + st.session_state.wr_freq['Proj Own'] = st.session_state.wr_freq['Player'].map(maps_dict['Own_map']) / 100 + st.session_state.wr_freq['Exposure'] = st.session_state.wr_freq['Freq']/5000 + st.session_state.wr_freq['Edge'] = st.session_state.wr_freq['Exposure'] - st.session_state.wr_freq['Proj Own'] + st.session_state.wr_freq['Team'] = st.session_state.wr_freq['Player'].map(maps_dict['Team_map']) + for checkVar in range(len(team_list)): + st.session_state.wr_freq['Team'] = st.session_state.wr_freq['Team'].replace(item_list, team_list) + + st.session_state.flex_freq = pd.DataFrame(np.column_stack(np.unique(st.session_state.Sim_Winner_Display.iloc[:,[6]].values, return_counts=True)), + columns=['Player','Freq']).sort_values('Freq', ascending=False).reset_index(drop=True) + st.session_state.flex_freq['Freq'] = st.session_state.flex_freq['Freq'].astype(int) + st.session_state.flex_freq['Position'] = st.session_state.flex_freq['Player'].map(maps_dict['Pos_map']) + st.session_state.flex_freq['Salary'] = st.session_state.flex_freq['Player'].map(maps_dict['Salary_map']) + 
st.session_state.flex_freq['Proj Own'] = st.session_state.flex_freq['Player'].map(maps_dict['Own_map']) / 100 + st.session_state.flex_freq['Exposure'] = st.session_state.flex_freq['Freq']/5000 + st.session_state.flex_freq['Edge'] = st.session_state.flex_freq['Exposure'] - st.session_state.flex_freq['Proj Own'] + st.session_state.flex_freq['Team'] = st.session_state.flex_freq['Player'].map(maps_dict['Team_map']) + for checkVar in range(len(team_list)): + st.session_state.flex_freq['Team'] = st.session_state.flex_freq['Team'].replace(item_list, team_list) + + st.session_state.dst_freq = pd.DataFrame(np.column_stack(np.unique(st.session_state.Sim_Winner_Display.iloc[:,[7]].values, return_counts=True)), + columns=['Player','Freq']).sort_values('Freq', ascending=False).reset_index(drop=True) + st.session_state.dst_freq['Freq'] = st.session_state.dst_freq['Freq'].astype(int) + st.session_state.dst_freq['Position'] = st.session_state.dst_freq['Player'].map(maps_dict['Pos_map']) + st.session_state.dst_freq['Salary'] = st.session_state.dst_freq['Player'].map(maps_dict['Salary_map']) + st.session_state.dst_freq['Proj Own'] = st.session_state.dst_freq['Player'].map(maps_dict['Own_map']) / 100 + st.session_state.dst_freq['Exposure'] = st.session_state.dst_freq['Freq']/5000 + st.session_state.dst_freq['Edge'] = st.session_state.dst_freq['Exposure'] - st.session_state.dst_freq['Proj Own'] + st.session_state.dst_freq['Team'] = st.session_state.dst_freq['Player'].map(maps_dict['Team_map']) + for checkVar in range(len(team_list)): + st.session_state.dst_freq['Team'] = st.session_state.dst_freq['Team'].replace(item_list, team_list) + + with st.container(): + if 'player_freq' in st.session_state: + player_split_var2 = st.radio("Are you wanting to isolate any lineups with specific players?", ('Full Players', 'Specific Players'), key='player_split_var2') + if player_split_var2 == 'Specific Players': + find_var2 = st.multiselect('Which players must be included in the lineups?', options = st.session_state.player_freq['Player'].unique()) + elif player_split_var2 == 'Full Players': + find_var2 = st.session_state.player_freq.Player.values.tolist() - with st.container(): - tab1, tab2, tab3, tab4, tab5, tab6, tab7 = st.tabs(['Overall Exposures', 'QB Exposures', 'RB Exposures', 'WR Exposures', 'TE Exposures', 'FLEX Exposures', 'DST Exposures']) - with tab1: - if 'player_freq' in st.session_state: - st.dataframe(st.session_state.player_freq.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').format(freq_format, precision=2), use_container_width = True) - st.download_button( - label="Export Exposures", - data=st.session_state.player_freq.to_csv().encode('utf-8'), - file_name='player_freq_export.csv', - mime='text/csv', - ) - with tab2: - if 'qb_freq' in st.session_state: - st.dataframe(st.session_state.qb_freq.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').format(freq_format, precision=2), use_container_width = True) - st.download_button( - label="Export Exposures", - data=st.session_state.qb_freq.to_csv().encode('utf-8'), - file_name='qb_freq_export.csv', - mime='text/csv', - ) - with tab3: - if 'rb_freq' in st.session_state: - st.dataframe(st.session_state.rb_freq.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').format(freq_format, precision=2), use_container_width = True) - st.download_button( - label="Export Exposures", - data=st.session_state.rb_freq.to_csv().encode('utf-8'), - file_name='rb_freq_export.csv', - mime='text/csv', - ) - with tab4: - if 
'wr_freq' in st.session_state: - st.dataframe(st.session_state.wr_freq.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').format(freq_format, precision=2), use_container_width = True) - st.download_button( - label="Export Exposures", - data=st.session_state.wr_freq.to_csv().encode('utf-8'), - file_name='wr_freq_export.csv', - mime='text/csv', - ) - with tab5: - if 'te_freq' in st.session_state: - st.dataframe(st.session_state.te_freq.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').format(freq_format, precision=2), use_container_width = True) - st.download_button( - label="Export Exposures", - data=st.session_state.te_freq.to_csv().encode('utf-8'), - file_name='te_freq_export.csv', - mime='text/csv', - ) - with tab6: - if 'flex_freq' in st.session_state: - st.dataframe(st.session_state.flex_freq.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').format(freq_format, precision=2), use_container_width = True) + if player_split_var2 == 'Specific Players': + st.session_state.Sim_Winner_Display = st.session_state.Sim_Winner_Frame[np.equal.outer(st.session_state.Sim_Winner_Frame.to_numpy(), find_var2).any(axis=1).all(axis=1)] + if player_split_var2 == 'Full Players': + st.session_state.Sim_Winner_Display = st.session_state.Sim_Winner_Frame + if 'Sim_Winner_Display' in st.session_state: + st.dataframe(st.session_state.Sim_Winner_Display.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').background_gradient(cmap='RdYlGn_r', subset=['Own']).format(precision=2), use_container_width = True) + if 'Sim_Winner_Export' in st.session_state: st.download_button( - label="Export Exposures", - data=st.session_state.flex_freq.to_csv().encode('utf-8'), - file_name='flex_freq_export.csv', - mime='text/csv', - ) - with tab7: - if 'dst_freq' in st.session_state: - st.dataframe(st.session_state.dst_freq.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').format(freq_format, precision=2), use_container_width = True) - st.download_button( - label="Export Exposures", - data=st.session_state.dst_freq.to_csv().encode('utf-8'), - file_name='dst_freq_export.csv', + label="Export Tables", + data=convert_df_to_csv(st.session_state.Sim_Winner_Export), + file_name='NFL_consim_export.csv', mime='text/csv', ) + + with st.container(): + tab1, tab2, tab3, tab4, tab5 = st.tabs(['Overall Exposures', 'QB Exposures', 'RB Exposures', 'WR Exposures', 'FLEX Exposures']) + with tab1: + if 'player_freq' in st.session_state: + st.dataframe(st.session_state.player_freq.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').format(freq_format, precision=2), use_container_width = True) + st.download_button( + label="Export Exposures", + data=st.session_state.player_freq.to_csv().encode('utf-8'), + file_name='player_freq_export.csv', + mime='text/csv', + ) + with tab2: + if 'qb_freq' in st.session_state: + st.dataframe(st.session_state.qb_freq.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').format(freq_format, precision=2), use_container_width = True) + st.download_button( + label="Export Exposures", + data=st.session_state.qb_freq.to_csv().encode('utf-8'), + file_name='qb_freq_export.csv', + mime='text/csv', + ) + with tab3: + if 'rb_freq' in st.session_state: + st.dataframe(st.session_state.rb_freq.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').format(freq_format, precision=2), use_container_width = True) + st.download_button( + label="Export Exposures", + 
data=st.session_state.rb_freq.to_csv().encode('utf-8'),
+                    file_name='rb_freq_export.csv',
+                    mime='text/csv',
+                )
+        with tab4:
+            if 'wr_freq' in st.session_state:
+                st.dataframe(st.session_state.wr_freq.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').format(freq_format, precision=2), use_container_width = True)
+                st.download_button(
+                    label="Export Exposures",
+                    data=st.session_state.wr_freq.to_csv().encode('utf-8'),
+                    file_name='wr_freq_export.csv',
+                    mime='text/csv',
+                )
+        with tab5:
+            if 'flex_freq' in st.session_state:
+                st.dataframe(st.session_state.flex_freq.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').format(freq_format, precision=2), use_container_width = True)
+                st.download_button(
+                    label="Export Exposures",
+                    data=st.session_state.flex_freq.to_csv().encode('utf-8'),
+                    file_name='flex_freq_export.csv',
+                    mime='text/csv',
+                )
-del gcservice_account
-del dk_roo_raw, fd_roo_raw
-del t_stamp
-del dkid_dict, fdid_dict
-del static_exposure, overall_exposure
-del insert_port1, Contest_Size, sharp_split, Strength_var, scaling_var, Sort_function, Sim_function, strength_grow, field_growth
-del raw_baselines
-del freq_format
 gc.collect()
\ No newline at end of file
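# Optional refactoring sketch (the helper name build_exposure_frame is hypothetical):
# the player/qb/rb/wr/flex exposure blocks above repeat the same ~ten lines per
# position group, so the logic can be expressed once. Column indices follow the new
# 8-column lineup order ['QB','RB1','RB2','WR1','WR2','WR3','FLEX','S_FLEX'], so
# column 7 is the S_FLEX slot (the dst_freq name above is a leftover from the old
# DST column), and the 5000 divisor matches the sim_contest(5000, ...) call.
import numpy as np
import pandas as pd

def build_exposure_frame(lineups, cols, maps_dict, team_list, item_list, sims=5000):
    freq = pd.DataFrame(
        np.column_stack(np.unique(lineups.iloc[:, cols].values, return_counts=True)),
        columns=['Player', 'Freq']
    ).sort_values('Freq', ascending=False).reset_index(drop=True)
    freq['Freq'] = freq['Freq'].astype(int)
    freq['Position'] = freq['Player'].map(maps_dict['Pos_map'])
    freq['Salary'] = freq['Player'].map(maps_dict['Salary_map'])
    freq['Proj Own'] = freq['Player'].map(maps_dict['Own_map']) / 100
    freq['Exposure'] = freq['Freq'] / sims
    freq['Edge'] = freq['Exposure'] - freq['Proj Own']
    # Map players to team items, then swap the numeric items back to team names.
    freq['Team'] = freq['Player'].map(maps_dict['Team_map']).replace(item_list, team_list)
    return freq

# e.g. qb_freq = build_exposure_frame(st.session_state.Sim_Winner_Display, [0], maps_dict, team_list, item_list)
#      s_flex_freq = build_exposure_frame(st.session_state.Sim_Winner_Display, [7], maps_dict, team_list, item_list)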