import math

import pandas as pd

from global_func.small_field_preset import small_field_preset
from global_func.large_field_preset import large_field_preset


def hedging_preset(portfolio: pd.DataFrame, lineup_target: int, projections_file: pd.DataFrame) -> pd.DataFrame:
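    """
    Build a hedged tournament portfolio around the highest-owned players and
    team stacks.

    For each of the top-owned players and top-owned hitter teams, the
    portfolio is split into lineups that include the player or stack and
    lineups that do not; the "without" slice is rebuilt with
    small_field_preset and the "with" slice with large_field_preset. If the
    deduplicated result falls short of lineup_target, the per-slice quota is
    raised and the pass is retried (up to four passes).

    Args:
        portfolio (pd.DataFrame): One lineup per row; player columns plus
            metadata columns such as 'Stack', 'median', and 'Own'.
        lineup_target (int): Number of lineups to return.
        projections_file (pd.DataFrame): Projections with 'player_names',
            'position', 'team', and 'ownership' columns.

    Returns:
        pd.DataFrame: At most lineup_target hedged lineups.
    """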

    # Metadata columns; every other portfolio column holds a player name.
    excluded_cols = ['salary', 'median', 'Own', 'Finish_percentile', 'Dupes', 'Stack', 'Size',
                     'Win%', 'Lineup Edge', 'Weighted Own', 'Geomean', 'Diversity']

    # How many players and teams to hedge around.
    list_size = 3

    # The highest-owned individual players on the slate.
    check_own_df = projections_file.sort_values(by='ownership', ascending=False)
    top_owned = check_own_df['player_names'].head(list_size).tolist()

    def get_team_hitter_ownership(projections_file: pd.DataFrame) -> pd.Series:
        """
        Calculate the total ownership of hitters on each team.

        Excludes the P and SP positions, then sums ownership by team.

        Args:
            projections_file (pd.DataFrame): DataFrame with 'position',
                'team', and 'ownership' columns.

        Returns:
            pd.Series: Team names as index and total hitter ownership as
                values, sorted descending.
        """
        hitters_df = projections_file[~projections_file['position'].isin(['P', 'SP'])]
        team_ownership = hitters_df.groupby('team')['ownership'].sum().sort_values(ascending=False)
        return team_ownership

    # The highest-owned team stacks on the slate.
    team_ownership = get_team_hitter_ownership(projections_file)
    top_owned_teams = team_ownership.head(list_size).index.tolist()

    # Divisor for the per-slice lineup quota; lowered on each retry so later
    # passes request more lineups from every preset call.
    init_counter = 6
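
    # Up to four hedging passes; each pass rebuilds the portfolio from
    # scratch and returns early once lineup_target unique lineups exist.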
    for runs in range(1, 5):
        concat_portfolio = pd.DataFrame(columns=portfolio.columns)
        # Quota requested from each preset call during this pass.
        slice_target = math.ceil(lineup_target / (list_size * init_counter))
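
        # Player hedge: for each top-owned player, rebuild the lineups that
        # fade the player with the small-field preset and the lineups that
        # lock the player with the large-field preset.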
        for player in top_owned:
            print(player)
            working_df = portfolio.copy()

            player_columns = [col for col in working_df.columns if col not in excluded_cols]

            # Row-wise test: does this lineup roster the player?
            lock_mask = working_df[player_columns].apply(
                lambda row: player in row.values, axis=1
            )
            remove_mask = ~lock_mask
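
            # Split the portfolio into the fade slice and the lock slice.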
            removed_df = working_df[remove_mask]
            locked_df = working_df[lock_mask]

            removed_lineups = small_field_preset(removed_df, slice_target, excluded_cols)
            print(len(removed_lineups))

            if not locked_df.empty:
                locked_lineups = large_field_preset(locked_df, slice_target, excluded_cols)
                print(len(locked_lineups))
                concat_portfolio = pd.concat([concat_portfolio, removed_lineups, locked_lineups])
            else:
                print(f"No lineups found containing {player}")
                concat_portfolio = pd.concat([concat_portfolio, removed_lineups])
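
        # Team hedge: repeat the same split for each top-owned stack, keyed
        # off the lineup's 'Stack' column.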
        for team in top_owned_teams:
            working_df = portfolio.copy()
            removed_df = working_df[working_df['Stack'] != team]
            teams_df = working_df[working_df['Stack'] == team]

            removed_lineups = small_field_preset(removed_df, slice_target, excluded_cols)

            if not teams_df.empty:
                team_lineups = large_field_preset(teams_df, slice_target, excluded_cols)
                concat_portfolio = pd.concat([concat_portfolio, removed_lineups, team_lineups])
            else:
                print(f"No lineups found with {team} stacked")
                concat_portfolio = pd.concat([concat_portfolio, removed_lineups])

        # Lineups with identical summary stats are treated as duplicates.
        concat_portfolio = concat_portfolio.drop_duplicates(subset=['median', 'Own', 'Lineup Edge', 'Diversity'])

        if len(concat_portfolio) >= lineup_target:
            return concat_portfolio.head(lineup_target)

        # Came up short: lower the divisor so the next pass asks each preset
        # call for more lineups.
        init_counter -= 1

    return concat_portfolio.head(lineup_target)
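

# Minimal usage sketch (the file names and lineup_target value here are
# illustrative assumptions, not part of this module):
#
#     portfolio = pd.read_csv('portfolio.csv')
#     projections = pd.read_csv('projections.csv')
#     hedged = hedging_preset(portfolio, lineup_target=150, projections_file=projections)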