File size: 3,389 Bytes
119b2bf 10c821a 119b2bf 2bbbfdd 119b2bf 33beedc 2bbbfdd 119b2bf 02245bb 119b2bf 6fbfae9 119b2bf 6fbfae9 119b2bf 6fbfae9 119b2bf 4ed60b8 119b2bf 4ed60b8 119b2bf 02245bb fac0157 02245bb fac0157 119b2bf 02245bb ef7a8b2 02245bb ef7a8b2 fac0157 8d3abd2 02245bb fac0157 8d3abd2 119b2bf 433242b |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 |
import pandas as pd
import math
from global_func.small_field_preset import small_field_preset
from global_func.large_field_preset import large_field_preset
def hedging_preset(portfolio: pd.DataFrame, lineup_target: int, projections_file: pd.DataFrame) -> pd.DataFrame:
    """
    Build a hedged portfolio of lineups around the highest-owned players and team stacks.

    For each of the top-owned players and top-owned (hitter-ownership) teams, the
    portfolio is split into lineups WITHOUT the player/stack (run through
    ``small_field_preset``) and lineups WITH it (run through ``large_field_preset``),
    and a slice of each is collected.

    Args:
        portfolio (pd.DataFrame): Lineup pool; player-slot columns plus metric columns.
        lineup_target (int): Maximum number of lineups to return.
        projections_file (pd.DataFrame): Must contain 'player_names', 'position',
            'team', and 'ownership' columns.

    Returns:
        pd.DataFrame: Up to ``lineup_target`` selected lineups.
    """
    excluded_cols = ['salary', 'median', 'Own', 'Finish_percentile', 'Dupes', 'Stack', 'Size', 'Win%', 'Lineup Edge', 'Weighted Own', 'Geomean', 'Similarity Score']
    list_size = 3
    # Per-preset slice size; invariant across both loops, so computed once.
    # NOTE(review): the divisor (list_size * 3) yields 2 slices per entry across
    # 2 * list_size entries — presumably intentional over-generation before the
    # final head(lineup_target) trim; confirm with the author.
    per_slice = math.ceil(lineup_target / (list_size * 3))

    # Top-owned individual players.
    top_owned = (
        projections_file
        .sort_values(by='ownership', ascending=False)['player_names']
        .head(list_size)
        .tolist()
    )

    def _team_hitter_ownership(proj: pd.DataFrame) -> pd.Series:
        """
        Sum hitter ownership per team (pitchers 'P'/'SP' excluded).

        Returns:
            pd.Series: Team -> total hitter ownership, sorted descending.
        """
        hitters = proj[~proj['position'].isin(['P', 'SP'])]
        return hitters.groupby('team')['ownership'].sum().sort_values(ascending=False)

    top_owned_teams = _team_hitter_ownership(projections_file).head(list_size).index.tolist()

    # Player-slot columns are loop-invariant; compute once outside the loops.
    player_columns = [col for col in portfolio.columns if col not in excluded_cols]

    # Collect result frames and concatenate once at the end: seeding pd.concat
    # with an empty DataFrame is deprecated and can upcast dtypes.
    frames = []

    for player in top_owned:
        # True where any roster slot equals this player (single vectorized pass;
        # replaces two row-wise .apply scans, one of which was just the negation
        # of the other).
        contains_player = portfolio[player_columns].eq(player).any(axis=1)
        removed_df = portfolio[~contains_player]   # lineups WITHOUT the player
        locked_df = portfolio[contains_player]     # lineups WITH the player
        frames.append(small_field_preset(removed_df, per_slice, excluded_cols))
        frames.append(large_field_preset(locked_df, per_slice, excluded_cols))

    for team in top_owned_teams:
        removed_df = portfolio[portfolio['Stack'] != team]  # lineups not stacking this team
        teams_df = portfolio[portfolio['Stack'] == team]    # lineups stacking this team
        frames.append(small_field_preset(removed_df, per_slice, excluded_cols))
        frames.append(large_field_preset(teams_df, per_slice, excluded_cols))

    if not frames:
        # No top players/teams found: empty result with the portfolio's schema.
        return portfolio.head(0)
    return pd.concat(frames).head(lineup_target)