import pandas as pd
import math
from global_func.small_field_preset import small_field_preset
from global_func.large_field_preset import large_field_preset

def hedging_preset(portfolio: pd.DataFrame, lineup_target: int, projections_file: pd.DataFrame) -> pd.DataFrame:
    """For each of the three highest-owned players, split the portfolio into lineups
    without that player and lineups with that player, rebuild each half with the
    small-field and large-field presets, and return the top lineups by median."""

    # Metric columns that describe a lineup rather than name a player
    excluded_cols = ['salary', 'median', 'Own', 'Finish_percentile', 'Dupes', 'Stack', 'Size', 'Win%', 'Lineup Edge', 'Weighted Own', 'Geomean', 'Similarity Score']

    # Identify the three highest-owned players in the projections
    check_own_df = projections_file.sort_values(by='Own', ascending=False)
    top_owned = check_own_df['player_names'].head(3).tolist()

    # Collect the rebuilt lineup sets from each split and concatenate once at the end
    split_lineups = []

    for player in top_owned:
        working_df = portfolio.copy()

        # Player columns are every column that is not a lineup metric
        player_columns = [col for col in working_df.columns if col not in excluded_cols]

        # Lineups that fade this high-owned player (the hedge side)
        remove_mask = working_df[player_columns].apply(
            lambda row: player not in list(row), axis=1
        )
        # Lineups that roster this high-owned player (the lock side)
        lock_mask = working_df[player_columns].apply(
            lambda row: player in list(row), axis=1
        )

        removed_df = working_df[remove_mask]
        locked_df = working_df[lock_mask]

        # Rebuild each half: the hedge side with the small-field preset, the lock side with the large-field preset
        removed_lineups = small_field_preset(removed_df, math.ceil(lineup_target / 2), excluded_cols)
        locked_lineups = large_field_preset(locked_df, math.ceil(lineup_target / 2), excluded_cols)

        split_lineups.extend([removed_lineups, locked_lineups])

    # Combine all splits and keep the best lineups by median projection, up to the target count
    concat_portfolio = pd.concat(split_lineups)
    return concat_portfolio.sort_values(by='median', ascending=False).head(lineup_target)
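

if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): the toy columns and
    # player names below are hypothetical, and the small_field_preset /
    # large_field_preset rebuild step is skipped because those functions live
    # elsewhere in the repo. This only illustrates the ownership-based split that
    # hedging_preset applies for each top-owned player.
    toy_portfolio = pd.DataFrame({
        'QB': ['Player A', 'Player B', 'Player A'],
        'RB': ['Player C', 'Player C', 'Player D'],
        'salary': [49800, 50000, 49500],
        'median': [120.5, 118.2, 122.9],
        'Own': [95.0, 88.0, 101.0],
    })
    metric_cols = ['salary', 'median', 'Own']
    player_cols = [c for c in toy_portfolio.columns if c not in metric_cols]

    high_owned = 'Player A'
    lock_side = toy_portfolio[toy_portfolio[player_cols].apply(
        lambda row: high_owned in list(row), axis=1)]
    hedge_side = toy_portfolio[toy_portfolio[player_cols].apply(
        lambda row: high_owned not in list(row), axis=1)]
    print(f"Lineups locking {high_owned}:\n{lock_side}\n")
    print(f"Lineups fading {high_owned}:\n{hedge_side}")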