import pandas as pd
import numpy as np

def volatility_preset(portfolio: pd.DataFrame, lineup_target: int, exclude_cols: list):
    # Metadata columns that are not player slots; the remaining columns hold players.
    excluded_cols = ['salary', 'median', 'Own', 'Finish_percentile', 'Dupes', 'Stack', 'Size', 'Win%', 'Lineup Edge', 'Weighted Own', 'Geomean', 'Similarity Score']
    player_columns = [col for col in portfolio.columns if col not in excluded_cols]

    # Loosen the similarity slack step by step until enough lineups survive the filter.
    for slack_var in range(1, 20):
        concat_portfolio = pd.DataFrame(columns=portfolio.columns)

        for team in portfolio['Stack'].unique():
            rows_to_drop = []
            working_portfolio = portfolio.copy()
            # Rank this stack's lineups from highest to lowest Lineup Edge.
            working_portfolio = working_portfolio[working_portfolio['Stack'] == team].sort_values(by='Lineup Edge', ascending=False)
            working_portfolio = working_portfolio.reset_index(drop=True)
            # The threshold starts at the top lineup's Similarity Score, inflated by the current slack.
            curr_own_type_max = working_portfolio.loc[0, 'Similarity Score'] + (slack_var / 20 * working_portfolio.loc[0, 'Similarity Score'])

            for i in range(1, len(working_portfolio)):
                if working_portfolio.loc[i, 'Similarity Score'] < curr_own_type_max:
                    # Score falls below the current threshold; flag this lineup for removal.
                    rows_to_drop.append(i)
                else:
                    # Score clears the threshold; keep the lineup and rebase the threshold on its score.
                    curr_own_type_max = working_portfolio.loc[i, 'Similarity Score'] + (slack_var / 20 * working_portfolio.loc[i, 'Similarity Score'])

            working_portfolio = working_portfolio.drop(rows_to_drop).reset_index(drop=True)
            concat_portfolio = pd.concat([concat_portfolio, working_portfolio])

        # Enough lineups survived at this slack level; return the best of them.
        if len(concat_portfolio) >= lineup_target:
            return concat_portfolio.sort_values(by='Lineup Edge', ascending=False).head(lineup_target)

    # Even at the loosest slack the pool falls short of the target; return whatever survived.
    return concat_portfolio.sort_values(by='Lineup Edge', ascending=False)
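

# Example usage (a minimal sketch): the demo DataFrame below, its values, and the choice of
# a 'QB' player column are assumptions for illustration only; the metadata column names
# match those the function expects.
if __name__ == '__main__':
    demo_portfolio = pd.DataFrame({
        'QB': ['A', 'B', 'C', 'D'],
        'salary': [49800, 49500, 50000, 49900],
        'median': [120.5, 118.2, 121.0, 119.4],
        'Own': [95.0, 88.0, 110.0, 102.0],
        'Finish_percentile': [0.12, 0.15, 0.10, 0.13],
        'Dupes': [1.2, 0.8, 2.1, 1.5],
        'Stack': ['NYJ', 'NYJ', 'BUF', 'BUF'],
        'Size': [4, 4, 3, 3],
        'Win%': [0.011, 0.009, 0.013, 0.010],
        'Lineup Edge': [0.35, 0.28, 0.41, 0.30],
        'Weighted Own': [14.2, 13.1, 16.0, 15.3],
        'Geomean': [8.4, 7.9, 9.1, 8.6],
        'Similarity Score': [0.62, 0.55, 0.71, 0.48],
    })

    # Request three lineups; exclude_cols is accepted by the signature but the function
    # currently relies on its internal excluded_cols list.
    trimmed = volatility_preset(demo_portfolio, lineup_target=3, exclude_cols=[])
    print(trimmed[['Stack', 'Lineup Edge', 'Similarity Score']])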