James McCool committed on
Commit
55b8b36
·
1 Parent(s): 8393233

Refactor lineup generation logic in hedging_preset.py to improve processing efficiency. Introduced an initialization counter for lineup adjustments and added checks for empty DataFrames before generating lineups. Enhanced debug messages for better visibility into the lineup concatenation process.

Browse files
Files changed (1) hide show
  1. global_func/hedging_preset.py +46 -38
global_func/hedging_preset.py CHANGED
@@ -33,51 +33,59 @@ def hedging_preset(portfolio: pd.DataFrame, lineup_target: int, projections_file
33
 
34
  team_ownership = get_team_hitter_ownership(projections_file)
35
  top_owned_teams = team_ownership.head(list_size).index.tolist()
 
36
 
37
- concat_portfolio = pd.DataFrame(columns=portfolio.columns)
 
 
 
 
38
 
39
- for player in top_owned:
40
- print(player)
41
- working_df = portfolio.copy()
42
 
43
- # Create mask for lineups that contain any of the removed players
44
- player_columns = [col for col in working_df.columns if col not in excluded_cols]
 
 
 
 
45
 
46
- remove_mask = working_df[player_columns].apply(
47
- lambda row: player not in list(row), axis=1
48
- )
49
- lock_mask = working_df[player_columns].apply(
50
- lambda row: player in list(row), axis=1
51
- )
52
 
53
- removed_df = working_df[remove_mask]
54
- locked_df = working_df[lock_mask]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55
 
56
- removed_lineups = small_field_preset(removed_df, math.ceil(lineup_target / (list_size * 4)), excluded_cols)
57
- print(len(removed_lineups))
58
- # Check if locked_df is empty before calling large_field_preset
59
- if not locked_df.empty:
60
- locked_lineups = large_field_preset(locked_df, math.ceil(lineup_target / (list_size * 4)), excluded_cols)
61
- print(len(locked_lineups))
62
- concat_portfolio = pd.concat([concat_portfolio, removed_lineups, locked_lineups])
63
- else:
64
- # If no lineups contain this player, just add the removed lineups
65
- print(f"No lineups found containing {player}")
66
- concat_portfolio = pd.concat([concat_portfolio, removed_lineups])
67
-
68
- for team in top_owned_teams:
69
- working_df = portfolio.copy()
70
- removed_df = working_df[working_df['Stack'] != team]
71
- teams_df = working_df[working_df['Stack'] == team]
72
 
73
- removed_lineups = small_field_preset(removed_df, math.ceil(lineup_target / (list_size * 4)), excluded_cols)
74
- # Check if teams_df is empty before calling large_field_preset
75
- if not teams_df.empty:
76
- team_lineups = large_field_preset(teams_df, math.ceil(lineup_target / (list_size * 4)), excluded_cols)
77
- concat_portfolio = pd.concat([concat_portfolio, removed_lineups, team_lineups])
78
  else:
79
- # If no lineups have this team stacked, just add the removed lineups
80
- print(f"No lineups found with {team} stacked")
81
- concat_portfolio = pd.concat([concat_portfolio, removed_lineups])
82
 
83
  return concat_portfolio.head(lineup_target)
 
33
 
34
  team_ownership = get_team_hitter_ownership(projections_file)
35
  top_owned_teams = team_ownership.head(list_size).index.tolist()
36
+ init_counter = 6
37
 
38
+ for runs in range(1, 5):
39
+ concat_portfolio = pd.DataFrame(columns=portfolio.columns)
40
+ for player in top_owned:
41
+ print(player)
42
+ working_df = portfolio.copy()
43
 
44
+ # Create mask for lineups that contain any of the removed players
45
+ player_columns = [col for col in working_df.columns if col not in excluded_cols]
 
46
 
47
+ remove_mask = working_df[player_columns].apply(
48
+ lambda row: player not in list(row), axis=1
49
+ )
50
+ lock_mask = working_df[player_columns].apply(
51
+ lambda row: player in list(row), axis=1
52
+ )
53
 
54
+ removed_df = working_df[remove_mask]
55
+ locked_df = working_df[lock_mask]
 
 
 
 
56
 
57
+ removed_lineups = small_field_preset(removed_df, math.ceil(lineup_target / (list_size * init_counter)), excluded_cols)
58
+ print(len(removed_lineups))
59
+ # Check if locked_df is empty before calling large_field_preset
60
+ if not locked_df.empty:
61
+ locked_lineups = large_field_preset(locked_df, math.ceil(lineup_target / (list_size * init_counter)), excluded_cols)
62
+ print(len(locked_lineups))
63
+ concat_portfolio = pd.concat([concat_portfolio, removed_lineups, locked_lineups])
64
+ else:
65
+ # If no lineups contain this player, just add the removed lineups
66
+ print(f"No lineups found containing {player}")
67
+ concat_portfolio = pd.concat([concat_portfolio, removed_lineups])
68
+
69
+ for team in top_owned_teams:
70
+ working_df = portfolio.copy()
71
+ removed_df = working_df[working_df['Stack'] != team]
72
+ teams_df = working_df[working_df['Stack'] == team]
73
 
74
+ removed_lineups = small_field_preset(removed_df, math.ceil(lineup_target / (list_size * init_counter)), excluded_cols)
75
+ # Check if teams_df is empty before calling large_field_preset
76
+ if not teams_df.empty:
77
+ team_lineups = large_field_preset(teams_df, math.ceil(lineup_target / (list_size * init_counter)), excluded_cols)
78
+ concat_portfolio = pd.concat([concat_portfolio, removed_lineups, team_lineups])
79
+ else:
80
+ # If no lineups have this team stacked, just add the removed lineups
81
+ print(f"No lineups found with {team} stacked")
82
+ concat_portfolio = pd.concat([concat_portfolio, removed_lineups])
83
+
84
+ concat_portfolio = concat_portfolio.drop_duplicates(subset=['median', 'Own', 'Lineup Edge', 'Similarity Score'])
 
 
 
 
 
85
 
86
+ if len(concat_portfolio) >= lineup_target:
87
+ return concat_portfolio.head(lineup_target)
 
 
 
88
  else:
89
+ init_counter -= 1
 
 
90
 
91
  return concat_portfolio.head(lineup_target)