James McCool committed
Commit · 9f87d22
Parent(s): 972bd0b
Refactor contest data handling in app.py to improve player data processing
- Updated session state references from 'Adj_Contest' to 'Contest' for consistency and clarity.
- Simplified mapping dictionary creation by directly using existing session state dictionaries, improving code readability (see the sketch after this list).
- Removed unused 'Showdown' type processing logic, streamlining the data handling path.
- These changes support ongoing efforts to strengthen data integrity and improve the user experience within the application.
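To illustrate the second point: the mapping dictionaries are now plain references to dictionaries already kept in session state, rather than being rebuilt with dict(zip(...)) from a projections dataframe. The following is a minimal, self-contained sketch of the new construction; the player data is a hypothetical stand-in for the session-state contents, and the key and column names follow the diff below.

# Minimal sketch of the simplified map_dict construction.
# The dictionaries below are hypothetical stand-ins for what app.py keeps in
# st.session_state['pos_dict'], ['team_dict'], ['salary_dict'], ['ownership_dict'],
# and ['ownership_df'].
import pandas as pd

pos_dict = {'Player A': 'QB', 'Player B': 'WR'}
team_dict = {'Player A': 'KC', 'Player B': 'KC'}
salary_dict = {'Player A': 8000, 'Player B': 6200}
ownership_dict = {'Player A': 25.0, 'Player B': 12.5}
ownership_df = pd.DataFrame({'Player': ['Player A', 'Player B'], 'Own': [25.0, 12.5]})

map_dict = {
    'pos_map': pos_dict,          # direct reference, no dict(zip(...)) rebuild
    'team_map': team_dict,
    'salary_map': salary_dict,
    'own_map': ownership_dict,
    'own_percent_rank': dict(zip(ownership_df['Player'], ownership_df['Own'].rank(pct=True))),
}
print(map_dict['own_percent_rank'])   # {'Player A': 1.0, 'Player B': 0.5}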
app.py
CHANGED
@@ -107,25 +107,21 @@ with tab1:
 
 with tab2:
     excluded_cols = ['BaseName', 'EntryCount']
-    if 'Adj_Contest' in st.session_state:
-        player_columns = [col for col in st.session_state['Adj_Contest'].columns if col not in excluded_cols]
+    if 'Contest' in st.session_state:
+        player_columns = [col for col in st.session_state['Contest'].columns if col not in excluded_cols]
         for col in player_columns:
-            st.session_state['Adj_Contest'][col] = st.session_state['Adj_Contest'][col].astype(str)
+            st.session_state['Contest'][col] = st.session_state['Contest'][col].astype(str)
 
         # Create mapping dictionaries
         map_dict = {
-            'pos_map':
-            'team_map':
-            'salary_map':
-            'proj_map':
-            'own_map':
-            'own_percent_rank': dict(zip(st.session_state['Adj_projections_df']['player_names'], st.session_state['Adj_projections_df']['ownership'].rank(pct=True))),
-            'cpt_salary_map': dict(zip(st.session_state['Adj_projections_df']['player_names'], st.session_state['Adj_projections_df']['salary'])),
-            'cpt_proj_map': dict(zip(st.session_state['Adj_projections_df']['player_names'], st.session_state['Adj_projections_df']['median'] * 1.5)),
-            'cpt_own_map': dict(zip(st.session_state['Adj_projections_df']['player_names'], st.session_state['Adj_projections_df']['captain ownership']))
+            'pos_map': st.session_state['pos_dict'],
+            'team_map': st.session_state['team_dict'],
+            'salary_map': st.session_state['salary_dict'],
+            'own_map': st.session_state['ownership_dict'],
+            'own_percent_rank': dict(zip(st.session_state['ownership_df']['Player'], st.session_state['ownership_df']['Own'].rank(pct=True)))
         }
         # Create a copy of the dataframe for calculations
-        working_df = st.session_state['Adj_Contest'].copy()
+        working_df = st.session_state['Contest'].copy()
 
         if type_var == 'Classic':
             working_df['stack'] = working_df.apply(
@@ -143,9 +139,7 @@ with tab2:
                 axis=1
             )
             working_df['salary'] = working_df.apply(lambda row: sum(map_dict['salary_map'].get(player, 0) for player in row), axis=1)
-            working_df['median'] = working_df.apply(lambda row: sum(map_dict['proj_map'].get(player, 0) for player in row), axis=1)
             working_df['actual_fpts'] = working_df.apply(lambda row: sum(st.session_state['actual_dict'].get(player, 0) for player in row), axis=1)
-            working_df['Own'] = working_df.apply(lambda row: sum(map_dict['own_map'].get(player, 0) for player in row), axis=1)
             working_df['actual_own'] = working_df.apply(lambda row: sum(st.session_state['ownership_dict'].get(player, 0) for player in row), axis=1)
             working_df['sorted'] = working_df[player_columns].apply(
                 lambda row: ','.join(sorted(row.values)),
@@ -156,45 +150,6 @@ with tab2:
             working_df['percentile_finish'] = working_df['index'].rank(pct=True)
             working_df['finish'] = working_df['index']
             working_df = working_df.drop(['sorted', 'index'], axis=1)
-        elif type_var == 'Showdown':
-            working_df['stack'] = working_df.apply(
-                lambda row: Counter(
-                    map_dict['team_map'].get(player, '') for player in row
-                    if map_dict['team_map'].get(player, '') != ''
-                ).most_common(1)[0][0] if any(map_dict['team_map'].get(player, '') for player in row) else '',
-                axis=1
-            )
-            working_df['stack_size'] = working_df.apply(
-                lambda row: Counter(
-                    map_dict['team_map'].get(player, '') for player in row
-                    if map_dict['team_map'].get(player, '') != ''
-                ).most_common(1)[0][1] if any(map_dict['team_map'].get(player, '') for player in row) else '',
-                axis=1
-            )
-            working_df['salary'] = working_df.apply(
-                lambda row: map_dict['cpt_salary_map'].get(row.iloc[0], 0) +
-                            sum(map_dict['salary_map'].get(player, 0) for player in row.iloc[1:]),
-                axis=1
-            )
-            working_df['median'] = working_df.apply(
-                lambda row: map_dict['cpt_proj_map'].get(row.iloc[0], 0) +
-                            sum(map_dict['proj_map'].get(player, 0) for player in row.iloc[1:]),
-                axis=1
-            )
-            working_df['Own'] = working_df.apply(
-                lambda row: map_dict['cpt_own_map'].get(row.iloc[0], 0) +
-                            sum(map_dict['own_map'].get(player, 0) for player in row.iloc[1:]),
-                axis=1
-            )
-            working_df['sorted'] = working_df[player_columns].apply(
-                lambda row: row[0] + '|' + ','.join(sorted(row[1:].values)),
-                axis=1
-            )
-            working_df['dupes'] = working_df.groupby('sorted').transform('size')
-            working_df = working_df.reset_index()
-            working_df['percentile_finish'] = working_df['index'].rank(pct=True)
-            working_df['finish'] = working_df['index']
-            working_df = working_df.drop(['sorted', 'index'], axis=1)
         st.session_state['field_player_frame'] = create_player_exposures(working_df, player_columns)
         st.session_state['field_stack_frame'] = create_stack_exposures(working_df)
 
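For orientation, the Classic-path aggregation retained by this commit sums per-player values from these maps across each lineup row. Below is a minimal standalone sketch under assumed data: the players, the 'P1'/'P2' columns, and the plain DataFrame standing in for st.session_state['Contest'] are all hypothetical, and the apply is narrowed to the player columns for clarity.

# Standalone sketch of the retained Classic-path aggregation; all data is hypothetical.
import pandas as pd

salary_map = {'Player A': 8000, 'Player B': 6200, 'Player C': 4500}
ownership_dict = {'Player A': 25.0, 'Player B': 12.5, 'Player C': 3.0}

player_columns = ['P1', 'P2']
working_df = pd.DataFrame({'P1': ['Player A', 'Player C'], 'P2': ['Player B', 'Player B']})

# Sum mapped values across the player columns of each row, defaulting to 0 for unknown names.
working_df['salary'] = working_df[player_columns].apply(
    lambda row: sum(salary_map.get(player, 0) for player in row), axis=1)
working_df['actual_own'] = working_df[player_columns].apply(
    lambda row: sum(ownership_dict.get(player, 0) for player in row), axis=1)

# Order-insensitive lineup key, used upstream for duplicate counting.
working_df['sorted'] = working_df[player_columns].apply(
    lambda row: ','.join(sorted(row.values)), axis=1)
working_df['dupes'] = working_df.groupby('sorted')['sorted'].transform('size')

print(working_df[['salary', 'actual_own', 'dupes']])

The app itself applies these lambdas over the full working_df and relies on .get(..., 0) to ignore non-player columns such as 'BaseName' and 'EntryCount'; the sketch selects player_columns explicitly only to keep the example small.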