James McCool committed
Commit 3deb246 · 1 Parent(s): d209a5b

Refactor name standardization and mapping logic in app.py: introduce functions for creating site mappings and standardizing player names, improving code organization and efficiency in handling name variations across dataframes.
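The commit message above describes mapping player names to site IDs with an exact lookup plus a fuzzy fallback. The snippet below is a minimal, self-contained sketch of that idea, not code from app.py: the players, IDs, and the choice of rapidfuzz for `process` are assumptions for illustration (thefuzz exposes the same `extractOne`/`score_cutoff` interface).

import pandas as pd
from rapidfuzz import process  # assumption: app.py's `process` behaves like rapidfuzz/thefuzz

# Made-up stand-ins for the uploaded site CSV and the projection names.
site_csv = pd.DataFrame({
    'Name': ['Shohei Ohtani', 'Luis Robert Jr.'],
    'Name + ID': ['Shohei Ohtani (12345)', 'Luis Robert Jr. (67890)'],
})
projection_names = ['Shohei Ohtani', 'Luis Robert']  # the second is a deliberate near-miss

# Exact name -> ID lookup, built the same way app.py builds it from the site CSV.
name_id_map = dict(zip(site_csv['Name'], site_csv['Name + ID']))

matched = {}
for name in projection_names:
    if name in name_id_map:                       # exact match first
        matched[name] = name_id_map[name]
        continue
    hit = process.extractOne(name, list(name_id_map.keys()), score_cutoff=85)
    matched[name] = name_id_map[hit[0]] if hit else name   # keep the raw name if nothing clears the cutoff

print(matched)
# {'Shohei Ohtani': 'Shohei Ohtani (12345)', 'Luis Robert': 'Luis Robert Jr. (67890)'}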

Files changed (1)
  1. app.py  +92 -51
app.py CHANGED
@@ -135,60 +135,101 @@ with tab1:
     projections = projections.apply(lambda x: x.replace(player_wrong_names_mlb, player_right_names_mlb))
     st.dataframe(projections.head(10))
 
-    if portfolio_file and projections_file:
-        if st.session_state['portfolio'] is not None and projections is not None:
-            st.subheader("Name Matching Analysis")
-            # Initialize projections_df in session state if it doesn't exist
-            if 'projections_df' not in st.session_state:
-                st.session_state['projections_df'] = projections.copy()
-                st.session_state['projections_df']['salary'] = (st.session_state['projections_df']['salary'].astype(str).str.replace(',', '').astype(float).astype(int))
-
-            # Update projections_df with any new matches
-            st.session_state['projections_df'] = find_name_mismatches(st.session_state['portfolio'], st.session_state['projections_df'])
-            try:
-                name_id_map = dict(zip(
-                    st.session_state['csv_file']['Name'],
-                    st.session_state['csv_file']['Name + ID']
-                ))
-                print("Using Name + ID mapping")
-            except:
-                name_id_map = dict(zip(
-                    st.session_state['csv_file']['Nickname'],
-                    st.session_state['csv_file']['Id']
-                ))
-                print("Using Nickname + Id mapping")
-
-            # Get all names at once
-            names = projections['player_names'].tolist()
-            choices = list(name_id_map.keys())
-
-            # Create a dictionary to store matches
-            match_dict = {}
-
-            # Process each name individually but more efficiently
-            for name in names:
-                # Use extractOne with score_cutoff for efficiency
-                match = process.extractOne(
-                    name,
-                    choices,
-                    score_cutoff=85
-                )
-
-                if match:
-                    match_dict[name] = name_id_map[match[0]]
-                else:
-                    match_dict[name] = name
-
-            print(f"Number of entries in match_dict: {len(match_dict)}")
-            print("Sample of match_dict:", list(match_dict.items())[:3])
-
-            # Apply the matches
-            projections['upload_match'] = projections['player_names'].map(match_dict)
-            st.session_state['export_dict'] = match_dict
-
-
-            st.write(st.session_state['export_dict'])
-            st.session_state['origin_portfolio'] = st.session_state['portfolio'].copy()
+    def create_site_mapping(site_csv):
+        """
+        Create a mapping dictionary from the site CSV that handles both Name and Nickname cases.
+
+        Args:
+            site_csv: DataFrame containing site data with either Name/Nickname and Name+ID/Id columns
+
+        Returns:
+            dict: Mapping of all possible name variations to their ID
+        """
+        mapping = {}
+
+        # Check which columns we have
+        has_name = 'Name' in site_csv.columns
+        has_nickname = 'Nickname' in site_csv.columns
+        has_name_id = 'Name + ID' in site_csv.columns
+        has_id = 'Id' in site_csv.columns
+
+        # Create mappings for all possible combinations
+        if has_name and has_name_id:
+            mapping.update(dict(zip(site_csv['Name'], site_csv['Name + ID'])))
+        if has_nickname and has_id:
+            mapping.update(dict(zip(site_csv['Nickname'], site_csv['Id'])))
+
+        return mapping
+
+    def standardize_names(df, name_columns, site_mapping):
+        """
+        Standardize names across a dataframe using the site mapping.
+
+        Args:
+            df: DataFrame containing player names
+            name_columns: List of column names containing player names
+            site_mapping: Dictionary mapping names to IDs from site CSV
+
+        Returns:
+            DataFrame: Updated dataframe with standardized names
+        """
+        df = df.copy()
+
+        # First try exact matches
+        for col in name_columns:
+            df[col] = df[col].map(lambda x: site_mapping.get(x, x))
+
+        # Then try fuzzy matching for any remaining unmatched names
+        unmatched = df[name_columns].apply(lambda x: x.isin(site_mapping.keys())).any(axis=1)
+        if unmatched.any():
+            for col in name_columns:
+                # Only process unmatched names
+                mask = ~df[col].isin(site_mapping.keys())
+                if mask.any():
+                    # Get fuzzy matches for unmatched names
+                    fuzzy_matches = {
+                        name: process.extractOne(name, list(site_mapping.keys()), score_cutoff=90)[0]
+                        for name in df.loc[mask, col].unique()
+                        if process.extractOne(name, list(site_mapping.keys()), score_cutoff=90)
+                    }
+                    # Apply fuzzy matches
+                    df.loc[mask, col] = df.loc[mask, col].map(lambda x: site_mapping.get(fuzzy_matches.get(x, x), x))
+
+        return df
+
+    def process_uploads(site_csv, portfolio_df, projections_df):
+        """
+        Process all three files and ensure name consistency.
+
+        Args:
+            site_csv: DataFrame from site CSV
+            portfolio_df: DataFrame containing portfolio data
+            projections_df: DataFrame containing projections
+        """
+        # Create site mapping
+        site_mapping = create_site_mapping(site_csv)
+
+        # Get portfolio columns that contain player names
+        portfolio_name_cols = [col for col in portfolio_df.columns
+                               if col not in ['salary', 'median', 'Own']]
+
+        # Get projections column name
+        projections_name_col = 'player_names' # adjust if different
+
+        # Standardize names in both dataframes
+        portfolio_df = standardize_names(portfolio_df, portfolio_name_cols, site_mapping)
+        projections_df = standardize_names(projections_df, [projections_name_col], site_mapping)
+
+        return portfolio_df, projections_df
+
+    if portfolio_file and projections_file and csv_file:
+
+        # Process all files
+        portfolio_df, projections_df = process_uploads(csv_file, st.session_state['portfolio'], projections)
+
+        # Store in session state
+        st.session_state['portfolio'] = portfolio_df
+        st.session_state['projections_df'] = projections_df
 
     # with tab2:
     #     if st.button('Clear data', key='reset2'):
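For reference, a hedged usage sketch of create_site_mapping() as added in this commit. The sample rows below are made up; only the 'Name'/'Name + ID' columns the function checks are taken from the diff. standardize_names() would then be applied to each name column of the portfolio and projections frames with the resulting mapping, as process_uploads() does above.

import pandas as pd

# Illustrative site CSV with the Name / Name + ID column pair handled by create_site_mapping().
site_csv = pd.DataFrame({
    'Name': ['Shohei Ohtani', 'Luis Robert Jr.'],
    'Name + ID': ['Shohei Ohtani (12345)', 'Luis Robert Jr. (67890)'],
})

# Requires create_site_mapping() from the diff above to be in scope.
print(create_site_mapping(site_csv))
# {'Shohei Ohtani': 'Shohei Ohtani (12345)', 'Luis Robert Jr.': 'Luis Robert Jr. (67890)'}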