# app.py
import gradio as gr
import xgboost as xgb
from xgboost import DMatrix
from huggingface_hub import hf_hub_download
from app_training_df_getter import create_app_user_training_df
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from helper import *
import joblib
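# NOTE: `from helper import *` is assumed to provide convert_df, apply_feature_engineering,
# and check_datatypes, which are called in predict_champion() below.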
# Define champion list for dropdowns
CHAMPIONS = [
    "Aatrox", "Ahri", "Akali", "Akshan", "Alistar", "Amumu", "Anivia", "Annie", "Aphelios", "Ashe",
    "Aurelion Sol", "Azir", "Bard", "Bel'Veth", "Blitzcrank", "Brand", "Braum", "Caitlyn", "Camille",
    "Cassiopeia", "Cho'Gath", "Corki", "Darius", "Diana", "Dr. Mundo", "Draven", "Ekko", "Elise",
    "Evelynn", "Ezreal", "Fiddlesticks", "Fiora", "Fizz", "Galio", "Gangplank", "Garen", "Gnar",
    "Gragas", "Graves", "Gwen", "Hecarim", "Heimerdinger", "Illaoi", "Irelia", "Ivern", "Janna",
    "Jarvan IV", "Jax", "Jayce", "Jhin", "Jinx", "Kai'Sa", "Kalista", "Karma", "Karthus", "Kassadin",
    "Katarina", "Kayle", "Kayn", "Kennen", "Kha'Zix", "Kindred", "Kled", "Kog'Maw", "KSante", "LeBlanc",
    "Lee Sin", "Leona", "Lillia", "Lissandra", "Lucian", "Lulu", "Lux", "Malphite", "Malzahar", "Maokai",
    "Master Yi", "Milio", "Miss Fortune", "Mordekaiser", "Morgana", "Naafiri", "Nami", "Nasus", "Nautilus",
    "Neeko", "Nidalee", "Nilah", "Nocturne", "Nunu & Willump", "Olaf", "Orianna", "Ornn", "Pantheon",
    "Poppy", "Pyke", "Qiyana", "Quinn", "Rakan", "Rammus", "Rek'Sai", "Rell", "Renata Glasc", "Renekton",
    "Rengar", "Riven", "Rumble", "Ryze", "Samira", "Sejuani", "Senna", "Seraphine", "Sett", "Shaco",
    "Shen", "Shyvana", "Singed", "Sion", "Sivir", "Skarner", "Sona", "Soraka", "Swain", "Sylas",
    "Syndra", "Tahm Kench", "Taliyah", "Talon", "Taric", "Teemo", "Thresh", "Tristana", "Trundle",
    "Tryndamere", "Twisted Fate", "Twitch", "Udyr", "Urgot", "Varus", "Vayne", "Veigar", "Vel'Koz",
    "Vex", "Vi", "Viego", "Viktor", "Vladimir", "Volibear", "Warwick", "Wukong", "Xayah", "Xerath",
    "Xin Zhao", "Yasuo", "Yone", "Yorick", "Yuumi", "Zac", "Zed", "Zeri", "Ziggs", "Zilean", "Zoe", "Zyra"
]
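# CHAMPIONS doubles as the dropdown choice list and as the lookup table used to map
# predicted champion IDs back to display names in predict_champion().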
# Load model
try:
    model_path = hf_hub_download(
        repo_id="ivwhy/champion-predictor-model",
        filename="champion_predictor.json"
    )
    model = xgb.Booster()
    model.load_model(model_path)
except Exception as e:
    print(f"Error loading model: {e}")
    model = None

try:
    label_encoder = joblib.load('util/label_encoder.joblib')
    print("Label encoder loaded successfully")
except Exception as e:
    print(f"Error loading label encoder: {e}")
    label_encoder = None

# Initialize champion name encoder
champion_encoder = LabelEncoder()
champion_encoder.fit(CHAMPIONS)
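# Note: sklearn's LabelEncoder sorts its classes alphabetically when fit, so champion_encoder's
# integer codes follow the alphabetical order of CHAMPIONS. It is saved at the bottom of this file
# but is not used by the prediction path, which relies on the pre-trained label_encoder above.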
#==================================== Functions =================================================
def get_user_training_df(player_opgg_url):
    try:
        print("========= Inside get_user_training_df(player_opgg_url) ============= \n")
        #print("player_opgg_url: ", player_opgg_url, "\n type(player_opgg_url): ", type(player_opgg_url), "\n")

        # Add input validation
        if not player_opgg_url or not isinstance(player_opgg_url, str):
            return "Invalid URL provided"

        training_df = create_app_user_training_df(player_opgg_url)
        return training_df
    except Exception as e:
        # Add more detailed error information
        import traceback
        error_trace = traceback.format_exc()
        print(f"Full error trace:\n{error_trace}")
        return f"Error getting training data: {str(e)}"
        #return f"Error getting training data: {e}"
def show_stats(player_opgg_url):
    """Display player statistics and recent matches"""
    if not player_opgg_url:
        return "Please enter a player link to OPGG", None
    try:
        training_features = get_user_training_df(player_opgg_url)
        print("training_features: ", training_features, "\n")

        if isinstance(training_features, str):  # Error message
            return training_features, None

        wins = training_features['result'].sum()
        losses = len(training_features) - wins
        winrate = f"{(wins / len(training_features)) * 100:.0f}%"
        favorite_champions = (
            training_features['champion']
            .value_counts()
            .head(3)
            .index.tolist()
        )

        stats_html = f"""
        <div style='padding: 20px; background: #f5f5f5; border-radius: 10px;'>
            <h3>Player's Recent Stats</h3>
            <p>Wins: {wins} | Losses: {losses}</p>
            <p>Winrate: {winrate}</p>
            <p>Favorite Champions: {', '.join(favorite_champions)}</p>
        </div>
        """
        return stats_html, None
    except Exception as e:
        return f"Error processing stats: {e}", None
def predict_champion(player_opgg_url, *champions):
    """Make prediction based on selected champions"""
    if not player_opgg_url or None in champions:
        return "Please fill in all fields"
    try:
        if model is None:
            return "Model not loaded properly"
        if label_encoder is None:
            return "Label encoder not loaded properly"

        # Get and process the data
        training_df = get_user_training_df(player_opgg_url)
        if isinstance(training_df, str):
            return training_df

        training_df = convert_df(training_df)
        #print("type(training_df): ", type(training_df), "\n")
        print("check_datatypes(training_df) BEFORE feature eng: \n", check_datatypes(training_df), "\n")
        training_df = apply_feature_engineering(training_df)
        print("check_datatypes(training_df) AFTER feature eng: \n", check_datatypes(training_df), "\n")

        # Get feature columns
        feature_columns = [col for col in training_df.columns
                           if col not in ['champion', 'region', 'stratify_label']]
        X = training_df[feature_columns]

        # Handle categorical features
        categorical_columns = X.select_dtypes(include=['category']).columns
        X_processed = X.copy()
        for col in categorical_columns:
            X_processed[col] = X_processed[col].cat.codes
        X_processed = X_processed.astype('float32')

        # Create DMatrix and predict
        dtest = DMatrix(X_processed, enable_categorical=True)
        predictions = model.predict(dtest)

        # Get prediction indices
        if len(predictions.shape) > 1:
            pred_indices = predictions.argmax(axis=1)
        else:
            pred_indices = predictions.astype(int)

        # First get the numeric ID from the original label encoder
        decoded_numeric = label_encoder.inverse_transform(pred_indices)

        # Map the numeric ID to an index in the CHAMPIONS list
        # Since the label encoder appears to use champion IDs, these need to be mapped to list indices
        try:
            # Get the first prediction
            champion_id = int(decoded_numeric[0])

            # Print debug information
            print(f"Champion ID from model: {champion_id}")

            # Find the closest matching index
            # Note: this assumes champion IDs roughly correspond to their position in the list
            champion_index = min(max(champion_id - 1, 0), len(CHAMPIONS) - 1)
            predicted_champion = CHAMPIONS[champion_index]
            print(f"Mapped to champion: {predicted_champion}")
            return f"{predicted_champion}"
        except Exception as e:
            print(f"Error mapping champion ID: {e}")
            return f"Error: Could not map champion ID {decoded_numeric[0]}"
    except Exception as e:
        import traceback
        print(f"Full error trace:\n{traceback.format_exc()}")
        return f"Error making prediction: {e}"
''' Previous working version of predict_champion(), kept for reference:
def predict_champion(player_opgg_url, *champions):
    """Make prediction based on selected champions"""
    print("==================== Inside: predict_champion() ===================== \n")
    if not player_opgg_url or None in champions:
        return "Please fill in all fields"
    try:
        if model is None:
            return "Model not loaded properly"
        if label_encoder is None:
            return "Label encoder not loaded properly"

        # Print label encoder information
        print("\nLabel Encoder Information:")
        print("Classes in encoder:", label_encoder.classes_)
        print("Number of classes:", len(label_encoder.classes_))

        # Get and process the data
        training_df = get_user_training_df(player_opgg_url)
        print("training_df retrieved: ", training_df, "\n")
        if isinstance(training_df, str):  # Error message
            return training_df

        # Apply necessary transformations
        training_df = convert_df(training_df)
        training_df = apply_feature_engineering(training_df)
        print("training_df converted and feature engineered: ", training_df, "\n")

        # Get feature columns (excluding champion and region)
        feature_columns = [col for col in training_df.columns
                           if col not in ['champion', 'region', 'stratify_label']]
        X = training_df[feature_columns]
        print("Got feature columns X: ", X, "\n")

        # Handle categorical features
        categorical_columns = X.select_dtypes(include=['category']).columns
        X_processed = X.copy()
        print("Handled categorical features, X_processed = ", X_processed, "\n")

        # Convert categorical columns to numeric
        for col in categorical_columns:
            X_processed[col] = X_processed[col].cat.codes
        print("Converted categorical columns to numeric: ", categorical_columns, "\n")

        # Convert to float32
        X_processed = X_processed.astype('float32')
        print("Converted X_processed to float32: ", X_processed, "\n")

        # Create DMatrix with categorical feature support
        dtest = DMatrix(X_processed, enable_categorical=True)
        print("Converted to DMatrix: ", dtest, "\n")

        # Make prediction
        print("Starting model prediction...\n")
        predictions = model.predict(dtest)
        print("Model prediction complete\n")
        print("\nPrediction Information:")
        print("Raw predictions shape:", predictions.shape)
        print("Raw predictions:", predictions)

        # Get the highest probability prediction
        if len(predictions.shape) > 1:
            pred_indices = predictions.argmax(axis=1)
        else:
            pred_indices = predictions.astype(int)
        print("\nPrediction Indices:")
        print("Indices shape:", pred_indices.shape)
        print("Indices:", pred_indices)

        # Check if indices are within valid range
        print("\nValidation:")
        print("Min index:", pred_indices.min())
        print("Max index:", pred_indices.max())
        print("Valid index range:", 0, len(label_encoder.classes_) - 1)

        # Try to decode predictions
        try:
            decoded_preds = label_encoder.inverse_transform(pred_indices)
            print("\nDecoded Predictions:")
            print("Type:", type(decoded_preds))
            print("Value:", decoded_preds)
            print("==================== Exiting: predict_champion() ===================\n")
            return f"Predicted champion: {decoded_preds[0]}"
        except Exception as e:
            print(f"\nError during decoding: {e}")
            # Fallback: try to directly index into classes
            try:
                champion = label_encoder.classes_[int(pred_indices[0])]
                return f"Predicted champion: {champion}"
            except Exception as e2:
                print(f"Fallback error: {e2}")
                return f"Error decoding prediction: {pred_indices[0]}"
    except Exception as e:
        import traceback
        print(f"Full error trace:\n{traceback.format_exc()}")
        return f"Error making prediction: {e}"
'''
# Define the interface
with gr.Blocks() as demo:
    gr.Markdown("# League of Legends Champion Prediction")

    with gr.Row():
        player_opgg_url = gr.Textbox(label="OPGG Player URL")
        show_button = gr.Button("Show Player Stats")

    with gr.Row():
        stats_output = gr.HTML(label="Player Statistics")
        recent_matches = gr.HTML(label="Recent Matches")

    with gr.Row():
        champion_dropdowns = [
            gr.Dropdown(choices=CHAMPIONS, label=f"Champion {i+1}")
            for i in range(9)
        ]

    with gr.Row():
        predict_button = gr.Button("Predict")
        prediction_output = gr.Text(label="Prediction")

    # Set up event handlers
    show_button.click(
        fn=show_stats,
        inputs=[player_opgg_url],
        outputs=[stats_output, recent_matches]
    )
    predict_button.click(
        fn=predict_champion,
        inputs=[player_opgg_url] + champion_dropdowns,
        outputs=prediction_output
    )
# Optional: save the champion encoder for future use
joblib.dump(champion_encoder, 'champion_encoder.joblib')

# Enable queuing and launch. Hugging Face Spaces executes this file directly,
# so the __main__ guard also covers local testing.
if __name__ == "__main__":
    demo.queue()
    demo.launch(debug=True)