# gpu-poor-llm-arena/fun_stats.py
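"""Fun and internal statistics for the GPU-Poor LLM Arena leaderboard,
with helpers to persist the results to Nextcloud or a local JSON file."""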
import json
from datetime import datetime, timezone
from typing import Dict, Any
from nc_py_api import Nextcloud
import arena_config
from leaderboard import load_leaderboard, get_human_readable_name, get_model_size
def get_internal_stats() -> Dict[str, Any]:
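    """Aggregate headline leaderboard numbers (total battles, active models,
    and the current leaders) into a dict for internal tracking."""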
leaderboard = load_leaderboard()
total_battles = sum(
model_data['wins'] + model_data['losses']
for model_data in leaderboard.values()
)
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")  # local time; get_fun_stats() stamps in UTC
active_models = len(leaderboard)
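    # Pair each metric with the model key so max() returns both at once (ties break on the key)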
most_battles = max(
(model_data['wins'] + model_data['losses'], model)
for model, model_data in leaderboard.items()
)
highest_win_rate = max(
(model_data['wins'] / (model_data['wins'] + model_data['losses']) if (model_data['wins'] + model_data['losses']) > 0 else 0, model)
for model, model_data in leaderboard.items()
)
most_diverse_opponent = max(
(len(model_data['opponents']), model)
for model, model_data in leaderboard.items()
)
stats = {
"timestamp": timestamp,
"total_battles": total_battles,
"active_models": active_models,
"most_battles": {
"model": get_human_readable_name(most_battles[1]),
"battles": most_battles[0]
},
"highest_win_rate": {
"model": get_human_readable_name(highest_win_rate[1]),
"win_rate": f"{highest_win_rate[0]:.2%}"
},
"most_diverse_opponent": {
"model": get_human_readable_name(most_diverse_opponent[1]),
"unique_opponents": most_diverse_opponent[0]
}
}
return stats
def save_internal_stats(stats: Dict[str, Any]) -> bool:
nc = Nextcloud(
nextcloud_url=arena_config.NEXTCLOUD_URL,
nc_auth_user=arena_config.NEXTCLOUD_USERNAME,
nc_auth_pass=arena_config.NEXTCLOUD_PASSWORD
)
try:
json_data = json.dumps(stats, indent=2)
nc.files.upload(arena_config.NEXTCLOUD_INTERNAL_STATS_PATH, json_data.encode('utf-8'))
return True
except Exception as e:
print(f"Error saving internal stats to Nextcloud: {str(e)}")
return False
def save_local_stats(stats: Dict[str, Any], filename: str = "internal_stats.json") -> bool:
try:
with open(filename, 'w') as f:
json.dump(stats, f, indent=2)
return True
except Exception as e:
print(f"Error saving internal stats to local file: {str(e)}")
return False
def get_fun_stats() -> Dict[str, Any]:
leaderboard = load_leaderboard()
total_battles = sum(
model_data['wins'] + model_data['losses']
for model_data in leaderboard.values()
)
timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S UTC")
active_models = len(leaderboard)
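    # Headline numbers (same calculations as get_internal_stats)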
most_battles = max(
(model_data['wins'] + model_data['losses'], model)
for model, model_data in leaderboard.items()
)
highest_win_rate = max(
(model_data['wins'] / (model_data['wins'] + model_data['losses']) if (model_data['wins'] + model_data['losses']) > 0 else 0, model)
for model, model_data in leaderboard.items()
)
most_diverse_opponent = max(
(len(model_data['opponents']), model)
for model, model_data in leaderboard.items()
)
# Existing fun stats
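    # Smallest model first; note that min() on (size, win_rate) also prefers the
    # lower win rate when two models share the same size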
    underdog_champion = min(
        ((get_model_size(model), model_data['wins'] / (model_data['wins'] + model_data['losses']))
         if (model_data['wins'] + model_data['losses']) > 0
         else (get_model_size(model), 0), model)
        for model, model_data in leaderboard.items()
    )
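    # Smallest gap between wins and losses among models with more than 10 battles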
most_consistent = min(
(abs(model_data['wins'] - model_data['losses']), model)
for model, model_data in leaderboard.items()
if (model_data['wins'] + model_data['losses']) > 10 # Minimum battles threshold
)
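    # Pairing with the most head-to-head battles (each rivalry is seen from both
    # sides, which does not change which pair max() picks)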
biggest_rivalry = max(
(results['wins'] + results['losses'], (model, opponent))
for model, data in leaderboard.items()
for opponent, results in data['opponents'].items()
)
# New fun stats
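    # Largest parameter-size gap where the smaller model has beaten the bigger one
    # at least once; ties on the gap break on the win count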
david_vs_goliath = max(
((get_model_size(opponent) - get_model_size(model), model_data['opponents'][opponent]['wins']), (model, opponent))
for model, model_data in leaderboard.items()
for opponent in model_data['opponents']
if get_model_size(opponent) > get_model_size(model) and model_data['opponents'][opponent]['wins'] > 0
)
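    # Among models with more losses than wins, the one closest to evening the score
    # (the margin is therefore negative)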
comeback_king = max(
(model_data['wins'] - model_data['losses'], model)
for model, model_data in leaderboard.items()
if model_data['losses'] > model_data['wins']
)
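    # Lowest win rate among models that are still net winners over more than 10 battles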
pyrrhic_victor = min(
(model_data['wins'] / (model_data['wins'] + model_data['losses']) if (model_data['wins'] + model_data['losses']) > 0 else float('inf'), model)
for model, model_data in leaderboard.items()
if model_data['wins'] > model_data['losses'] and (model_data['wins'] + model_data['losses']) > 10
)
stats = {
"timestamp": timestamp,
"total_battles": total_battles,
"active_models": active_models,
"most_battles": {
"model": get_human_readable_name(most_battles[1]),
"battles": most_battles[0]
},
"highest_win_rate": {
"model": get_human_readable_name(highest_win_rate[1]),
"win_rate": f"{highest_win_rate[0]:.2%}"
},
"most_diverse_opponent": {
"model": get_human_readable_name(most_diverse_opponent[1]),
"unique_opponents": most_diverse_opponent[0]
},
"underdog_champion": {
"model": get_human_readable_name(underdog_champion[1]),
"size": f"{underdog_champion[0][0]}B",
"win_rate": f"{underdog_champion[0][1]:.2%}"
},
"most_consistent": {
"model": get_human_readable_name(most_consistent[1]),
"win_loss_difference": most_consistent[0]
},
"biggest_rivalry": {
"model1": get_human_readable_name(biggest_rivalry[1][0]),
"model2": get_human_readable_name(biggest_rivalry[1][1]),
"total_battles": biggest_rivalry[0]
},
"david_vs_goliath": {
"david": get_human_readable_name(david_vs_goliath[1][0]),
"goliath": get_human_readable_name(david_vs_goliath[1][1]),
"size_difference": f"{david_vs_goliath[0][0]:.1f}B",
"wins": david_vs_goliath[0][1]
},
"comeback_king": {
"model": get_human_readable_name(comeback_king[1]),
"comeback_margin": comeback_king[0]
},
"pyrrhic_victor": {
"model": get_human_readable_name(pyrrhic_victor[1]),
"win_rate": f"{pyrrhic_victor[0]:.2%}"
}
}
return stats
if __name__ == "__main__":
    stats = get_internal_stats()
    if not save_internal_stats(stats):
        save_local_stats(stats)  # keep a local copy if the Nextcloud upload fails