Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -18,6 +18,7 @@ import sys
|
|
18 |
from internal_stats import get_fun_stats
|
19 |
import threading
|
20 |
import time
|
|
|
21 |
|
22 |
|
23 |
# Initialize logging for errors only
|
@@ -53,13 +54,29 @@ def call_ollama_api(model, prompt):
|
|
53 |
return f"Error: Unable to get response from the model."
|
54 |
|
55 |
# Generate responses using two randomly selected models
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
56 |
def generate_responses(prompt):
|
57 |
available_models = get_available_models()
|
58 |
if len(available_models) < 2:
|
59 |
return "Error: Not enough models available", "Error: Not enough models available", None, None
|
60 |
|
61 |
-
|
62 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
63 |
|
64 |
model_a_response = call_ollama_api(model_a, prompt)
|
65 |
model_b_response = call_ollama_api(model_b, prompt)
|
@@ -213,7 +230,6 @@ def get_leaderboard_chart():
|
|
213 |
)
|
214 |
|
215 |
chart_data = fig.to_json()
|
216 |
-
print(f"Chart size: {sys.getsizeof(chart_data)} bytes")
|
217 |
return fig
|
218 |
|
219 |
def new_battle():
|
|
|
18 |
from internal_stats import get_fun_stats
|
19 |
import threading
|
20 |
import time
|
21 |
+
from collections import Counter
|
22 |
|
23 |
|
24 |
# Initialize logging for errors only
|
|
|
54 |
return f"Error: Unable to get response from the model."
|
55 |
|
56 |
# Generate responses using two randomly selected models
|
57 |
+
def get_battle_counts():
    """Tally the total number of battles each model has fought.

    Reads the current leaderboard and sums wins + losses per model.

    Returns:
        Counter: model name -> total battle count (wins + losses).
    """
    standings = get_current_leaderboard()
    # Counter (rather than a plain dict) lets callers rely on a 0 default
    # for models that have not battled yet.
    return Counter({
        name: record['wins'] + record['losses']
        for name, record in standings.items()
    })
|
63 |
+
|
64 |
def generate_responses(prompt):
|
65 |
available_models = get_available_models()
|
66 |
if len(available_models) < 2:
|
67 |
return "Error: Not enough models available", "Error: Not enough models available", None, None
|
68 |
|
69 |
+
battle_counts = get_battle_counts()
|
70 |
+
|
71 |
+
# Sort models by battle count (ascending)
|
72 |
+
sorted_models = sorted(available_models, key=lambda m: battle_counts.get(m, 0))
|
73 |
+
|
74 |
+
# Select the first model (least battles)
|
75 |
+
model_a = sorted_models[0]
|
76 |
+
|
77 |
+
# For the second model, use weighted random selection
|
78 |
+
weights = [1 / (battle_counts.get(m, 1) + 1) for m in sorted_models[1:]]
|
79 |
+
model_b = random.choices(sorted_models[1:], weights=weights, k=1)[0]
|
80 |
|
81 |
model_a_response = call_ollama_api(model_a, prompt)
|
82 |
model_b_response = call_ollama_api(model_b, prompt)
|
|
|
230 |
)
|
231 |
|
232 |
chart_data = fig.to_json()
|
|
|
233 |
return fig
|
234 |
|
235 |
def new_battle():
|