Add baseline

- app.py +34 -3
- evaluate.py +8 -4
- results.json +0 -0
app.py
CHANGED
@@ -9,15 +9,46 @@ with open('results.json', 'r') as file:
 models = [key for key in results.keys()]
 demo = gr.Blocks()
 
-df = pd.DataFrame.from_dict(results[models[0]], orient = "index").reset_index()
+
+from random import randint, random
+
+food_rating_data = pd.DataFrame(
+    {
+        "cuisine": [["Italian", "Mexican", "Chinese"][i % 3] for i in range(100)],
+        "rating": [random() * 4 + 0.5 * (i % 3) for i in range(100)],
+        "price": [randint(10, 50) + 4 * (i % 3) for i in range(100)],
+        "wait": [random() for i in range(100)],
+    }
+)
+
+df = pd.DataFrame.from_dict(results[models[0]]["main-net"], orient = "index").reset_index()
 df.columns = ["Step", "Loss"]
 df["Step"] = pd.to_numeric(df["Step"])
+df["Test"] = "Main-net"
+
+if "baseline" in results[models[0]]:
+    df_baseline = pd.DataFrame.from_dict(results[models[0]]["baseline"], orient = "index").reset_index()
+    df_baseline.columns = ["Step", "Loss"]
+    df_baseline["Step"] = pd.to_numeric(df_baseline["Step"])
+    df_baseline["Test"] = "Baseline"
+
+    df = pd.concat([df, df_baseline])
 
 def return_results(model_name):
     print(model_name)
-    df = pd.DataFrame.from_dict(results[model_name], orient = "index").reset_index()
+    df = pd.DataFrame.from_dict(results[model_name]["main-net"], orient = "index").reset_index()
     df.columns = ["Step", "Loss"]
     df["Step"] = pd.to_numeric(df["Step"])
+    df["Test"] = "Main-net"
+
+    if "baseline" in results[model_name]:
+        df_baseline = pd.DataFrame.from_dict(results[model_name]["baseline"], orient = "index").reset_index()
+        df_baseline.columns = ["Step", "Loss"]
+        df_baseline["Step"] = pd.to_numeric(df_baseline["Step"])
+        df_baseline["Test"] = "Baseline"
+
+        df = pd.concat([df, df_baseline])
+
     return df
 
 with demo:
@@ -27,7 +58,7 @@ with demo:
     dropdown_1 = gr.Dropdown(choices = models, value = models[0])
     button_1 = gr.Button("Submit")
     with gr.Row():
-        chart = gr.LinePlot(df, "Step", "Loss")
+        chart = gr.LinePlot(df, "Step", "Loss", color="Test", x_lim = (0, 2000))
 
     button_1.click(return_results, dropdown_1, chart)
 
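The reworked return_results builds a long-format DataFrame with a "Test" column ("Main-net" vs. "Baseline"), which is what lets gr.LinePlot(color="Test") draw the two curves as separate series. Below is a minimal, self-contained sketch of that pattern, not the app itself: the in-memory results dict, the model names, the sample loss values, and the helper name build_frame are all illustrative stand-ins for what app.py loads from results.json.

import gradio as gr
import pandas as pd

# Hypothetical stand-in for results.json (shape assumed from how app.py indexes it):
# model -> test name -> {step (as a string): loss}.
results = {
    "model-a": {
        "main-net": {"0": 4.0, "500": 3.1, "1000": 2.6},
        "baseline": {"0": 4.0, "500": 3.5, "1000": 3.2},
    },
    "model-b": {
        "main-net": {"0": 4.2, "500": 3.3, "1000": 2.8},
    },
}
models = list(results.keys())

def build_frame(model_name):
    # One row per (Step, Loss), tagged by "Test" so the plot can colour by series.
    frames = []
    for key, label in [("main-net", "Main-net"), ("baseline", "Baseline")]:
        if key in results[model_name]:
            part = pd.DataFrame.from_dict(results[model_name][key], orient="index").reset_index()
            part.columns = ["Step", "Loss"]
            part["Step"] = pd.to_numeric(part["Step"])
            part["Test"] = label
            frames.append(part)
    return pd.concat(frames)

with gr.Blocks() as demo:
    dropdown = gr.Dropdown(choices=models, value=models[0])
    button = gr.Button("Submit")
    with gr.Row():
        chart = gr.LinePlot(build_frame(models[0]), "Step", "Loss", color="Test", x_lim=(0, 2000))
    button.click(build_frame, dropdown, chart)

if __name__ == "__main__":
    demo.launch()

Selecting a model without a "baseline" entry simply yields a single-series frame, matching the if "baseline" in results[...] guard in the diff.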
evaluate.py
CHANGED
@@ -2,17 +2,21 @@ import json
 import random
 
 import torch
+import os
 from distributed_training.data.dataset import DataLoader
 from huggingface_hub import list_repo_refs
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
 device = "cuda"
-test_indices_length =
+test_indices_length = 1000
 
 models = ["distributed/optimized-gpt2-250m", "distributed/optimized-gpt2-250m-v0.1.1", "distributed/gpt2-94m"]
 
-
-results
+if os.path.exists("results.json"):
+    with open('results.json', 'r') as file:
+        results = json.load(file)
+else:
+    results = {}
 
 for model_name in models:
 
@@ -24,7 +28,7 @@ for model_name in models:
     refs = list_repo_refs(model_name, repo_type="model")
     global_epoch = max([int(tag.name) for tag in refs.tags]) if refs.tags else None
 
-    for epoch in range(0,
+    for epoch in range(0,global_epoch, 5):
 
         if str(epoch) in results[model_name].keys():
             continue
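On the evaluation side, the new lines make the script resumable: it reloads results.json when present, walks only every fifth tagged epoch, and skips epochs that already have an entry. A compact sketch of that control flow, under stated assumptions, follows; evaluate_checkpoint, run, and global_epochs are hypothetical names, the per-epoch loss computation is outside this diff, and the flat step -> loss layout ignores the main-net/baseline nesting that app.py reads.

import json
import os

def evaluate_checkpoint(model_name, epoch):
    # Dummy value; the real evaluate.py loads the tagged revision and computes a
    # loss over held-out samples, which is not part of this diff.
    return 0.0

def run(models, global_epochs):
    # Resume from an earlier run instead of recomputing everything.
    if os.path.exists("results.json"):
        with open("results.json", "r") as file:
            results = json.load(file)
    else:
        results = {}

    for model_name in models:
        results.setdefault(model_name, {})  # assumed; initialisation is not shown in the diff
        # Evaluate every 5th tagged epoch, skipping ones already recorded.
        for epoch in range(0, global_epochs[model_name], 5):
            if str(epoch) in results[model_name]:
                continue
            results[model_name][str(epoch)] = evaluate_checkpoint(model_name, epoch)
            # Persist after each checkpoint so an interrupted run can pick up here.
            with open("results.json", "w") as file:
                json.dump(results, file)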
results.json
CHANGED
The diff for this file is too large to render.
See raw diff
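Although the results.json diff is not rendered, the way app.py indexes it suggests roughly the layout below: per model, "main-net" and optionally "baseline" maps from string step keys to losses. This is an inference from the code rather than the raw diff, every number is illustrative, and the evaluate.py hunk checks str(epoch) directly under results[model_name], so the exact nesting may differ.

# Inferred shape of results.json, written as a Python literal; values are made up.
results = {
    "distributed/optimized-gpt2-250m": {
        "main-net": {"0": 4.1, "5": 3.9, "10": 3.7},
        "baseline": {"0": 4.1, "5": 4.0, "10": 3.9},  # only present once a baseline run exists
    },
    "distributed/gpt2-94m": {
        "main-net": {"0": 4.6, "5": 4.4, "10": 4.2},
    },
}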