Update app.py
app.py
CHANGED
@@ -30,8 +30,8 @@ def add_new_eval(
     family_prediction_dataset,
 ):
     representation_name = model_name_textbox if revision_name_textbox == '' else revision_name_textbox
-    return None
     results = run_probe(benchmark_type, representation_name, human_file, skempi_file, similarity_tasks, function_prediction_aspect, function_prediction_dataset, family_prediction_dataset)
+    return results
 
     for benchmark_type in results:
         if benchmark_type == 'similarity':
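
Note on this hunk: `add_new_eval` now hands the `run_probe` results back to the caller instead of bailing out with `return None`. In Gradio, whatever a callback returns is mapped onto the components named in its `outputs`, so the submission handler's return value can refresh the results table directly. (As committed, the added `return results` sits above the `for benchmark_type in results:` loop, which leaves that loop unreachable.) A minimal self-contained sketch of the return-to-output pattern, assuming a stub `run_probe` and a `Textbox` output; the stub and the button/component names are illustrative, not the Space's real code:

import gradio as gr

def run_probe(benchmark_type, representation_name):
    # Illustrative stand-in for the Space's real run_probe.
    return {"similarity": 0.87, "family": 0.79}

def add_new_eval(benchmark_type, representation_name):
    results = run_probe(benchmark_type, representation_name)
    return str(results)  # the return value fills the component listed in `outputs`

with gr.Blocks() as block:
    benchmark_type = gr.Textbox(label="Benchmark type")
    model_name = gr.Textbox(label="Representation name")
    submit_button = gr.Button("Submit Eval")
    data_component = gr.Textbox(label="Results")
    # Gradio routes add_new_eval's return value into data_component.
    submit_button.click(add_new_eval, inputs=[benchmark_type, model_name], outputs=data_component)

block.launch()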
@@ -97,6 +97,9 @@ with block:
         outputs=data_component
     )
 
+    # Dropdown for benchmark type
+    benchmark_type_selector = gr.Dropdown(choices=list(benchmark_specific_metrics.keys()), label="Select Benchmark Type")
+
     # Dynamic selectors
     x_metric_selector = gr.Dropdown(choices=[], label="Select X-axis Metric", visible=False)
     y_metric_selector = gr.Dropdown(choices=[], label="Select Y-axis Metric", visible=False)
@@ -112,50 +115,6 @@ with block:
     plot_button = gr.Button("Plot")
     plot_output = gr.Image(label="Plot")
 
-    # Update metric selectors based on benchmark type
-    def update_metric_choices(benchmark_type):
-        if benchmark_type == 'similarity':
-            # Show x and y metric selectors for similarity
-            metric_names = benchmark_specific_metrics.get(benchmark_type, [])
-            return (
-                gr.update(choices=metric_names, value=metric_names[0], visible=True),
-                gr.update(choices=metric_names, value=metric_names[1], visible=True),
-                gr.update(visible=False), gr.update(visible=False),
-                gr.update(visible=False), gr.update(visible=False)
-            )
-        elif benchmark_type == 'function':
-            # Show aspect and dataset type selectors for function
-            aspect_types = benchmark_specific_metrics[benchmark_type]['aspect_types']
-            dataset_types = benchmark_specific_metrics[benchmark_type]['dataset_types']
-            return (
-                gr.update(visible=False), gr.update(visible=False),
-                gr.update(choices=aspect_types, value=aspect_types[0], visible=True),
-                gr.update(choices=dataset_types, value=dataset_types[0], visible=True),
-                gr.update(visible=False), gr.update(visible=False)
-            )
-        elif benchmark_type == 'family':
-            # Show dataset and metric selectors for family
-            datasets = benchmark_specific_metrics[benchmark_type]['datasets']
-            metrics = benchmark_specific_metrics[benchmark_type]['metrics']
-            return (
-                gr.update(visible=False), gr.update(visible=False),
-                gr.update(visible=False), gr.update(visible=False),
-                gr.update(choices=datasets, value=datasets[0], visible=True),
-                gr.update(choices=metrics, value=metrics[0], visible=True)
-            )
-        elif benchmark_type == 'affinity':
-            # Show single metric selector for affinity
-            metrics = benchmark_specific_metrics[benchmark_type]
-            return (
-                gr.update(visible=False), gr.update(visible=False),
-                gr.update(visible=False), gr.update(visible=False),
-                gr.update(visible=False), gr.update(choices=metrics, value=metrics[0], visible=True)
-            )
-        return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
-
-    # Dropdown for benchmark type
-    benchmark_type_selector = gr.Dropdown(choices=list(benchmark_specific_metrics.keys()), label="Select Benchmark Type")
-
     # Update selectors when benchmark type changes
     benchmark_type_selector.change(
         update_metric_choices,
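
Note on the last two hunks: the benchmark-type dropdown is created earlier in the layout, and the event wiring stays the same: the dropdown's change event calls `update_metric_choices`, and the tuple of `gr.update(...)` values it returns is applied, in order, to the components listed in `outputs`. A minimal sketch of that fan-out, using toy data in place of the Space's real `benchmark_specific_metrics`; the two-output version below is illustrative, while the real handler returns six updates for its six selectors:

import gradio as gr

# Toy stand-in for the Space's benchmark_specific_metrics mapping.
benchmark_specific_metrics = {
    "similarity": ["pearson", "spearman"],
    "affinity": ["rmse", "pearson"],
}

def update_metric_choices(benchmark_type):
    # Each gr.update(...) in the returned tuple targets one output component, in order.
    if benchmark_type == "similarity":
        names = benchmark_specific_metrics[benchmark_type]
        return (
            gr.update(choices=names, value=names[0], visible=True),
            gr.update(choices=names, value=names[1], visible=True),
        )
    return gr.update(visible=False), gr.update(visible=False)

with gr.Blocks() as block:
    benchmark_type_selector = gr.Dropdown(choices=list(benchmark_specific_metrics.keys()), label="Select Benchmark Type")
    x_metric_selector = gr.Dropdown(choices=[], label="Select X-axis Metric", visible=False)
    y_metric_selector = gr.Dropdown(choices=[], label="Select Y-axis Metric", visible=False)
    # The change event feeds the dropdown's value in and fans the update tuple out.
    benchmark_type_selector.change(
        update_metric_choices,
        inputs=benchmark_type_selector,
        outputs=[x_metric_selector, y_metric_selector],
    )

block.launch()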