mgyigit committed
Commit d167cc2 · verified · 1 Parent(s): 761c866

Update app.py

Files changed (1): app.py +15 -18
app.py CHANGED
@@ -63,14 +63,12 @@ with block:
     metric_names.remove('method_name') # Remove method_name from the metric options

     # Leaderboard section with method and metric selectors
-    with gr.Row():
-        # Add method and metric selectors for leaderboard
-        leaderboard_method_selector = gr.CheckboxGroup(
-            choices=method_names, label="Select method_names for Leaderboard", value=method_names, interactive=True
-        )
-        leaderboard_metric_selector = gr.CheckboxGroup(
-            choices=metric_names, label="Select Metrics for Leaderboard", value=metric_names, interactive=True
-        )
+    leaderboard_method_selector = gr.CheckboxGroup(
+        choices=method_names, label="Select method_names for Leaderboard", value=method_names, interactive=True
+    )
+    leaderboard_metric_selector = gr.CheckboxGroup(
+        choices=metric_names, label="Select Metrics for Leaderboard", value=metric_names, interactive=True
+    )

     # Display the filtered leaderboard
     baseline_value = get_baseline_df(method_names, metric_names)
@@ -103,20 +101,19 @@ with block:
     # Dropdown for benchmark type
     benchmark_type_selector = gr.Dropdown(choices=list(benchmark_specific_metrics.keys()), label="Select Benchmark Type")

-    with gr.Row():
-        # Dynamic selectors
-        x_metric_selector = gr.Dropdown(choices=[], label="Select X-axis Metric", visible=False)
-        y_metric_selector = gr.Dropdown(choices=[], label="Select Y-axis Metric", visible=False)
-        aspect_type_selector = gr.Dropdown(choices=[], label="Select Aspect Type", visible=False)
-        dataset_type_selector = gr.Dropdown(choices=[], label="Select Dataset Type", visible=False)
-        dataset_selector = gr.Dropdown(choices=[], label="Select Dataset", visible=False)
-        single_metric_selector = gr.Dropdown(choices=[], label="Select Metric", visible=False)
+    # Dynamic selectors
+    x_metric_selector = gr.Dropdown(choices=[], label="Select X-axis Metric", visible=False)
+    y_metric_selector = gr.Dropdown(choices=[], label="Select Y-axis Metric", visible=False)
+    aspect_type_selector = gr.Dropdown(choices=[], label="Select Aspect Type", visible=False)
+    dataset_type_selector = gr.Dropdown(choices=[], label="Select Dataset Type", visible=False)
+    dataset_selector = gr.Dropdown(choices=[], label="Select Dataset", visible=False)
+    single_metric_selector = gr.Dropdown(choices=[], label="Select Metric", visible=False)

     method_selector = gr.CheckboxGroup(choices=method_names, label="Select methods to visualize", interactive=True, value=method_names)

     # Button to draw the plot for the selected benchmark
-    with gr.Row():
-        plot_button = gr.Button("Plot")
+
+    plot_button = gr.Button("Plot")

     plot_output = gr.Image(label="Plot")
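The change is layout-only: dropping the gr.Row() wrappers moves the leaderboard checkbox groups, the benchmark-specific dropdowns, and the Plot button out of horizontal rows and into gr.Blocks' default vertical flow, so each component now renders stacked at full width. A minimal sketch, assuming only stock Gradio (the components below are placeholders, not the ones from app.py), contrasting the two layouts:

# Hypothetical sketch, not part of this commit: contrast gr.Row() with the
# default vertical layout inside gr.Blocks.
import gradio as gr

with gr.Blocks() as demo:
    # With gr.Row(): the two selectors share one horizontal row.
    with gr.Row():
        gr.CheckboxGroup(choices=["a", "b"], label="In a row (left)")
        gr.CheckboxGroup(choices=["x", "y"], label="In a row (right)")

    # Without gr.Row(): each selector stacks vertically at full width,
    # which is the arrangement this commit switches to.
    gr.CheckboxGroup(choices=["a", "b"], label="Stacked (first)")
    gr.CheckboxGroup(choices=["x", "y"], label="Stacked (second)")

if __name__ == "__main__":
    demo.launch()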