Update app.py
app.py (CHANGED)
@@ -2,7 +2,10 @@ import gradio as gr
 from datasets import load_dataset

 # --- Configuration ---
-
+# The ONLY change is on this line: we're pointing back to the smaller test dataset.
+DATASET_NAME = "Cnam-LMSSC/vibravox-test"
+# ---------------------------------------------------------------------------------
+
 SUBSETS = ["speech_clean", "speech_noisy", "speechless_clean", "speechless_noisy"]
 SPLITS = ["train", "validation", "test"]
 TEXT_COLUMN = "raw_text"
@@ -81,10 +84,9 @@ def get_audio_row(dataset, index):

 # --- Build the Gradio Interface ---
 with gr.Blocks(css="footer {display: none !important}") as demo:
-    gr.Markdown("# Vibravox Multi-Sensor Explorer")
+    gr.Markdown("# Vibravox Multi-Sensor Explorer (Test Dataset)")

     # This state object holds the currently loaded dataset in memory
-    # It's invisible to the user but accessible by our functions
     loaded_dataset_state = gr.State(None)

     # --- INPUT CONTROLS ---
@@ -106,20 +108,15 @@ with gr.Blocks(css="footer {display: none !important}") as demo:
     audio5 = gr.Audio(label="Forehead Accelerometer")
     audio6 = gr.Audio(label="Temple Vibration Pickup")

-    # A list of all the output components for easier reference
     all_outputs = [loaded_dataset_state, slider, sentence_output, audio1, audio2, audio3, audio4, audio5, audio6, error_box]
     audio_outputs = [sentence_output, audio1, audio2, audio3, audio4, audio5, audio6]

     # --- WIRING THE EVENT HANDLERS ---
-
-    # 1. When the app first loads, run the main function with default values
     demo.load(
         fn=load_and_update_all,
         inputs=[subset_dropdown, split_dropdown],
         outputs=all_outputs
     )
-
-    # 2. When a dropdown value changes, re-run the main function
     subset_dropdown.change(
         fn=load_and_update_all,
         inputs=[subset_dropdown, split_dropdown],
@@ -130,8 +127,6 @@ with gr.Blocks(css="footer {display: none !important}") as demo:
         inputs=[subset_dropdown, split_dropdown],
         outputs=all_outputs
     )
-
-    # 3. When ONLY the slider changes, run the simpler function
     slider.change(
         fn=get_audio_row,
         inputs=[loaded_dataset_state, slider],
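For context on what the new DATASET_NAME points at: the app pulls one subset/split pair at a time with datasets.load_dataset, using the SUBSETS and SPLITS values from the configuration block above. Below is a minimal sketch of that call, assuming the Cnam-LMSSC/vibravox-test repo exposes those configurations and splits; the helper name load_subset is illustrative and is not part of app.py.

```python
from datasets import load_dataset

DATASET_NAME = "Cnam-LMSSC/vibravox-test"  # the value introduced by this commit


def load_subset(subset: str = "speech_clean", split: str = "test"):
    """Fetch a single subset/split pair of the test dataset.

    `subset` should be one of the SUBSETS configurations and `split` one of
    the SPLITS values defined in app.py's configuration block.
    """
    return load_dataset(DATASET_NAME, subset, split=split)


if __name__ == "__main__":
    ds = load_subset("speech_clean", "test")
    # The app reads the "raw_text" column (TEXT_COLUMN) from each row.
    print(len(ds), ds.column_names)
```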
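The wiring at the end of the diff relies on two callbacks that this commit does not modify: load_and_update_all(subset, split), which must return one value per component in all_outputs, and get_audio_row(dataset, index) (its signature appears in the second hunk header), which must return one value per component in audio_outputs. The sketch below illustrates that contract only; it is not the implementation that lives earlier in app.py, and the sensor column names are placeholders.

```python
import gradio as gr
from datasets import load_dataset

DATASET_NAME = "Cnam-LMSSC/vibravox-test"
TEXT_COLUMN = "raw_text"

# Placeholder column names for the six sensors, in the same order as the
# gr.Audio components in the UI; the real app.py defines the actual mapping.
AUDIO_COLUMNS = [
    "audio.headset_microphone",
    "audio.throat_microphone",
    "audio.soft_in_ear_microphone",
    "audio.rigid_in_ear_microphone",
    "audio.forehead_accelerometer",
    "audio.temple_vibration_pickup",
]


def get_audio_row(dataset, index):
    """Return the sentence plus one (sample_rate, samples) tuple per sensor."""
    row = dataset[int(index)]
    audios = [(row[col]["sampling_rate"], row[col]["array"]) for col in AUDIO_COLUMNS]
    return [row[TEXT_COLUMN], *audios]


def load_and_update_all(subset, split):
    """Load one subset/split, then fill every component in all_outputs."""
    dataset = load_dataset(DATASET_NAME, subset, split=split)
    slider_update = gr.update(minimum=0, maximum=len(dataset) - 1, value=0)
    # Order matches all_outputs: state, slider, sentence, six audios, error box.
    return [dataset, slider_update, *get_audio_row(dataset, 0), ""]
```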