Update app.py
app.py CHANGED
@@ -2,6 +2,7 @@ import os

Old version (lines 2-7, unchanged context around the new import):

 import shutil
 import zipfile
 import torch
 import pandas as pd
 from pathlib import Path
 import gradio as gr
@@ -27,173 +28,115 @@ TEMP_DIR = "./temp_audio"

Old version (lines 27-199). Removed lines are prefixed with "-"; a bare "-" stands for a removed line whose text is not shown.

 os.makedirs(TEMP_DIR, exist_ok=True)

 def init_metadata_state():
-
-    return pd.DataFrame(columns=["Texte", "Début (s)", "Fin (s)", "ID"])

 # -------------------------------------------------
 # 2. Transcription de l'audio avec Whisper
 # -------------------------------------------------
 def transcribe_audio(audio_path):
-    """Effectue la transcription de l'audio et génère les timestamps."""
     if not audio_path:
         print("[LOG] Aucun fichier audio fourni.")
-        return "Aucun fichier audio fourni", None, ""
-
     print(f"[LOG] Début de la transcription de {audio_path}...")
     result = pipe(audio_path, return_timestamps="word")
     words = result.get("chunks", [])
-
     if not words:
-        print("[LOG ERROR] Aucun timestamp détecté.")
-        return "Erreur : Aucun timestamp détecté.", None, ""
-
     raw_transcription = " ".join([w["text"] for w in words])
     word_timestamps = [(w["text"], w["timestamp"][0]) for w in words]
-
-    transcription_with_timestamps = " ".join(
-        [f"{w[0]}[{w[1]:.2f}]" for w in word_timestamps]
-    )
-
     print(f"[LOG] Transcription brute : {raw_transcription}")
-
-
-    return raw_transcription, word_timestamps, transcription_with_timestamps
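For reference, the word-level output that pipe(audio_path, return_timestamps="word") produces has roughly the following shape; this is a sketch with made-up values, the real text and timings come from the Whisper pipeline defined elsewhere in app.py:

```python
# Approximate structure of `result` for a short clip (illustrative values only).
result = {
    "text": " Bonjour tout le monde",
    "chunks": [
        {"text": " Bonjour", "timestamp": (0.00, 0.48)},
        {"text": " tout", "timestamp": (0.48, 0.71)},
        {"text": " le", "timestamp": (0.71, 0.83)},
        {"text": " monde", "timestamp": (0.83, 1.20)},
    ],
}

# Both versions of transcribe_audio() consume it the same way:
words = result.get("chunks", [])
word_timestamps = [(w["text"], w["timestamp"][0]) for w in words]  # (word, start in seconds)
```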
The old version continues:

 # -------------------------------------------------
-# 3.
 # -------------------------------------------------
-def
-    """Ajoute dynamiquement des lignes au tableau en suivant le format structuré."""
-    if new_rows is None:
-        new_rows = []
-
-    formatted_rows = []
-
-    # Gestion flexible des entrées (dictionnaires, listes, tuples, DataFrame)
-    if isinstance(new_rows, list):
-        for row in new_rows:
-            if isinstance(row, dict):
-                texte = row.get("Texte", "")
-                debut = row.get("Début (s)", None)
-                fin = row.get("Fin (s)", None)
-            elif isinstance(row, (list, tuple)) and len(row) >= 3:
-                texte, debut, fin = row[:3]
-            else:
-                continue
-            formatted_rows.append([texte, debut, fin, ""])
-
-    elif isinstance(new_rows, pd.DataFrame):
-        for _, row in new_rows.iterrows():
-            formatted_rows.append([row.get("Texte", ""), row.get("Début (s)", None), row.get("Fin (s)", None), ""])
-
-    # Conversion en DataFrame et fusion avec l'état actuel
-    if formatted_rows:
-        new_data = pd.DataFrame(formatted_rows, columns=["Texte", "Début (s)", "Fin (s)", "ID"])
-        metadata_state = pd.concat([metadata_state, new_data], ignore_index=True)
-
-    print(f"[LOG] {len(new_rows)} nouvelles lignes ajoutées.")
-
-    return metadata_state
-
-def save_segments(metadata_table):
-    """Sauvegarde les modifications apportées par l'utilisateur."""
-    metadata_state = pd.DataFrame(metadata_table, columns=["Texte", "Début (s)", "Fin (s)", "ID"])
-
     print("[LOG] Enregistrement des segments définis par l'utilisateur...")
-
     try:
-
-
-
-
     except ValueError as e:
         print(f"[LOG ERROR] Erreur de conversion des timestamps : {e}")
-
-
-
-# -------------------------------------------------
-# 4. Validation et découpage des extraits audio
-# -------------------------------------------------
-def validate_segments(audio_path, metadata_state):
-    """Découpe les extraits audio en fonction des segments définis."""
-    print("[LOG] Début de la validation des segments...")
-
-    if not audio_path or metadata_state.empty:
-        print("[LOG ERROR] Aucun segment valide trouvé !")
-        return metadata_state
-
-    if os.path.exists(TEMP_DIR):
-        shutil.rmtree(TEMP_DIR)
-    os.makedirs(TEMP_DIR, exist_ok=True)
-
-    original_audio = AudioSegment.from_file(audio_path)
-
-    for index, row in metadata_state.iterrows():
-        if row["Début (s)"] is None or row["Fin (s)"] is None:
-            print(f"[LOG ERROR] Timestamp manquant pour : {row['Texte']}")
-            continue
-
-        start_ms = int(float(row["Début (s)"]) * 1000)
-        end_ms = int(float(row["Fin (s)"]) * 1000)
-
-        if start_ms < 0 or end_ms <= start_ms:
-            print(f"[LOG ERROR] Problème de découpage : {row['Texte']} | {row['Début (s)']}s - {row['Fin (s)']}s")
-            continue
-
-        segment_filename = f"{Path(audio_path).stem}_{row['ID']}.wav"
-        segment_path = os.path.join(TEMP_DIR, segment_filename)
-
-        extract = original_audio[start_ms:end_ms]
-        extract.export(segment_path, format="wav")
-
-        metadata_state.at[index, "audio_file"] = segment_filename
-        print(f"[LOG] Extrait généré : {segment_filename}")
-
-    return metadata_state
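Both the removed validate_segments() above and the new generate_zip() further down cut the audio with pydub, where an AudioSegment is sliced in milliseconds. A minimal standalone sketch of that operation (the file names are hypothetical; pydub needs ffmpeg for most input formats):

```python
from pydub import AudioSegment

# Cut 1.20 s -> 3.45 s out of a source file, mirroring the "Début (s)" / "Fin (s)" columns.
audio = AudioSegment.from_file("exemple.wav")       # hypothetical input file
start_ms, end_ms = int(1.20 * 1000), int(3.45 * 1000)
extract = audio[start_ms:end_ms]                    # pydub slicing is millisecond-based
extract.export("exemple_seg_01.wav", format="wav")  # hypothetical output name
```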
End of the old version:

 # -------------------------------------------------
-#
 # -------------------------------------------------
-def generate_zip(metadata_state):
-
-
         print("[LOG ERROR] Aucun segment valide trouvé pour la génération du ZIP.")
         return None
-
     zip_path = os.path.join(TEMP_DIR, "dataset.zip")
     if os.path.exists(zip_path):
         os.remove(zip_path)
-
     metadata_csv_path = os.path.join(TEMP_DIR, "metadata.csv")
     metadata_state.to_csv(metadata_csv_path, sep="|", index=False)
-
     with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zf:
         zf.write(metadata_csv_path, "metadata.csv")
-
-
-
-
-
     print("[LOG] Fichier ZIP généré avec succès.")
     return zip_path

 # -------------------------------------------------
-#
 # -------------------------------------------------
 with gr.Blocks() as demo:
     gr.Markdown("# Application de Découpe Audio")
-
     metadata_state = gr.State(init_metadata_state())
-
     audio_input = gr.Audio(type="filepath", label="Fichier audio")
-
-
-
-    save_button = gr.Button("Enregistrer")
-    validate_button = gr.Button("Valider")
     generate_button = gr.Button("Générer ZIP")

-
-    save_button.click(save_segments, inputs=table, outputs=metadata_state)
-
-demo.queue().launch()
New version. Added lines are prefixed with "+".

Lines 2-8:

 import shutil
 import zipfile
 import torch
+import numpy as np
 import pandas as pd
 from pathlib import Path
 import gradio as gr

Lines 28-142, starting with the simplified state initializer and the reworked transcribe_audio():

 os.makedirs(TEMP_DIR, exist_ok=True)

 def init_metadata_state():
+    return []

 # -------------------------------------------------
 # 2. Transcription de l'audio avec Whisper
 # -------------------------------------------------
 def transcribe_audio(audio_path):
     if not audio_path:
         print("[LOG] Aucun fichier audio fourni.")
+        return "Aucun fichier audio fourni", None, [], ""
+
     print(f"[LOG] Début de la transcription de {audio_path}...")
     result = pipe(audio_path, return_timestamps="word")
     words = result.get("chunks", [])
+
     if not words:
+        print("[LOG ERROR] Erreur : Aucun timestamp détecté.")
+        return "Erreur : Aucun timestamp détecté.", None, [], ""
+
     raw_transcription = " ".join([w["text"] for w in words])
     word_timestamps = [(w["text"], w["timestamp"][0]) for w in words]
+    transcription_with_timestamps = " ".join([f"{w[0]}[{w[1]:.2f}]" for w in word_timestamps])
+
     print(f"[LOG] Transcription brute : {raw_transcription}")
+    return raw_transcription, word_timestamps, transcription_with_timestamps, audio_path
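The pipe object used here is created outside the hunks shown in this diff. A typical construction for a transformers ASR pipeline that supports return_timestamps="word" would look like the sketch below; the model id and device handling are assumptions, not the app's actual values:

```python
from transformers import pipeline
import torch

# Hypothetical setup; app.py's real model id and options are not part of this diff.
device = 0 if torch.cuda.is_available() else -1
pipe = pipeline(
    "automatic-speech-recognition",
    model="openai/whisper-small",  # placeholder model id
    device=device,
)

result = pipe("exemple.wav", return_timestamps="word")  # same call shape as in transcribe_audio()
```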
The new version continues with section 3, where save_segments() now validates the edited table directly:

 # -------------------------------------------------
+# 3. Enregistrement des segments définis par l'utilisateur
 # -------------------------------------------------
+def save_segments(table_data):
     print("[LOG] Enregistrement des segments définis par l'utilisateur...")
+    formatted_data = []
+
+    for i, row in table_data.iterrows():
+        text, start_time, end_time = row["Texte"], row["Début (s)"], row["Fin (s)"]
+        segment_id = f"seg_{i+1:02d}"
+
         try:
+            start_time = str(start_time).replace(",", ".")
+            end_time = str(end_time).replace(",", ".")
+
+            if not start_time.replace(".", "").isdigit() or not end_time.replace(".", "").isdigit():
+                raise ValueError("Valeurs de timestamps invalides")
+
+            start_time = float(start_time)
+            end_time = float(end_time)
+
+            if start_time < 0 or end_time <= start_time:
+                raise ValueError("Valeurs incohérentes")
+
+            formatted_data.append([text, start_time, end_time, segment_id])
+            print(f"[LOG] Segment ajouté : {text} | Début: {start_time:.2f}s, Fin: {end_time:.2f}s, ID: {segment_id}")
+
         except ValueError as e:
             print(f"[LOG ERROR] Erreur de conversion des timestamps : {e}")
+            return pd.DataFrame(), "Erreur : Vérifiez que les valeurs sont bien des nombres valides."
+
+    return pd.DataFrame(formatted_data, columns=["Texte", "Début (s)", "Fin (s)", "ID"]), ""
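The timestamp handling above normalises a decimal comma to a dot and then requires digits-only input before calling float(). A small illustrative helper (not part of app.py) showing what that check accepts and rejects:

```python
def normalize_timestamp(value):
    """Mirrors the check in save_segments(): accept "1,5" or "2.75", reject anything else."""
    s = str(value).replace(",", ".")
    if not s.replace(".", "").isdigit():
        raise ValueError("Valeurs de timestamps invalides")
    return float(s)

print(normalize_timestamp("1,5"))   # 1.5 (decimal comma is normalised)
print(normalize_timestamp("2.75"))  # 2.75
# normalize_timestamp("-3") and normalize_timestamp("1.2e3") raise ValueError:
# the digits-only test also rejects a leading "-" and exponent notation.
```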
Section 4 now cuts the audio itself and writes each extract into the archive next to metadata.csv:

 # -------------------------------------------------
+# 4. Génération du fichier ZIP
 # -------------------------------------------------
+def generate_zip(metadata_state, audio_path):
+    if isinstance(metadata_state, tuple):
+        metadata_state = metadata_state[0]  # Extraire le DataFrame si c'est un tuple
+
+    if metadata_state is None or metadata_state.empty:
         print("[LOG ERROR] Aucun segment valide trouvé pour la génération du ZIP.")
         return None
+
     zip_path = os.path.join(TEMP_DIR, "dataset.zip")
     if os.path.exists(zip_path):
         os.remove(zip_path)
+
     metadata_csv_path = os.path.join(TEMP_DIR, "metadata.csv")
     metadata_state.to_csv(metadata_csv_path, sep="|", index=False)
+
     with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zf:
         zf.write(metadata_csv_path, "metadata.csv")
+        original_audio = AudioSegment.from_file(audio_path)
+
+        for _, row in metadata_state.iterrows():
+            start_ms, end_ms = int(row["Début (s)"] * 1000), int(row["Fin (s)"] * 1000)
+            segment_audio = original_audio[start_ms:end_ms]
+            segment_filename = f"{Path(audio_path).stem}_{row['ID']}.wav"
+            segment_path = os.path.join(TEMP_DIR, segment_filename)
+            segment_audio.export(segment_path, format="wav")
+            zf.write(segment_path, segment_filename)
+
     print("[LOG] Fichier ZIP généré avec succès.")
     return zip_path
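With the pipe-separated metadata written by to_csv(sep="|", index=False), the generated dataset.zip holds metadata.csv plus one <audio-stem>_<ID>.wav per table row. A sketch of what the CSV contents look like, with illustrative rows only:

```python
import pandas as pd

# Rows shaped like save_segments() output; the real values come from the user's table.
df = pd.DataFrame(
    [["Bonjour tout le monde", 0.0, 1.2, "seg_01"],
     ["Merci beaucoup", 1.5, 2.4, "seg_02"]],
    columns=["Texte", "Début (s)", "Fin (s)", "ID"],
)
print(df.to_csv(sep="|", index=False))
# Texte|Début (s)|Fin (s)|ID
# Bonjour tout le monde|0.0|1.2|seg_01
# Merci beaucoup|1.5|2.4|seg_02
```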
Finally, section 5 rebuilds the interface and wires the three callbacks:

 # -------------------------------------------------
+# 5. Interface utilisateur Gradio
 # -------------------------------------------------
 with gr.Blocks() as demo:
     gr.Markdown("# Application de Découpe Audio")
     metadata_state = gr.State(init_metadata_state())
+
     audio_input = gr.Audio(type="filepath", label="Fichier audio")
+    raw_transcription = gr.Textbox(label="Transcription", interactive=False)
+    transcription_timestamps = gr.Textbox(label="Transcription avec Timestamps", interactive=False)
+    table = gr.Dataframe(headers=["Texte", "Début (s)", "Fin (s)"], datatype=["str", "str", "str"], row_count=(1, "dynamic"))
+    save_button = gr.Button("Enregistrer les segments")
     generate_button = gr.Button("Générer ZIP")
+    zip_file = gr.File(label="Télécharger le ZIP")
+    word_timestamps = gr.State()
+
+    audio_input.change(transcribe_audio, inputs=audio_input, outputs=[raw_transcription, word_timestamps, transcription_timestamps, audio_input])
+    save_button.click(save_segments, inputs=table, outputs=[metadata_state])
+    generate_button.click(generate_zip, inputs=[metadata_state, audio_input], outputs=zip_file)

+demo.queue().launch()
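In Gradio Blocks, whatever an event handler returns is written back to the components and gr.State holders listed in outputs, which is how metadata_state and word_timestamps travel between the callbacks above. A minimal sketch of that mechanism, independent of this app:

```python
import gradio as gr

def add_item(item, items):
    items = items + [item]            # returned values update `shown` and `state` below
    return "\n".join(items), items

with gr.Blocks() as demo:
    state = gr.State([])              # server-side value carried between events
    box = gr.Textbox(label="Nouvel élément")
    shown = gr.Textbox(label="Liste", interactive=False)
    btn = gr.Button("Ajouter")
    btn.click(add_item, inputs=[box, state], outputs=[shown, state])

# demo.launch()  # not needed for the illustration
```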