sad
app.py CHANGED
@@ -80,8 +80,8 @@ def transcribe(path, task):
        segments[i]["speaker"] = '**SPEAKER ' + str(labels[i] + 1) + "**"
    for (i, segment) in enumerate(segments):
        if i == 0 or segments[i - 1]["speaker"] != segment["speaker"]:
-            output_text += "
-        output_text += segment["text"][1:] + '
+            output_text += " "+segment["speaker"] + ' : '
+        output_text += segment["text"][1:] + ' <br> '
    return output_text
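For context, here is a minimal standalone sketch of what the updated formatting loop produces. The segments and labels values below are made-up illustration data, not output from app.py; only the two loops and the two changed "+=" lines mirror the code in the hunk above.

# Sketch of the updated formatting logic with dummy data standing in for the
# WhisperX/diarization output used in app.py (illustrative assumption).
segments = [
    {"text": " Hello there."},
    {"text": " Hi, how are you?"},
    {"text": " Fine, thanks."},
]
labels = [0, 1, 1]  # one cluster id per segment

output_text = ""
for i in range(len(segments)):
    # Tag each segment with a bold Markdown speaker label derived from its cluster id.
    segments[i]["speaker"] = '**SPEAKER ' + str(labels[i] + 1) + "**"
for (i, segment) in enumerate(segments):
    if i == 0 or segments[i - 1]["speaker"] != segment["speaker"]:
        # Speaker changed: start a new labelled turn.
        output_text += " "+segment["speaker"] + ' : '
    # Drop the leading space of the raw text and end each segment with <br>
    # so the line break survives Markdown rendering.
    output_text += segment["text"][1:] + ' <br> '

print(output_text)
# -> " **SPEAKER 1** : Hello there. <br>  **SPEAKER 2** : Hi, how are you? <br> Fine, thanks. <br> "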
@@ -136,7 +136,7 @@ file_transcribe = gr.Interface(
        gr.Audio(sources="upload", type="filepath", label="Audio file"),
        gr.Radio(["transcribe", "translate"], label="Task", value="transcribe"),
    ],
-    outputs=gr.Markdown(label="Sortie Markdown",height=
+    outputs=gr.Markdown(label="Sortie Markdown",height=500),
    title="VerbaLens Demo 1 : Prototype",
    description="Transcribe uploaded audio files using WhisperX.",
    allow_flagging="never",
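And a rough sketch of how the changed output component slots into the Interface. The transcribe stub and the launch() call are placeholders rather than code from app.py; the component arguments mirror the diff and assume a Gradio version that accepts height on gr.Markdown, as the commit itself does.

import gradio as gr

def transcribe(path, task):
    # Stub standing in for the WhisperX pipeline defined earlier in app.py.
    return " **SPEAKER 1** : Hello there. <br> "

file_transcribe = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Audio(sources="upload", type="filepath", label="Audio file"),
        gr.Radio(["transcribe", "translate"], label="Task", value="transcribe"),
    ],
    # The commit renders the transcript in a fixed-height, scrollable Markdown panel.
    outputs=gr.Markdown(label="Sortie Markdown", height=500),
    title="VerbaLens Demo 1 : Prototype",
    description="Transcribe uploaded audio files using WhisperX.",
    allow_flagging="never",
)

if __name__ == "__main__":
    file_transcribe.launch()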