Commit e208789
Parent(s): e314182

update space app
app.py CHANGED

@@ -2,13 +2,18 @@ import gradio as gr
 from pyannote_viewer import PyannoteViewer
 from pyannote.audio import Pipeline
 import os
+from huggingface_hub import HfApi

-
-def apply_pipeline(audio: str) -> tuple:
+def apply_pipeline(audio: str, pipeline_name: str) -> tuple:
     pipeline = Pipeline.from_pretrained(
-
+        pipeline_name, use_auth_token=os.environ["HF_TOKEN"]
     )
-
+
+    outputs = pipeline(audio)
+    if isinstance(outputs, tuple):
+        return outputs
+    else:
+        return (outputs, audio)


 with gr.Blocks() as demo:
@@ -22,20 +27,29 @@ with gr.Blocks() as demo:
         )
         # space title and description
         with gr.Column(scale=10):
-            gr.Markdown('#
+            gr.Markdown('# pyannote pretrained pipelines')

             gr.Markdown(
-                "
-                "\
-                "\n - Click on the apply pipeline button"
-                "\n - After pipeline processed the audio, you can then listen for each speaker separetely. Annotations on waveforms correspond to the speaker diarization produced by the pipeline, with one color per speaker."
+                "You like [pyannote.audio](https://github.com/pyannote/pyannote-audio)? Consider using [pyannoteAI](https://pyannote.ai/) for better and faster options.\n"
+                "\nGo [here](https://huggingface.co/pyannote) for more detail on each pipeline available in this space."
             )
+
+    gr.Markdown()
+

+    gr.Markdown("#### Select a pretrained pipeline:")
+    available_pipelines = [p.modelId for p in HfApi().list_models(filter="pyannote-audio-pipeline")]
+    available_pipelines = list(filter(lambda p: p.startswith("pyannote/"), available_pipelines))
+    dropdown = gr.Dropdown(choices=available_pipelines, value=available_pipelines[0], interactive=True, label="Pretrained pipeline")
+
+    gr.Markdown("#### Upload or record an audio:")
     audio = gr.Audio(type="filepath")
-
+
+    btn = gr.Button("Apply pipeline")
+
     source_viewer = PyannoteViewer(interactive=False)

-    btn.click(fn=apply_pipeline, inputs=[audio], outputs=[source_viewer])
+    btn.click(fn=apply_pipeline, inputs=[audio, dropdown], outputs=[source_viewer])


 if __name__ == "__main__":
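
For quick sanity-checking outside the Space, here is a minimal standalone sketch (not part of the commit) that exercises the same logic the diff introduces: it runs the HfApi().list_models query that now populates the dropdown, then mirrors the new apply_pipeline on a local file. The "sample.wav" filename is a placeholder, and the script assumes HF_TOKEN is set and that access to the selected gated pipeline has already been granted on the Hub.

# Standalone sketch mirroring the updated app.py logic (assumptions: HF_TOKEN
# is exported and the chosen gated pipeline has been accepted on the Hub).
import os

from huggingface_hub import HfApi
from pyannote.audio import Pipeline

# Same query the new dropdown uses: pipelines tagged "pyannote-audio-pipeline",
# restricted to the official "pyannote/" namespace.
available_pipelines = [p.modelId for p in HfApi().list_models(filter="pyannote-audio-pipeline")]
available_pipelines = [p for p in available_pipelines if p.startswith("pyannote/")]
print(available_pipelines)

# Mirror apply_pipeline(): load the selected pipeline, run it on a local file
# ("sample.wav" is a placeholder), and normalise the result to a tuple, as the
# new apply_pipeline does, so a viewer always receives (annotation, audio).
pipeline = Pipeline.from_pretrained(
    available_pipelines[0], use_auth_token=os.environ["HF_TOKEN"]
)
outputs = pipeline("sample.wav")
if not isinstance(outputs, tuple):
    outputs = (outputs, "sample.wav")
print(outputs)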