clement-pages committed
Commit e314182 · 1 Parent(s): 562db75

update component demo

pyannote_viewer/README.md CHANGED
@@ -1,20 +1,20 @@
 
-# `gradio_sourceviewer`
+# `pyannote_viewer`
 <img alt="Static Badge" src="https://img.shields.io/badge/version%20-%201.0.0%20-%20orange">
 
-Python library for easily interacting with trained machine learning models
+Gradio custom component to visualize pyannote's pipelines outputs
 
 ## Installation
 
 ```bash
-pip install gradio_sourceviewer
+pip install pyannote-viewer
 ```
 
 ## Usage
 
 ```python
 import gradio as gr
-from gradio_sourceviewer import SourceViewer
+from pyannote_viewer import PyannoteViewer
 from pyannote.audio import Pipeline
 import os
 
@@ -29,9 +29,9 @@ def apply_pipeline(audio: str) -> tuple:
 with gr.Blocks() as demo:
     audio = gr.Audio(type="filepath")
     btn = gr.Button("Apply separation pipeline")
-    source_viewer = SourceViewer(interactive=False)
+    pyannote_viewer = PyannoteViewer(interactive=False)
 
-    btn.click(fn=apply_pipeline, inputs=[audio], outputs=[source_viewer])
+    btn.click(fn=apply_pipeline, inputs=[audio], outputs=[pyannote_viewer])
 
 
 if __name__ == "__main__":
@@ -39,7 +39,7 @@ if __name__ == "__main__":
 
 ```
 
-## `SourceViewer`
+## `PyannoteViewer`
 
 ### Initialization
 
@@ -362,16 +362,16 @@ WaveformOptions | dict | None
 
 | name | description |
 |:-----|:------------|
-| `stream` | This listener is triggered when the user streams the SourceViewer. |
-| `change` | Triggered when the value of the SourceViewer changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. |
-| `clear` | This listener is triggered when the user clears the SourceViewer using the X button for the component. |
-| `play` | This listener is triggered when the user plays the media in the SourceViewer. |
-| `pause` | This listener is triggered when the media in the SourceViewer stops for any reason. |
-| `stop` | This listener is triggered when the user reaches the end of the media playing in the SourceViewer. |
-| `start_recording` | This listener is triggered when the user starts recording with the SourceViewer. |
-| `pause_recording` | This listener is triggered when the user pauses recording with the SourceViewer. |
-| `stop_recording` | This listener is triggered when the user stops recording with the SourceViewer. |
-| `upload` | This listener is triggered when the user uploads a file into the SourceViewer. |
+| `stream` | This listener is triggered when the user streams the PyannoteViewer. |
+| `change` | Triggered when the value of the PyannoteViewer changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. |
+| `clear` | This listener is triggered when the user clears the PyannoteViewer using the X button for the component. |
+| `play` | This listener is triggered when the user plays the media in the PyannoteViewer. |
+| `pause` | This listener is triggered when the media in the PyannoteViewer stops for any reason. |
+| `stop` | This listener is triggered when the user reaches the end of the media playing in the PyannoteViewer. |
+| `start_recording` | This listener is triggered when the user starts recording with the PyannoteViewer. |
+| `pause_recording` | This listener is triggered when the user pauses recording with the PyannoteViewer. |
+| `stop_recording` | This listener is triggered when the user stops recording with the PyannoteViewer. |
+| `upload` | This listener is triggered when the user uploads a file into the PyannoteViewer. |
 
 
 
@@ -391,7 +391,8 @@ The code snippet below is accurate in cases where the component is used as both
 def predict(
     value: str | tuple[int, numpy.ndarray] | None
 ) -> tuple[
-    pyannote.core.annotation.Annotation, numpy.ndarray
+    pyannote.core.annotation.Annotation,
+    numpy.ndarray | pathlib.Path | str,
 ]
 | None:
     return value
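
The widened return type above means a backend function can now hand the component a pyannote `Annotation` paired with either a raw waveform or an audio file path. A minimal sketch of a value matching that signature (the segment boundaries, speaker labels, and `audio.wav` path below are illustrative, not from the commit):

```python
from pathlib import Path

from pyannote.core import Annotation, Segment


def predict() -> tuple[Annotation, Path]:
    # Build a toy annotation with two hypothetical speaker turns.
    annotation = Annotation()
    annotation[Segment(0.0, 1.5)] = "SPEAKER_00"
    annotation[Segment(1.5, 3.0)] = "SPEAKER_01"
    # Pair it with an audio source; a numpy array or a str path would also
    # satisfy the updated `numpy.ndarray | pathlib.Path | str` union.
    return annotation, Path("audio.wav")
```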
pyannote_viewer/demo/app.py CHANGED
@@ -5,20 +5,10 @@ import os
 
 
 def apply_pipeline(audio: str) -> tuple:
-    # pipeline = Pipeline.from_pretrained(
-    #     "pyannote/speech-separation-ami-1.0", use_auth_token=os.environ["HF_TOKEN"]
-    # )
-
     pipeline = Pipeline.from_pretrained(
-        "pyannote/speaker-diarization-3.1", use_auth_token=os.environ["HF_TOKEN"]
+        "pyannote/speech-separation-ami-1.0", use_auth_token=os.environ["HF_TOKEN"]
     )
-
-
-    outputs = pipeline(audio)
-    if isinstance(outputs, tuple):
-        return outputs
-    else:
-        return (outputs, audio)
+    return pipeline(audio)
 
 
 with gr.Blocks() as demo:
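
For reference, a sketch of what `demo/app.py` looks like after this commit, reconstructed from the hunk above and the README usage snippet; the import block and the final `demo.launch()` call are outside the shown hunks and are assumed:

```python
import os

import gradio as gr
from pyannote.audio import Pipeline
from pyannote_viewer import PyannoteViewer


def apply_pipeline(audio: str) -> tuple:
    # Load the gated speech-separation pipeline using an HF access token,
    # as in the "+" lines of the diff.
    pipeline = Pipeline.from_pretrained(
        "pyannote/speech-separation-ami-1.0", use_auth_token=os.environ["HF_TOKEN"]
    )
    # The pipeline output tuple is passed straight to the PyannoteViewer component.
    return pipeline(audio)


with gr.Blocks() as demo:
    audio = gr.Audio(type="filepath")
    btn = gr.Button("Apply separation pipeline")
    pyannote_viewer = PyannoteViewer(interactive=False)

    btn.click(fn=apply_pipeline, inputs=[audio], outputs=[pyannote_viewer])


if __name__ == "__main__":
    demo.launch()
```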