File size: 1,753 Bytes
f55798e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
import os
import gradio as gr


# Top-level Blocks container; the actual UI is declared inside the
# `with demo:` context block further down in this file.
demo = gr.Blocks()

# Sample audio file(s) surfaced through the clickable Dataset component.
# NOTE(review): assumes "cantina.wav" exists next to this script — confirm.
EXAMPLES = ["cantina.wav"]

def speech_to_text(x):
    """Mock transcription step.

    Ignores the audio input *x* and returns a fixed diarized transcript
    as (text, speaker-label) pairs, in the format HighlightedText expects.
    """
    transcript = [
        ("yada yada", "speaker 0"),
        ("blah blah blah", "speaker 1"),
    ]
    return transcript

def summarize(y, c):
    """Mock summarizer.

    Ignores the diarized text *y*; emits a blockquote whose length scales
    with the number of speakers selected in the checkbox group *c*.
    """
    body = "stuff" * len(c)
    return "> " + body

def sentiment(x, y):
    """Mock sentiment analysis.

    Ignores the diarized text *x*; maps the radio-button index *y*
    (0 or 1) to a fixed highlighted-text result. Any other index
    yields None, matching the original implicit fall-through.
    """
    by_index = {
        0: [("yada yada", "happy")],
        1: [("blah blah blah", "sad")],
    }
    return by_index.get(y)

# UI layout and event wiring. Two columns: audio input + examples on the
# left, diarization / summary / sentiment outputs on the right.
with demo:
    with gr.Row():
        with gr.Column():
            # Upload widget; type='filepath' hands the handler a path string.
            audio = gr.Audio(label="Audio file", type='filepath')
            with gr.Row():
                btn = gr.Button("Transcribe")
            with gr.Row():
                # type="index" means the click handler receives the row index.
                examples = gr.components.Dataset(components=[audio], samples=[EXAMPLES], type="index")
        with gr.Column():
                gr.Markdown("**Diarized Output:**")
                diarized = gr.HighlightedText(label="Diarized Output")
                gr.Markdown("Choose speaker(s) for summarization:")
                check = gr.CheckboxGroup(["Speaker 0", "Speaker 1"], show_label=False)
                # NOTE(review): this renders "**Summary:**" inside an editable
                # Textbox — the sibling headings use gr.Markdown; likely meant
                # to be gr.Markdown("**Summary:**"). Confirm before changing.
                gr.Textbox("**Summary:**")
                summary = gr.Markdown()
                gr.Markdown("Choose speaker for sentiment analysis:")                
                # type="index" so sentiment() receives 0 or 1, not the label.
                radio = gr.Radio(["Speaker 0", "Speaker 1"], show_label=False, type="index")
                analyzed = gr.HighlightedText(label="Customer Sentiment")

    # Event wiring: button -> transcription; checkbox/radio changes re-run
    # the summary and sentiment stubs against the diarized output.
    btn.click(speech_to_text, audio, diarized)
    check.change(summarize, [diarized, check], summary)
    radio.change(sentiment, [diarized, radio], analyzed)

    def load_example(example_id):
        # example_id is the Dataset row index (Dataset was built with
        # type="index"); preprocess the stored path for the Audio component.
        # NOTE(review): preprocess_example is a legacy Gradio API — verify it
        # exists in the pinned gradio version.
        processed_examples = audio.preprocess_example(EXAMPLES[example_id])
        return processed_examples

    # _preprocess=False: pass the raw index straight to load_example.
    examples.click(load_example, inputs=[examples], outputs=[audio], _preprocess=False)
        
    # NOTE(review): launch() is called inside the `with demo:` context;
    # conventionally it sits after the block — works, but confirm intent.
    demo.launch()