File size: 1,820 Bytes
ccbbbf4
 
 
 
e4a6674
 
ccbbbf4
e4a6674
 
 
 
05856c6
e4a6674
 
ccbbbf4
 
e4a6674
 
 
ccbbbf4
 
 
 
 
 
 
 
 
 
 
 
 
e4a6674
 
 
 
 
 
 
 
 
8e06a33
ccbbbf4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e4a6674
ccbbbf4
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
import os
# NOTE(review): installing a dependency at import time via os.system is a
# Hugging Face Spaces workaround; prefer listing whisper in requirements.txt.
os.system("pip install git+https://github.com/openai/whisper.git")
import gradio as gr
import whisper
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from transformers import pipeline


#call tokenizer and NLP model for text classification
# CardiffNLP Twitter-RoBERTa sentiment model (labels: negative/neutral/positive).
tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment-latest")
model_nlp = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment-latest")


# call whisper model for audio/speech processing
# "small" trades accuracy for load time/memory; downloaded on first run.
model = whisper.load_model("small")



def inference_audio(audio):
    """Transcribe an audio file with Whisper.

    Parameters
    ----------
    audio : str
        Filesystem path to the recorded clip (Gradio Audio with
        ``type="filepath"`` passes a path string).

    Returns
    -------
    tuple
        ``(text, update, update, update)`` — the transcript plus three
        ``gr.update(visible=True)`` objects used by callers to un-hide
        downstream UI components.
    """
    # Load the waveform and pad/trim it to the 30-second window Whisper expects.
    waveform = whisper.load_audio(audio)
    waveform = whisper.pad_or_trim(waveform)

    # Log-Mel spectrogram, moved to the same device as the loaded model.
    mel = whisper.log_mel_spectrogram(waveform).to(model.device)

    # fp16=False keeps decoding safe on CPU-only hosts.
    # The original also called model.detect_language(mel) and discarded the
    # result; whisper.decode performs its own language detection when no
    # language is set in DecodingOptions, so that call was pure wasted work.
    options = whisper.DecodingOptions(fp16=False)
    result = whisper.decode(model, mel, options)

    return result.text, gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)

def inference_text(audio):
    """Transcribe *audio* with Whisper, then classify the transcript's sentiment.

    Parameters
    ----------
    audio : str
        Filesystem path to the recorded clip.

    Returns
    -------
    list[dict]
        transformers sentiment-analysis pipeline output, e.g.
        ``[{"label": "positive", "score": 0.98}]``.
    """
    text, _, _, _ = inference_audio(audio)

    # Build the sentiment pipeline once and cache it on the function object.
    # The original reconstructed the pipeline on every button click, paying
    # the full wrapper-setup cost per invocation for no benefit.
    sentiment_task = getattr(inference_text, "_sentiment_task", None)
    if sentiment_task is None:
        sentiment_task = pipeline("sentiment-analysis", model=model_nlp, tokenizer=tokenizer)
        inference_text._sentiment_task = sentiment_task

    return sentiment_task(text)



# Build the Gradio UI: a microphone input, a Transcribe button, and a textbox
# that receives the sentiment-analysis result.
# NOTE(review): gr.Box and Row().style(...) are legacy Gradio 3.x APIs;
# confirm the pinned gradio version before upgrading.
block = gr.Blocks()
with block:
    with gr.Group():
        with gr.Box():
            with gr.Row().style(mobile_collapse=False, equal_height=True):
                # Microphone recording; type="filepath" hands the handler a
                # path string (what inference_audio expects).
                audio = gr.Audio(
                    label="Input Audio",
                    show_label=False,
                    source="microphone",
                    type="filepath"
                )

                btn = gr.Button("Transcribe")
        # Output textbox: shows the pipeline's sentiment result.
        text = gr.Textbox(show_label=False, elem_id="result-textarea")
        


        
        btn.click(inference_text, inputs=[audio], outputs=[text])


block.launch()