import os

os.system("pip install git+https://github.com/openai/whisper.git")

import gradio as gr
import whisper
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from transformers import pipeline

# Load the tokenizer and NLP model for sentiment classification
tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment-latest")
model_nlp = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment-latest")

# Load the Whisper model for audio/speech transcription
model = whisper.load_model("small")


def inference_audio(audio):
    # Load the recording and pad/trim it to the 30-second window Whisper expects
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)

    # Compute the log-Mel spectrogram and move it to the same device as the model
    mel = whisper.log_mel_spectrogram(audio).to(model.device)

    # Detect the spoken language, then decode the transcription
    _, probs = model.detect_language(mel)
    options = whisper.DecodingOptions(fp16=False)
    result = whisper.decode(model, mel, options)
    print(result.text)

    return result.text, gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)


def inference_text(audio):
    # Transcribe the audio, then run sentiment analysis on the transcript
    text, _, _, _ = inference_audio(audio)
    sentiment_task = pipeline("sentiment-analysis", model=model_nlp, tokenizer=tokenizer)
    result = sentiment_task(text)
    return result


block = gr.Blocks()

with block:
    with gr.Group():
        with gr.Box():
            with gr.Row().style(mobile_collapse=False, equal_height=True):
                audio = gr.Audio(
                    label="Input Audio",
                    show_label=False,
                    source="microphone",
                    type="filepath",
                )
                btn = gr.Button("Transcribe")
        text = gr.Textbox(show_label=False, elem_id="result-textarea")

    btn.click(inference_text, inputs=[audio], outputs=[text])

block.launch()