# https://github.com/openai/whisper/discussions/categories/show-and-tell
import os

import gradio as gr
import torch
import whisper
from dotenv import load_dotenv
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

# Alternative transformers ASR pipeline (not used by the Gradio callback below).
p = pipeline("automatic-speech-recognition")

# Load environment variables (e.g. OPENAI_API_KEY) from the .env file next to this script.
basedir = os.path.abspath(os.path.dirname(__file__))
load_dotenv(os.path.join(basedir, '.env'))
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

# Whisper model for speech-to-text.
whisper_model = whisper.load_model("base")

# NLLB-200 translation model, loaded from https://hf.co/models
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M")

device = 0 if torch.cuda.is_available() else -1

# FLORES-200 language codes, see
# https://github.com/facebookresearch/flores/tree/main/flores200#languages-in-flores-200
# Yue Chinese - yue_Hant, Chinese (Simplified) - zho_Hans, Chinese (Traditional) - zho_Hant
LANGS = ["ace_Arab", "eng_Latn", "fra_Latn", "spa_Latn", "yue_Hant", "zho_Hans", "zho_Hant"]
LANGS_source = ["eng_Latn"]


def translate(text, src_lang, tgt_lang):
    """Translate the text from source lang to target lang."""
    translation_pipeline = pipeline("translation",
                                    model=model,
                                    tokenizer=tokenizer,
                                    src_lang=src_lang,
                                    tgt_lang=tgt_lang,
                                    max_length=400,
                                    device=device)
    result = translation_pipeline(text)
    return result[0]['translation_text']


def transcribe(audio):
    """Transcribe the recorded audio with Whisper, then translate it to Simplified Chinese."""
    text_from_whisper = whisper_model.transcribe(audio)["text"]
    return translate(text_from_whisper, "eng_Latn", "zho_Hans")


gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(source="microphone", type="filepath"),
    outputs="text").launch()
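
# Usage sketch, not part of the demo above: exercising the transcription and
# translation helpers without the Gradio UI. Assumptions: the models above have
# loaded and a local recording named sample.wav exists (the filename is
# hypothetical). Run these lines before .launch() or in a separate session,
# since launch() blocks while the server is running:
#
#   english = whisper_model.transcribe("sample.wav")["text"]
#   chinese = translate(english, "eng_Latn", "zho_Hans")
#   print(english, "->", chinese)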