# Earlier one-liner (kept for reference):
# gr.Interface.load("models/rohitp1/kkkh_whisper_small_distillation_att_loss_libri360_epochs_100_batch_4_concat_dataset").launch()

import os
import time

import gradio as gr
from transformers import pipeline

# Read the Hugging Face access token from an environment variable (e.g. a
# Space secret); never hard-code the token itself in the source.
# "HF_TOKEN" is an assumed variable name; use whatever your Space defines.
auth_token = os.getenv("HF_TOKEN")

# Earlier local-inference approach (kept for reference): run the checkpoint
# with a transformers ASR pipeline and echo the transcript through the state.
# p = pipeline(
#     "automatic-speech-recognition",
#     model="rohitp1/kkkh_whisper_small_distillation_att_loss_libri360_epochs_100_batch_4_concat_dataset",
# )
#
# def transcribe(audio, state=""):
#     time.sleep(3)
#     text = p(audio)["text"]
#     state = text + " "
#     return state, state
#
# gr.Interface(
#     fn=transcribe,
#     inputs=[
#         gr.inputs.Audio(source="microphone", type="filepath"),
#         "state",
#     ],
#     outputs=[
#         "textbox",
#         "state",
#     ],
#     live=False,
# ).launch()

# Current approach: load the hosted model through the Hugging Face Inference
# API and let Gradio build the interface around it.
demo = gr.load(
    "huggingface/rohitp1/kkkh_whisper_small_distillation_att_loss_libri360_epochs_100_batch_4_concat_dataset",
    title="Speech-to-text",
    inputs="mic",
    description="Let me try to guess what you're saying!",
    api_key=auth_token,  # newer Gradio releases call this parameter hf_token
)

demo.launch()
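
# --- Optional: local-inference variant (a minimal sketch, not wired in) ---
# Instead of proxying through the hosted Inference API with gr.load(), the
# same checkpoint can be served locally with transformers' ASR pipeline.
# The helper name build_local_demo and the Gradio 4.x audio input
# (gr.Audio(sources=...)) are illustrative assumptions, not part of this app.
def build_local_demo() -> gr.Interface:
    # Loads the Whisper checkpoint locally; requires enough memory/disk for the weights.
    asr = pipeline(
        "automatic-speech-recognition",
        model="rohitp1/kkkh_whisper_small_distillation_att_loss_libri360_epochs_100_batch_4_concat_dataset",
    )

    def transcribe(audio_path: str) -> str:
        # The ASR pipeline accepts a filepath and returns a dict with a "text" key.
        return asr(audio_path)["text"]

    return gr.Interface(
        fn=transcribe,
        inputs=gr.Audio(sources=["microphone"], type="filepath"),
        outputs="textbox",
        title="Speech-to-text (local pipeline)",
    )

# Usage: swap demo.launch() above for build_local_demo().launch().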