"""Gradio web app for German automatic speech recognition.

Records audio from the browser microphone and transcribes it with a
wav2vec2 model served through the Hugging Face ``pipeline`` API.
"""
from transformers import pipeline

import gradio as gr

# NOTE(review): the model checkpoint (wav2vec2-base-german) differs from the
# tokenizer / feature-extractor checkpoint (wav2vec2-xls-r-1b-5gram-german).
# Confirm this pairing is intentional — a mismatched vocabulary would produce
# garbled transcriptions.
p = pipeline(
    "automatic-speech-recognition",
    model="aware-ai/wav2vec2-base-german",
    feature_extractor="aware-ai/wav2vec2-xls-r-1b-5gram-german",
    tokenizer="aware-ai/wav2vec2-xls-r-1b-5gram-german",
)


def transcribe(audio):
    """Transcribe a recorded audio clip to German text.

    Parameters
    ----------
    audio : str
        Filesystem path to the audio file (the input component below is
        configured with ``type="filepath"``, so Gradio passes a path).

    Returns
    -------
    str
        The recognized text.
    """
    # Chunked inference keeps memory bounded on long recordings:
    # 10 s windows with (4 s, 2 s) left/right stride overlap so chunk
    # boundaries don't cut words.
    return p(audio, chunk_length_s=10, stride_length_s=(4, 2))["text"]


# NOTE(review): gr.inputs.Audio is the legacy (pre-Gradio-3.x) component API;
# kept as-is for compatibility with the pinned Gradio version. Modern Gradio
# would use gr.Audio(sources=["microphone"], type="filepath").
gr.Interface(
    fn=transcribe,
    inputs=[gr.inputs.Audio(source="microphone", type="filepath")],
    outputs=["textbox"],
).launch(server_name="0.0.0.0")  # bind all interfaces (e.g. for Docker/remote access)