codexxx committed
Commit 90f7335 · verified · 1 Parent(s): fec9b19

Update README.md


import gradio as gr
from transformers import pipeline

# Load the Whisper model
whisper = pipeline("automatic-speech-recognition", model="openai/whisper-base")

# Define the function for speech-to-text
def transcribe_audio(audio):
    if audio is None:
        return ""
    transcription = whisper(audio)["text"]
    return transcription

# Create a Gradio interface with microphone and file upload options
iface = gr.Interface(
    fn=transcribe_audio,
    inputs=[
        gr.Audio(sources=["microphone", "upload"], type="filepath", label="Record or Upload Audio")
    ],
    outputs=gr.Textbox(label="Transcription", placeholder="Transcribed text will appear here..."),
    title="Speech-to-Text Transcription",
    description="Record audio using your microphone or upload an audio file to transcribe it.",
    live=True,  # Automatically transcribe when audio is provided
)

# Launch the app
if __name__ == "__main__":
    iface.launch()
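
A quick way to sanity-check the same pipeline without launching the UI is to call it directly on an audio file path — a minimal sketch, assuming a local recording named "example.wav" (hypothetical file):

from transformers import pipeline

# Load the same Whisper checkpoint used by the app
whisper = pipeline("automatic-speech-recognition", model="openai/whisper-base")

# The pipeline accepts a path to an audio file and returns a dict with a "text" key
print(whisper("example.wav")["text"])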

Files changed (1)
  1. README.md +3 -9
README.md CHANGED
@@ -1,17 +1,11 @@
  ---
- title: OpenAI's Whisper Real-time Demo
+ title: vepp-stt
  emoji: 🎙️
  colorFrom: indigo
  colorTo: red
  sdk: gradio
- sdk_version: 3.3.1
+ sdk_version: 5.12.0
  app_file: app.py
  pinned: false
  license: mit
- ---
-
- OpenAI's Whisper Real-time Demo
-
- A simple demo of OpenAI's [**Whisper**](https://github.com/openai/whisper) speech recognition model.
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ ---