import os
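# Install Whisper at runtime straight from its GitHub repository (an alternative to
# listing it as a pre-installed dependency)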
os.system("pip install git+https://github.com/openai/whisper.git")
import gradio
import whisper

model = whisper.load_model("base")
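# "base" balances speed and accuracy; Whisper also ships "tiny", "small", "medium", and "large" checkpoints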

def transcribe_audio(audio):
    # Load the audio and pad/trim it to fit 30 seconds
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)

    # Compute the log-Mel spectrogram and move it to the model's device
    mel = whisper.log_mel_spectrogram(audio).to(model.device)

    # Detect the spoken language
    _, probs = model.detect_language(mel)
    print(f"Detected language: {max(probs, key=probs.get)}")

    # Decode the audio (fp16=False forces FP32, which avoids warnings when running on CPU)
    options = whisper.DecodingOptions(fp16=False)
    result = whisper.decode(model, mel, options)

    return result.text

title = "Automatic Speech Recognition"
description = "Speech-to-text conversion using OpenAI Whisper"

# Input from the user's microphone; type="filepath" hands the recording to the function as a file path
# (Gradio 3.x API; Gradio 4+ renamed the argument to sources=["microphone"])
in_prompt = gradio.components.Audio(source="microphone", type="filepath")

# Output response
out_response = gradio.components.Textbox(label="Text")

# Gradio interface that builds the web UI; live=True re-runs transcription whenever the input changes
iface = gradio.Interface(fn=transcribe_audio,
                         inputs=in_prompt,
                         outputs=out_response,
                         title=title,
                         description=description,
                         live=True)

iface.launch(debug=True)
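# debug=True surfaces errors in the console; when run locally, Gradio serves the app at
# http://127.0.0.1:7860 by default, while hosted environments such as Hugging Face Spaces expose it automatically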