# Interview-GPT — app.py
# (Hugging Face Space by GreenRaptor; commit b1a4f5a "Update app.py")
import time
import random
import whisper
import gradio as gr
from transformers import pipeline

# Load the Whisper "base" checkpoint once at import time; reused by transcribe().
transcription_model = whisper.load_model("base")
def transcribe(audio):
    """Transcribe a recorded audio clip to text with the Whisper model.

    Args:
        audio: Filesystem path to the recording (the Gradio Audio component
            is configured with type="filepath"). May be None when the user
            submits without recording anything.

    Returns:
        The decoded transcription text, or "" when no audio was provided.
    """
    # Gradio passes None if the microphone widget has no recording;
    # whisper.load_audio would crash on it, so return an empty string.
    if audio is None:
        return ""

    # Load the audio and pad/trim it to Whisper's 30-second window.
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)

    # Make a log-Mel spectrogram and move it to the model's device.
    mel = whisper.log_mel_spectrogram(audio).to(transcription_model.device)

    # Detect the spoken language (printed for debugging only; the result
    # is not fed back into decoding).
    _, probs = transcription_model.detect_language(mel)
    print(f"Detected language: {max(probs, key=probs.get)}")

    # Decode; fp16=False avoids half-precision warnings/failures on CPU.
    options = whisper.DecodingOptions(fp16=False)
    result = whisper.decode(transcription_model, mel, options)
    return result.text
# Alternative ASR backend (currently unused): a wav2vec2 pipeline from transformers.
# p = pipeline("automatic-speech-recognition", "facebook/wav2vec2-base-960h")
# def transcribe(audio):
#     text = p(audio)["text"]
#     return text
def user(user_message, history):
    """Record the user's message as a new (unanswered) chat turn.

    Returns a pair: an empty string (clears the textbox) and the chat
    history extended with ``[user_message, None]`` — the ``None`` slot is
    filled in later by ``bot``.
    """
    updated_history = history + [[user_message, None]]
    return "", updated_history
def bot(history):
    """Stream a canned reply into the last chat turn, one character at a time.

    Mutates ``history[-1][1]`` in place, growing the reply by one character
    per step with a short delay, and yields the (same) history list after
    each character so the UI animates the typing.
    """
    replies = [
        "How are you?",
        "I wanted to tell you that...",
        "hehehe",
        "huihuihuihui",
        "I'm very hungry",
    ]
    reply = random.choice(replies)
    history[-1][1] = ""
    for end in range(1, len(reply) + 1):
        history[-1][1] = reply[:end]
        time.sleep(0.05)
        yield history
css = """
.gradio-container {
font-family: 'IBM Plex Sans', sans-serif;
}
.gr-button {
color: white;
border-color: black;
background: black;
}
.container {
max-width: 730px;
margin: auto;
padding-top: 1.5rem;
}
#chatbot {
min-height: 30rem;
margin-bottom: 15px;
margin-left: auto;
margin-right: auto;
}
#prompt-container {
margin-bottom: 15px;
margin-left: auto;
margin-right: auto;
}
"""
# ---- UI layout and event wiring -------------------------------------------
# A chat window plus two input paths: a textbox (type + enter / Send button)
# and a microphone recorder whose transcription is dropped into the textbox.
# NOTE(review): the .style(...) chaining and gr.Box are legacy Gradio 3.x
# API — confirm the pinned gradio version before upgrading.
with gr.Blocks(css=css) as demo:
    # Static page header.
    gr.HTML(
        """
        <div style="text-align: center; margin: 0 auto;">
        <div
        style="
        display: inline-flex;
        align-items: center;
        gap: 0.8rem;
        font-size: 1.75rem;
        "
        >
        <h1 style="font-weight: 900; margin-bottom: 7px;margin-top:5px">
        Interview with AI (Really?)
        </h1>
        </div>
        </div>
        """
    )
    with gr.Box():
        # Chat transcript; starts empty.
        chatbot = gr.Chatbot([], show_label=False, elem_id="chatbot").style(height="auto")
        with gr.Row(elem_id="prompt-container").style(mobile_collapse=False, equal_height=True):
            with gr.Column(scale=0.8):
                # Text prompt; submitting via enter triggers txt.submit below.
                txt = gr.Textbox(
                    show_label=False,
                    placeholder="Type and press enter, or record your response...",
                ).style(container=False)
            with gr.Column(scale=0.2, min_width=0):
                send = gr.Button("Send")
        with gr.Row(elem_id="audio-container").style(equal_height=True):
            with gr.Column(scale=0.8):
                # Microphone recorder; yields a filepath consumed by transcribe().
                recorder = gr.Audio(source="microphone", type="filepath", show_label=False).style(container=False)
            with gr.Column(scale=0.2, min_width=0):
                speech = gr.Button("Submit speech").style(height="auto")
    # Speech button: transcribe the recording into the textbox (user still
    # has to send it as a chat message afterwards).
    speech.click(transcribe, inputs=recorder, outputs=txt)
    # Enter in the textbox: record the user turn, then stream the bot reply.
    txt.submit(user, [txt, chatbot], [txt, chatbot], queue=False).then(
        bot, chatbot, chatbot
    )
    # Send button: same pipeline as pressing enter.
    send.click(user, [txt, chatbot], [txt, chatbot], queue=False).then(
        bot, chatbot, chatbot
    )

# Queueing is required because bot() is a (streaming) generator.
demo.queue()
demo.launch()