|
import gradio as gr |
|
|
|
import os |
|
import requests |
|
# Backend chat endpoint read from the environment; call_api() sends
# GET requests to it with the transcript as the ``q`` query parameter.
api = os.environ.get('API_ENDPOINT')




# Load the hosted Whisper large-v2 Space as a callable Gradio interface.
# NOTE(review): this performs a network request at import time; the app
# will not start if the Space is unreachable.
whisper = gr.Interface.load(name="spaces/sanchit-gandhi/whisper-large-v2")
|
|
|
def call_api(message):
    """Forward *message* to the chat backend and return its answer.

    Args:
        message: Transcribed user text, sent as the ``q`` query parameter.

    Returns:
        str: The payload of the response (everything after its first two
        lines), or an apology message when the request fails or the body
        is shorter than expected.
    """
    # Let requests build the query string: the original f-string never
    # URL-encoded the message, so spaces, '&', '#' and non-ASCII text
    # produced a malformed URL. A timeout keeps a dead backend from
    # hanging the UI forever (requests has no default timeout).
    response = requests.get(api, params={'q': message}, timeout=60)
    if response.status_code == 200:
        # The backend prefixes two metadata lines; keep only what follows.
        # Guard the split so a short body no longer raises IndexError.
        parts = response.text.split('\n', 2)
        if len(parts) == 3:
            return parts[2]
    return """Sorry, I'm quite busy right now, but please try again later :)"""
|
|
|
def chat_hf(audio, task):
    """Transcribe/translate *audio* with Whisper, then query the chat API.

    Args:
        audio: Filepath of the recorded audio (from the gr.Audio component).
        task: Either "transcribe" or "translate".

    Returns:
        tuple[str, str]: (whisper_text, gpt_response), shown in the two
        output textboxes.
    """
    # Pre-initialize so the print/return below never hit an unbound name:
    # with the original bare except, a failure inside translate() left
    # whisper_text undefined and the function crashed with NameError.
    whisper_text = ""
    try:
        whisper_text = translate(audio, task)
        if whisper_text == "ERROR: You have to either use the microphone or upload an audio file":
            gpt_response = "MISSING AUDIO: Record your voice by clicking the microphone button, do not forget to stop recording before sending your message ;)"
        else:
            gpt_response = call_api(whisper_text)
    # Catch Exception instead of a bare except so KeyboardInterrupt and
    # SystemExit still propagate; everything else degrades gracefully.
    except Exception:
        gpt_response = """Sorry, I'm quite busy right now, but please try again later :)"""

    # Server-side debug trace of the exchange (string kept byte-identical).
    print(f"""

{whisper_text}

ββββ

{gpt_response}

""")

    return whisper_text, gpt_response
|
|
|
|
|
def translate(audio, task):
    """Run the hosted Whisper Space on *audio*.

    Any *task* value other than "transcribe" is treated as "translate",
    exactly as the original if/else branch did. Returns Whisper's text
    result (or its error string when no audio was provided).
    """
    mode = "transcribe" if task == "transcribe" else "translate"
    return whisper(audio, None, mode, fn_index=0)
|
|
|
# HTML banner rendered at the top of the page via gr.HTML(title).
title = """

<div style="text-align: center; max-width: 500px; margin: 0 auto;">

<div

style="

display: inline-flex;

align-items: center;

gap: 0.8rem;

font-size: 1.75rem;

margin-bottom: 10px;

"

>

<h1 style="font-weight: 600; margin-bottom: 7px;">

Whisper-to-chatGPT

</h1>

</div>

<p style="margin-bottom: 10px;font-size: 94%;font-weight: 100;line-height: 1.5em;">

Chat with GPT with your voice in your native language !

<!--<br />If you need a custom session key, see

<a href="https://youtu.be/TdNSj_qgdFk" target="_blank">Bhavesh Baht video for reference</a>

</p>-->

<!--<p style="font-size: 94%">

You can skip the queue by duplicating this space:

<span style="display: flex;align-items: center;justify-content: center;height: 30px;">

<a href="https://huggingface.co/nightfury/whisperAI?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a>

</span>

</p>-->

</div>

"""



# Footer / usage notes rendered below the output boxes via gr.HTML(article).
article = """

<p style="font-size: 0.8em;line-height: 1.2em;">Note: this demo is not able to sustain a conversation from earlier responses.

For more detailed results and dialogue, you should use the official ChatGPT interface.

<br />β

<br/>Also, be aware that audio records from iOS devices will not be decoded as expected by Gradio. For the best experience, record your voice from a computer instead of your smartphone ;)</p>

<div class="footer">

<p>Whisper &

<a href="https://chat.openai.com/chat" target="_blank">chatGPT</a>

by <a href="https://openai.com/" style="text-decoration: underline;" target="_blank">OpenAI</a> -

Gradio Demo by π€ <a href="https://huggingface.co/nightfury/" target="_blank">Nightfury</a>

</p>

</div>

"""



# Custom stylesheet passed to gr.Blocks(css=...): narrows the two column
# containers, styles the record button and the light/dark footer.
css = '''

#col-container, #col-container-2 {max-width: 510px; margin-left: auto; margin-right: auto;}

a {text-decoration-line: underline; font-weight: 600;}

div#record_btn > .mt-6 {

margin-top: 0!important;

}

div#record_btn > .mt-6 button {

width: 100%;

height: 40px;

}

.footer {

margin-bottom: 45px;

margin-top: 10px;

text-align: center;

border-bottom: 1px solid #e5e5e5;

}

.footer>p {

font-size: .8rem;

display: inline-block;

padding: 0 10px;

transform: translateY(10px);

background: white;

}

.dark .footer {

border-color: #303030;

}

.dark .footer>p {

background: #0b0f19;

}

'''
|
|
|
|
|
|
|
# ---- UI wiring -------------------------------------------------------------
# One page: banner, microphone recorder + task selector, a send button,
# then the two result boxes; the button drives chat_hf().
with gr.Blocks(css=css) as demo:

    with gr.Column(elem_id="col-container"):

        gr.HTML(title)

        # Recorder and transcribe/translate selector, side by side.
        with gr.Row():
            record_input = gr.Audio(source="microphone",type="filepath", show_label=False,elem_id="record_btn")
            task = gr.Radio(choices=["transcribe","translate"], value="transcribe", show_label=False)

        with gr.Row():

            send_btn = gr.Button("Send my request !")


    with gr.Column(elem_id="col-container-2"):
        # Outputs: Whisper transcript first, then the chat API's reply.
        audio_translation = gr.Textbox(type="text",label="Whisper transcript")
        gpt_response = gr.Textbox(type="text",label="chatGPT response")

    gr.HTML(article)

    # Wire the button: chat_hf(audio_path, task) -> (transcript, reply).
    send_btn.click(chat_hf, inputs=[record_input, task], outputs=[audio_translation, gpt_response])

# Queue requests (up to 32 waiting, 20 concurrent) and start the server.
demo.queue(max_size=32, concurrency_count=20).launch(debug=True)
|
|