# Requires the legacy OpenAI SDK: pip install openai==0.28
import os

import openai

# Read the API key from the environment rather than hard-coding it in the source.
openai.api_key = os.getenv("OPENAI_API_KEY")

def translate_audio(audio_file):
    # Translate the recorded speech to English text with Whisper.
    with open(audio_file, "rb") as f:
        result = openai.Audio.translate(
            model="whisper-1",
            file=f,
        )
    return result["text"]

def text_response(t):
    # Ask gpt-3.5-turbo for a short, therapist-style reply to the transcript.
    messages = [{
        "role": "system",
        "content": (
            "You are a therapist; you are supposed to answer questions related to "
            "stress and mental health. For the rest of the questions, respond that "
            "you don't know. Respond to all input in 100 words or less."
        ),
    }]
    # Reference material that could be appended to the prompt as extra context:
    # http://www.smchealth.org/sites/main/files/file-attachments/bhrsdocmanual.pdf
    # https://www.cms.gov/medicare-medicaid-coordination/fraud-prevention/medicaid-integrity-education/downloads/docmatters-behavioralhealth-factsheet.pdf
    messages.append({"role": "user", "content": t})
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,
    )
    return response["choices"][0]["message"]["content"]

# Text-to-speech: pip install gtts
# (pyttsx3 and espeak were also installed originally, but only gTTS is used below.)
from gtts import gTTS

def audio_response(t):
    # Convert the chat reply to speech with gTTS and save it as an MP3 file.
    tts = gTTS(text=t, lang="en", slow=False)
    mp3_file_path = "output.mp3"
    tts.save(mp3_file_path)
    return mp3_file_path

def transcribe(a):
    # Full pipeline: speech -> transcript -> chat reply -> spoken reply.
    t1 = translate_audio(a)
    t2 = text_response(t1)
    t3 = audio_response(t2)
    return (t1, t2, t3)

# UI: pip install gradio
import gradio as gr

output_1 = gr.Textbox(label="Speech to Text")
output_2 = gr.Textbox(label="ChatGPT Output")
output_3 = gr.Audio(label="ChatGPT output to audio")

gr.Interface(
    title="AI Voice Assistant",
    fn=transcribe,
    inputs=[
        gr.Audio(sources=["microphone"], type="filepath"),
    ],
    outputs=[output_1, output_2, output_3],
).launch(share=True)
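
# Launching with share=True serves the app locally and also creates a temporary
# public Gradio link. To sanity-check the pipeline without the UI, the chain can
# be called directly, e.g. transcribe("sample.wav") with a hypothetical WAV file.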