Sharmitha committed on
Commit 7e0b7da · 1 Parent(s): 764161b

Create app.py

Files changed (1)
  1. app.py +58 -0
app.py ADDED
@@ -0,0 +1,58 @@
import openai          # OpenAI Python SDK (pre-1.0 interface: openai.Audio, openai.ChatCompletion)
from gtts import gTTS  # Google Text-to-Speech, used to voice the reply
import gradio as gr    # web UI

# OpenAI API key (hardcoded)
openai.api_key = "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"

def translate_audio(audio_file):
    # Translate the recorded speech to English text with Whisper.
    with open(audio_file, "rb") as f:
        result = openai.Audio.translate(
            model="whisper-1",
            file=f
        )
    return result["text"]

def text_response(t):
    # Send the transcript to gpt-3.5-turbo with a therapist persona.
    messages = [{"role": "system", "content": "You are a therapist. Answer questions related to stress and mental health; for any other questions, respond that you don't know. Respond to all input in 100 words or less."}]
    messages.append({"role": "user", "content": t})
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages
    )
    return response["choices"][0]["message"]["content"]

def audio_response(t):
    # Convert the chatbot's reply to speech with gTTS and save it as an MP3.
    mp3_file_path = "output.mp3"
    tts = gTTS(text=t, lang='en', slow=False)
    tts.save(mp3_file_path)
    return mp3_file_path

def transcribe(a):
    # Full pipeline: recorded audio -> transcript -> ChatGPT reply -> spoken reply.
    t1 = translate_audio(a)   # speech to English text
    t2 = text_response(t1)    # ChatGPT reply
    t3 = audio_response(t2)   # reply as an MP3 file path
    return (t1, t2, t3)

# Gradio UI: record from the microphone, show the transcript, the reply, and the spoken reply.
output_1 = gr.Textbox(label="Speech to Text")
output_2 = gr.Textbox(label="ChatGPT Output")
output_3 = gr.Audio(label="ChatGPT output to audio")

gr.Interface(
    title='AI Voice Assistant',
    fn=transcribe,
    inputs=[
        gr.Audio(sources="microphone", type="filepath"),
    ],
    outputs=[
        output_1, output_2, output_3
    ]
).launch(share=True)
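
For a quick check of the pipeline without the web UI, a sketch along these lines could be run in place of the gr.Interface block. The file name sample_question.wav is a hypothetical local recording, and the snippet assumes the same API key and the openai (pre-1.0), gtts, and gradio packages used above.

# Local test sketch (not part of the committed app.py).
# "sample_question.wav" is a hypothetical recording in the working directory.
transcript, reply, reply_mp3 = transcribe("sample_question.wav")
print("Transcript:", transcript)
print("ChatGPT reply:", reply)
print("Spoken reply saved to:", reply_mp3)

The three printed values correspond to the three Gradio outputs: the Whisper transcript, the ChatGPT reply, and the path of the generated MP3.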