Sharmitha committed on
Commit
288dcf3
·
1 Parent(s): 4587507

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +36 -34
app.py CHANGED
@@ -1,56 +1,58 @@
1
- pip install openai==0.28
2
  import openai
3
- openai.api_key="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"
 
 
 
 
 
 
4
  def translate_audio(audio_file):
5
- file1=open(audio_file,"rb")
6
- text = openai.Audio.translate(
7
- model="whisper-1",
8
- file= file1
9
- )
10
- return text["text"]
 
11
  def text_response(t):
12
- messages = [{"role": "system", "content": "You are a therapist,you are supposed to answer questions related to stress, mental health.For the rest of the questions, respond you don't know. Respond to all input in 100 words or less."}]
13
- #document1="http://www.smchealth.org/sites/main/files/file-attachments/bhrsdocmanual.pdf"
14
- #messages.append({"role":"user","content":document1})
15
- #doc2="https://www.cms.gov/medicare-medicaid-coordination/fraud-prevention/medicaid-integrity-education/downloads/docmatters-behavioralhealth-factsheet.pdf"
16
- #messages.append({"role":"user","content":doc2})
17
- messages.append({"role": "user", "content": t})
18
- response = openai.ChatCompletion.create(
19
  model="gpt-3.5-turbo",
20
  messages=messages
21
  )
22
- message = response["choices"][0]["message"]["content"]
23
- return message
24
- !pip install gtts
25
  from gtts import gTTS
26
- pip install gtts --upgrade
27
- pip install pyttsx3
28
- import pyttsx3
29
- !apt-get install espeak
30
  def audio_response(t):
31
  tts = gTTS(text=t, lang='en', slow=False)
32
  tts.save("output.mp3")
33
  mp3_file_path = "output.mp3"
34
  return mp3_file_path
35
- pip install IPython
36
  from IPython.display import Audio
 
37
  def transcribe(a):
38
- t1 = translate_audio(a)
39
- t2= text_response(t1)
40
- t3= audio_response(t2)
41
- return (t1,t2,t3)
42
- pip install gradio
 
 
43
  output_1 = gr.Textbox(label="Speech to Text")
44
  output_2 = gr.Textbox(label="ChatGPT Output")
45
  output_3 = gr.Audio(label="ChatGPT output to audio")
46
 
47
  gr.Interface(
48
- title = 'AI Voice Assistant',
49
  fn=transcribe,
50
  inputs=[
51
- gr.Audio(sources="microphone", type="filepath"),
52
- ],
53
-
54
  outputs=[
55
- output_1, output_2, output_3
56
- ]).launch(share=True)
 
 
 
1
import os

import openai
from gtts import gTTS
import pyttsx3
from IPython.display import Audio
import gradio as gr

# SECURITY: never hard-code an API key in source — the key previously committed
# here is public in the repo history and must be treated as compromised/revoked.
# Supply the key via the OPENAI_API_KEY environment variable instead.
openai.api_key = os.environ.get("OPENAI_API_KEY")
8
+
9
def translate_audio(audio_file):
    """Translate speech in *audio_file* to English text via OpenAI Whisper.

    Parameters
    ----------
    audio_file : str
        Path to an audio file readable in binary mode.

    Returns
    -------
    str
        The translated transcript (the "text" field of the API response).
    """
    # Use a context manager so the file handle is closed even if the
    # API call raises (the original left the handle open).
    with open(audio_file, "rb") as fh:
        result = openai.Audio.translate(
            model="whisper-1",
            file=fh
        )
    return result["text"]
16
+
17
def text_response(t):
    """Ask the gpt-3.5-turbo chat model to answer *t* as a therapist.

    A fixed system prompt restricts the assistant to stress / mental-health
    topics and caps replies at 100 words; the reply text is returned as a str.
    """
    conversation = [
        {"role": "system", "content": "You are a therapist, you are supposed to answer questions related to stress, mental health. For the rest of the questions, respond you don't know. Respond to all input in 100 words or less."},
        {"role": "user", "content": t},
    ]
    reply = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=conversation)
    # Extract the assistant's text from the first choice of the response.
    return reply["choices"][0]["message"]["content"]
26
+
27
# (redundant duplicate `from gtts import gTTS` removed — gTTS is already
# imported at the top of the file)

def audio_response(t):
    """Synthesize text *t* to English speech with gTTS and save it as MP3.

    Parameters
    ----------
    t : str
        The text to speak.

    Returns
    -------
    str
        Path of the generated MP3 file ("output.mp3").
    """
    tts = gTTS(text=t, lang='en', slow=False)
    # NOTE(review): fixed output filename — concurrent requests would
    # overwrite each other's audio; consider a per-request temp file.
    tts.save("output.mp3")
    mp3_file_path = "output.mp3"
    return mp3_file_path
34
+
35
# (redundant duplicate `from IPython.display import Audio` removed — it is
# already imported at the top of the file and is not used by this function)

def transcribe(a):
    """Run the full voice-assistant pipeline on one recording.

    Parameters
    ----------
    a : str
        Path to the recorded input audio (as provided by the Gradio
        microphone component with type="filepath").

    Returns
    -------
    tuple[str, str, str]
        (transcribed/translated text, chat model reply, path to reply MP3).
    """
    t1 = translate_audio(a)
    t2 = text_response(t1)
    t3 = audio_response(t2)
    return (t1, t2, t3)
42
+
43
# (redundant duplicate `import gradio as gr` removed — gradio is already
# imported at the top of the file)

# Output components: transcript, chat reply, and synthesized reply audio.
output_1 = gr.Textbox(label="Speech to Text")
output_2 = gr.Textbox(label="ChatGPT Output")
output_3 = gr.Audio(label="ChatGPT output to audio")

# Build and launch the web UI; share=True exposes a public Gradio link.
gr.Interface(
    title='AI Voice Assistant',
    fn=transcribe,
    inputs=[
        # NOTE(review): `sources="microphone"` is the gradio 4.x parameter
        # name (3.x used `source=`) — confirm the pinned gradio version.
        gr.Audio(sources="microphone", type="filepath"),
    ],
    outputs=[
        output_1, output_2, output_3
    ],
).launch(share=True)