Commit 034895f by binqiangliu
1 Parent(s): 13ffcf6

Upload folder using huggingface_hub

Files changed (4)
  1. .env +1 -0
  2. README.md +2 -8
  3. app.py +70 -0
  4. requirements.txt +4 -0
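
The commit message indicates the folder was pushed with the `huggingface_hub` client. Below is a minimal sketch of how such an upload is typically done; the repo id `binqiangliu/VoiceChatbot` and the local folder path are assumptions for illustration, not values recorded in this commit.

# Sketch: push a local folder to a Space with huggingface_hub.
# repo_id and folder_path are assumed values, not taken from the commit itself.
from huggingface_hub import HfApi

api = HfApi()  # resolves a write token from the cached login or the HF_TOKEN env var
api.upload_folder(
    folder_path=".",                     # local folder holding .env, README.md, app.py, requirements.txt
    repo_id="binqiangliu/VoiceChatbot",  # assumed Space repo id
    repo_type="space",
    commit_message="Upload folder using huggingface_hub",
)
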
.env ADDED
@@ -0,0 +1 @@
+ OPENAI_API_KEY="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"
README.md CHANGED
@@ -1,12 +1,6 @@
  ---
  title: VoiceChatbot
- emoji: 🐢
- colorFrom: yellow
- colorTo: indigo
- sdk: gradio
- sdk_version: 3.35.2
  app_file: app.py
- pinned: false
+ sdk: gradio
+ sdk_version: 3.34.0
  ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,70 @@
+ # pip install openai gradio pyttsx3 python-dotenv
+
+ import gradio as gr
+ import openai
+ import pyttsx3
+
+ import os
+ from dotenv import load_dotenv
+
+ load_dotenv()
+ openai.api_key = os.getenv("OPENAI_API_KEY")
+
+ # Global variable holding the chat history, initialised with the system role
+ conversation = [
+     {"role": "system", "content": "You are an intelligent professor."}
+ ]
+
+ # transcribe() takes the recorded audio input, sends it to Whisper, feeds the
+ # transcript to the ChatGPT API, and returns the spoken reply as an audio file
+ def transcribe(audio):
+     print(audio)
+
+     # Whisper API: transcribe the recorded audio file
+     with open(audio, "rb") as audio_file:
+         transcript = openai.Audio.transcribe("whisper-1", audio_file)
+     print(transcript)
+
+     # ChatGPT API: append the user's input to the conversation
+     conversation.append({"role": "user", "content": transcript["text"]})
+
+     response = openai.ChatCompletion.create(
+         model="gpt-3.5-turbo",
+         messages=conversation
+     )
+     print(response)
+
+     # system_message is the response from the ChatGPT API
+     system_message = response["choices"][0]["message"]["content"]
+
+     # append the ChatGPT response (assistant role) back to the conversation
+     conversation.append({"role": "assistant", "content": system_message})
+
+     # Text to speech: render the reply to an audio file
+     engine = pyttsx3.init()
+     engine.setProperty("rate", 150)
+     engine.setProperty("voice", "english-us")
+     engine.save_to_file(system_message, "response.mp3")
+     engine.runAndWait()
+
+     return "response.mp3"
+
+ # Gradio interface: microphone in, synthesized speech out; share=True creates a public link
+ bot = gr.Interface(fn=transcribe, inputs=gr.Audio(source="microphone", type="filepath"), outputs="audio")
+ bot.launch(share=True)
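
For reference, the Whisper-to-ChatCompletion round trip in app.py can be exercised without launching the Gradio interface. This is a rough sketch under the pre-1.0 `openai` package that the code above targets; `sample.wav` is a hypothetical recording used only for illustration.

# Sketch: run the app's Whisper -> ChatCompletion flow on a saved recording.
# "sample.wav" is a hypothetical file; assumes the pre-1.0 openai package.
import os
import openai
from dotenv import load_dotenv

load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")

with open("sample.wav", "rb") as audio_file:
    transcript = openai.Audio.transcribe("whisper-1", audio_file)

reply = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "system", "content": "You are an intelligent professor."},
        {"role": "user", "content": transcript["text"]},
    ],
)
print(reply["choices"][0]["message"]["content"])
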
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ openai
+ gradio
+ pyttsx3
+ python-dotenv