Upload folder using huggingface_hub
chat.py CHANGED
```diff
@@ -4,6 +4,11 @@ from openai import OpenAI
 import gradio as gr
 import codecs
 import base64
+import json
+
+def login(username, password):
+    users = json.loads(os.environ.get('users'))
+    return users[username] == password
 
 print("Start")
 client = OpenAI(api_key=api_key)
@@ -15,7 +20,7 @@ for a in assistants:
 instruction = codecs.open("instruction.txt", "r", "utf-8").read()
 #instruction = "You are helpful assistant. Keep your responses clear and concise."
 model = "gpt-4o"
-model = "gpt-3.5-turbo"
+#model = "gpt-3.5-turbo"
 assistant = client.beta.assistants.create(name="Patient Sim", instructions=instruction, model=model)
 toggle_js = open("toggle_speech.js").read()
 
@@ -25,6 +30,7 @@ def start_thread():
 def user(text, audio, history, thread):
     if audio:
         text = transcribe(audio)
+    print(f"Message: {text}")
     message = client.beta.threads.messages.create(thread_id=thread.id, role="user", content=text)
     return "", history + [[text, None]]
 
@@ -39,6 +45,7 @@ def bot(history, thread):
     except: pass
 
 def transcribe(file):
+    print(f"Transcribe: {file}")
     file = open(file, "rb")
     response = client.audio.transcriptions.create(
         file=file,
@@ -52,7 +59,8 @@ def speak(history):
     text = history[-1][1]
     speech = client.audio.speech.create(model="tts-1", voice="alloy", input=text)
     audio = base64.b64encode(speech.read()).decode("utf-8")
-    [removed line not shown in the source view]
+    src = f"data:audio/mpeg;base64,{audio}"
+    audio_element = f'<audio src="{src}" controls autoplay></audio>'
     return audio_element
 
 def vote(data: gr.LikeData):
@@ -83,17 +91,17 @@ with gr.Blocks(title="Paitient Sim", css=css) as demo:
     msg = gr.Textbox(label="Say something.", elem_id="textbox")
     mic = gr.Microphone(type="filepath", format="mp3", editable=False, waveform_options={"show_controls": False}, visible=False)
     thread = gr.State(start_thread)
-    audio_html = gr.HTML()
-    [three removed lines not shown in the source view]
+    audio_html = gr.HTML(visible=False)
+    msg.submit(user, [msg, mic, chatbot, thread], [msg, chatbot]).then(
+        bot, [chatbot, thread], chatbot
+    ).then(
         speak, chatbot, audio_html
     )
-    mic.stop_recording(user, [msg, mic, chatbot, thread], [msg, chatbot]
+    mic.stop_recording(user, [msg, mic, chatbot, thread], [msg, chatbot]).then(
         lambda:None, None, mic).then(
         bot, [chatbot, thread], chatbot).then(
-        speak, chatbot, audio_html
+        speak, chatbot, audio_html
+    )
 
-demo.queue()
-demo.launch()
-
+#demo.queue()
+demo.launch(auth=login)
```
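The new `login` callback is wired into Gradio's login hook via `demo.launch(auth=login)`: Gradio shows a login page and calls the function with the submitted username and password. Note that the hunk uses `os.environ` but only adds `import json`; unless `os` is imported elsewhere in chat.py, the first login attempt would raise a NameError. A minimal self-contained sketch of the same pattern, with the missing import and with `dict.get` so unknown usernames are rejected instead of raising KeyError (the `users` env var name comes from the diff; the hardening is an addition):

```python
import json
import os

import gradio as gr

def login(username, password):
    # Credentials live in the `users` env var as a JSON object,
    # e.g. users='{"alice": "secret"}'.
    try:
        users = json.loads(os.environ["users"])
    except (KeyError, json.JSONDecodeError):
        return False  # env var missing or malformed: reject everyone
    return users.get(username) == password

with gr.Blocks() as demo:
    gr.Markdown("Logged in.")

demo.launch(auth=login)  # Gradio calls login(username, password) -> bool
```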
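The `transcribe` hunk is truncated mid-call after `file=file,`. For orientation, a plausible completion of this speech-to-text call against the standard OpenAI API; the `model="whisper-1"` argument and the `response.text` return are assumptions, since the diff cuts off before them:

```python
# Hypothetical completion of transcribe(); only the first four lines
# mirror the commit, the rest follows the standard OpenAI API shape.
def transcribe(path):
    print(f"Transcribe: {path}")
    with open(path, "rb") as f:  # context manager also closes the handle
        response = client.audio.transcriptions.create(
            file=f,
            model="whisper-1",  # assumed; not visible in the hunk
        )
    return response.text
```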
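The rewrite of `speak` is what makes the hidden `gr.HTML(visible=False)` output work: the MP3 bytes from `tts-1` are base64-encoded into a `data:` URI and wrapped in an `<audio ... autoplay>` tag, so the browser plays the reply without any visible player. The encoding step in isolation (the helper name is illustrative; the OpenAI call is stubbed in a comment):

```python
import base64

def to_audio_element(mp3_bytes):
    # Embed raw MP3 bytes as an autoplaying HTML <audio> tag via a data URI.
    audio = base64.b64encode(mp3_bytes).decode("utf-8")
    src = f"data:audio/mpeg;base64,{audio}"
    return f'<audio src="{src}" controls autoplay></audio>'

# In chat.py: speech = client.audio.speech.create(model="tts-1", voice="alloy", input=text)
# audio_element = to_audio_element(speech.read())
```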
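With the rewiring, the textbox and the microphone share one pipeline: `user` records the (possibly transcribed) turn and clears the input, `bot` fills in the assistant reply, and `speak` renders it as audio; the mic path adds a step that resets the recorder (`lambda:None, None, mic`). A condensed sketch of the `.then()` sequencing Gradio is doing here, with stub callbacks in place of the real ones (names and bodies are illustrative, not from the commit):

```python
import gradio as gr

def step_user(text, history):
    # Append the user turn and clear the textbox by returning "".
    return "", history + [[text, None]]

def step_bot(history):
    # Fill in the assistant turn (stub reply instead of the Assistants API).
    history[-1][1] = f"echo: {history[-1][0]}"
    return history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    # .then() starts the next callback only after the previous one finishes,
    # which is how chat.py sequences user -> bot -> speak.
    msg.submit(step_user, [msg, chatbot], [msg, chatbot]).then(
        step_bot, chatbot, chatbot
    )

demo.launch()
```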