hickman2049 committed
Commit 3910061 · 1 Parent(s): ffbbbc6

Create web.py

Files changed (1)
  1. web.py +44 -0
web.py ADDED
@@ -0,0 +1,44 @@
+ import gradio as gr
+ import openai
+ import config
+
+ openai.api_key = config.OPENAI_API_KEY
+
+ messages = [
+     {"role": "system", "content": "You are a job interviewer who will be conducting a mock interview for practice. Respond in less than 40 words."},
+ ]
+
+ def transcribe(audio):
+     global messages
+     print(audio)
+     # Transcribe the recorded audio with Whisper
+     audio_file = open(audio, "rb")
+     transcript = openai.Audio.transcribe("whisper-1", audio_file)
+     print(transcript)
+
+     messages.append({"role": "user", "content": transcript["text"]})
+
+     # Generate the interviewer's next reply with the chat model
+     response = openai.ChatCompletion.create(
+         model="gpt-3.5-turbo",
+         messages=messages
+     )
+
+     system_message = response["choices"][0]["message"]["content"]
+
+     messages.append({"role": "assistant", "content": system_message})
+     # Build a readable transcript, skipping the system prompt
+     chat_transcript = ""
+     for message in messages:
+         if message['role'] != 'system':
+             chat_transcript += message['role'] + ": " + message['content'] + "\n\n"
+
+     return chat_transcript
+
+ # with gr.Blocks() as ui:
+ #     advisor = gr.Image(value=config.UI_IMAGE).style(width=config.UI_IMAGE_WIDTH, height=config.UI_IMAGE_HEIGHT)
+
+ ui = gr.Interface(fn=transcribe, inputs=gr.Audio(source="microphone", type="filepath"), outputs="text")
+
+ ui.launch()
+
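web.py imports a config module that is not part of this commit. A minimal sketch of what config.py would need to provide, assuming only the names referenced above (OPENAI_API_KEY is required; the UI_IMAGE values are hypothetical and only matter if the commented-out Blocks UI is re-enabled):

# config.py — hypothetical companion file, not included in this commit
OPENAI_API_KEY = "sk-..."    # OpenAI API key read by web.py
UI_IMAGE = "advisor.png"     # assumed placeholder path for the advisor image
UI_IMAGE_WIDTH = 200         # assumed display width, in pixels
UI_IMAGE_HEIGHT = 200        # assumed display height, in pixels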