Update app.py
app.py
CHANGED
@@ -6,7 +6,6 @@ import re
 import requests
 
 client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
-model = "gpt-3.5-turbo"
 
 def repo_get_all_employees_from_database():
     url = "https://api.airtable.com/v0/appopGmlHujYnd6Vw/Interviewers?maxRecords=100&view=Grid%20view"
@@ -15,8 +14,6 @@ def repo_get_all_employees_from_database():
     }
     response = requests.get(url, headers=headers)
     records = response.json()
-    # print("records:")
-    # print(records)
     records_list = records['records']
     employees_list = []
     for record in records_list:
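For context, a minimal self-contained sketch of what this helper does end to end. The Authorization header and the per-record fields kept for each employee are not visible in the hunk, so the AIRTABLE_API_KEY environment variable and the use of Airtable's standard "fields" payload are assumptions of this sketch, not part of the commit.

import os
import requests

def fetch_employees_sketch():
    # Same Airtable view the app queries.
    url = "https://api.airtable.com/v0/appopGmlHujYnd6Vw/Interviewers?maxRecords=100&view=Grid%20view"
    # Assumption: a personal access token is supplied via AIRTABLE_API_KEY.
    headers = {"Authorization": "Bearer " + os.getenv("AIRTABLE_API_KEY", "")}
    response = requests.get(url, headers=headers)
    records_list = response.json()["records"]
    # Assumption: only the record fields are kept; the real loop body is outside this hunk.
    return [record["fields"] for record in records_list]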
@@ -26,21 +23,12 @@ def repo_get_all_employees_from_database():
     return employees_list
 
 def predict(message, history):
-
-    # print("message:")
-    # print(message)
-
     history_openai_format = []
-
-    # first_message = len(history) == 0
-
     system_content = """
     You are a computing system that strictly and directly follows all instructions from the user..
     """
     history_openai_format.append({"role": "system", "content": system_content})
-
     pattern = r"For conducting an interview I need (\d+) employee.*start time is (.*), duration (\d+) hour"
-
     data = repo_get_all_employees_from_database()
 
     prompt = '''
@@ -49,15 +37,8 @@ def predict(message, history):
     Above is employees data in json format.
     {message}
     '''.format(data=data, message=message)
-
-    # print("prompt:")
-    # print(prompt)
 
     match = re.search(pattern, message)
-
-    # print("match:")
-    # print(match)
-
     if match:
         num_employees = int(match.group(1))
         duration = int(match.group(3))
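As an illustration (not part of the commit), this is how that pattern pulls the employee count, start time and duration out of the pre-configured prompt defined later in the file; group(2) feeds the date_time used in the follow-up prompt.

import re

pattern = r"For conducting an interview I need (\d+) employee.*start time is (.*), duration (\d+) hour"
message = "For conducting an interview I need 1 employee in given time slot: start time is March 11 2024 2 pm, duration 1 hour"

match = re.search(pattern, message)
if match:
    num_employees = int(match.group(1))  # 1
    date_time = match.group(2)           # "March 11 2024 2 pm"
    duration = int(match.group(3))       # 1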
@@ -81,20 +62,13 @@ def predict(message, history):
         4. Check previous step if you really chose an employee with the lowest "interviews_conducted" value.
         5. At the end print ids and names of finally selected employees in json format. Please remember that in your output should be maximum {num_employees} employee.
         '''.format(data=data, date_time=date_time, num_employees=num_employees)
-
-
-        # print("prompt:")
-        # print(prompt)
-
-        # print("history:")
-        # print(history)
 
     for human, assistant in history:
         history_openai_format.append({"role": "user", "content": human })
         history_openai_format.append({"role": "assistant", "content": assistant})
     history_openai_format.append({"role": "user", "content": prompt})
 
-
+    model = "gpt-3.5-turbo"
 
     if ("switch to gpt-3.5" in message.lower()):
         model = "gpt-3.5-turbo"
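For readers unfamiliar with Gradio's chat history, a small standalone example of the conversion this loop performs, using made-up conversation content:

history = [("Hi", "Hello! How can I help?")]  # Gradio passes (user, assistant) tuples
history_openai_format = [{"role": "system", "content": "You are a computing system ..."}]
for human, assistant in history:
    history_openai_format.append({"role": "user", "content": human})
    history_openai_format.append({"role": "assistant", "content": assistant})
history_openai_format.append({"role": "user", "content": "latest prompt goes here"})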
@@ -110,13 +84,15 @@ def predict(message, history):
         temperature=0,
         stream=True)
 
-    partial_message = ""
+    partial_message = "Language Model currently under the hood: {model}\n\n".format(model=model)
     for chunk in response:
         if chunk.choices[0].delta.content is not None:
             partial_message = partial_message + chunk.choices[0].delta.content
             yield partial_message
 
 pre_configured_promt = "For conducting an interview I need 1 employee in given time slot: start time is March 11 2024 2 pm, duration 1 hour"
+switch_to_gpt3 = "Switch to gpt-3.5"
+switch_to_gpt4 = "Switch to gpt-4"
 
 description = '''
 # AI Interview Team Assistant | Empowered by Godel Technologies AI \n
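A minimal sketch of the switching behaviour this commit settles on: model now defaults inside predict() rather than at module level, and the streamed reply is prefixed with the active model name. The gpt-4 branch and its exact model string sit outside these hunks, so they are assumed here, and pick_model is a hypothetical helper used only for illustration.

def pick_model(message, default="gpt-3.5-turbo"):  # hypothetical helper, not in app.py
    model = default
    if "switch to gpt-3.5" in message.lower():
        model = "gpt-3.5-turbo"
    elif "switch to gpt-4" in message.lower():  # assumed symmetric branch
        model = "gpt-4"
    return model

model = pick_model("Switch to gpt-4")
partial_message = "Language Model currently under the hood: {model}\n\n".format(model=model)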
@@ -127,9 +103,8 @@ You can send any regular prompts you wish or pre-configured Chain-of-Thought pro
 To trigger pre-configured prompt you have to craft a prompt with next structure:
 - "{pre_configured_promt}"
 \n
-You can switch between gpt-3.5 and gpt-4 with
-
-'''.format(pre_configured_promt=pre_configured_promt, model=model)
+You can switch between gpt-3.5 and gpt-4 with {switch_to_gpt3} or {switch_to_gpt4} prompts.\n
+'''.format(pre_configured_promt=pre_configured_promt, switch_to_gpt3=switch_to_gpt3, switch_to_gpt4=switch_to_gpt4)
 
-examples = [pre_configured_promt]
+examples = [pre_configured_promt, switch_to_gpt3, switch_to_gpt4]
 gr.ChatInterface(predict, examples=[examples], description=description).launch()
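Finally, a runnable skeleton of the Gradio wiring with the new example prompts; the echo generator stands in for the real streaming predict() above, and the sketch passes the flat list of strings to examples, which gr.ChatInterface accepts for text-only chats.

import gradio as gr

def predict(message, history):  # stand-in for the real streaming predict()
    yield "echo: " + message

pre_configured_promt = "For conducting an interview I need 1 employee in given time slot: start time is March 11 2024 2 pm, duration 1 hour"
switch_to_gpt3 = "Switch to gpt-3.5"
switch_to_gpt4 = "Switch to gpt-4"

examples = [pre_configured_promt, switch_to_gpt3, switch_to_gpt4]
gr.ChatInterface(predict, examples=examples, description="AI Interview Team Assistant").launch()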