Spaces:
Runtime error
Runtime error
Commit
·
d54ca4f
1
Parent(s):
c976c04
Update app.py
Browse files
app.py
CHANGED
@@ -8,7 +8,22 @@ openai.api_key = os.environ.get("openai_api_key")
|
|
8 |
|
9 |
# Define a function to generate responses using GPT-3.5 Turbo
|
10 |
def generate_response(user_prompt):
|
11 |
-
response = openai.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
12 |
model="gpt-3.5-turbo", # Use GPT-3.5 Turbo engine
|
13 |
prompt= f'''I will give you a question and you detect which category does this question belong to. It should be from these categories -
|
14 |
physical activity, sleep, nutrition and preventive care. Make sure you just reply with response in json format "category":"[sleep,nutrition]".
|
@@ -16,7 +31,7 @@ def generate_response(user_prompt):
|
|
16 |
Question: {user_prompt}''',
|
17 |
max_tokens=100, # You can adjust this to limit the response length
|
18 |
)
|
19 |
-
return response
|
20 |
|
21 |
# Create a Gradio interface
|
22 |
iface = gr.Interface(
|
|
|
8 |
|
9 |
# Define a function to generate responses using GPT-3.5 Turbo
|
10 |
def generate_response(user_prompt):
    """Classify a user question into health categories using GPT-3.5 Turbo.

    Args:
        user_prompt: The user's question, interpolated into the
            classification instruction sent to the model.

    Returns:
        The model's reply text — expected (per the instruction) to be a
        JSON-style string such as "category":"[sleep,nutrition]".

    Raises:
        openai.error.OpenAIError: on API/auth/network failures.
    """
    # Bug fix: the ChatCompletion endpoint takes `messages`, not `prompt`.
    # Passing `prompt=` here raises InvalidRequestError at call time (the
    # Space's "Runtime error"). The instruction text is preserved verbatim
    # and moved into a single user message.
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",  # Use GPT-3.5 Turbo engine
        messages=[
            {
                "role": "user",
                "content": f'''I will give you a question and you detect which category does this question belong to. It should be from these categories -
physical activity, sleep, nutrition and preventive care. Make sure you just reply with response in json format "category":"[sleep,nutrition]".

Question: {user_prompt}''',
            }
        ],
        max_tokens=100,  # You can adjust this to limit the response length
    )
    # Return just the reply text, not the whole response object, so the
    # Gradio interface displays the category string directly.
    return response['choices'][0]['message']['content']
|
35 |
|
36 |
# Create a Gradio interface
|
37 |
iface = gr.Interface(
|