Update app.py
app.py CHANGED
@@ -16,7 +16,7 @@ client = OpenAI(api_key=OPENAI_API_KEY)
 
 def gpt_call(history, user_message,
              model="gpt-4o",
-             max_tokens=512,
+             max_tokens=1500,  # Increased from 512 to 1500 to prevent truncation
              temperature=0.7,
              top_p=0.95):
     """
@@ -41,17 +41,15 @@ def gpt_call(history, user_message,
     completion = client.chat.completions.create(
         model=model,
         messages=messages,
-        max_tokens=max_tokens,
+        max_tokens=max_tokens,  # Increased to allow longer responses
         temperature=temperature,
         top_p=top_p
     )
 
-    # 5) Ensure
-
-    formatted_response = response_text.replace("\\[", "$$").replace("\\]", "$$")
-    formatted_response = formatted_response.replace("\\(", "$").replace("\\)", "$")
+    # 5) Ensure full response is returned without being cut off
+    full_response = "".join(choice.message.content for choice in completion.choices).strip()
 
-    return formatted_response
+    return full_response
 
 
 def respond(user_message, history):
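For context, a minimal runnable sketch of the call path after this commit. It assumes the OpenAI v1 Python client and that OPENAI_API_KEY is read from the environment (as the context line `client = OpenAI(api_key=OPENAI_API_KEY)` suggests); the messages-building step is a hypothetical reconstruction, since the code that turns `history` into API messages sits outside this diff.

    import os

    from openai import OpenAI

    # Assumption: the key is loaded from the environment elsewhere in app.py.
    OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
    client = OpenAI(api_key=OPENAI_API_KEY)

    def gpt_call(history, user_message,
                 model="gpt-4o",
                 max_tokens=1500,  # raised from 512 so long answers are not truncated
                 temperature=0.7,
                 top_p=0.95):
        # Hypothetical: flatten (user, assistant) tuples into the
        # chat-completions message format; the real builder is outside this diff.
        messages = []
        for user_msg, assistant_msg in history:
            messages.append({"role": "user", "content": user_msg})
            messages.append({"role": "assistant", "content": assistant_msg})
        messages.append({"role": "user", "content": user_message})

        completion = client.chat.completions.create(
            model=model,
            messages=messages,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
        )
        # Join across choices (only one by default) and strip whitespace,
        # mirroring the commit's new return path.
        return "".join(choice.message.content for choice in completion.choices).strip()

With the new default, a reply can run up to 1500 completion tokens before the API cuts it off; callers that want the old behavior can still pass max_tokens=512 explicitly.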