Update app.py
app.py
CHANGED
@@ -128,18 +128,30 @@ def get_ai_response(prompt, history):
         model = st.session_state.selected_model
         model_config = AVAILABLE_MODELS.get(model, AVAILABLE_MODELS["gpt-3.5-turbo"])
 
-        response = client.chat.completions.create(
-            model=model,
-            messages=messages,
-            temperature=model_config["temperature"],
-            max_tokens=model_config["output_tokens"],
-            stream=False
-        )
+        # Check if the model requires max_completion_tokens instead of max_tokens
+        if model.startswith("o3-mini") or model.startswith("o1") or model.startswith("gpt-4o"):
+            response = client.chat.completions.create(
+                model=model,
+                messages=messages,
+                temperature=model_config["temperature"],
+                max_completion_tokens=model_config["output_tokens"],
+                stream=False
+            )
+        else:
+            response = client.chat.completions.create(
+                model=model,
+                messages=messages,
+                temperature=model_config["temperature"],
+                max_tokens=model_config["output_tokens"],
+                stream=False
+            )
+
         return response.choices[0].message.content
 
     except Exception as e:
         return f"An error occurred: {str(e)}."
 
+
 # Function to create a new conversation
 def create_new_chat():
     new_id = str(uuid.uuid4())
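The two branches of this change differ only in the name of the token-limit keyword, so the duplicated create() calls could be collapsed by building the keyword arguments dynamically. A minimal sketch of that alternative, assuming the same `client`, `model`, `messages`, and `model_config` that are in scope inside `get_ai_response` above (the helper name `build_completion_kwargs` is hypothetical, not part of this commit):

# Sketch only: choose the token-limit keyword per model family instead of
# duplicating the create() call. Assumes `client`, `model`, `messages`, and
# `model_config` are defined as in get_ai_response above.
def build_completion_kwargs(model, model_config, messages):
    # Newer model families (o1, o3-mini, gpt-4o) expect max_completion_tokens;
    # older chat models still take max_tokens. str.startswith accepts a tuple,
    # so the three prefix checks collapse into one call.
    uses_completion_tokens = model.startswith(("o3-mini", "o1", "gpt-4o"))
    token_key = "max_completion_tokens" if uses_completion_tokens else "max_tokens"
    return {
        "model": model,
        "messages": messages,
        "temperature": model_config["temperature"],
        token_key: model_config["output_tokens"],
        "stream": False,
    }

# Usage, mirroring the committed behavior with a single call site:
response = client.chat.completions.create(
    **build_completion_kwargs(model, model_config, messages)
)

This keeps the per-model branching in one place, so supporting another model family later means extending one prefix tuple rather than adding a third near-identical create() call.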