neuralleap committed on
Commit
897bc55
·
verified ·
1 Parent(s): ba5c73c
Files changed (1) hide show
  1. app.py +12 -3
app.py CHANGED
@@ -128,16 +128,24 @@ def get_ai_response(prompt, history):
128
  model = st.session_state.selected_model
129
  model_config = AVAILABLE_MODELS.get(model, AVAILABLE_MODELS["gpt-3.5-turbo"])
130
 
131
- # Check if the model requires max_completion_tokens instead of max_tokens
132
- if model.startswith("o3-mini") or model.startswith("o1") or model.startswith("gpt-4o"):
 
 
 
 
 
 
 
 
133
  response = client.chat.completions.create(
134
  model=model,
135
  messages=messages,
136
- temperature=model_config["temperature"],
137
  max_completion_tokens=model_config["output_tokens"],
138
  stream=False
139
  )
140
  else:
 
141
  response = client.chat.completions.create(
142
  model=model,
143
  messages=messages,
@@ -152,6 +160,7 @@ def get_ai_response(prompt, history):
152
  return f"An error occurred: {str(e)}."
153
 
154
 
 
155
  # Function to create a new conversation
156
  def create_new_chat():
157
  new_id = str(uuid.uuid4())
 
128
  model = st.session_state.selected_model
129
  model_config = AVAILABLE_MODELS.get(model, AVAILABLE_MODELS["gpt-3.5-turbo"])
130
 
131
+ # Check if the model does NOT support temperature
132
+ models_without_temperature = [
133
+ "o3-mini",
134
+ "o1",
135
+ "gpt-4o",
136
+ "o3-mini-2025-01-31"
137
+ ]
138
+
139
+ if any(model.startswith(prefix) for prefix in models_without_temperature):
140
+ # Models that do not support temperature
141
  response = client.chat.completions.create(
142
  model=model,
143
  messages=messages,
 
144
  max_completion_tokens=model_config["output_tokens"],
145
  stream=False
146
  )
147
  else:
148
+ # Models that support temperature
149
  response = client.chat.completions.create(
150
  model=model,
151
  messages=messages,
 
160
  return f"An error occurred: {str(e)}."
161
 
162
 
163
+
164
  # Function to create a new conversation
165
  def create_new_chat():
166
  new_id = str(uuid.uuid4())