Update app.py
Update new code
app.py
CHANGED
@@ -81,13 +81,11 @@ if "selected_model" not in st.session_state:
     st.session_state.selected_model = "gpt-3.5-turbo"
 
 # Get OpenAI API key from environment or let user enter it
-
-# openai_api_key = os.getenv("OPENAI_API_KEY_NEW_1")
-openai_api_key = os.getenv("OPENAI_API_KEY_NEW_2")
+openai_api_key = os.getenv("OPENAI_API_KEY")
 
 # Configure OpenAI client
 if openai_api_key:
-    client = openai.OpenAI(api_key=openai_api_key)
+    openai.api_key = openai_api_key
 
 # Available models with descriptions and token limits
 AVAILABLE_MODELS = {
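The replacement switches from the ad-hoc OPENAI_API_KEY_NEW_2 variable to the conventional OPENAI_API_KEY name, but it configures the key through the module-level openai.api_key, which only exists in pre-1.0 releases of the openai package. A minimal sketch of the same setup against the 1.x client API (keeping the key optional so the UI can still prompt for it, as the sidebar code removed below did):

import os
import openai

# Read the key from the environment; leave it None so the UI can ask for one.
openai_api_key = os.getenv("OPENAI_API_KEY")

# openai>=1.0 style: construct a client object instead of setting openai.api_key.
client = openai.OpenAI(api_key=openai_api_key) if openai_api_key else None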
@@ -115,51 +113,12 @@ AVAILABLE_MODELS = {
         "output_tokens": 1200,
         "temperature": 0.7
     },
-    "
-        "description": "Latest
-        "max_tokens":
-        "output_tokens": 1200,
-        "temperature": 0.7
-    },
-    "gpt-4o-mini": {
-        "description": "Efficient version of GPT-4o",
-        "max_tokens": 128000,
+    "o3-mini": {
+        "description": "Latest cost-efficient reasoning model",
+        "max_tokens": 200000,
         "output_tokens": 1000,
         "temperature": 0.7
-    },
-
-    "o1-mini": {
-        "description": "OpenAI Reasoning Model - Mini",
-        "max_tokens": 180000,
-        "output_tokens": 1000,
-        "temperature": 0.7
-    },
-    "o1": {
-        "description": "OpenAI Reasoning Model - Standard",
-        "max_tokens": 200000,
-        "output_tokens": 1200,
-        "temperature": 0.7
-    },
-    "o1-pro": {
-        "description": "OpenAI Reasoning Model - Professional",
-        "max_tokens": 200000,
-        "output_tokens": 1500,
-        "temperature": 0.7
-    },
-    "o3-mini": {
-        "description": "OpenAI Advanced Reasoning - Mini",
-        "max_tokens": 200000,
-        "output_tokens": 1000,
-        "temperature": 0.7
-    },
-    "o3-mini-2025-01-31": {
-        "description": "OpenAI Advanced Reasoning - Enhanced",
-        # "max_tokens": 200000,
-        # "output_tokens": 1200,
-        # "temperature": 0.7
     }
-    #"""
-
 }
 
 # Function to call OpenAI API
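After this hunk the registry collapses to two entries: the surviving gpt-3.5-turbo-era entry and the new o3-mini. Since get_ai_response below indexes model_config["output_tokens"] and friends directly, any model name missing from the dict would raise a KeyError. A small hypothetical guard, not part of the commit, assuming the registry keeps a "gpt-3.5-turbo" entry (which the session-state default suggests):

# Hypothetical helper (not in the commit): resolve a model's settings safely,
# falling back to the default model's entry if an unknown model is selected.
def get_model_config(model_name: str) -> dict:
    return AVAILABLE_MODELS.get(model_name, AVAILABLE_MODELS["gpt-3.5-turbo"])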
@@ -198,17 +157,27 @@ def get_ai_response(prompt, history):
         output_tokens = model_config["output_tokens"]
         temperature = model_config["temperature"]
 
+        # Set reasoning effort for o3-mini model
+        reasoning_effort = "medium"  # Default reasoning effort
+        if model == "o3-mini":
+            reasoning_effort = st.sidebar.selectbox(
+                "Select reasoning effort:",
+                ["low", "medium", "high"],
+                index=1
+            )
+
         # Call OpenAI API
-        response = client.chat.completions.create(
+        response = openai.ChatCompletion.create(
             model=model,
             messages=messages,
             temperature=temperature,
             max_tokens=output_tokens,
+            reasoning_effort=reasoning_effort if model == "o3-mini" else None,
             stream=False
         )
 
         # Extract the response
-        return response.choices[0].message
+        return response.choices[0].message["content"]
 
     except Exception as e:
         st.error(f"An error occurred: {str(e)}")
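Two caveats in this hunk: openai.ChatCompletion.create exists only in pre-1.0 versions of the openai package, and reasoning_effort=None is still serialized and sent for every non-o3-mini model, which the endpoint may reject as an unsupported parameter. A sketch that builds the keyword arguments conditionally instead, assuming the 1.x client from the earlier sketch:

# Assemble kwargs so reasoning_effort is only sent when it applies.
request_kwargs = {
    "model": model,
    "messages": messages,
    "stream": False,
}
if model == "o3-mini":
    # Reasoning models accept reasoning_effort; they may also reject
    # temperature and expect max_completion_tokens rather than max_tokens.
    request_kwargs["reasoning_effort"] = reasoning_effort
    request_kwargs["max_completion_tokens"] = output_tokens
else:
    request_kwargs["temperature"] = temperature
    request_kwargs["max_tokens"] = output_tokens

response = client.chat.completions.create(**request_kwargs)
content = response.choices[0].message.content  # 1.x returns objects, not dicts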
@@ -239,179 +208,6 @@ def get_demo_response(prompt):
 
     reasoning_examples = [
         "This is a demonstration of how I would process a reasoning task. In a real scenario with the full model, I would analyze the problem step by step, consider multiple angles, and provide a detailed explanation.",
-        "When solving problems, I would typically break them down into smaller parts, examine each component, and build
-
-
-
-    # Simple pattern matching
-    if any(word in prompt_lower for word in ["hello", "hi", "hey", "greetings"]):
-        return random.choice(greetings)
-    elif any(word in prompt_lower for word in ["bye", "goodbye", "farewell", "see you"]):
-        return random.choice(farewells)
-    elif any(phrase in prompt_lower for phrase in ["who are you", "what are you", "tell me about yourself", "what can you do"]):
-        return random.choice(info_responses)
-    elif any(word in prompt_lower for word in ["think", "reason", "analyze", "solve", "explain", "why", "how"]):
-        return random.choice(reasoning_examples)
-    elif "weather" in prompt_lower:
-        return "I'm sorry, I don't have access to real-time weather data in demo mode."
-    elif any(word in prompt_lower for word in ["help", "assist", "support"]):
-        return "To get better assistance, please add your OpenAI API key. You can get one for free at https://platform.openai.com/account/api-keys."
-    else:
-        return "I'm running in demo mode with limited responses. For a full conversation experience, please add your OpenAI API key above."
-
-# Function to create a new conversation
-def create_new_chat():
-    new_id = str(uuid.uuid4())
-    st.session_state.current_conversation_id = new_id
-    st.session_state.conversations[new_id] = {
-        "title": f"New chat {datetime.now().strftime('%H:%M')}",
-        "messages": []
-    }
-
-# Function to update conversation title based on first message
-def update_conversation_title(conv_id, user_message):
-    current_title = st.session_state.conversations[conv_id]["title"]
-    if current_title.startswith("New chat"):
-        # Limit title length to prevent overflow
-        new_title = user_message[:30] + "..." if len(user_message) > 30 else user_message
-        st.session_state.conversations[conv_id]["title"] = new_title
-
-# Function to delete a conversation
-def delete_conversation(conv_id):
-    if conv_id in st.session_state.conversations:
-        del st.session_state.conversations[conv_id]
-        # If we deleted the current conversation, set a new one
-        if conv_id == st.session_state.current_conversation_id:
-            if st.session_state.conversations:
-                st.session_state.current_conversation_id = next(iter(st.session_state.conversations))
-            else:
-                create_new_chat()
-
-# Create a two-column layout
-sidebar, main_content = st.columns([1, 3])
-
-# Sidebar (conversation history)
-with sidebar:
-    st.sidebar.title("Conversations")
-
-    # Add a new chat button
-    if st.sidebar.button("+ New Chat", use_container_width=True):
-        create_new_chat()
-        st.rerun()
-
-    st.sidebar.markdown("---")
-
-    # API token input in sidebar if not available
-    if not openai_api_key:
-        st.sidebar.info("⚠️ No OpenAI API key found.", icon="ℹ️")
-        entered_token = st.sidebar.text_input("Enter OpenAI API Key", type="password")
-        if entered_token:
-            openai_api_key = entered_token
-            client = openai.OpenAI(api_key=openai_api_key)
-
-    # Model selection dropdown
-    st.sidebar.subheader("Model Selection")
-    model_options = list(AVAILABLE_MODELS.keys())
-    model_descriptions = [f"{model} - {AVAILABLE_MODELS[model]['description']}" for model in model_options]
-    selected_model_index = model_options.index(st.session_state.selected_model) if st.session_state.selected_model in model_options else 0
-
-    selected_description = st.sidebar.selectbox(
-        "Choose a model:",
-        model_descriptions,
-        index=selected_model_index
-    )
-
-    # Extract model name from description
-    selected_model = model_options[model_descriptions.index(selected_description)]
-    if selected_model != st.session_state.selected_model:
-        st.session_state.selected_model = selected_model
-        st.sidebar.info(f"Model set to {selected_model}")
-
-    # Show model details
-    with st.sidebar.expander("Model Details"):
-        model_info = AVAILABLE_MODELS[selected_model]
-        st.write(f"**Description:** {model_info['description']}")
-        st.write(f"**Max tokens:** {model_info['max_tokens']}")
-        st.write(f"**Default temperature:** {model_info['temperature']}")
-        st.write("""
-        **Note:** Some models may not be available with your current API key.
-        If you encounter an error, try selecting a different model.
-        """)
-
-    st.sidebar.markdown("---")
-
-    # Display conversation history
-    for conv_id, conv_data in st.session_state.conversations.items():
-        col1, col2 = st.sidebar.columns([4, 1])
-        is_active = conv_id == st.session_state.current_conversation_id
-
-        with col1:
-            if st.button(
-                conv_data["title"],
-                key=f"conv_{conv_id}",
-                use_container_width=True,
-                type="secondary" if is_active else "tertiary"
-            ):
-                st.session_state.current_conversation_id = conv_id
-                st.rerun()
-
-        with col2:
-            if st.button("🗑️", key=f"del_{conv_id}"):
-                delete_conversation(conv_id)
-                st.rerun()
-
-# Main content area
-with main_content:
-    st.write("")  # Add some space at the top
-
-    # Get current conversation
-    current_id = st.session_state.current_conversation_id
-    current_conv = st.session_state.conversations.get(current_id, {"messages": []})
-    messages = current_conv["messages"]
-
-    # Create a container for the chat area (scrollable)
-    chat_container = st.container()
-
-    # Display chat messages
-    with chat_container:
-        for i, message in enumerate(messages):
-            with st.chat_message(message["role"]):
-                st.markdown(message["content"])
-
-    # Chat input at the bottom
-    prompt = st.chat_input("What's on your mind?")
-
-    if prompt:
-        # Add user message to the current conversation
-        messages.append({"role": "user", "content": prompt})
-
-        # Update conversation title if this is the first message
-        if len(messages) == 1:
-            update_conversation_title(current_id, prompt)
-
-        # Display user message
-        with st.chat_message("user"):
-            st.markdown(prompt)
-
-        # Display assistant response with typing animation
-        with st.chat_message("assistant"):
-            message_placeholder = st.empty()
-
-            # Get response from AI
-            full_response = get_ai_response(prompt, messages[:-1])
-
-            # Simulate typing
-            displayed_response = ""
-            for i in range(len(full_response)):
-                displayed_response += full_response[i]
-                message_placeholder.markdown(displayed_response + "▌")
-                time.sleep(0.005)  # Slightly faster typing
-
-            # Display final response
-            message_placeholder.markdown(full_response)
-
-            # Add assistant response to the conversation
-            messages.append({"role": "assistant", "content": full_response})
-
-            # Force a rerun to update the sidebar with the new conversation title
-            st.rerun()
+        "When solving problems, I would typically break them down into smaller parts, examine each component, and build
+        ::contentReference[oaicite:0]{index=0}
+