neuralleap committed on
Commit
eda487c
·
verified ·
1 Parent(s): 4324921

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +212 -53
app.py CHANGED
@@ -81,11 +81,21 @@ if "selected_model" not in st.session_state:
81
  st.session_state.selected_model = "gpt-3.5-turbo"
82
 
83
  # Get OpenAI API key from environment or let user enter it
84
- openai_api_key = os.getenv("OPENAI_API_KEY")
 
 
85
 
86
  # Configure OpenAI client
87
  if openai_api_key:
88
- openai.api_key = openai_api_key
 
 
 
 
 
 
 
 
89
 
90
  # Available models with descriptions and token limits
91
  AVAILABLE_MODELS = {
@@ -113,101 +123,250 @@ AVAILABLE_MODELS = {
113
  "output_tokens": 1200,
114
  "temperature": 0.7
115
  },
116
- "o3-mini": {
117
- "description": "Latest cost-efficient reasoning model",
118
- "max_tokens": 200000,
 
 
 
 
 
 
119
  "output_tokens": 1000,
120
  "temperature": 0.7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
121
  }
122
  }
123
 
124
  # Function to call OpenAI API
125
  def get_ai_response(prompt, history):
126
- # Use demo mode if no API key is provided
127
  if not openai_api_key:
128
  return get_demo_response(prompt)
129
 
130
  try:
131
- # Format messages for API
132
- messages = []
133
-
134
- # Add system message
135
- system_message = "You are a helpful assistant that provides clear, concise, and accurate information."
136
- messages.append({
137
- "role": "system",
138
- "content": system_message
139
- })
140
-
141
- # Add conversation history
142
  for msg in history:
143
- messages.append({
144
- "role": msg["role"],
145
- "content": msg["content"]
146
- })
147
 
148
- # Add the current prompt
149
- messages.append({
150
- "role": "user",
151
- "content": prompt
152
- })
153
-
154
- # Get model configuration
155
  model = st.session_state.selected_model
156
  model_config = AVAILABLE_MODELS.get(model, AVAILABLE_MODELS["gpt-3.5-turbo"])
157
  output_tokens = model_config["output_tokens"]
158
  temperature = model_config["temperature"]
159
 
160
- # Set reasoning effort for o3-mini model
161
- reasoning_effort = "medium" # Default reasoning effort
162
- if model == "o3-mini":
163
- reasoning_effort = st.sidebar.selectbox(
164
- "Select reasoning effort:",
165
- ["low", "medium", "high"],
166
- index=1
167
- )
168
-
169
- # Call OpenAI API
170
- response = openai.ChatCompletion.create(
171
  model=model,
172
  messages=messages,
173
  temperature=temperature,
174
  max_tokens=output_tokens,
175
- reasoning_effort=reasoning_effort if model == "o3-mini" else None,
176
  stream=False
177
  )
 
178
 
179
- # Extract the response
180
- return response.choices[0].message["content"]
181
-
 
182
  except Exception as e:
183
- st.error(f"An error occurred: {str(e)}")
184
- return "I'm sorry, I encountered an error while processing your request. Please check your OpenAI API key or try again later."
185
 
186
  # Demo mode responses for when no API key is available
187
  def get_demo_response(prompt):
188
  prompt_lower = prompt.lower()
189
-
190
- # Simple response templates
191
  greetings = [
192
  "Hello! How can I assist you today?",
193
  "Hi there! I'm a demo AI assistant. What can I help you with?",
194
  "Greetings! I'm running in demo mode. Feel free to ask simple questions."
195
  ]
196
-
197
  farewells = [
198
  "Goodbye! Have a great day!",
199
  "Farewell! Come back soon!",
200
  "Take care! It was nice chatting with you."
201
  ]
202
-
203
  info_responses = [
204
  "I'm a simple AI assistant running in demo mode. To use the full features, please provide an OpenAI API key.",
205
  "This is a demo version with limited capabilities. For a better experience, add your OpenAI API key.",
206
  "I'm just demonstrating basic functionality. Get a free API key from OpenAI to unlock my full potential!"
207
  ]
208
-
209
  reasoning_examples = [
210
  "This is a demonstration of how I would process a reasoning task. In a real scenario with the full model, I would analyze the problem step by step, consider multiple angles, and provide a detailed explanation.",
211
- "When solving problems, I would typically break them down into smaller parts, examine each component, and build
212
- ::contentReference[oaicite:0]{index=0}
213
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81
  st.session_state.selected_model = "gpt-3.5-turbo"
82
 
83
# Get OpenAI API key from environment or let user enter it.
# SECURITY: the key must come from the environment (e.g. a Hugging Face
# Spaces secret) — NEVER hard-code an API key in this file. A key committed
# to a repository is public and must be treated as compromised and revoked.
openai_api_key = os.getenv("OPENAI_API_KEY_NEW_3")

# Configure OpenAI client
if openai_api_key:
    client = openai.OpenAI(api_key=openai_api_key)
    # Optional sanity check: list the models this key can access and warn
    # early in the sidebar if the featured reasoning model is missing,
    # instead of failing later on the first chat request.
    try:
        available_models = client.models.list()
        model_ids = [model.id for model in available_models.data]
        if 'o3-mini-2025-01-31' not in model_ids:
            st.sidebar.warning("The 'o3-mini-2025-01-31' model is not available with this API key.")
    except Exception as e:
        # Best-effort check only — surface the problem but keep the app running.
        st.sidebar.error(f"Could not fetch available models: {str(e)}")
99
 
100
  # Available models with descriptions and token limits
101
  AVAILABLE_MODELS = {
 
123
  "output_tokens": 1200,
124
  "temperature": 0.7
125
  },
126
+ "gpt-4o": {
127
+ "description": "Latest GPT-4 Omni model",
128
+ "max_tokens": 128000,
129
+ "output_tokens": 1200,
130
+ "temperature": 0.7
131
+ },
132
+ "gpt-4o-mini": {
133
+ "description": "Efficient version of GPT-4o",
134
+ "max_tokens": 128000,
135
  "output_tokens": 1000,
136
  "temperature": 0.7
137
+ },
138
+ "o1-mini": {
139
+ "description": "OpenAI Reasoning Model - Mini",
140
+ "max_tokens": 180000,
141
+ "output_tokens": 1000,
142
+ "temperature": 0.7
143
+ },
144
+ "o1": {
145
+ "description": "OpenAI Reasoning Model - Standard",
146
+ "max_tokens": 200000,
147
+ "output_tokens": 1200,
148
+ "temperature": 0.7
149
+ },
150
+ "o1-pro": {
151
+ "description": "OpenAI Reasoning Model - Professional",
152
+ "max_tokens": 200000,
153
+ "output_tokens": 1500,
154
+ "temperature": 0.7
155
+ },
156
+ "o3-mini": {
157
+ "description": "OpenAI Advanced Reasoning - Mini",
158
+ "max_tokens": 200000,
159
+ "output_tokens": 1000,
160
+ "temperature": 0.7
161
+ },
162
+ "o3-mini-2025-01-31": {
163
+ "description": "OpenAI Advanced Reasoning - Enhanced",
164
+ "max_tokens": 200000,
165
+ "output_tokens": 1200,
166
+ "temperature": 0.7
167
  }
168
  }
169
 
170
# Function to call OpenAI API
def get_ai_response(prompt, history):
    """Send the conversation to the OpenAI chat API and return the reply text.

    Falls back to canned demo responses when no API key is configured.
    On API errors this returns a human-readable error string instead of
    raising, so the Streamlit UI can render it like a normal assistant turn.

    Args:
        prompt: The user's latest message.
        history: Prior turns as dicts with "role" and "content" keys.

    Returns:
        The assistant's reply text, or an error/demo message string.
    """
    if not openai_api_key:
        return get_demo_response(prompt)

    try:
        # System message first, then the prior turns, then the new prompt.
        messages = [
            {"role": "system", "content": "You are a helpful assistant that provides clear, concise, and accurate information."}
        ]
        for msg in history:
            messages.append({"role": msg["role"], "content": msg["content"]})
        messages.append({"role": "user", "content": prompt})

        # Per-model generation limits; unknown models fall back to gpt-3.5-turbo's.
        model = st.session_state.selected_model
        model_config = AVAILABLE_MODELS.get(model, AVAILABLE_MODELS["gpt-3.5-turbo"])
        output_tokens = model_config["output_tokens"]
        temperature = model_config["temperature"]

        response = client.chat.completions.create(
            model=model,
            messages=messages,
            temperature=temperature,
            max_tokens=output_tokens,
            stream=False
        )
        return response.choices[0].message.content

    # BUG FIX: the openai>=1.0 SDK (which provides openai.OpenAI, used above)
    # removed the `openai.error` module; referencing `openai.error.*` in an
    # except clause raised AttributeError and masked the real API error.
    # In v1 the exception classes live directly on the `openai` package, and
    # InvalidRequestError was replaced by BadRequestError.
    except openai.AuthenticationError as auth_err:
        return f"Authentication error: {str(auth_err)}. Check your API key."
    except openai.BadRequestError as inv_err:
        return f"Invalid request: {str(inv_err)}. The model '{model}' might not be available with your API key."
    except Exception as e:
        return f"An error occurred: {str(e)}. Please try a different model or check your setup."
 
203
 
204
# Demo mode responses for when no API key is available
def get_demo_response(prompt):
    """Return a canned reply for demo mode (no OpenAI API key configured).

    Matching is case-insensitive substring search, checked in a fixed
    priority order; varied categories pick a random line from their pool.
    """
    text = prompt.lower()

    greetings = [
        "Hello! How can I assist you today?",
        "Hi there! I'm a demo AI assistant. What can I help you with?",
        "Greetings! I'm running in demo mode. Feel free to ask simple questions."
    ]
    farewells = [
        "Goodbye! Have a great day!",
        "Farewell! Come back soon!",
        "Take care! It was nice chatting with you."
    ]
    info_responses = [
        "I'm a simple AI assistant running in demo mode. To use the full features, please provide an OpenAI API key.",
        "This is a demo version with limited capabilities. For a better experience, add your OpenAI API key.",
        "I'm just demonstrating basic functionality. Get a free API key from OpenAI to unlock my full potential!"
    ]
    reasoning_examples = [
        "This is a demonstration of how I would process a reasoning task. In a real scenario with the full model, I would analyze the problem step by step, consider multiple angles, and provide a detailed explanation.",
        "When solving problems, I would typically break them down into smaller parts, examine each component, and build towards a comprehensive solution. This demo just simulates that process.",
        "Reasoning typically involves identifying key facts, applying logical rules, and drawing conclusions based on available information. With a proper API key, I could demonstrate this more effectively."
    ]

    # Ordered (keywords, response pool) dispatch — first match wins.
    keyword_pools = [
        (("hello", "hi", "hey", "greetings"), greetings),
        (("bye", "goodbye", "farewell", "see you"), farewells),
        (("who are you", "what are you", "tell me about yourself", "what can you do"), info_responses),
        (("think", "reason", "analyze", "solve", "explain", "why", "how"), reasoning_examples),
    ]
    for needles, pool in keyword_pools:
        if any(needle in text for needle in needles):
            return random.choice(pool)

    if "weather" in text:
        return "I'm sorry, I don't have access to real-time weather data in demo mode."
    if any(word in text for word in ("help", "assist", "support")):
        return "To get better assistance, please add your OpenAI API key. You can get one for free at https://platform.openai.com/account/api-keys."
    return "I'm running in demo mode with limited responses. For a full conversation experience, please add your OpenAI API key above."
242
+
243
# Function to create a new conversation
def create_new_chat():
    """Create a fresh, empty conversation and make it the active one."""
    fresh_id = str(uuid.uuid4())
    timestamp = datetime.now().strftime('%H:%M')
    # Placeholder title; replaced by update_conversation_title on first message.
    st.session_state.current_conversation_id = fresh_id
    st.session_state.conversations[fresh_id] = {
        "title": f"New chat {timestamp}",
        "messages": [],
    }
251
+
252
# Function to update conversation title based on first message
def update_conversation_title(conv_id, user_message):
    """Replace a placeholder "New chat …" title with a snippet of the first user message."""
    conv = st.session_state.conversations[conv_id]
    if not conv["title"].startswith("New chat"):
        return  # already renamed — keep the user-derived title
    # Truncate long messages to 30 chars with an ellipsis.
    snippet = user_message if len(user_message) <= 30 else user_message[:30] + "..."
    conv["title"] = snippet
258
+
259
# Function to delete a conversation
def delete_conversation(conv_id):
    """Remove a conversation; if it was active, switch to another (or start a new chat)."""
    st.session_state.conversations.pop(conv_id, None)
    if conv_id == st.session_state.current_conversation_id:
        remaining = st.session_state.conversations
        if remaining:
            # Fall back to the first surviving conversation (dict insertion order).
            st.session_state.current_conversation_id = next(iter(remaining))
        else:
            create_new_chat()
268
+
269
# Create a two-column layout: narrow conversation list, wide chat area.
sidebar, main_content = st.columns([1, 3])

# Sidebar (conversation history)
with sidebar:
    st.sidebar.title("Conversations")

    if st.sidebar.button("+ New Chat", use_container_width=True):
        create_new_chat()
        st.rerun()

    st.sidebar.markdown("---")

    # Allow pasting a key at runtime when none came from the environment.
    if not openai_api_key:
        st.sidebar.info("⚠️ No OpenAI API key found.", icon="ℹ️")
        typed_key = st.sidebar.text_input("Enter OpenAI API Key", type="password")
        if typed_key:
            openai_api_key = typed_key
            client = openai.OpenAI(api_key=openai_api_key)

    # Model picker — options are shown as "name - description" labels.
    st.sidebar.subheader("Model Selection")
    model_names = list(AVAILABLE_MODELS.keys())
    labels = [f"{model} - {AVAILABLE_MODELS[model]['description']}" for model in model_names]
    default_index = (
        model_names.index(st.session_state.selected_model)
        if st.session_state.selected_model in model_names
        else 0
    )

    picked_label = st.sidebar.selectbox(
        "Choose a model:",
        labels,
        index=default_index
    )

    # Map the chosen label back to its model name and persist the change.
    selected_model = model_names[labels.index(picked_label)]
    if selected_model != st.session_state.selected_model:
        st.session_state.selected_model = selected_model
        st.sidebar.info(f"Model set to {selected_model}")

    with st.sidebar.expander("Model Details"):
        model_info = AVAILABLE_MODELS[selected_model]
        st.write(f"**Description:** {model_info['description']}")
        st.write(f"**Max tokens:** {model_info['max_tokens']}")
        st.write(f"**Default temperature:** {model_info['temperature']}")
        st.write("""
        **Note:** Some models may not be available with your current API key.
        If you encounter an error, try selecting a different model.
        """)

    st.sidebar.markdown("---")

    # One row per conversation: title button to switch, trash button to delete.
    for cid, meta in st.session_state.conversations.items():
        title_col, trash_col = st.sidebar.columns([4, 1])
        active = cid == st.session_state.current_conversation_id

        with title_col:
            if st.button(
                meta["title"],
                key=f"conv_{cid}",
                use_container_width=True,
                type="secondary" if active else "tertiary"
            ):
                st.session_state.current_conversation_id = cid
                st.rerun()

        with trash_col:
            if st.button("🗑️", key=f"del_{cid}"):
                delete_conversation(cid)
                st.rerun()
335
+
336
# Main content area: render the active conversation and handle new input.
with main_content:
    st.write("")  # Add some space at the top

    current_id = st.session_state.current_conversation_id
    # Fall back to an empty message list if the id is somehow stale.
    current_conv = st.session_state.conversations.get(current_id, {"messages": []})
    messages = current_conv["messages"]

    chat_container = st.container()
    with chat_container:
        # Replay the stored transcript.
        for turn in messages:
            with st.chat_message(turn["role"]):
                st.markdown(turn["content"])

    prompt = st.chat_input("What's on your mind?")

    if prompt:
        messages.append({"role": "user", "content": prompt})

        # The very first message names the conversation.
        if len(messages) == 1:
            update_conversation_title(current_id, prompt)

        with st.chat_message("user"):
            st.markdown(prompt)

        with st.chat_message("assistant"):
            placeholder = st.empty()
            # History excludes the prompt we just appended.
            full_response = get_ai_response(prompt, messages[:-1])
            # Simulated typing: reveal one character at a time with a cursor glyph.
            shown = ""
            for ch in full_response:
                shown += ch
                placeholder.markdown(shown + "▌")
                time.sleep(0.005)
            placeholder.markdown(full_response)
            messages.append({"role": "assistant", "content": full_response})
        st.rerun()