neuralleap committed (verified)
Commit 077dbb1 · 1 Parent(s): f98f7c7

Create app_old.py

Files changed (1)
  1. app_old.py +418 -0
app_old.py ADDED
@@ -0,0 +1,418 @@
+
+ import streamlit as st
+ import requests
+ from dotenv import load_dotenv
+ import os
+ import sys
+ import time
+ import random
+ import uuid
+ from datetime import datetime
+ import openai
+
+ # Load environment variables
+ load_dotenv()
+
+ # Set page config with a wider layout
+ st.set_page_config(
+     page_title="GPT-Style Chat Assistant",
+     page_icon="🤖",
+     layout="wide"
+ )
+
+ # Add custom CSS for better styling
+ st.markdown("""
+ <style>
+     .main-content {
+         max-width: 800px;
+         margin: 0 auto;
+         padding: 1rem;
+     }
+     .chat-message {
+         padding: 1.5rem;
+         border-radius: 0.5rem;
+         margin-bottom: 1rem;
+         display: flex;
+         flex-direction: column;
+     }
+     .user-message {
+         background-color: #f0f2f6;
+     }
+     .assistant-message {
+         background-color: #e6f3f7;
+     }
+     .chat-input {
+         position: fixed;
+         bottom: 0;
+         width: 100%;
+         padding: 1rem;
+         background-color: white;
+     }
+     .sidebar-conv {
+         padding: 0.5rem 1rem;
+         border-radius: 0.5rem;
+         margin-bottom: 0.5rem;
+         cursor: pointer;
+         transition: background-color 0.3s;
+     }
+     .sidebar-conv:hover {
+         background-color: #f0f2f6;
+     }
+     .active-conv {
+         background-color: #e6f3f7;
+         font-weight: bold;
+     }
+ </style>
+ """, unsafe_allow_html=True)
+
+ # Initialize session state
+ if "conversations" not in st.session_state:
+     st.session_state.conversations = {}
+
+ if "current_conversation_id" not in st.session_state:
+     new_id = str(uuid.uuid4())
+     st.session_state.current_conversation_id = new_id
+     st.session_state.conversations[new_id] = {
+         "title": f"New chat {datetime.now().strftime('%H:%M')}",
+         "messages": []
+     }
+
+ # Initialize selected model
+ if "selected_model" not in st.session_state:
+     st.session_state.selected_model = "gpt-3.5-turbo"
+
+ # Get OpenAI API key from environment or let user enter it
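+ # (Added note: load_dotenv() above means the key can live in a .env file next to
+ # this script, e.g. OPENAI_API_KEY_NEW_3=... ; the variable name below is
+ # project-specific.)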
+ # openai_api_key = os.getenv("OPENAI_API_KEY")
+ # openai_api_key = os.getenv("OPENAI_API_KEY_NEW_1")
+ openai_api_key = os.getenv("OPENAI_API_KEY_NEW_3")
+
+ # Configure OpenAI client
+ if openai_api_key:
+     client = openai.OpenAI(api_key=openai_api_key)
+
+ # Available models with descriptions and token limits
+ AVAILABLE_MODELS = {
+     "gpt-3.5-turbo": {
+         "description": "Fast and cost-effective",
+         "max_tokens": 4096,
+         "output_tokens": 500,
+         "temperature": 0.7
+     },
+     "gpt-4": {
+         "description": "More capable but slower",
+         "max_tokens": 8192,
+         "output_tokens": 800,
+         "temperature": 0.7
+     },
+     "gpt-3.5-turbo-16k": {
+         "description": "Longer context window",
+         "max_tokens": 16384,
+         "output_tokens": 1000,
+         "temperature": 0.7
+     },
+     "gpt-4-turbo": {
+         "description": "Most powerful model (if available)",
+         "max_tokens": 128000,
+         "output_tokens": 1200,
+         "temperature": 0.7
+     },
+     "gpt-4o": {
+         "description": "Latest GPT-4 Omni model",
+         "max_tokens": 128000,
+         "output_tokens": 1200,
+         "temperature": 0.7
+     },
+     "gpt-4o-mini": {
+         "description": "Efficient version of GPT-4o",
+         "max_tokens": 128000,
+         "output_tokens": 1000,
+         "temperature": 0.7
+     },
+
+     "o1-mini": {
+         "description": "OpenAI Reasoning Model - Mini",
+         "max_tokens": 180000,
+         "output_tokens": 1000,
+         "temperature": 0.7
+     },
+     "o1": {
+         "description": "OpenAI Reasoning Model - Standard",
+         "max_tokens": 200000,
+         "output_tokens": 1200,
+         "temperature": 0.7
+     },
+     "o1-pro": {
+         "description": "OpenAI Reasoning Model - Professional",
+         "max_tokens": 200000,
+         "output_tokens": 1500,
+         "temperature": 0.7
+     },
+     "o3-mini": {
+         "description": "OpenAI Advanced Reasoning - Mini",
+         "max_tokens": 200000,
+         "output_tokens": 1000,
+         "temperature": 0.7
+     },
+     "o3-mini-2025-01-31": {
+         "description": "OpenAI Advanced Reasoning - Enhanced",
+         "max_tokens": 200000,
+         "output_tokens": 1200,
+         "temperature": 0.7
+     }
+ }
+
+ # Function to call OpenAI API
+ def get_ai_response(prompt, history):
+     # Use demo mode if no API key is provided
+     if not openai_api_key:
+         return get_demo_response(prompt)
+
+     try:
+         # Format messages for API
+         messages = []
+
+         # Add system message
+         system_message = "You are a helpful assistant that provides clear, concise, and accurate information."
+         messages.append({
+             "role": "system",
+             "content": system_message
+         })
+
+         # Add conversation history
+         for msg in history:
+             messages.append({
+                 "role": msg["role"],
+                 "content": msg["content"]
+             })
+
+         # Add the current prompt
+         messages.append({
+             "role": "user",
+             "content": prompt
+         })
+
+         # Get model configuration
+         model = st.session_state.selected_model
+         model_config = AVAILABLE_MODELS.get(model, AVAILABLE_MODELS["gpt-3.5-turbo"])
+         output_tokens = model_config["output_tokens"]
+         temperature = model_config["temperature"]
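+         # (Added note: the o1/o3 reasoning models listed in AVAILABLE_MODELS may
+         # reject the temperature and max_tokens arguments used below; newer API
+         # versions expect max_completion_tokens for those models, so calls with
+         # them selected can fail.)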
+
+         # Call OpenAI API
+         response = client.chat.completions.create(
+             model=model,
+             messages=messages,
+             temperature=temperature,
+             max_tokens=output_tokens,
+             stream=False
+         )
+
+         # Extract the response
+         return response.choices[0].message.content
+
+     except Exception as e:
+         st.error(f"An error occurred: {str(e)}")
+         return "I'm sorry, I encountered an error while processing your request. Please check your OpenAI API key or try again later."
+
+ # Demo mode responses for when no API key is available
+ def get_demo_response(prompt):
+     prompt_lower = prompt.lower()
+
+     # Simple response templates
+     greetings = [
+         "Hello! How can I assist you today?",
+         "Hi there! I'm a demo AI assistant. What can I help you with?",
+         "Greetings! I'm running in demo mode. Feel free to ask simple questions."
+     ]
+
+     farewells = [
+         "Goodbye! Have a great day!",
+         "Farewell! Come back soon!",
+         "Take care! It was nice chatting with you."
+     ]
+
+     info_responses = [
+         "I'm a simple AI assistant running in demo mode. To use the full features, please provide an OpenAI API key.",
+         "This is a demo version with limited capabilities. For a better experience, add your OpenAI API key.",
+         "I'm just demonstrating basic functionality. Get a free API key from OpenAI to unlock my full potential!"
+     ]
+
+     reasoning_examples = [
+         "This is a demonstration of how I would process a reasoning task. In a real scenario with the full model, I would analyze the problem step by step, consider multiple angles, and provide a detailed explanation.",
+         "When solving problems, I would typically break them down into smaller parts, examine each component, and build towards a comprehensive solution. This demo just simulates that process.",
+         "Reasoning typically involves identifying key facts, applying logical rules, and drawing conclusions based on available information. With a proper API key, I could demonstrate this more effectively."
+     ]
+
+     # Simple pattern matching
+     if any(word in prompt_lower for word in ["hello", "hi", "hey", "greetings"]):
+         return random.choice(greetings)
+     elif any(word in prompt_lower for word in ["bye", "goodbye", "farewell", "see you"]):
+         return random.choice(farewells)
+     elif any(phrase in prompt_lower for phrase in ["who are you", "what are you", "tell me about yourself", "what can you do"]):
+         return random.choice(info_responses)
+     elif any(word in prompt_lower for word in ["think", "reason", "analyze", "solve", "explain", "why", "how"]):
+         return random.choice(reasoning_examples)
+     elif "weather" in prompt_lower:
+         return "I'm sorry, I don't have access to real-time weather data in demo mode."
+     elif any(word in prompt_lower for word in ["help", "assist", "support"]):
+         return "To get better assistance, please add your OpenAI API key. You can get one for free at https://platform.openai.com/account/api-keys."
+     else:
+         return "I'm running in demo mode with limited responses. For a full conversation experience, please add your OpenAI API key above."
+
+ # Function to create a new conversation
+ def create_new_chat():
+     new_id = str(uuid.uuid4())
+     st.session_state.current_conversation_id = new_id
+     st.session_state.conversations[new_id] = {
+         "title": f"New chat {datetime.now().strftime('%H:%M')}",
+         "messages": []
+     }
+
+ # Function to update conversation title based on first message
+ def update_conversation_title(conv_id, user_message):
+     current_title = st.session_state.conversations[conv_id]["title"]
+     if current_title.startswith("New chat"):
+         # Limit title length to prevent overflow
+         new_title = user_message[:30] + "..." if len(user_message) > 30 else user_message
+         st.session_state.conversations[conv_id]["title"] = new_title
+
+ # Function to delete a conversation
+ def delete_conversation(conv_id):
+     if conv_id in st.session_state.conversations:
+         del st.session_state.conversations[conv_id]
+         # If we deleted the current conversation, set a new one
+         if conv_id == st.session_state.current_conversation_id:
+             if st.session_state.conversations:
+                 st.session_state.current_conversation_id = next(iter(st.session_state.conversations))
+             else:
+                 create_new_chat()
+
+ # Create a two-column layout
+ sidebar, main_content = st.columns([1, 3])
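+ # (Added note: the widgets below use st.sidebar.* directly, so the conversation
+ # list renders in Streamlit's built-in sidebar and the narrow first column from
+ # st.columns above stays mostly empty.)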
+
+ # Sidebar (conversation history)
+ with sidebar:
+     st.sidebar.title("Conversations")
+
+     # Add a new chat button
+     if st.sidebar.button("+ New Chat", use_container_width=True):
+         create_new_chat()
+         st.rerun()
+
+     st.sidebar.markdown("---")
+
+     # API token input in sidebar if not available
+     if not openai_api_key:
+         st.sidebar.info("⚠️ No OpenAI API key found.", icon="ℹ️")
+         entered_token = st.sidebar.text_input("Enter OpenAI API Key", type="password")
+         if entered_token:
+             openai_api_key = entered_token
+             client = openai.OpenAI(api_key=openai_api_key)
+
+     # Model selection dropdown
+     st.sidebar.subheader("Model Selection")
+     model_options = list(AVAILABLE_MODELS.keys())
+     model_descriptions = [f"{model} - {AVAILABLE_MODELS[model]['description']}" for model in model_options]
+     selected_model_index = model_options.index(st.session_state.selected_model) if st.session_state.selected_model in model_options else 0
+
+     selected_description = st.sidebar.selectbox(
+         "Choose a model:",
+         model_descriptions,
+         index=selected_model_index
+     )
+
+     # Extract model name from description
+     selected_model = model_options[model_descriptions.index(selected_description)]
+     if selected_model != st.session_state.selected_model:
+         st.session_state.selected_model = selected_model
+         st.sidebar.info(f"Model set to {selected_model}")
+
+     # Show model details
+     with st.sidebar.expander("Model Details"):
+         model_info = AVAILABLE_MODELS[selected_model]
+         st.write(f"**Description:** {model_info['description']}")
+         st.write(f"**Max tokens:** {model_info['max_tokens']}")
+         st.write(f"**Default temperature:** {model_info['temperature']}")
+         st.write("""
+         **Note:** Some models may not be available with your current API key.
+         If you encounter an error, try selecting a different model.
+         """)
+
+     st.sidebar.markdown("---")
+
+     # Display conversation history
+     for conv_id, conv_data in st.session_state.conversations.items():
+         col1, col2 = st.sidebar.columns([4, 1])
+         is_active = conv_id == st.session_state.current_conversation_id
+
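+         # (Added note: the "tertiary" button type used below only exists in newer
+         # Streamlit releases; on older versions this raises an error and
+         # "secondary" would be the safe fallback.)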
+         with col1:
+             if st.button(
+                 conv_data["title"],
+                 key=f"conv_{conv_id}",
+                 use_container_width=True,
+                 type="secondary" if is_active else "tertiary"
+             ):
+                 st.session_state.current_conversation_id = conv_id
+                 st.rerun()
+
+         with col2:
+             if st.button("🗑️", key=f"del_{conv_id}"):
+                 delete_conversation(conv_id)
+                 st.rerun()
+
+ # Main content area
+ with main_content:
+     st.write("")  # Add some space at the top
+
+     # Get current conversation
+     current_id = st.session_state.current_conversation_id
+     current_conv = st.session_state.conversations.get(current_id, {"messages": []})
+     messages = current_conv["messages"]
+
+     # Create a container for the chat area (scrollable)
+     chat_container = st.container()
+
+     # Display chat messages
+     with chat_container:
+         for i, message in enumerate(messages):
+             with st.chat_message(message["role"]):
+                 st.markdown(message["content"])
+
+     # Chat input at the bottom
+     prompt = st.chat_input("What's on your mind?")
+
+     if prompt:
+         # Add user message to the current conversation
+         messages.append({"role": "user", "content": prompt})
+
+         # Update conversation title if this is the first message
+         if len(messages) == 1:
+             update_conversation_title(current_id, prompt)
+
+         # Display user message
+         with st.chat_message("user"):
+             st.markdown(prompt)
+
+         # Display assistant response with typing animation
+         with st.chat_message("assistant"):
+             message_placeholder = st.empty()
+
+             # Get response from AI
+             full_response = get_ai_response(prompt, messages[:-1])
+
+             # Simulate typing
+             displayed_response = ""
+             for i in range(len(full_response)):
+                 displayed_response += full_response[i]
+                 message_placeholder.markdown(displayed_response + "▌")
+                 time.sleep(0.005)  # Slightly faster typing
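+                 # (Added note: updating the placeholder one character per rerender
+                 # is simple but can feel slow for long replies; appending several
+                 # characters per update would likely look smoother.)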
+
+             # Display final response
+             message_placeholder.markdown(full_response)
+
+         # Add assistant response to the conversation
+         messages.append({"role": "assistant", "content": full_response})
+
+         # Force a rerun to update the sidebar with the new conversation title
+         st.rerun()