neuralleap committed on
Commit
37d11e8
·
verified ·
1 Parent(s): 0cf126f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +80 -64
app.py CHANGED
@@ -11,7 +11,7 @@ import random
11
  # Load environment variables
12
  load_dotenv()
13
 
14
- # Set page config with a wider layout
15
  st.set_page_config(
16
  page_title="GPT-Style Chat Assistant",
17
  page_icon="🤖",
@@ -21,7 +21,7 @@ st.set_page_config(
21
  # Initialize session state
22
  if "conversations" not in st.session_state:
23
  st.session_state.conversations = {}
24
-
25
  if "current_conversation_id" not in st.session_state:
26
  new_id = str(uuid.uuid4())
27
  st.session_state.current_conversation_id = new_id
@@ -30,14 +30,11 @@ if "current_conversation_id" not in st.session_state:
30
  "messages": []
31
  }
32
 
33
- # Initialize selected model
34
  if "selected_model" not in st.session_state:
35
  st.session_state.selected_model = "gpt-3.5-turbo"
36
 
37
- # Get OpenAI API key from environment or let user enter it
38
  openai_api_key = os.getenv("OPENAI_API_KEY_NEW_3")
39
-
40
- # Configure OpenAI client
41
  if openai_api_key:
42
  openai.api_key = openai_api_key
43
 
@@ -49,11 +46,65 @@ AVAILABLE_MODELS = {
49
  "output_tokens": 500,
50
  "temperature": 0.7
51
  },
 
 
 
 
 
 
52
  "gpt-4": {
53
  "description": "More capable but slower",
54
  "max_tokens": 8192,
55
  "output_tokens": 800,
56
  "temperature": 0.7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
57
  }
58
  }
59
 
@@ -64,7 +115,7 @@ def get_ai_response(prompt, history):
64
 
65
  try:
66
  messages = [
67
- {"role": "system", "content": "You are a helpful assistant that provides clear, concise, and accurate information."}
68
  ]
69
  for msg in history:
70
  messages.append({"role": msg["role"], "content": msg["content"]})
@@ -72,18 +123,16 @@ def get_ai_response(prompt, history):
72
 
73
  model = st.session_state.selected_model
74
  model_config = AVAILABLE_MODELS.get(model, AVAILABLE_MODELS["gpt-3.5-turbo"])
75
- output_tokens = model_config["output_tokens"]
76
- temperature = model_config["temperature"]
77
 
78
  response = openai.ChatCompletion.create(
79
  model=model,
80
  messages=messages,
81
- temperature=temperature,
82
- max_tokens=output_tokens,
83
  stream=False
84
  )
85
  return response['choices'][0]['message']['content']
86
-
87
  except openai.error.AuthenticationError as auth_err:
88
  return f"Authentication error: {str(auth_err)}. Check your API key."
89
  except openai.error.InvalidRequestError as inv_err:
@@ -97,18 +146,6 @@ def get_ai_response(prompt, history):
97
  except Exception as e:
98
  return f"An unexpected error occurred: {str(e)}."
99
 
100
- # Demo mode responses for when no API key is available
101
- def get_demo_response(prompt):
102
- greetings = ["Hello! How can I assist you today?", "Hi there! What can I help you with?"]
103
- farewells = ["Goodbye! Have a great day!", "Take care! It was nice chatting with you."]
104
-
105
- if any(word in prompt.lower() for word in ["hello", "hi", "hey"]):
106
- return random.choice(greetings)
107
- elif any(word in prompt.lower() for word in ["bye", "goodbye", "farewell"]):
108
- return random.choice(farewells)
109
- else:
110
- return "I'm running in demo mode. Provide an OpenAI API key for full functionality."
111
-
112
  # Function to create a new conversation
113
  def create_new_chat():
114
  new_id = str(uuid.uuid4())
@@ -118,64 +155,43 @@ def create_new_chat():
118
  "messages": []
119
  }
120
 
121
- # Create a two-column layout
122
- sidebar, main_content = st.columns([1, 3])
123
-
124
- # Sidebar (conversation history)
125
- with sidebar:
126
- st.sidebar.title("Conversations")
127
-
128
- if st.sidebar.button("+ New Chat", use_container_width=True):
129
  create_new_chat()
130
  st.rerun()
131
-
132
- st.sidebar.markdown("---")
133
-
134
- if not openai_api_key:
135
- st.sidebar.info("⚠️ No OpenAI API key found.", icon="ℹ️")
136
- entered_token = st.sidebar.text_input("Enter OpenAI API Key", type="password")
137
- if entered_token:
138
- openai_api_key = entered_token
139
- openai.api_key = openai_api_key
140
-
141
- st.sidebar.subheader("Model Selection")
142
- model_options = list(AVAILABLE_MODELS.keys())
143
- selected_model = st.sidebar.selectbox("Choose a model:", model_options)
144
  st.session_state.selected_model = selected_model
145
 
146
- st.sidebar.markdown("---")
147
 
148
- # Main content area
149
- with main_content:
150
- st.write("") # Add some space at the top
151
-
152
  current_id = st.session_state.current_conversation_id
153
  current_conv = st.session_state.conversations.get(current_id, {"messages": []})
154
  messages = current_conv["messages"]
155
 
156
- chat_container = st.container()
157
-
158
- with chat_container:
159
- for message in messages:
160
- with st.chat_message(message["role"]):
161
- st.markdown(message["content"])
162
 
163
  prompt = st.chat_input("What's on your mind?")
164
 
165
  if prompt:
166
  messages.append({"role": "user", "content": prompt})
167
-
168
  with st.chat_message("user"):
169
  st.markdown(prompt)
170
 
171
  with st.chat_message("assistant"):
172
- message_placeholder = st.empty()
173
  full_response = get_ai_response(prompt, messages[:-1])
174
- displayed_response = ""
175
- for i in range(len(full_response)):
176
- displayed_response += full_response[i]
177
- message_placeholder.markdown(displayed_response + "▌")
178
- time.sleep(0.005)
179
- message_placeholder.markdown(full_response)
180
  messages.append({"role": "assistant", "content": full_response})
181
  st.rerun()
 
11
  # Load environment variables
12
  load_dotenv()
13
 
14
+ # Set page config
15
  st.set_page_config(
16
  page_title="GPT-Style Chat Assistant",
17
  page_icon="🤖",
 
21
  # Initialize session state
22
  if "conversations" not in st.session_state:
23
  st.session_state.conversations = {}
24
+
25
  if "current_conversation_id" not in st.session_state:
26
  new_id = str(uuid.uuid4())
27
  st.session_state.current_conversation_id = new_id
 
30
  "messages": []
31
  }
32
 
 
33
  if "selected_model" not in st.session_state:
34
  st.session_state.selected_model = "gpt-3.5-turbo"
35
 
36
+ # Get OpenAI API key from environment or user input
37
  openai_api_key = os.getenv("OPENAI_API_KEY_NEW_3")
 
 
38
  if openai_api_key:
39
  openai.api_key = openai_api_key
40
 
 
46
  "output_tokens": 500,
47
  "temperature": 0.7
48
  },
49
+ "gpt-3.5-turbo-16k": {
50
+ "description": "Longer context window",
51
+ "max_tokens": 16384,
52
+ "output_tokens": 1000,
53
+ "temperature": 0.7
54
+ },
55
  "gpt-4": {
56
  "description": "More capable but slower",
57
  "max_tokens": 8192,
58
  "output_tokens": 800,
59
  "temperature": 0.7
60
+ },
61
+ "gpt-4-turbo": {
62
+ "description": "Most powerful model (if available)",
63
+ "max_tokens": 128000,
64
+ "output_tokens": 1200,
65
+ "temperature": 0.7
66
+ },
67
+ "gpt-4o": {
68
+ "description": "Latest GPT-4 Omni model",
69
+ "max_tokens": 128000,
70
+ "output_tokens": 1200,
71
+ "temperature": 0.7
72
+ },
73
+ "gpt-4o-mini": {
74
+ "description": "Efficient version of GPT-4o",
75
+ "max_tokens": 128000,
76
+ "output_tokens": 1000,
77
+ "temperature": 0.7
78
+ },
79
+ "o1-mini": {
80
+ "description": "OpenAI Reasoning Model - Mini",
81
+ "max_tokens": 180000,
82
+ "output_tokens": 1000,
83
+ "temperature": 0.7
84
+ },
85
+ "o1": {
86
+ "description": "OpenAI Reasoning Model - Standard",
87
+ "max_tokens": 200000,
88
+ "output_tokens": 1200,
89
+ "temperature": 0.7
90
+ },
91
+ "o1-pro": {
92
+ "description": "OpenAI Reasoning Model - Professional",
93
+ "max_tokens": 200000,
94
+ "output_tokens": 1500,
95
+ "temperature": 0.7
96
+ },
97
+ "o3-mini": {
98
+ "description": "OpenAI Advanced Reasoning - Mini",
99
+ "max_tokens": 200000,
100
+ "output_tokens": 1000,
101
+ "temperature": 0.7
102
+ },
103
+ "o3-mini-2025-01-31": {
104
+ "description": "OpenAI Advanced Reasoning - Enhanced",
105
+ "max_tokens": 200000,
106
+ "output_tokens": 1200,
107
+ "temperature": 0.7
108
  }
109
  }
110
 
 
115
 
116
  try:
117
  messages = [
118
+ {"role": "system", "content": "You are a helpful assistant providing clear, concise, and accurate information."}
119
  ]
120
  for msg in history:
121
  messages.append({"role": msg["role"], "content": msg["content"]})
 
123
 
124
  model = st.session_state.selected_model
125
  model_config = AVAILABLE_MODELS.get(model, AVAILABLE_MODELS["gpt-3.5-turbo"])
 
 
126
 
127
  response = openai.ChatCompletion.create(
128
  model=model,
129
  messages=messages,
130
+ temperature=model_config["temperature"],
131
+ max_tokens=model_config["output_tokens"],
132
  stream=False
133
  )
134
  return response['choices'][0]['message']['content']
135
+
136
  except openai.error.AuthenticationError as auth_err:
137
  return f"Authentication error: {str(auth_err)}. Check your API key."
138
  except openai.error.InvalidRequestError as inv_err:
 
146
  except Exception as e:
147
  return f"An unexpected error occurred: {str(e)}."
148
 
 
 
 
 
 
 
 
 
 
 
 
 
149
  # Function to create a new conversation
150
  def create_new_chat():
151
  new_id = str(uuid.uuid4())
 
155
  "messages": []
156
  }
157
 
158
+ # Sidebar for model selection and conversation management
159
+ with st.sidebar:
160
+ st.title("Conversations")
161
+ if st.button("+ New Chat"):
 
 
 
 
162
  create_new_chat()
163
  st.rerun()
164
+
165
+ st.markdown("---")
166
+ st.subheader("Model Selection")
167
+ selected_model = st.selectbox(
168
+ "Choose a model:",
169
+ list(AVAILABLE_MODELS.keys()),
170
+ index=list(AVAILABLE_MODELS.keys()).index(st.session_state.selected_model)
171
+ )
 
 
 
 
 
172
  st.session_state.selected_model = selected_model
173
 
174
+ st.markdown("---")
175
 
176
+ # Main chat window
177
+ with st.container():
 
 
178
  current_id = st.session_state.current_conversation_id
179
  current_conv = st.session_state.conversations.get(current_id, {"messages": []})
180
  messages = current_conv["messages"]
181
 
182
+ for message in messages:
183
+ with st.chat_message(message["role"]):
184
+ st.markdown(message["content"])
 
 
 
185
 
186
  prompt = st.chat_input("What's on your mind?")
187
 
188
  if prompt:
189
  messages.append({"role": "user", "content": prompt})
 
190
  with st.chat_message("user"):
191
  st.markdown(prompt)
192
 
193
  with st.chat_message("assistant"):
 
194
  full_response = get_ai_response(prompt, messages[:-1])
195
+ st.markdown(full_response)
 
 
 
 
 
196
  messages.append({"role": "assistant", "content": full_response})
197
  st.rerun()