add system prompt
app.py
CHANGED
```diff
@@ -30,7 +30,11 @@ if "current_conversation_id" not in st.session_state:
     }
 
 if "selected_model" not in st.session_state:
-    st.session_state.selected_model = "gpt-
+    st.session_state.selected_model = "gpt-4o"  # Default to gpt-4o
+
+# Initialize System Prompt
+if "system_prompt" not in st.session_state:
+    st.session_state.system_prompt = "You are a helpful assistant providing clear, concise, and accurate information."
 
 # Get OpenAI API key from environment or user input
 openai_api_key = os.getenv("OPENAI_API_KEY_NEW_3")
```
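The guarded assignments above are Streamlit's standard initialize-once idiom: the script reruns top to bottom on every interaction, so a default is written to `st.session_state` only when the key is absent, and later edits survive reruns. A minimal self-contained sketch of the pattern (the trailing `st.write` is illustrative only):

```python
import streamlit as st

# Write the default only on the first run; subsequent reruns keep
# whatever value the user (or another widget) has stored since.
if "system_prompt" not in st.session_state:
    st.session_state.system_prompt = (
        "You are a helpful assistant providing clear, concise, and accurate information."
    )

st.write(st.session_state.system_prompt)  # illustrative only
```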
```diff
@@ -44,75 +48,32 @@ if openai_api_key:
 
 # Available models with descriptions and token limits
 AVAILABLE_MODELS = {
-    "gpt-3.5-turbo": {
-        "description": "Fast and cost-effective",
-        "max_tokens": 4096,
-        "output_tokens": 500,
-        "temperature": 0.7
-    },
-    "gpt-3.5-turbo-16k": {
-        "description": "Longer context window",
-        "max_tokens": 16384,
-        "output_tokens": 1000,
-        "temperature": 0.7
-    },
-    "gpt-4": {
-        "description": "More capable but slower",
-        "max_tokens": 8192,
-        "output_tokens": 800,
-        "temperature": 0.7
-    },
-    "gpt-4-turbo": {
-        "description": "Most powerful model (if available)",
-        "max_tokens": 128000,
-        "output_tokens": 1200,
-        "temperature": 0.7
-    },
     "gpt-4o": {
         "description": "Latest GPT-4 Omni model",
         "max_tokens": 128000,
         "output_tokens": 1200,
-        "temperature": 0.7
-    },
-    "gpt-4o-mini": {
-        "description": "Efficient version of GPT-4o",
-        "max_tokens": 128000,
-        "output_tokens": 1000,
-        "temperature": 0.7
-    },
-    "o1-mini": {
-        "description": "OpenAI Reasoning Model - Mini",
-        "max_tokens": 180000,
-        "output_tokens": 1000,
-        "temperature": 0.7
+        "temperature": None  # Does not support temperature
     },
     "o1": {
         "description": "OpenAI Reasoning Model - Standard",
         "max_tokens": 200000,
         "output_tokens": 1200,
-        "temperature": 0.7
-    },
-    "o1-pro": {
-        "description": "OpenAI Reasoning Model - Professional",
-        "max_tokens": 200000,
-        "output_tokens": 1500,
-        "temperature": 0.7
+        "temperature": None  # Does not support temperature
     },
     "o3-mini": {
         "description": "OpenAI Advanced Reasoning - Mini",
         "max_tokens": 200000,
         "output_tokens": 1000,
-        "temperature": 0.7
-    },
-    "o3-mini-2025-01-31": {
-        "description": "OpenAI Advanced Reasoning - Enhanced",
-        "max_tokens": 200000,
-        "output_tokens": 1200,
-        "temperature": 0.7
+        "temperature": None  # Does not support temperature
     }
 }
 
-
+# List of models that do NOT support temperature
+models_without_temperature = [
+    "gpt-4o",
+    "o1",
+    "o3-mini"
+]
 
 # Function to call OpenAI API
 def get_ai_response(prompt, history):
```
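Since `AVAILABLE_MODELS` now stores `"temperature": None` for every remaining model and `models_without_temperature` lists the same three names, the branch in `get_ai_response` (next hunk) can equivalently be expressed by building the request kwargs once and attaching `temperature` only when it is supported. A hedged sketch, not the app's actual code; `build_request_kwargs` is a hypothetical helper, and some reasoning models (the o1 family) expect `max_completion_tokens` rather than `max_tokens`, so the sketch simply keeps the diff's parameter names:

```python
from openai import OpenAI

client = OpenAI()  # assumes an API key is available in the environment

models_without_temperature = ["gpt-4o", "o1", "o3-mini"]

def build_request_kwargs(model, model_config, messages):
    # Hypothetical helper: one kwargs dict instead of two near-identical calls.
    kwargs = {
        "model": model,
        "messages": messages,
        "max_tokens": model_config["output_tokens"],
        "stream": False,
    }
    if model not in models_without_temperature:
        kwargs["temperature"] = model_config["temperature"]
    return kwargs

# Example (assumes the AVAILABLE_MODELS dict from the hunk above):
# response = client.chat.completions.create(
#     **build_request_kwargs("gpt-4o", AVAILABLE_MODELS["gpt-4o"], messages)
# )
```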
```diff
@@ -120,25 +81,22 @@ def get_ai_response(prompt, history):
         return get_demo_response(prompt)
 
     try:
+        # Use the system prompt from the session state
+        system_prompt = st.session_state.system_prompt
+
+        # Construct the messages with the system prompt first
         messages = [
-            {"role": "system", "content":
+            {"role": "system", "content": system_prompt}
         ]
         for msg in history:
             messages.append({"role": msg["role"], "content": msg["content"]})
         messages.append({"role": "user", "content": prompt})
 
         model = st.session_state.selected_model
-        model_config = AVAILABLE_MODELS.get(model
+        model_config = AVAILABLE_MODELS.get(model)
 
         # Check if the model does NOT support temperature
-        models_without_temperature = [
-            "o3-mini",
-            "o1",
-            "gpt-4o",
-            "o3-mini-2025-01-31"
-        ]
-
-        if any(model.startswith(prefix) for prefix in models_without_temperature):
+        if model in models_without_temperature:
             # Models that do not support temperature
             response = client.chat.completions.create(
                 model=model,
@@ -147,7 +105,7 @@ def get_ai_response(prompt, history):
                 stream=False
             )
         else:
-            # Models that support temperature
+            # Models that support temperature (not needed here but kept for future flexibility)
             response = client.chat.completions.create(
                 model=model,
                 messages=messages,
@@ -161,8 +119,6 @@ def get_ai_response(prompt, history):
     except Exception as e:
         return f"An error occurred: {str(e)}."
 
-
-
 # Function to create a new conversation
 def create_new_chat():
     new_id = str(uuid.uuid4())
```
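The message-assembly step in the hunk above is easy to pull into a pure function and unit-test: system prompt first, stored history next, new user turn last. `build_messages` is a hypothetical name, not from the app:

```python
def build_messages(system_prompt, history, prompt):
    # The system prompt always leads, so every model call sees the same persona.
    messages = [{"role": "system", "content": system_prompt}]
    for msg in history:
        messages.append({"role": msg["role"], "content": msg["content"]})
    messages.append({"role": "user", "content": prompt})
    return messages

# Quick check of the ordering:
msgs = build_messages("Be brief.", [{"role": "user", "content": "hi"}], "bye")
assert [m["role"] for m in msgs] == ["system", "user", "user"]
```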
```diff
@@ -172,10 +128,19 @@ def create_new_chat():
         "messages": []
     }
 
-# Sidebar for model selection and conversation management
 # Sidebar for model selection and conversation management
 with st.sidebar:
     st.title("Model Selection")
+
+    # Textbox for System Prompt
+    st.markdown("### System Prompt")
+    system_prompt = st.text_area(
+        "Enter System Prompt:",
+        value=st.session_state.system_prompt,
+        height=100
+    )
+    st.session_state.system_prompt = system_prompt  # Update the session state
+
     selected_model = st.selectbox(
         "Choose a model:",
         list(AVAILABLE_MODELS.keys()),
```
```diff
@@ -190,27 +155,25 @@ with st.sidebar:
     st.markdown(f"**Description:** {model_info['description']}")
     st.markdown(f"**Max Tokens:** {model_info['max_tokens']}")
 
-    # Display temperature
-    models_without_temperature = [
-        "o3-mini",
-        "o1",
-        "gpt-4o",
-        "o3-mini-2025-01-31"
-    ]
-    if not any(selected_model.startswith(prefix) for prefix in models_without_temperature):
-        st.markdown(f"**Temperature:** {model_info['temperature']}")
-    else:
+    # Display temperature information
+    if selected_model in models_without_temperature:
         st.markdown("**Temperature:** Not supported for this model")
+    else:
+        st.markdown(f"**Temperature:** {model_info['temperature']}")
 
     st.markdown("---")
 
-
 # Main chat window
 with st.container():
     current_id = st.session_state.current_conversation_id
     current_conv = st.session_state.conversations.get(current_id, {"messages": []})
     messages = current_conv["messages"]
 
+    # Display system prompt at the top of the chat
+    with st.chat_message("system"):
+        st.markdown(f"**System Prompt:** {st.session_state.system_prompt}")
+
+    # Display chat messages
     for message in messages:
         with st.chat_message(message["role"]):
             st.markdown(message["content"])
```
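For reference, the sidebar round-trip in the last two hunks reduces to a small self-contained app: the text area is seeded from session state and its value written straight back, so whatever the user types is what `get_ai_response` reads on the next turn. A minimal sketch:

```python
import streamlit as st

if "system_prompt" not in st.session_state:
    st.session_state.system_prompt = "You are a helpful assistant."

with st.sidebar:
    st.markdown("### System Prompt")
    system_prompt = st.text_area(
        "Enter System Prompt:",
        value=st.session_state.system_prompt,
        height=100,
    )
    st.session_state.system_prompt = system_prompt  # write-back, as in the diff

# Downstream code reads the live value:
with st.chat_message("system"):
    st.markdown(f"**System Prompt:** {st.session_state.system_prompt}")
```

A terser equivalent is `st.text_area("Enter System Prompt:", key="system_prompt")`, which binds the widget to the session-state key directly and drops the manual write-back.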