Added saved prompts functionality
Changed files:
- app.py (+88, -8)
- saved_prompts.json (+4, -0)

app.py (CHANGED)
@@ -10,6 +10,34 @@ from dotenv import load_dotenv
 from llama_index.core.settings import Settings
 from llama_index.core.llms import ChatMessage, MessageRole
 from llama_index.llms.groq import Groq
+import json
+from pathlib import Path
+
+# Define helper functions first
+def load_saved_prompts():
+    """Load saved prompts from JSON file"""
+    prompts_file = Path("saved_prompts.json")
+    if prompts_file.exists():
+        with open(prompts_file, "r") as f:
+            return json.load(f)
+    return {"Default Freud": st.session_state.system_prompt}
+
+def save_prompt(name, prompt):
+    """Save a prompt to JSON file"""
+    prompts_file = Path("saved_prompts.json")
+    prompts = load_saved_prompts()
+    prompts[name] = prompt
+    with open(prompts_file, "w") as f:
+        json.dump(prompts, f, indent=2)
+
+def delete_prompt(name):
+    """Delete a prompt from JSON file"""
+    prompts_file = Path("saved_prompts.json")
+    prompts = load_saved_prompts()
+    if name in prompts:
+        del prompts[name]
+        with open(prompts_file, "w") as f:
+            json.dump(prompts, f, indent=2)
 
 # Page config
 st.set_page_config(
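The helpers above treat saved_prompts.json as a flat name-to-text JSON mapping kept next to app.py. Below is a minimal sketch of that round trip using only json and pathlib; it runs in a temporary directory so it never touches the app's real file, the prompt names are made up for illustration, and the fallback branch of load_saved_prompts (which reads st.session_state) is sidestepped by writing the file first.

# Sketch of the on-disk format the helpers above read and write (hypothetical names).
import json
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    prompts_file = Path(tmp) / "saved_prompts.json"

    # What a save_prompt("Terse Freud", ...) call is expected to leave on disk
    prompts = {
        "Default Freud": "You are Sigmund Freud...",
        "Terse Freud": "You are Sigmund Freud. Answer in one sentence.",
    }
    prompts_file.write_text(json.dumps(prompts, indent=2))

    # load_saved_prompts() is essentially a json.load of the same file
    assert json.loads(prompts_file.read_text()) == prompts

    # delete_prompt("Terse Freud") drops the key and rewrites the file
    del prompts["Terse Freud"]
    prompts_file.write_text(json.dumps(prompts, indent=2))
    print(list(json.loads(prompts_file.read_text())))  # ['Default Freud']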
@@ -213,25 +241,77 @@ with tab2:
     # System prompt editor in an expander with help text above
     with options_container:
         st.info("💡 The system prompt defines the AI's persona and behavior. It's like giving stage directions to an actor.")
+
+        # Load saved prompts
+        saved_prompts = load_saved_prompts()
+
+        # Create a container for the prompt selection controls
+        prompt_container = st.container()
+
+        # Use columns with better proportions and explicit spacing
+        prompt_col1, prompt_col2 = st.columns([4, 1], gap="small")
+
+        with prompt_col1:
+            selected_prompt = st.selectbox(
+                "",  # Empty label
+                options=list(saved_prompts.keys()),
+                index=0,
+                label_visibility="collapsed"  # Hide the label completely
+            )
+
+        with prompt_col2:
+            load_button = st.button("Load Selected", use_container_width=True, key="load_selected")
+            if load_button:
+                st.session_state.system_prompt = saved_prompts[selected_prompt]
+                st.rerun()
+
         with st.expander("System Prompt"):
-            st.text_area(
+            current_prompt = st.text_area(
                 "Edit System Prompt",
                 value=st.session_state.system_prompt,
-                height=
+                height=200,  # Increased height for better visibility
                 help="This prompt sets the AI's personality and behavior. When RAG is enabled, relevant passages will be automatically added after this prompt.",
                 key="system_prompt",
                 on_change=lambda: setattr(st.session_state, 'system_prompt', st.session_state.system_prompt)
             )
+
+            st.markdown("---")  # Add separator for better visual organization
+
+            # Adjusted save prompt layout
+            save_col1, save_col2 = st.columns([3, 1])
+            with save_col1:
+                new_prompt_name = st.text_input(
+                    "Prompt name",
+                    placeholder="Enter name to save current prompt",
+                    label_visibility="collapsed"  # Removes label for cleaner look
+                )
+            with save_col2:
+                if st.button("Save", use_container_width=True) and new_prompt_name:
+                    save_prompt(new_prompt_name, current_prompt)
+                    st.success(f"Saved prompt: {new_prompt_name}")
+                    st.rerun()
+
+            st.markdown("---")  # Add separator
+
+            # Center the delete button and make it smaller
+            delete_col1, delete_col2, delete_col3 = st.columns([1, 2, 1])
+            with delete_col2:
+                if st.button("Delete Selected Prompt", use_container_width=True, type="secondary") and selected_prompt != "Default Freud":
+                    delete_prompt(selected_prompt)
+                    st.success(f"Deleted prompt: {selected_prompt}")
+                    st.rerun()
+
+        st.markdown("---")  # Add separator before RAG controls
 
-        #
-
-        with
+        # Adjusted RAG controls layout
+        rag_col1, rag_col2, rag_col3 = st.columns([2, 2, 1])
+        with rag_col1:
             st.session_state.use_rag = st.toggle(
                 "Enable RAG (Retrieval Augmented Generation)",
                 value=st.session_state.get('use_rag', True),
                 key='rag_toggle'
             )
-        with
+        with rag_col2:
             if st.session_state.use_rag:
                 num_chunks = st.slider(
                     "Number of chunks to retrieve",
@@ -240,8 +320,8 @@ with tab2:
                     value=st.session_state.get('num_chunks', 1),
                     key='num_chunks_slider'
                 )
-        with
-        if st.button("Clear Chat", use_container_width=True):
+        with rag_col3:
+            if st.button("Clear Chat", use_container_width=True, type="secondary"):
                 st.session_state.messages = []
                 st.session_state.sources = {}
                 st.rerun()
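The load flow above writes the chosen prompt into st.session_state.system_prompt and calls st.rerun(), so the text area keyed on "system_prompt" redraws with the new value on the next script run. A stripped-down, hypothetical page that exercises just that pattern (same widget key, prompts hard-coded instead of read from disk) is sketched below; it relies on the widget key alone rather than also passing value=, one common way to keep widget state and session state from drifting apart. Run it with: streamlit run prompt_picker_sketch.py

# prompt_picker_sketch.py (hypothetical, not part of this commit)
import streamlit as st

PROMPTS = {  # stands in for load_saved_prompts()
    "Default Freud": "You are Sigmund Freud...",
    "Freud test haikus": "You are Sigmund Freud but you answer only through haikus",
}

if "system_prompt" not in st.session_state:
    st.session_state.system_prompt = PROMPTS["Default Freud"]

selected = st.selectbox("Saved prompts", options=list(PROMPTS.keys()))
if st.button("Load Selected"):
    # Set the state behind the text area, then rerun so it redraws with it
    st.session_state.system_prompt = PROMPTS[selected]
    st.rerun()

current_prompt = st.text_area("Edit System Prompt", key="system_prompt", height=200)
st.caption(f"Active prompt length: {len(current_prompt)} characters")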
saved_prompts.json (ADDED)

@@ -0,0 +1,4 @@
+{
+  "Default Freud": "You are Sigmund Freud, speaking from your historical context and perspective. As the founder of psychoanalysis, you should:\n\n1. Only engage with topics related to:\n - Psychoanalysis and its theories\n - Dreams and their interpretation\n - The unconscious mind\n - Human sexuality and development\n - Your published works and case studies\n - Your historical context and contemporaries\n\n2. Politely decline to answer:\n - Questions about events after your death in 1939\n - Medical advice or diagnosis\n - Topics outside your expertise or historical context\n - Personal matters unrelated to psychoanalysis\n\n3. Maintain your characteristic style:\n - Speak with authority on psychoanalytic matters\n - Use psychoanalytic terminology when appropriate\n - Reference your own works and theories\n - Interpret questions through a psychoanalytic lens\n\nIf a question is inappropriate or outside your scope, explain why you cannot answer it from your perspective as Freud.",
+  "Freud test haikus": "You are Sigmund Freud but you answer only through haikus"
+}
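The committed file is the same flat mapping the helpers expect: its keys become the options in the prompt selectbox and its values become the system prompt text. A quick shape check, assuming it is run from the repository root where saved_prompts.json lives:

# Validate the expected shape of saved_prompts.json (flat str -> str mapping)
import json
from pathlib import Path

prompts = json.loads(Path("saved_prompts.json").read_text(encoding="utf-8"))

assert isinstance(prompts, dict)
assert all(isinstance(k, str) and isinstance(v, str) for k, v in prompts.items())
assert "Default Freud" in prompts  # the entry the delete button refuses to remove
print(f"{len(prompts)} saved prompt(s): {', '.join(prompts)}")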