Geraldine committed on
Commit
939a951
·
verified ·
1 Parent(s): 267b371

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +262 -262
app.py CHANGED
@@ -1,262 +1,262 @@
1
- import json
2
- import streamlit as st
3
- from streamlit_modal import Modal
4
- import streamlit.components.v1 as components
5
- from clients import OllamaClient, NvidiaClient, GroqClient
6
-
7
- st.set_page_config(
8
- page_title="Prompts Library",
9
- layout="wide",
10
- )
11
-
12
- # Cache the header of the app to prevent re-rendering on each load
13
- @st.cache_resource
14
- def display_app_header():
15
- """Display the header of the Streamlit app."""
16
- st.title("Prompts Library")
17
- st.subheader("ChatBot with prompt templates")
18
-
19
- # Display the header of the app
20
- display_app_header()
21
-
22
- # Some style
23
- st.markdown(
24
- '<style>div[key^="edit-modal"] {top: 25px;}</style>', unsafe_allow_html=True
25
- )
26
-
27
-
28
- # UI sidebar ##########################################
29
- def ui_llm(provider):
30
- if api_token := st.sidebar.text_input("Enter your API Key", key=f"API_{provider}"):
31
- provider_models = llm_providers[st.session_state["llm_provider"]](
32
- api_key=api_token
33
- ).list_models_names()
34
- if provider_models:
35
- llm = st.sidebar.radio(
36
- "Select your model", provider_models, key="llm"
37
- )
38
- else:
39
- st.sidebar.error("Ollama is not running, or there is a problem with the selected LLM provider")
40
- else:
41
- st.sidebar.warning("You must enter your API key")
42
-
43
- st.sidebar.subheader("Models")
44
-
45
- # LLM
46
- llm_providers = {
47
- "Cloud Groq": GroqClient,
48
- "Cloud Nvidia": NvidiaClient,
49
- "Local Ollama": OllamaClient,
50
- }
51
- if llm_provider := st.sidebar.radio(
52
- "Choose your LLM Provider", llm_providers.keys(), key="llm_provider"
53
- ):
54
- ui_llm(st.session_state["llm_provider"])
55
-
56
- # LLM parameters
57
- st.sidebar.subheader("Parameters")
58
- max_tokens = st.sidebar.number_input("Token numbers", value=1024, key="max_tokens")
59
- temperature = st.sidebar.slider(
60
- "Temperature", min_value=0.0, max_value=1.0, value=0.5, step=0.1, key="temperature"
61
- )
62
- top_p = st.sidebar.slider(
63
- "Top P", min_value=0.0, max_value=1.0, value=0.7, step=0.1, key="top_p"
64
- )
65
-
66
- # helpers functions ########################################
67
-
68
- def edit_form(form_name, title=None, source=None, system=None, user=None):
69
- """
70
- Creates a form for editing a prompt template.
71
-
72
- Args:
73
- form_name: The name of the form.
74
- title: The title of the prompt template (optional).
75
- source: The source of the prompt template (optional).
76
- system: The system example instruction (optional).
77
- user: The user example instruction (optional).
78
-
79
- Returns:
80
- None
81
- """
82
- with st.form(form_name, clear_on_submit=False, border=True):
83
- new_title = st.text_input("Name", value=title)
84
- new_source = st.text_input("Source", value=source)
85
- new_system = st.text_area("System example instruction", value=system)
86
- new_user = st.text_area("User example instruction", value=user)
87
- if st.form_submit_button("Submit"):
88
- rec = {
89
- "title": new_title,
90
- "source": new_source,
91
- "messages": [
92
- {"role": "system", "content": new_system},
93
- {"role": "user", "content": new_user},
94
- ],
95
- }
96
- if title is not None:
97
- delete_prompt(title)
98
- add_prompt(rec)
99
-
100
-
101
- def read_prompts_file():
102
- """
103
- Loads the prompts from the "prompts.json" file.
104
-
105
- Returns:
106
- A list of prompt templates.
107
- """
108
- prompts_file = open("prompts.json", encoding="utf-8")
109
- return json.load(prompts_file)
110
-
111
-
112
- def add_prompt(rec):
113
- """
114
- Adds a new prompt to the "prompts.json" file.
115
-
116
- Args:
117
- rec: The new prompt to add.
118
-
119
- Returns:
120
- None
121
- """
122
- with open("prompts.json", "r", encoding="utf-8") as fp:
123
- listObj = json.load(fp)
124
- listObj.append(rec)
125
- print(listObj)
126
- with open("prompts.json", "w") as outfile:
127
- outfile.write(json.dumps(listObj, indent=4, sort_keys=True))
128
- st.rerun()
129
-
130
-
131
- def edit_prompt(title):
132
- """
133
- Edits a prompt template.
134
-
135
- Args:
136
- title: The title of the prompt to edit.
137
-
138
- Returns:
139
- A dictionary containing the edited prompt information.
140
- """
141
- with open("prompts.json", "r", encoding="utf-8") as fp:
142
- listObj = json.load(fp)
143
- rec = [i for i in listObj if i["title"].strip() == title.strip()]
144
- rec_messages = rec[0]["messages"]
145
- return edit_form(
146
- "prompt_edit",
147
- title=title,
148
- source=[x["source"] for x in rec][0],
149
- system=[x["content"] for x in rec_messages if x["role"] == "system"][0],
150
- user=[x["content"] for x in rec_messages if x["role"] == "user"][0],
151
- )
152
-
153
-
154
- def delete_prompt(title):
155
- """
156
- Removes a prompt template from the "prompts.json" file.
157
-
158
- Args:
159
- title: The title of the prompt to delete.
160
- """
161
- with open("prompts.json", "r", encoding="utf-8") as fp:
162
- listObj = json.load(fp)
163
- recs = [i for i in listObj if not (i["title"].strip() == title.strip())]
164
- with open("prompts.json", "w") as outfile:
165
- outfile.write(json.dumps(recs, indent=4, sort_keys=True))
166
-
167
-
168
- def get_llm_response(system, prompt):
169
- """
170
- Generates a response from the selected LLM.
171
-
172
- Args:
173
- system: The system input from the user.
174
- prompt: The user prompt.
175
-
176
- Returns:
177
- The response from the LLM.
178
- """
179
- options = dict(
180
- max_tokens=st.session_state["max_tokens"],
181
- top_p=st.session_state["top_p"],
182
- temperature=st.session_state["temperature"],
183
- )
184
- return llm_providers[st.session_state["llm_provider"]](
185
- api_key=st.session_state[f"API_{st.session_state['llm_provider']}"],
186
- model=st.session_state["llm"],
187
- ).api_chat_completion(system, prompt, **options)
188
-
189
-
190
- def generate(system, prompt):
191
- st.session_state.messages.append({"role": "system", "content": system})
192
- st.session_state.messages.append({"role": "user", "content": prompt})
193
- with st.chat_message("assistant"):
194
- response = get_llm_response(
195
- llm_providers[st.session_state["llm_provider"]], system, prompt
196
- )
197
- st.markdown(response)
198
- # Add assistant response to chat history
199
- st.session_state.messages.append({"role": "assistant", "content": response})
200
-
201
-
202
- # UI main #####################################################
203
-
204
- tab1, tab2 = st.tabs(["Prompts Library", "Chatbot"])
205
- with tab1:
206
- new_modal = Modal(
207
- "Add prompt",
208
- key="edit-modal",
209
- )
210
- if new_prompt_modal := st.button("➕ Add a prompt template"):
211
- new_modal.open()
212
- if new_modal.is_open():
213
- with new_modal.container():
214
- edit_form("prompt_add")
215
- prompts = read_prompts_file()
216
- grids = range(1, len(prompts) + 1)
217
- cols = st.columns([1, 1])
218
- wcol = 2
219
- for f, b in zip(prompts, grids):
220
- col = cols[b % wcol]
221
- with col:
222
- with st.expander(f["title"].upper()):
223
- if st.button(f"✔️ Select prompt {f['title'].upper()} and go to Chatbot tab", type="secondary"):
224
- # can do better here
225
- st.session_state["init_messages"] = f["messages"]
226
- st.session_state.init_system = f["messages"][0]["content"]
227
- st.session_state.init_user = f["messages"][1]["content"]
228
- edit_modal = Modal(
229
- f"Edit prompt {f['title'].upper()}",
230
- key=f"edit-modal_{f['title']}",
231
- )
232
- if edit_prompt_modal := st.button(
233
- f"✏️ Edit {f['title'].upper()}", type="secondary"
234
- ):
235
- edit_modal.open()
236
- if edit_modal.is_open():
237
- with edit_modal.container():
238
- edit_prompt(f["title"])
239
- st.write(f"Source : {f['source']}")
240
- st.markdown(f"- System : {f['messages'][0]['content']}")
241
- st.markdown(f"- User: {f['messages'][1]['content']}")
242
- st.divider()
243
- if st.button(f"❌ Delete prompt {f['title'].upper()}", type="primary"):
244
- delete_prompt(f["title"])
245
- st.rerun()
246
- with tab2:
247
- # Initialize chat history
248
- if "messages" not in st.session_state:
249
- st.session_state.messages = []
250
- # Display chat messages from history on app rerun
251
- for message in st.session_state.messages:
252
- with st.chat_message(message["role"]):
253
- st.markdown(message["content"])
254
- # React to user input
255
- if "init_messages" in st.session_state:
256
- system = st.text_area(":blue[System]", key="init_system")
257
- prompt = st.text_area(":blue[User]", key="init_user")
258
- else:
259
- system = st.text_area(":blue[System]")
260
- prompt = st.text_area(":blue[User]")
261
- if st.button("Generate", type="primary"):
262
- generate(system, prompt)
 
1
+ import json
2
+ import streamlit as st
3
+ from streamlit_modal import Modal
4
+ import streamlit.components.v1 as components
5
+ from clients import OllamaClient, NvidiaClient, GroqClient
6
+
7
# Configure the page first — Streamlit requires this before any other call.
st.set_page_config(page_title="Prompts Library", layout="wide")
11
+
12
+ # Cache the header of the app to prevent re-rendering on each load
13
@st.cache_resource
def display_app_header():
    """Render the page title and subtitle (cached so it is built only once)."""
    st.title("Prompts Library")
    st.subheader("ChatBot with prompt templates")
18
+
19
# Display the header of the app
display_app_header()

# Some style: nudge the edit modal down from the top of the viewport.
st.markdown('<style>div[key^="edit-modal"] {top: 25px;}</style>', unsafe_allow_html=True)
26
+
27
+
28
+ # UI sidebar ##########################################
29
def ui_llm(provider):
    """Render the sidebar controls for the selected LLM provider.

    Asks for the provider's API key, then lists that provider's models in a
    radio selector. The chosen model is persisted in st.session_state["llm"]
    via the widget key.

    Args:
        provider: Display name of the selected provider (a key of
            ``llm_providers``).
    """
    if api_token := st.sidebar.text_input("Enter your API Key", type="password", key=f"API_{provider}"):
        provider_models = llm_providers[st.session_state["llm_provider"]](
            api_key=api_token
        ).list_models_names()
        if provider_models:
            # The selection is stored in st.session_state["llm"] by the
            # widget key; the return value was previously bound to an
            # unused local, which has been removed.
            st.sidebar.radio("Select your model", provider_models, key="llm")
        else:
            st.sidebar.error("Ollama is not running, or there is a problem with the selected LLM provider")
    else:
        st.sidebar.warning("You must enter your API key")
42
+
43
+ st.sidebar.subheader("Models")
44
+
45
+ # LLM
46
+ llm_providers = {
47
+ "Cloud Groq": GroqClient,
48
+ "Cloud Nvidia": NvidiaClient,
49
+ "Local Ollama": OllamaClient,
50
+ }
51
+ if llm_provider := st.sidebar.radio(
52
+ "Choose your LLM Provider", llm_providers.keys(), key="llm_provider"
53
+ ):
54
+ ui_llm(st.session_state["llm_provider"])
55
+
56
+ # LLM parameters
57
+ st.sidebar.subheader("Parameters")
58
+ max_tokens = st.sidebar.number_input("Token numbers", value=1024, key="max_tokens")
59
+ temperature = st.sidebar.slider(
60
+ "Temperature", min_value=0.0, max_value=1.0, value=0.5, step=0.1, key="temperature"
61
+ )
62
+ top_p = st.sidebar.slider(
63
+ "Top P", min_value=0.0, max_value=1.0, value=0.7, step=0.1, key="top_p"
64
+ )
65
+
66
+ # helpers functions ########################################
67
+
68
def edit_form(form_name, title=None, source=None, system=None, user=None):
    """Render a form to create or update a prompt template.

    When ``title`` is given, the form acts as an editor: on submit the old
    record is deleted before the updated one is appended.

    Args:
        form_name: The name of the form.
        title: The title of the prompt template (optional).
        source: The source of the prompt template (optional).
        system: The system example instruction (optional).
        user: The user example instruction (optional).

    Returns:
        None
    """
    with st.form(form_name, clear_on_submit=False, border=True):
        edited_title = st.text_input("Name", value=title)
        edited_source = st.text_input("Source", value=source)
        edited_system = st.text_area("System example instruction", value=system)
        edited_user = st.text_area("User example instruction", value=user)
        if not st.form_submit_button("Submit"):
            return
        record = {
            "title": edited_title,
            "source": edited_source,
            "messages": [
                {"role": "system", "content": edited_system},
                {"role": "user", "content": edited_user},
            ],
        }
        # Editing = delete the old record, then add the new one.
        if title is not None:
            delete_prompt(title)
        add_prompt(record)
99
+
100
+
101
def read_prompts_file():
    """Load the prompt templates from the "prompts.json" file.

    Returns:
        A list of prompt template dicts.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the previous version leaked the open file object).
    with open("prompts.json", encoding="utf-8") as prompts_file:
        return json.load(prompts_file)
110
+
111
+
112
def add_prompt(rec):
    """Append a new prompt template to the "prompts.json" file.

    The whole library is re-serialized, then the Streamlit script is rerun so
    the UI picks up the new entry.

    Args:
        rec: The new prompt record (dict with "title", "source", "messages").

    Returns:
        None
    """
    with open("prompts.json", "r", encoding="utf-8") as fp:
        prompts = json.load(fp)
    prompts.append(rec)
    # Explicit encoding on the write side for consistency with the read;
    # also removed a leftover debug print of the whole library.
    with open("prompts.json", "w", encoding="utf-8") as outfile:
        json.dump(prompts, outfile, indent=4, sort_keys=True)
    st.rerun()
129
+
130
+
131
def edit_prompt(title):
    """Open the edit form pre-filled with an existing prompt template.

    Args:
        title: The title of the prompt to edit.

    Returns:
        The result of edit_form() for the matching record.
    """
    with open("prompts.json", "r", encoding="utf-8") as fp:
        all_prompts = json.load(fp)
    # Match on the whitespace-stripped title.
    matches = [p for p in all_prompts if p["title"].strip() == title.strip()]
    messages = matches[0]["messages"]
    system_text = [m["content"] for m in messages if m["role"] == "system"][0]
    user_text = [m["content"] for m in messages if m["role"] == "user"][0]
    return edit_form(
        "prompt_edit",
        title=title,
        source=[p["source"] for p in matches][0],
        system=system_text,
        user=user_text,
    )
152
+
153
+
154
def delete_prompt(title):
    """Remove a prompt template from the "prompts.json" file.

    Matching is done on the whitespace-stripped title.

    Args:
        title: The title of the prompt to delete.
    """
    with open("prompts.json", "r", encoding="utf-8") as fp:
        prompts = json.load(fp)
    kept = [p for p in prompts if p["title"].strip() != title.strip()]
    # Explicit encoding on the write side for consistency with the read.
    with open("prompts.json", "w", encoding="utf-8") as outfile:
        json.dump(kept, outfile, indent=4, sort_keys=True)
166
+
167
+
168
def get_llm_response(system, prompt):
    """Ask the currently selected LLM for a chat completion.

    Args:
        system: The system instruction text.
        prompt: The user prompt text.

    Returns:
        The completion returned by the provider client.
    """
    # Provider choice, API key, model and sampling parameters all live in
    # st.session_state, written by the sidebar widgets.
    provider = st.session_state["llm_provider"]
    client = llm_providers[provider](
        api_key=st.session_state[f"API_{provider}"],
        model=st.session_state["llm"],
    )
    return client.api_chat_completion(
        system,
        prompt,
        max_tokens=st.session_state["max_tokens"],
        top_p=st.session_state["top_p"],
        temperature=st.session_state["temperature"],
    )
188
+
189
+
190
def generate(system, prompt):
    """Send the system/user prompt to the selected LLM and render the answer.

    Both inputs and the assistant response are appended to the chat history
    kept in st.session_state.messages.

    Args:
        system: System instruction text.
        prompt: User prompt text.
    """
    st.session_state.messages.append({"role": "system", "content": system})
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("assistant"):
        # Bug fix: get_llm_response(system, prompt) takes exactly two
        # arguments; the provider class was wrongly passed as an extra first
        # argument, which raised a TypeError at call time.
        response = get_llm_response(system, prompt)
        st.markdown(response)
        # Add assistant response to chat history
        st.session_state.messages.append({"role": "assistant", "content": response})
200
+
201
+
202
+ # UI main #####################################################
203
+
204
tab1, tab2 = st.tabs(["Prompts Library", "Chatbot"])
with tab1:
    # Modal used for creating a brand new prompt template.
    new_modal = Modal(
        "Add prompt",
        key="edit-modal",
    )
    if new_prompt_modal := st.button("➕ Add a prompt template"):
        new_modal.open()
    if new_modal.is_open():
        with new_modal.container():
            edit_form("prompt_add")
    prompts = read_prompts_file()
    # 1-based positions so consecutive prompts alternate between the two
    # columns of the grid below.
    grids = range(1, len(prompts) + 1)
    cols = st.columns([1, 1])
    wcol = 2  # number of grid columns
    for f, b in zip(prompts, grids):
        col = cols[b % wcol]
        with col:
            with st.expander(f["title"].upper()):
                if st.button(f"✔️ Select prompt {f['title'].upper()} and go to Chatbot tab", type="secondary"):
                    # can do better here
                    # Seed the Chatbot tab's text areas with this template
                    # (assumes messages[0] is system and messages[1] is user).
                    st.session_state["init_messages"] = f["messages"]
                    st.session_state.init_system = f["messages"][0]["content"]
                    st.session_state.init_user = f["messages"][1]["content"]
                # One edit modal per prompt, keyed by title to keep them distinct.
                edit_modal = Modal(
                    f"Edit prompt {f['title'].upper()}",
                    key=f"edit-modal_{f['title']}",
                )
                if edit_prompt_modal := st.button(
                    f"✏️ Edit {f['title'].upper()}", type="secondary"
                ):
                    edit_modal.open()
                if edit_modal.is_open():
                    with edit_modal.container():
                        edit_prompt(f["title"])
                st.write(f"Source : {f['source']}")
                st.markdown(f"- System : {f['messages'][0]['content']}")
                st.markdown(f"- User: {f['messages'][1]['content']}")
                st.divider()
                if st.button(f"❌ Delete prompt {f['title'].upper()}", type="primary"):
                    delete_prompt(f["title"])
                    st.rerun()
with tab2:
    # Initialize chat history
    if "messages" not in st.session_state:
        st.session_state.messages = []
    # Display chat messages from history on app rerun
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
    # React to user input
    # When a template was selected in tab1, bind the text areas to the
    # pre-seeded session-state keys so they come up pre-filled.
    if "init_messages" in st.session_state:
        system = st.text_area(":blue[System]", key="init_system")
        prompt = st.text_area(":blue[User]", key="init_user")
    else:
        system = st.text_area(":blue[System]")
        prompt = st.text_area(":blue[User]")
    if st.button("Generate", type="primary"):
        generate(system, prompt)