Update app.py
app.py
CHANGED
@@ -126,67 +126,22 @@ tabs = st.tabs(["Chat", "URL and Tools", "User Description", "Developers"])
 # Tab 1: Chat
 if tabs[0]:
     with st.expander("Chat"):
-        #
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        # Handle submission with the selected inference URL
-        response = handle_submission(user_message, selected_tools, url_endpoint)
-
-        with st.chat_message("assistant"):
-            if response is None:
-                st.warning("The agent's response is None. Please try again. Generate an image of a flying horse.")
-            elif isinstance(response, Image.Image):
-                st.image(response)
-            elif isinstance(response, AudioSegment):
-                st.audio(response)
-            elif isinstance(response, int):
-                st.markdown(response)
-            elif isinstance(response, str):
-                if "emojified_text" in response:
-                    st.markdown(f"{response['emojified_text']}")
-                else:
-                    st.markdown(response)
-            elif isinstance(response, list):
-                for item in response:
-                    st.markdown(item)  # Assuming the list contains strings
-            elif isinstance(response, pd.DataFrame):
-                st.dataframe(response)
-            elif isinstance(response, pd.Series):
-                st.table(response.iloc[0:10])
-            elif isinstance(response, dict):
-                st.json(response)
-            elif isinstance(response, st.graphics_altair.AltairChart):
-                st.altair_chart(response)
-            elif isinstance(response, st.graphics_bokeh.BokehChart):
-                st.bokeh_chart(response)
-            elif isinstance(response, st.graphics_graphviz.GraphvizChart):
-                st.graphviz_chart(response)
-            elif isinstance(response, st.graphics_plotly.PlotlyChart):
-                st.plotly_chart(response)
-            elif isinstance(response, st.graphics_pydeck.PydeckChart):
-                st.pydeck_chart(response)
-            elif isinstance(response, matplotlib.figure.Figure):
-                st.pyplot(response)
-            elif isinstance(response, streamlit.graphics_vega_lite.VegaLiteChart):
-                st.vega_lite_chart(response)
-            else:
-                st.warning("Unrecognized response type. Please try again. e.g. Generate an image of a flying horse.")
-
-        st.session_state.messages.append({"role": "assistant", "content": response})
-
+        # Code for URL and Tools checkboxes
+
+        # Examples for the user perspective
+        st.markdown("### Examples:")
+        st.markdown("1. **Generate a Random Character**:")
+        st.markdown("   - Choose the desired URL and the 'Random Character Tool'.")
+
+        st.markdown("2. **Sentiment Analysis**:")
+        st.markdown("   - Choose the desired URL and the 'Sentiment Analysis Tool'.")
+        st.markdown("   - Sample: What is the sentiment for \"Hello, I am happy\"?")
+
+        st.markdown("3. **Word Count**:")
+        st.markdown("   - Choose the desired URL and the 'Word Counter Tool'.")
+        st.markdown("   - Sample: Count the words in \"Hello, I am Christof\".")
+
+
 # Tab 2: URL and Tools
 elif tabs[1]:
     with st.expander("URL and Tools"):
@@ -330,4 +285,64 @@ elif tabs[3]:
         - Ensure proper configuration, such as setting the Hugging Face token as an environment variable.

         ''')
-
+# Chat code (user input, agent responses, etc.)
+if "messages" not in st.session_state:
+    st.session_state.messages = []
+
+for message in st.session_state.messages:
+    with st.chat_message(message["role"]):
+        st.markdown(message["content"])
+
+with st.chat_message("assistant"):
+    st.markdown("Hello there! How can I assist you today?")
+
+if user_message := st.chat_input("Enter message"):
+    st.chat_message("user").markdown(user_message)
+    st.session_state.messages.append({"role": "user", "content": user_message})
+
+    selected_tools = [tool_loader.tools[idx] for idx, checkbox in enumerate(tool_checkboxes) if checkbox]
+    # Handle submission with the selected inference URL
+    response = handle_submission(user_message, selected_tools, url_endpoint)
+
+    with st.chat_message("assistant"):
+        if response is None:
+            st.warning("The agent's response is None. Please try again. Generate an image of a flying horse.")
+        elif isinstance(response, Image.Image):
+            st.image(response)
+        elif isinstance(response, AudioSegment):
+            st.audio(response)
+        elif isinstance(response, int):
+            st.markdown(response)
+        elif isinstance(response, str):
+            if "emojified_text" in response:
+                st.markdown(f"{response['emojified_text']}")
+            else:
+                st.markdown(response)
+        elif isinstance(response, list):
+            for item in response:
+                st.markdown(item)  # Assuming the list contains strings
+        elif isinstance(response, pd.DataFrame):
+            st.dataframe(response)
+        elif isinstance(response, pd.Series):
+            st.table(response.iloc[0:10])
+        elif isinstance(response, dict):
+            st.json(response)
+        elif isinstance(response, st.graphics_altair.AltairChart):
+            st.altair_chart(response)
+        elif isinstance(response, st.graphics_bokeh.BokehChart):
+            st.bokeh_chart(response)
+        elif isinstance(response, st.graphics_graphviz.GraphvizChart):
+            st.graphviz_chart(response)
+        elif isinstance(response, st.graphics_plotly.PlotlyChart):
+            st.plotly_chart(response)
+        elif isinstance(response, st.graphics_pydeck.PydeckChart):
+            st.pydeck_chart(response)
+        elif isinstance(response, matplotlib.figure.Figure):
+            st.pyplot(response)
+        elif isinstance(response, streamlit.graphics_vega_lite.VegaLiteChart):
+            st.vega_lite_chart(response)
+        else:
+            st.warning("Unrecognized response type. Please try again. e.g. Generate an image of a flying horse.")
+
+    st.session_state.messages.append({"role": "assistant", "content": response})
+
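A note on the string branch in the chat-handling code above: when `response` is a `str`, `"emojified_text" in response` is only a substring test, and `response['emojified_text']` then raises `TypeError` because strings accept only integer indices. If the emojifier tool actually returns a dict with that key, the lookup belongs in the dict branch instead. A minimal sketch of that shape (the key name comes from the diff; the helper name and everything else here is illustrative):

import streamlit as st

def render_text_like(response) -> None:
    """Sketch of the str/dict branches; mirrors the intent of the diff, not the committed code."""
    if isinstance(response, dict) and "emojified_text" in response:
        # Hypothetical tool output shape: {"emojified_text": "..."}
        st.markdown(response["emojified_text"])
    elif isinstance(response, dict):
        st.json(response)
    elif isinstance(response, str):
        # A plain string needs no key lookup; "in" on a str is only a substring test.
        st.markdown(response)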
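Separately, `st.graphics_altair.AltairChart`, `st.graphics_bokeh.BokehChart`, `st.graphics_graphviz.GraphvizChart`, `st.graphics_plotly.PlotlyChart`, `st.graphics_pydeck.PydeckChart`, and `streamlit.graphics_vega_lite.VegaLiteChart` are not attributes Streamlit exposes, so those `isinstance` checks raise `AttributeError` as soon as they are reached (and `matplotlib.figure.Figure` needs matplotlib imported, which the diff does not show). One hedged alternative, sketched below rather than taken from the app, is to dispatch only on classes the app itself imports and let `st.write` display chart objects generically:

import io

import matplotlib.figure
import pandas as pd
import streamlit as st
from PIL import Image
from pydub import AudioSegment


def render_response(response) -> None:
    """Best-effort rendering of an agent response; a sketch, not the app's actual helper."""
    if response is None:
        st.warning("The agent returned no response. Please try again.")
    elif isinstance(response, Image.Image):
        st.image(response)
    elif isinstance(response, AudioSegment):
        # st.audio expects bytes, a file, or an array, not an AudioSegment: export to WAV bytes first.
        buf = io.BytesIO()
        response.export(buf, format="wav")
        st.audio(buf.getvalue(), format="audio/wav")
    elif isinstance(response, pd.DataFrame):
        st.dataframe(response)
    elif isinstance(response, pd.Series):
        st.table(response.head(10))
    elif isinstance(response, matplotlib.figure.Figure):
        st.pyplot(response)
    elif isinstance(response, dict):
        st.json(response)
    else:
        # Strings, numbers, lists, and Altair/Bokeh/Plotly/pydeck figures:
        # st.write picks an appropriate renderer for each of these.
        st.write(response)

The WAV export step reflects that `st.audio` does not accept an `AudioSegment` directly, which the `st.audio(response)` line in the committed code appears to assume.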