Chris4K committed
Commit 6052994 · verified · 1 Parent(s): 120d4a1

Update app.py

Files changed (1):
  1. app.py +67 -13
app.py CHANGED
@@ -1,19 +1,49 @@
  import streamlit as st
  import os
- import requests
  import base64
  import io
- import time
  from PIL import Image
  from pydub import AudioSegment
  import IPython
  import soundfile as sf
+ import requests
+ import pandas as pd  # If you're working with DataFrames
+ import matplotlib.figure  # If you're using matplotlib figures
+
+ # For Altair charts
+ import streamlit.graphics_altair
+ # For Bokeh charts
+ import streamlit.graphics_bokeh
+
+ # For Plotly charts
+ import streamlit.graphics_plotly
+
+ # For Pydeck charts
+ import streamlit.graphics_pydeck
+ # For Vega-Lite charts
+ import streamlit.graphics_vega_lite
+
+
+ import time
  from transformers import load_tool, Agent
  import torch

+
  class ToolLoader:
      def __init__(self, tool_names):
-         self.tools = [load_tool(tool_name) for tool_name in tool_names]
+         self.tools = self.load_tools(tool_names)
+
+     def load_tools(self, tool_names):
+         loaded_tools = []
+         for tool_name in tool_names:
+             try:
+                 tool = load_tool(tool_name)
+                 loaded_tools.append(tool)
+             except Exception as e:
+                 print(f"Error loading tool '{tool_name}': {e}")
+                 # Handle the error as needed, e.g., continue with other tools or take corrective action
+
+         return loaded_tools

  class CustomHfAgent(Agent):
      def __init__(self, url_endpoint, token, chat_prompt_template=None, run_prompt_template=None, additional_tools=None, input_params=None):
@@ -56,6 +86,9 @@ def load_tools(tool_names):
  tool_names = [
      "Chris4K/random-character-tool",
      "Chris4K/text-generation-tool",
+     "Chris4K/sentiment-tool",
+     "Chris4K/EmojifyTextTool",
+
      # Add other tool names as needed
  ]

@@ -100,22 +133,43 @@ if user_message := st.chat_input("Enter message"):

      with st.chat_message("assistant"):
          if response is None:
-             st.warning("The agent's response is None. Please try again.")
-         elif "emojified_text" in response:
-             st.markdown(f"Emojified Text: {response['emojified_text']}")
+             st.warning("The agent's response is None. Please try again. Generate an image of a flying horse.")
          elif isinstance(response, Image.Image):
              st.image(response)
-         elif "audio" in str(response):
-             audio_data = base64.b64decode(response.split(",")[1])
-             audio = AudioSegment.from_file(io.BytesIO(audio_data))
-             st.audio(audio)
          elif isinstance(response, AudioSegment):
              st.audio(response)
-         elif isinstance(response, str):
-             st.markdown(response)
          elif isinstance(response, int):
              st.markdown(response)
+         elif isinstance(response, str):
+             if "emojified_text" in response:
+                 st.markdown(f"{response['emojified_text']}")
+             else:
+                 st.markdown(response)
+         elif isinstance(response, list):
+             for item in response:
+                 st.markdown(item)  # Assuming the list contains strings
+         elif isinstance(response, pd.DataFrame):
+             st.dataframe(response)
+         elif isinstance(response, pd.Series):
+             st.table(response.iloc[0:10])
+         elif isinstance(response, dict):
+             st.json(response)
+         elif isinstance(response, streamlit.graphics_altair.AltairChart):
+             st.altair_chart(response)
+         elif isinstance(response, streamlit.graphics_bokeh.BokehChart):
+             st.bokeh_chart(response)
+         elif isinstance(response, streamlit.graphics_graphviz.GraphvizChart):
+             st.graphviz_chart(response)
+         elif isinstance(response, streamlit.graphics_plotly.PlotlyChart):
+             st.plotly_chart(response)
+         elif isinstance(response, streamlit.graphics_pydeck.PydeckChart):
+             st.pydeck_chart(response)
+         elif isinstance(response, matplotlib.figure.Figure):
+             st.pyplot(response)
+         elif isinstance(response, streamlit.graphics_vega_lite.VegaLiteChart):
+             st.vega_lite_chart(response)
          else:
-             st.warning("Unrecognized response type. Please try again.")
+             st.warning("Unrecognized response type. Please try again. e.g. Generate an image of a flying horse.")
+

      st.session_state.messages.append({"role": "assistant", "content": response})
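
The reworked ToolLoader skips tools that fail to load instead of letting one bad repository break startup. A minimal usage sketch, relying on the same transformers load_tool import used in app.py; the second tool id is hypothetical and exists only to exercise the failure path:

from transformers import load_tool

class ToolLoader:
    def __init__(self, tool_names):
        self.tools = self.load_tools(tool_names)

    def load_tools(self, tool_names):
        loaded_tools = []
        for tool_name in tool_names:
            try:
                loaded_tools.append(load_tool(tool_name))
            except Exception as e:
                # A single broken tool id is logged and skipped rather than raised.
                print(f"Error loading tool '{tool_name}': {e}")
        return loaded_tools

loader = ToolLoader([
    "Chris4K/random-character-tool",
    "Chris4K/this-tool-does-not-exist",  # hypothetical id, demonstrates the skip-on-failure path
])
print(f"Loaded {len(loader.tools)} of 2 tools")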
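
For the chart branches, the streamlit.graphics_* modules imported above do not appear in Streamlit's public API, so here is a minimal sketch of the same type-based dispatch using the chart libraries' own classes. It assumes altair, plotly, and pandas are installed; render_response is an illustrative helper name, not something defined in app.py.

import altair as alt
import pandas as pd
import plotly.graph_objects as go
import streamlit as st

def render_response(response):
    # Pick a Streamlit renderer based on the response type; fall back to plain markdown.
    if isinstance(response, pd.DataFrame):
        st.dataframe(response)
    elif isinstance(response, alt.Chart):   # Altair chart object
        st.altair_chart(response)
    elif isinstance(response, go.Figure):   # Plotly figure object
        st.plotly_chart(response)
    elif isinstance(response, dict):
        st.json(response)
    else:
        st.markdown(str(response))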