Chris4K committed
Commit 1c5a6c6 · verified · 1 Parent(s): 17e8776

Update app.py

Files changed (1)
  1. app.py +21 -377
app.py CHANGED
@@ -1,155 +1,20 @@
 import streamlit as st
-import os
-import base64
-import io
-from PIL import Image
-from pydub import AudioSegment
-import IPython
-import soundfile as sf
-import requests
-import pandas as pd # If you're working with DataFrames
-import matplotlib.figure # If you're using matplotlib figures
-import numpy as np
 
-# For Altair charts
-import altair as alt
+from tool_loader import ToolLoader
+from app_agent_config import app_agent_config
+from app_user_desc import app_user_desc
+from app_dev_desc import app_dev_desc
+from logger import log_response
+from logger import log_enabled
+from app_chat import app_chat
 
-# For Bokeh charts
-from bokeh.models import Plot
-
-# For Plotly charts
-import plotly.express as px
-
-# For Pydeck charts
-import pydeck as pdk
-
-
-import logging
-import streamlit as st
-from transformers import load_tool, Agent
-
-# Configure the logging settings for transformers
-transformers_logger = logging.getLogger("transformers.file_utils")
-transformers_logger.setLevel(logging.INFO) # Set the desired logging level
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-import time
-from transformers import load_tool, Agent
-import torch
-
-class ToolLoader:
-    def __init__(self, tool_names):
-        self.tools = self.load_tools(tool_names)
-
-    def load_tools(self, tool_names):
-        loaded_tools = []
-        for tool_name in tool_names:
-            try:
-                tool = load_tool(tool_name)
-                loaded_tools.append(tool)
-            except Exception as e:
-                log_response(f"Error loading tool '{tool_name}': {e}")
-        return loaded_tools
-
-class CustomHfAgent(Agent):
-    def __init__(self, url_endpoint, token, chat_prompt_template=None, run_prompt_template=None, additional_tools=None, input_params=None):
-        super().__init__(
-            chat_prompt_template=chat_prompt_template,
-            run_prompt_template=run_prompt_template,
-            additional_tools=additional_tools,
-        )
-        self.url_endpoint = url_endpoint
-        self.token = token
-        self.input_params = input_params
-
-    def generate_one(self, prompt, stop):
-        headers = {"Authorization": self.token}
-        max_new_tokens = self.input_params.get("max_new_tokens", 192)
-        parameters = {"max_new_tokens": max_new_tokens, "return_full_text": False, "stop": stop, "padding": True, "truncation": True}
-        inputs = {
-            "inputs": prompt,
-            "parameters": parameters,
-        }
-        response = requests.post(self.url_endpoint, json=inputs, headers=headers)
-
-        if response.status_code == 429:
-            log_response("Getting rate-limited, waiting a tiny bit before trying again.")
-            time.sleep(1)
-            return self._generate_one(prompt)
-        elif response.status_code != 200:
-            raise ValueError(f"Errors {inputs} {response.status_code}: {response.json()}")
-        log_response(response)
-        result = response.json()[0]["generated_text"]
-        for stop_seq in stop:
-            if result.endswith(stop_seq):
-                return result[: -len(stop_seq)]
-        return result
-
-def handle_submission(user_message, selected_tools, url_endpoint):
-
-    log_response("User input \n {}".format(user_message))
-    log_response("selected_tools \n {}".format(selected_tools))
-    log_response("url_endpoint \n {}".format(url_endpoint))
-
-    agent = CustomHfAgent(
-        url_endpoint=url_endpoint,
-        token=os.environ['HF_token'],
-        additional_tools=selected_tools,
-        input_params={"max_new_tokens": 192},
-    )
-
-    response = agent.run(user_message)
-
-    log_response("Agent Response\n {}".format(response))
-
-    return response
-
-# Declare global variable
-global log_enabled
+tool_checkboxes = []
+url_endpoint = ""
 log_enabled = False
+#from transformers import load_tool, Agent
 
-def log_response(response):
-    if log_enabled:
-        with st.chat_message("ai"):
-            st.markdown("Agent Response\n {}".format(response))
-    print(response)
-
-
-# Define the tool names to load
-tool_names = [
-    "Chris4K/random-character-tool",
-    "Chris4K/text-generation-tool",
-    "Chris4K/sentiment-tool",
-    "Chris4K/token-counter-tool",
-    "Chris4K/most-downloaded-model",
-    "Chris4K/rag-tool",
-    "Chris4K/word-counter-tool",
-    "Chris4K/sentence-counter-tool",
-    "Chris4K/EmojifyTextTool",
-    "Chris4K/NamedEntityRecognitionTool",
-    "Chris4K/TextDownloadTool",
-    "Chris4K/source-code-retriever-tool",
-    "Chris4K/text-to-image",
-    "Chris4K/text-to-video",
-    "Chris4K/image-transformation",
-    "Chris4K/latent-upscaler-tool"
-    # More cool tools to come
-]
-
-# Create tool loader instance
-tool_loader = ToolLoader(tool_names)
-
+# Declare global variable
+
 st.title("Hugging Face Agent and tools")
 
 ## LB https://huggingface.co/spaces/qiantong-xu/toolbench-leaderboard
@@ -163,243 +28,22 @@ tabs = st.tabs(["Chat", "URL, Tools and logging", "User Description", "Developer
 with tabs[0]:
 
     # Code for URL and Tools checkboxes
-
+    #chat_description()
     # Examples for the user perspective
     st.markdown("Stat to chat. e.g. Generate an image of a boat. This will make the agent use the tool text2image to generate an image.")
 
-
 # Tab 2: URL and Tools
 with tabs[1]:
-
-    # Code for URL and Tools checkboxes
-
-
-    # Add a dropdown for selecting the inference URL
-    url_endpoint = st.selectbox("Select Inference URL", [
-        "https://api-inference.huggingface.co/models/bigcode/starcoder",
-        "https://api-inference.huggingface.co/models/OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
-        "https://api-inference.huggingface.co/models/gpt2"
-    ])
-
-    # Add a checkbox for enabling logging
-    log_enabled = st.checkbox("Enable Logging")
-
-    tool_checkboxes = [st.checkbox(f"{tool.name} --- {tool.description} ") for tool in tool_loader.tools]
+    #
+    app_agent_config()
 
 # Tab 3: User Description
 with tabs[2]:
-
-    # User description content and tool descriptions
-    # Add a section for the app's description
-    st.markdown('''
-    # Hugging Face Agent and Tools App
-
-    ## Description
-    Welcome to the Hugging Face Agent and Tools app! This app provides an interactive interface for utilizing various tools through the Hugging Face API. You can choose an inference URL and select from a variety of tools to perform different tasks.
-
-    ## Examples
-    1. **Generate a Random Character**:
-       - Choose the desired URL and the 'Random Character Tool'.
-       - Then type 'Genarate random Character'
-
-    2. **Sentiment Analysis**:
-       - Choose the desired URL and the 'Sentiment Analysis Tool'.
-       - Sample: What is the sentiment for "Hello, I am happy"?
-
-    3. **Word Count**:
-       - Choose the desired URL and the 'Word Counter Tool'.
-       - Sample: Count the words in "Hello, I am Christof".
-
-    Other samples:
-
-    - Generate a random character.
-    - What is the sentiment for "Hello I am happy"
-    - Count the words of "Hello I am Christof”
-    - What is the most downloaded model for text2image
-    - Use ner_tool to find the information in the following text:"Hello I am Christof.".
-    - Download the text from https://docs.streamlit.io/get-started/installation
-    - Scrape source code from https://docs.streamlit.io/get-started/installation
-    - label for text="Hello I am Christof" classifies greeting
-
-
-    ## Tools
-    To interact with the tools, expand the section below to see tool descriptions and select the tools you want to use.
-
-    Expand to see tool descriptions
-
-    ### Tool Descriptions
-    - **random-character-tool:** Generates a random character.
-    - **text-generation-tool:** Generates text based on a prompt.
-    - **sentiment-tool:** Analyzes the sentiment of a given text.
-    - **token-counter-tool:** Counts the tokens in a text.
-    - **most-downloaded-model:** Provides information about the most downloaded model.
-    - **rag-tool:** Utilizes Retrieval-Augmented Generation (RAG) for text generation.
-    - **word-counter-tool:** Counts the words in a text.
-    - **sentence-counter-tool:** Counts the sentences in a text.
-    - **EmojifyTextTool:** Emojifies the given text.
-    - **NamedEntityRecognitionTool:** Identifies named entities in a text.
-    - **TextDownloadTool:** Downloads text from a given URL.
-    - **source-code-retriever-tool:** Retrieves source code from a given URL.
-    - **text-to-image:** Generates an image from text.
-    - **text-to-video:** Generates a video from text.
-    - **image-transformation:** Applies transformations to images.
-    - **latent-upscaler-tool:** Upscales images using latent space.
-
-
-    ## Usage
-    1. Choose the desired inference URL from the dropdown.
-    2. Expand the tool selection section and choose the tools you want to use.
-    3. Enter a message in the chat input to interact with the Hugging Face Agent.
-    4. View the assistant's responses, which may include images, audio, text, or other visualizations based on the selected tools.
-
-    Feel free to explore and experiment with different tools to achieve various tasks!
-
-    ''')
-
+    #
+    app_user_desc()
+
 # Tab 4: Developers
 with tabs[3]:
-
-    # Developer-related content
-    st.markdown('''
-
-    # Hugging Face Agent and Tools Code Overview
-
-    ## Overview
-    The provided Python code implements an interactive Streamlit web application that allows users to interact with various tools through the Hugging Face API. The app integrates Hugging Face models and tools, enabling users to perform tasks such as text generation, sentiment analysis, and more.
-
-    ## Imports
-    The code imports several external libraries and modules, including:
-    - `streamlit`: For building the web application.
-    - `os`: For interacting with the operating system.
-    - `base64`, `io`, `Image` (from `PIL`), `AudioSegment` (from `pydub`), `IPython`, `sf`: For handling images and audio.
-    - `requests`: For making HTTP requests.
-    - `pandas`: For working with DataFrames.
-    - `matplotlib.figure`, `numpy`: For visualization.
-    - `altair`, `Plot` (from `bokeh.models`), `px` (from `plotly.express`), `pdk` (from `pydeck`): For different charting libraries.
-    - `time`: For handling time-related operations.
-    - `transformers`: For loading tools and agents.
-
-    ## ToolLoader Class
-    The `ToolLoader` class is responsible for loading tools based on their names. It has methods to load tools from a list of tool names and handles potential errors during loading.
-
-    ## CustomHfAgent Class
-    The `CustomHfAgent` class extends the base `Agent` class from the `transformers` module. It is designed to interact with a remote inference API and includes methods for generating text based on a given prompt.
-
-    ## Tool Loading and Customization
-    - Tool names are defined in the `tool_names` list.
-    - The `ToolLoader` instance (`tool_loader`) loads tools based on the provided names.
-    - The `CustomHfAgent` instance (`agent`) is created with a specified URL endpoint, token, and additional tools.
-    - New tools can be added by appending their names to the `tool_names` list.
-
-    ## Streamlit App
-    The Streamlit app is structured as follows:
-    1. Tool selection dropdown for choosing the inference URL.
-    2. An expander for displaying tool descriptions.
-    3. An expander for selecting tools.
-    4. Examples and instructions for the user.
-    5. A chat interface for user interactions.
-    6. Handling of user inputs, tool selection, and agent responses.
-
-    ## Handling of Responses
-    The code handles various types of responses from the agent, including images, audio, text, DataFrames, and charts. The responses are displayed in the Streamlit app based on their types.
-
-    ## How to Run
-    1. Install required dependencies with `pip install -r requirements.txt`.
-    2. Run the app with `streamlit run <filename.py>`.
-
-    ## Notes
-    - The code emphasizes customization and extensibility, allowing developers to easily add new tools and interact with the Hugging Face API.
-    - Ensure proper configuration, such as setting the Hugging Face token as an environment variable.
-
-    ''')
-
-
-# Display logs in the frontend
-logs_expander = st.expander("Logs")
-with logs_expander:
-    log_output = st.empty()
-
-# Custom logging handler to append log messages to the chat
-class ChatHandler(logging.Handler):
-    def __init__(self):
-        super().__init__()
-
-    def emit(self, record):
-        log_message = self.format(record)
-        with st.chat_message("ai"):
-            st.markdown(f"Log: {log_message}")
-
-# Add the custom handler to the transformers_logger
-chat_handler = ChatHandler()
-transformers_logger.addHandler(chat_handler)
-
-# Function to update logs in the frontend
-def update_logs():
-    log_output.code("") # Clear previous logs
-    # Do nothing here since logs are appended to the chat
-
-# Update logs when the button is clicked
-if st.button("Update Logs"):
-    update_logs()
-
-
-# Chat code (user input, agent responses, etc.)
-if "messages" not in st.session_state:
-    st.session_state.messages = []
-
-for message in st.session_state.messages:
-    with st.chat_message(message["role"]):
-        st.markdown(message["content"])
-
-with st.chat_message("assistant"):
-    st.markdown("Hello there! How can I assist you today?")
-
-if user_message := st.chat_input("Enter message"):
-    st.chat_message("user").markdown(user_message)
-    st.session_state.messages.append({"role": "user", "content": user_message})
-
-    selected_tools = [tool_loader.tools[idx] for idx, checkbox in enumerate(tool_checkboxes) if checkbox]
-    # Handle submission with the selected inference URL
-    response = handle_submission(user_message, selected_tools, url_endpoint)
-
-    with st.chat_message("assistant"):
-        if response is None:
-            st.warning("The agent's response is None. Please try again. Generate an image of a flying horse.")
-        elif isinstance(response, Image.Image):
-            st.image(response)
-        elif isinstance(response, AudioSegment):
-            st.audio(response)
-        elif isinstance(response, int):
-            st.markdown(response)
-        elif isinstance(response, str):
-            if "emojified_text" in response:
-                st.markdown(f"{response['emojified_text']}")
-            else:
-                st.markdown(response)
-        elif isinstance(response, list):
-            for item in response:
-                st.markdown(item) # Assuming the list contains strings
-        elif isinstance(response, pd.DataFrame):
-            st.dataframe(response)
-        elif isinstance(response, pd.Series):
-            st.table(response.iloc[0:10])
-        elif isinstance(response, dict):
-            st.json(response)
-        elif isinstance(response, st.graphics_altair.AltairChart):
-            st.altair_chart(response)
-        elif isinstance(response, st.graphics_bokeh.BokehChart):
-            st.bokeh_chart(response)
-        elif isinstance(response, st.graphics_graphviz.GraphvizChart):
-            st.graphviz_chart(response)
-        elif isinstance(response, st.graphics_plotly.PlotlyChart):
-            st.plotly_chart(response)
-        elif isinstance(response, st.graphics_pydeck.PydeckChart):
-            st.pydeck_chart(response)
-        elif isinstance(response, matplotlib.figure.Figure):
-            st.pyplot(response)
-        elif isinstance(response, streamlit.graphics_vega_lite.VegaLiteChart):
-            st.vega_lite_chart(response)
-        else:
-            st.warning("Unrecognized response type. Please try again. e.g. Generate an image of a flying horse.")
-
-    st.session_state.messages.append({"role": "assistant", "content": response})
+    app_dev_desc()
+
+app_chat()
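
The new app.py imports `ToolLoader` from a `tool_loader` module that is not part of this commit's diff. Based on the `ToolLoader` class and `tool_names` list removed above, the module presumably looks roughly like the sketch below; the file layout, the `logger` import, and the shortened tool list are assumptions, not code from this commit.

```python
# tool_loader.py -- hypothetical sketch reconstructed from the code removed above; not shown in this diff.
from transformers import load_tool  # same agents helper the removed app.py used

from logger import log_response  # assumed new home of the logging helper


class ToolLoader:
    """Load Hugging Face tools by name, skipping any tool that fails to load."""

    def __init__(self, tool_names):
        self.tools = self.load_tools(tool_names)

    def load_tools(self, tool_names):
        loaded_tools = []
        for tool_name in tool_names:
            try:
                loaded_tools.append(load_tool(tool_name))
            except Exception as e:
                log_response(f"Error loading tool '{tool_name}': {e}")
        return loaded_tools


# A few of the tool names hard-coded in the removed app.py; the full list had 16 entries.
tool_names = [
    "Chris4K/random-character-tool",
    "Chris4K/text-generation-tool",
    "Chris4K/sentiment-tool",
]
```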
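
Likewise, `log_response` and `log_enabled` are now imported from a `logger` module that this diff does not show. A minimal sketch, assuming the removed flag and helper were moved over mostly unchanged (how the "Enable Logging" checkbox updates the flag across modules is not visible here):

```python
# logger.py -- hypothetical sketch based on the removed log_response/log_enabled code; not shown in this diff.
import streamlit as st

# Module-level flag; assumed to be toggled by the agent-config tab's "Enable Logging" checkbox.
log_enabled = False


def log_response(response):
    """Echo a response into the chat when logging is enabled, and always print it to the console."""
    if log_enabled:
        with st.chat_message("ai"):
            st.markdown("Agent Response\n {}".format(response))
    print(response)
```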