# NOTE: removed non-Python scrape residue that preceded the code
# (a "File size" line, a git-blame hash gutter, and a line-number gutter).
import streamlit as st
import os
import base64
import io
from PIL import Image
from pydub import AudioSegment
import IPython
import soundfile as sf
import requests
import pandas as pd # If you're working with DataFrames
import matplotlib.figure # If you're using matplotlib figures
import numpy as np
# For Altair charts
import altair as alt
# For Bokeh charts
from bokeh.models import Plot
# For Plotly charts
import plotly.express as px
# For Pydeck charts
import pydeck as pdk
import time
from transformers import load_tool, Agent
import torch
class ToolLoader:
    """Load Hugging Face tools by hub id, best-effort.

    Attributes:
        tools: the tools that loaded successfully, in request order.
    """

    def __init__(self, tool_names):
        self.tools = self.load_tools(tool_names)

    def load_tools(self, tool_names):
        """Return the tools that ``load_tool`` can resolve from *tool_names*.

        A failure to load one tool is logged and skipped, so a single broken
        or renamed Space does not prevent the remaining tools from loading.
        """
        loaded_tools = []
        for name in tool_names:
            try:
                loaded_tools.append(load_tool(name))
            except Exception as exc:  # broad on purpose: remote tools fail in many ways
                log_response(f"Error loading tool '{name}': {exc}")
        return loaded_tools
class CustomHfAgent(Agent):
    """Agent that generates text through a remote HF Inference API endpoint.

    Args:
        url_endpoint: full URL of the hosted inference model.
        token: value sent as the ``Authorization`` header (HF API token).
        chat_prompt_template / run_prompt_template / additional_tools:
            forwarded unchanged to the base ``transformers.Agent``.
        input_params: optional dict of generation parameters; only
            ``max_new_tokens`` is currently read (default 192).
    """

    def __init__(self, url_endpoint, token, chat_prompt_template=None, run_prompt_template=None, additional_tools=None, input_params=None):
        super().__init__(
            chat_prompt_template=chat_prompt_template,
            run_prompt_template=run_prompt_template,
            additional_tools=additional_tools,
        )
        self.url_endpoint = url_endpoint
        self.token = token
        self.input_params = input_params

    def generate_one(self, prompt, stop):
        """POST *prompt* to the endpoint and return the generated text.

        Retries after a short sleep on HTTP 429 (rate limit) and raises
        ``ValueError`` on any other non-200 status. A trailing stop
        sequence, if present, is trimmed from the returned text.
        """
        headers = {"Authorization": self.token}
        # BUG FIX: input_params defaults to None, so .get() on it crashed;
        # fall back to an empty dict before reading the setting.
        max_new_tokens = (self.input_params or {}).get("max_new_tokens", 192)
        parameters = {
            "max_new_tokens": max_new_tokens,
            "return_full_text": False,
            "stop": stop,
            "padding": True,
            "truncation": True,
        }
        inputs = {
            "inputs": prompt,
            "parameters": parameters,
        }
        response = requests.post(self.url_endpoint, json=inputs, headers=headers)
        if response.status_code == 429:
            log_response("Getting rate-limited, waiting a tiny bit before trying again.")
            time.sleep(1)
            # BUG FIX: the original retried via the nonexistent method
            # self._generate_one(prompt) (AttributeError) and also dropped
            # the stop sequences on retry.
            return self.generate_one(prompt, stop)
        elif response.status_code != 200:
            raise ValueError(f"Errors {inputs} {response.status_code}: {response.json()}")
        log_response(response)
        result = response.json()[0]["generated_text"]
        # Trim a trailing stop sequence so callers see clean text.
        for stop_seq in stop:
            if result.endswith(stop_seq):
                return result[: -len(stop_seq)]
        return result
def handle_submission(user_message, selected_tools, url_endpoint):
    """Run the agent on *user_message* using the chosen tools and endpoint.

    Builds a ``CustomHfAgent`` bound to *url_endpoint* (authenticated via the
    ``HF_token`` environment variable), runs it, logs the result, and returns
    the agent's (possibly multi-modal) response.
    """
    # Trace every input so problems can be diagnosed when logging is enabled.
    for label, value in (
        ("User input", user_message),
        ("selected_tools", selected_tools),
        ("url_endpoint", url_endpoint),
    ):
        log_response("{} \n {}".format(label, value))

    agent = CustomHfAgent(
        url_endpoint=url_endpoint,
        token=os.environ['HF_token'],  # raises KeyError if the env var is unset
        additional_tools=selected_tools,
        input_params={"max_new_tokens": 192},
    )

    response = agent.run(user_message)
    log_response("Agent Response\n {}".format(response))
    return response
# Logging flag read by log_response() and reassigned by the "Enable Logging"
# checkbox in the settings tab. NOTE: the original prefixed this with a
# `global log_enabled` statement, which is a no-op at module scope — the
# bare assignment alone creates the module-level name.
log_enabled = False
def log_response(response):
    """Debug helper: mirror *response* to stdout, and additionally render it
    in the chat as an AI message when the module-level ``log_enabled`` flag
    (toggled by the "Enable Logging" checkbox) is set."""
    if log_enabled:
        with st.chat_message("ai"):
            st.markdown("Agent Response\n {}".format(response))
    # Always print, even with UI logging off, so the server console has a trace.
    print(response)
# Define the tool names to load.
# These are Hugging Face Hub ids; ToolLoader skips any that fail to load,
# so a missing or broken Space only removes that one tool from the UI.
tool_names = [
    "Chris4K/random-character-tool",
    "Chris4K/text-generation-tool",
    "Chris4K/sentiment-tool",
    "Chris4K/token-counter-tool",
    "Chris4K/most-downloaded-model",
    "Chris4K/rag-tool",
    "Chris4K/word-counter-tool",
    "Chris4K/sentence-counter-tool",
    "Chris4K/EmojifyTextTool",
    "Chris4K/NamedEntityRecognitionTool",
    "Chris4K/TextDownloadTool",
    "Chris4K/source-code-retriever-tool",
    "Chris4K/text-to-image",
    "Chris4K/text-to-video",
    "Chris4K/image-transformation",
    "Chris4K/latent-upscaler-tool"
    # More cool tools to come
]

# Create tool loader instance (loads all tools once, at app start-up).
tool_loader = ToolLoader(tool_names)
# Page header and top-level layout.
st.title("Hugging Face Agent and tools")

## LB https://huggingface.co/spaces/qiantong-xu/toolbench-leaderboard

st.markdown("Welcome to the Hugging Face Agent and Tools app! This app allows you to interact with various tools using the Hugging Face API.")

# Create a page with tabs: chat hint, settings, user docs, developer docs.
tabs = st.tabs(["Chat", "URL, Tools and logging", "User Description", "Developers"])
# Tab 1: Chat
with tabs[0]:
    # Usage hint for the chat (the chat widgets themselves are created at
    # the bottom of the module).
    # BUG FIX: user-facing typo "Stat to chat" -> "Start to chat".
    st.markdown("Start to chat. e.g. Generate an image of a boat. This will make the agent use the tool text2image to generate an image.")

# Tab 2: URL, Tools and logging
with tabs[1]:
    # Dropdown for selecting the inference endpoint the agent will call.
    url_endpoint = st.selectbox("Select Inference URL", [
        "https://api-inference.huggingface.co/models/bigcode/starcoder",
        "https://api-inference.huggingface.co/models/OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
        "https://api-inference.huggingface.co/models/gpt2"
    ])

    # Checkbox that toggles the module-level logging flag read by log_response().
    log_enabled = st.checkbox("Enable Logging")

    # One checkbox per successfully loaded tool; index order matches
    # tool_loader.tools, which the chat handler relies on when selecting tools.
    tool_checkboxes = [st.checkbox(f"{tool.name} --- {tool.description} ") for tool in tool_loader.tools]
# Tab 3: User Description
with tabs[2]:
    # End-user documentation: app description, examples, tool list, usage.
    # BUG FIX: user-facing typos — "Genarate" -> "Generate", and a stray
    # curly closing quote after "Christof" replaced with a straight quote.
    st.markdown('''
# Hugging Face Agent and Tools App

## Description
Welcome to the Hugging Face Agent and Tools app! This app provides an interactive interface for utilizing various tools through the Hugging Face API. You can choose an inference URL and select from a variety of tools to perform different tasks.

## Examples
1. **Generate a Random Character**:
   - Choose the desired URL and the 'Random Character Tool'.
   - Then type 'Generate random Character'
2. **Sentiment Analysis**:
   - Choose the desired URL and the 'Sentiment Analysis Tool'.
   - Sample: What is the sentiment for "Hello, I am happy"?
3. **Word Count**:
   - Choose the desired URL and the 'Word Counter Tool'.
   - Sample: Count the words in "Hello, I am Christof".

Other samples:
- Generate a random character.
- What is the sentiment for "Hello I am happy"
- Count the words of "Hello I am Christof"
- What is the most downloaded model for text2image
- Use ner_tool to find the information in the following text:"Hello I am Christof.".
- Download the text from https://docs.streamlit.io/get-started/installation
- Scrape source code from https://docs.streamlit.io/get-started/installation
- label for text="Hello I am Christof" classifies greeting

## Tools
To interact with the tools, expand the section below to see tool descriptions and select the tools you want to use.

Expand to see tool descriptions

### Tool Descriptions
- **random-character-tool:** Generates a random character.
- **text-generation-tool:** Generates text based on a prompt.
- **sentiment-tool:** Analyzes the sentiment of a given text.
- **token-counter-tool:** Counts the tokens in a text.
- **most-downloaded-model:** Provides information about the most downloaded model.
- **rag-tool:** Utilizes Retrieval-Augmented Generation (RAG) for text generation.
- **word-counter-tool:** Counts the words in a text.
- **sentence-counter-tool:** Counts the sentences in a text.
- **EmojifyTextTool:** Emojifies the given text.
- **NamedEntityRecognitionTool:** Identifies named entities in a text.
- **TextDownloadTool:** Downloads text from a given URL.
- **source-code-retriever-tool:** Retrieves source code from a given URL.
- **text-to-image:** Generates an image from text.
- **text-to-video:** Generates a video from text.
- **image-transformation:** Applies transformations to images.
- **latent-upscaler-tool:** Upscales images using latent space.

## Usage
1. Choose the desired inference URL from the dropdown.
2. Expand the tool selection section and choose the tools you want to use.
3. Enter a message in the chat input to interact with the Hugging Face Agent.
4. View the assistant's responses, which may include images, audio, text, or other visualizations based on the selected tools.

Feel free to explore and experiment with different tools to achieve various tasks!
''')
# Tab 4: Developers
with tabs[3]:
    # Developer-facing documentation: code-structure overview rendered as
    # a single static markdown block. No widgets or state live in this tab.
    st.markdown('''
# Hugging Face Agent and Tools Code Overview

## Overview
The provided Python code implements an interactive Streamlit web application that allows users to interact with various tools through the Hugging Face API. The app integrates Hugging Face models and tools, enabling users to perform tasks such as text generation, sentiment analysis, and more.

## Imports
The code imports several external libraries and modules, including:
- `streamlit`: For building the web application.
- `os`: For interacting with the operating system.
- `base64`, `io`, `Image` (from `PIL`), `AudioSegment` (from `pydub`), `IPython`, `sf`: For handling images and audio.
- `requests`: For making HTTP requests.
- `pandas`: For working with DataFrames.
- `matplotlib.figure`, `numpy`: For visualization.
- `altair`, `Plot` (from `bokeh.models`), `px` (from `plotly.express`), `pdk` (from `pydeck`): For different charting libraries.
- `time`: For handling time-related operations.
- `transformers`: For loading tools and agents.

## ToolLoader Class
The `ToolLoader` class is responsible for loading tools based on their names. It has methods to load tools from a list of tool names and handles potential errors during loading.

## CustomHfAgent Class
The `CustomHfAgent` class extends the base `Agent` class from the `transformers` module. It is designed to interact with a remote inference API and includes methods for generating text based on a given prompt.

## Tool Loading and Customization
- Tool names are defined in the `tool_names` list.
- The `ToolLoader` instance (`tool_loader`) loads tools based on the provided names.
- The `CustomHfAgent` instance (`agent`) is created with a specified URL endpoint, token, and additional tools.
- New tools can be added by appending their names to the `tool_names` list.

## Streamlit App
The Streamlit app is structured as follows:
1. Tool selection dropdown for choosing the inference URL.
2. An expander for displaying tool descriptions.
3. An expander for selecting tools.
4. Examples and instructions for the user.
5. A chat interface for user interactions.
6. Handling of user inputs, tool selection, and agent responses.

## Handling of Responses
The code handles various types of responses from the agent, including images, audio, text, DataFrames, and charts. The responses are displayed in the Streamlit app based on their types.

## How to Run
1. Install required dependencies with `pip install -r requirements.txt`.
2. Run the app with `streamlit run <filename.py>`.

## Notes
- The code emphasizes customization and extensibility, allowing developers to easily add new tools and interact with the Hugging Face API.
- Ensure proper configuration, such as setting the Hugging Face token as an environment variable.
''')
# Chat code: history replay, greeting, user input, and rendering of the
# agent's (possibly multi-modal) response.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the conversation so far.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

with st.chat_message("assistant"):
    st.markdown("Hello there! How can I assist you today?")

if user_message := st.chat_input("Enter message"):
    st.chat_message("user").markdown(user_message)
    st.session_state.messages.append({"role": "user", "content": user_message})

    # Only the tools whose checkboxes are ticked are handed to the agent;
    # the index order matches tool_loader.tools (see the settings tab).
    selected_tools = [tool_loader.tools[idx] for idx, checkbox in enumerate(tool_checkboxes) if checkbox]

    # Handle submission with the selected inference URL.
    response = handle_submission(user_message, selected_tools, url_endpoint)

    with st.chat_message("assistant"):
        if response is None:
            st.warning("The agent's response is None. Please try again. Generate an image of a flying horse.")
        elif isinstance(response, Image.Image):
            st.image(response)
        elif isinstance(response, AudioSegment):
            st.audio(response)
        elif isinstance(response, (int, str)):
            # BUG FIX: the original indexed a *string* response with
            # response['emojified_text'] (a TypeError whenever the substring
            # matched); a plain markdown render handles every string.
            st.markdown(response)
        elif isinstance(response, list):
            for item in response:
                st.markdown(item)  # assumes the list contains markdown-able items
        elif isinstance(response, pd.DataFrame):
            st.dataframe(response)
        elif isinstance(response, pd.Series):
            st.table(response.iloc[0:10])
        elif isinstance(response, dict):
            st.json(response)
        elif isinstance(response, alt.Chart):
            st.altair_chart(response)
        elif isinstance(response, Plot):
            st.bokeh_chart(response)
        elif isinstance(response, matplotlib.figure.Figure):
            st.pyplot(response)
        else:
            # BUG FIX: the original tested nonexistent attributes such as
            # st.graphics_altair.AltairChart and the unimported name
            # streamlit.graphics_vega_lite, which raised AttributeError /
            # NameError before anything could render. st.write() knows how
            # to display plotly, pydeck, vega-lite and most other objects,
            # and falls back to a text representation otherwise.
            st.write(response)

    st.session_state.messages.append({"role": "assistant", "content": response})