import io
import logging
import os

import altair as alt
import graphviz
import matplotlib.figure
import pandas as pd
import plotly.graph_objects as go
import pydeck as pdk
import streamlit as st
from bokeh.models import Plot
from PIL import Image
from pydub import AudioSegment

from app_description import show_app_description
from custom_agent import CustomHfAgent
from logger import log_response
from tool_config import tool_names
from tool_loader import ToolLoader

transformers_logger = logging.getLogger("transformers.file_utils")
transformers_logger.setLevel(logging.INFO)

def handle_submission(user_message, selected_tools, url_endpoint):
    """Run the agent on the user's message with the selected tools and return its response."""
    log_response("User input \n {}".format(user_message))
    log_response("selected_tools \n {}".format(selected_tools))
    log_response("url_endpoint \n {}".format(url_endpoint))

    agent = CustomHfAgent(
        url_endpoint=url_endpoint,
        token=os.environ["HF_token"],
        additional_tools=selected_tools,
        input_params={"max_new_tokens": 192},
    )

    response = agent.run(user_message)

    log_response("Agent Response\n {}".format(response))

    return response


log_enabled = False
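
# NOTE (assumption): the full project provides the three tab renderers used below
# (app_config, app_user_description, app_dev_desc); the module names in this import
# are guesses. The except branch sketches minimal stand-ins so this file still runs
# on its own -- replace them with the real implementations.
try:
    from app_config import app_config
    from app_user_desc import app_user_description
    from app_dev_desc import app_dev_desc
except ImportError:
    def app_config():
        st.markdown("Configure the inference endpoint URL, tool selection and logging here.")

    def app_user_description():
        st.markdown("User-facing description of the app goes here.")

    def app_dev_desc():
        st.markdown("Developer documentation goes here.")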

tool_loader = ToolLoader(tool_names)

st.title("Hugging Face Agent and Tools")

st.markdown("Welcome to the Hugging Face Agent and Tools app! This app allows you to interact with various tools using the Hugging Face API.")

tabs = st.tabs(["Chat", "URL, Tools and logging", "User Description", "Developers"])

with tabs[0]:
    st.markdown("Start to chat, e.g. 'Generate an image of a boat.' This will make the agent use the text2image tool to generate an image.")

with tabs[1]:
    app_config()

with tabs[2]:
    app_user_description()

with tabs[3]:
    app_dev_desc()
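
# NOTE (assumption): in the full app, `url_endpoint` and `tool_checkboxes` are set by
# app_config() on the "URL, Tools and logging" tab. The fallbacks below (the
# session-state keys and the HF_ENDPOINT_URL variable are placeholders) only keep
# the chat tab working when this file is run on its own.
url_endpoint = st.session_state.get("url_endpoint", os.environ.get("HF_ENDPOINT_URL", ""))
tool_checkboxes = st.session_state.get("tool_checkboxes", [True] * len(tool_loader.tools))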

if "messages" not in st.session_state:
    st.session_state.messages = []

for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

with st.chat_message("assistant"):
    st.markdown("Hello there! How can I assist you today?")

if user_message := st.chat_input("Enter message"):
    st.chat_message("user").markdown(user_message)
    st.session_state.messages.append({"role": "user", "content": user_message})

    selected_tools = [tool_loader.tools[idx] for idx, checkbox in enumerate(tool_checkboxes) if checkbox]

    response = handle_submission(user_message, selected_tools, url_endpoint)

    with st.chat_message("assistant"):
        if response is None:
            st.warning("The agent returned no response. Please try again, e.g. 'Generate an image of a flying horse.'")
        elif isinstance(response, Image.Image):
            st.image(response)
        elif isinstance(response, AudioSegment):
            # st.audio needs raw bytes (or a file/array), so export the segment to WAV first.
            audio_buffer = io.BytesIO()
            response.export(audio_buffer, format="wav")
            st.audio(audio_buffer.getvalue(), format="audio/wav")
        elif isinstance(response, int):
            st.markdown(str(response))
        elif isinstance(response, str):
            st.markdown(response)
        elif isinstance(response, list):
            for item in response:
                st.markdown(item)
        elif isinstance(response, pd.DataFrame):
            st.dataframe(response)
        elif isinstance(response, pd.Series):
            st.table(response.iloc[0:10])
        elif isinstance(response, dict):
            # Some tools (e.g. an emojifier) return a dict with an "emojified_text" key.
            if "emojified_text" in response:
                st.markdown(response["emojified_text"])
            else:
                st.json(response)
        elif isinstance(response, alt.Chart):
            st.altair_chart(response)
        elif isinstance(response, Plot):
            st.bokeh_chart(response)
        elif isinstance(response, (graphviz.Graph, graphviz.Digraph)):
            st.graphviz_chart(response)
        elif isinstance(response, go.Figure):
            st.plotly_chart(response)
        elif isinstance(response, pdk.Deck):
            st.pydeck_chart(response)
        elif isinstance(response, matplotlib.figure.Figure):
            st.pyplot(response)
        # Vega-Lite specs are plain dicts, so they are already covered by the dict branch above.
        else:
            st.warning("Unrecognized response type. Please try again, e.g. 'Generate an image of a flying horse.'")

    st.session_state.messages.append({"role": "assistant", "content": response})