import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, HfAgent, LocalAgent, load_tool
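
# Local agent example: load THUDM/agentlm-7b on this machine and run two sample prompts.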
checkpoint = "THUDM/agentlm-7b"
model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map="auto", torch_dtype=torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(checkpoint)

agent = LocalAgent(model, tokenizer)
agent.run("Draw me a picture of rivers and lakes.")

print(agent.run("Is the following `text` (in Spanish) positive or negative?", text="¡Este es un API muy agradable!"))
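
# Tools that can be handed to the Streamlit-driven agent.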
controlnet_transformer = load_tool("huggingface-tools/text-to-image")
upscaler = load_tool("diffusers/latent-upscaler-tool")

tools = [controlnet_transformer, upscaler]
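
# Streamlit UI: a prompt box plus one checkbox per available tool.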
st.title("Hugging Face Agent")

message_input = st.text_input("Enter your message:", "")

tool_checkboxes = [st.checkbox(f"Use {tool.name}") for tool in tools]
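
# Submit-button callback: build an agent with the selected tools and run the user's message.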
def handle_submission():
    message = message_input
    selected_tools = [tool for tool, checkbox in zip(tools, tool_checkboxes) if checkbox]

    # Remote agent served through the Hugging Face Inference API, extended with the selected tools.
    agent = HfAgent("https://api-inference.huggingface.co/models/THUDM/agentlm-7b", additional_tools=selected_tools)

    # Run the agent on the message and display whatever it returns (text, image, ...).
    response = agent.run(message)
    st.write(response)

    return "done"

submit_button = st.button("Submit", on_click=handle_submission)
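
# Launch the app with: streamlit run <this_file>.py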