import streamlit as st
import torch
from transformers import HfAgent, LocalAgent, load_tool
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load a local agent backed by THUDM/agentlm-7b.
checkpoint = "THUDM/agentlm-7b"
model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map="auto", torch_dtype=torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
agent = LocalAgent(model, tokenizer)

# Example invocations of the local agent. Left commented out so they do not
# re-run every time Streamlit re-executes this script on user interaction.
# agent.run("Draw me a picture of rivers and lakes.")
# print(agent.run("Is the following `text` (in Spanish) positive or negative?", text="¡Este es un API muy agradable!"))

# Load the tools the remote agent may use.
controlnet_transformer = load_tool("huggingface-tools/text-to-image")
upscaler = load_tool("diffusers/latent-upscaler-tool")
tools = [controlnet_transformer, upscaler]

# Create the Streamlit app.
st.title("Hugging Face Agent")

# Input field for the user's message.
message_input = st.text_input("Enter your message:", "")

# Checkboxes for the tools to be used by the agent (label each with the tool's name).
tool_checkboxes = [st.checkbox(f"Use {tool.name}") for tool in tools]


# Callback that handles the form submission.
def handle_submission():
    # Get the user's message and the selected tools.
    message = message_input
    selected_tools = [tool for tool, checked in zip(tools, tool_checkboxes) if checked]

    # Initialize the remote agent with the selected tools.
    # Alternative inference endpoints:
    # agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder", additional_tools=selected_tools)
    # agent = HfAgent("https://api-inference.huggingface.co/models/OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5", additional_tools=selected_tools)
    agent = HfAgent("https://api-inference.huggingface.co/models/THUDM/agentlm-7b", additional_tools=selected_tools)

    # Run the agent on the user's message and display the response.
    # The response may be text, an image, or another object, so use st.write
    # rather than a float format string.
    response = agent.run(message)
    st.write(response)


# Wire the callback to the submit button.
submit_button = st.button("Submit", on_click=handle_submission)
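
# Optional refactor (a minimal sketch, assuming Streamlit >= 1.18, where
# st.cache_resource is available): load_tool downloads model weights, so
# caching the loaded tools keeps Streamlit from reloading them every time
# the script re-runs on user interaction. The get_tools name is illustrative.
#
# @st.cache_resource
# def get_tools():
#     return [
#         load_tool("huggingface-tools/text-to-image"),
#         load_tool("diffusers/latent-upscaler-tool"),
#     ]
#
# tools = get_tools()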