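# Streamlit front-end for a Hugging Face agent.
# A LocalAgent built on THUDM/agentlm-7b is loaded and smoke-tested at startup,
# while the web UI sends user prompts to the same checkpoint through the
# Inference API (HfAgent), optionally extended with text-to-image and
# latent-upscaler tools selected via checkboxes.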
import streamlit as st
import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoTokenizer, HfAgent, LocalAgent, load_tool


# Load the AgentLM checkpoint locally for the LocalAgent demo.
# bfloat16 weights for a 7B model still require a GPU with plenty of memory.
checkpoint = "THUDM/agentlm-7b"
model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map="auto", torch_dtype=torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(checkpoint)

local_agent = LocalAgent(model, tokenizer)

# Quick smoke tests for the local agent (note: Streamlit reruns this script on every interaction)
local_agent.run("Draw me a picture of rivers and lakes.")
print(local_agent.run("Is the following `text` (in Spanish) positive or negative?", text="¡Este es un API muy agradable!"))  # "This is a very nice API!"

# Load the extra tools the agent can call
controlnet_transformer = load_tool("huggingface-tools/text-to-image")
upscaler = load_tool("diffusers/latent-upscaler-tool")

tools = [controlnet_transformer, upscaler]

# Create the Streamlit app
st.title("Hugging Face Agent")

# Input field for the user's message
message_input = st.text_input("Enter your message:", "")

# One checkbox per tool so the user can pick which ones the agent may use
tool_checkboxes = [st.checkbox(f"Use {getattr(tool, 'name', tool)}") for tool in tools]


# Callback that runs when the Submit button is clicked
def handle_submission():
    # Get the user's message and the tools whose checkboxes are ticked
    message = message_input
    selected_tools = [tool for tool, checked in zip(tools, tool_checkboxes) if checked]

    # Initialize the remote agent (Inference API) with the selected tools.
    # Alternative endpoints that were tried: bigcode/starcoder,
    # OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5.
    agent = HfAgent(
        "https://api-inference.huggingface.co/models/THUDM/agentlm-7b",
        additional_tools=selected_tools,
    )

    # Run the agent and display its response: image tools return a PIL image,
    # other prompts typically return text.
    response = agent.run(message)
    if isinstance(response, Image.Image):
        st.image(response)
    else:
        st.write(response)


# Submit button that triggers the callback above
submit_button = st.button("Submit", on_click=handle_submission)