Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -4,15 +4,11 @@ import torch
|
|
4 |
import gradio as gr
|
5 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
6 |
|
7 |
-
title = """
|
8 |
-
description = """
|
9 |
-
🎬 Large Action Models (LAMs) are advanced large language models designed to enhance decision-making and translate user intentions into executable actions that interact with the world. LAMs autonomously plan and execute tasks to achieve specific goals, serving as the brains of AI agents. They have the potential to automate workflow processes across various domains, making them invaluable for a wide range of applications. Check out the Salesforce/xLAM models : [🤗 xLAM-1b-fc-r](https://huggingface.co/Salesforce/xLAM-1b-fc-r) | [🤗 xLAM-1b-fc-r-GGUF](https://huggingface.co/Salesforce/xLAM-1b-fc-r-gguf) [🤗 xLAM-7b-fc-r](https://huggingface.co/Salesforce/xLAM-7b-fc-r) | [🤗 xLAM-7b-fc-r-GGUF](https://huggingface.co/Salesforce/xLAM-7b-fc-r-gguf) [🤗 xLAM-7b-r ](https://huggingface.co/Salesforce/xLAM-7b-r) | [🤗 xLAM-8x7b-r](https://huggingface.co/Salesforce/xLAM-8x7b-r) [🤗 xLAM-8x22b-r](https://huggingface.co/Salesforce/xLAM-8x22b-r) |
|
10 |
-
### Join us :
|
11 |
-
🌟TeamTonic🌟 is always making cool demos! Join our active builder's 🛠️community 👻 [](https://discord.gg/GWpVpekp) On 🤗Huggingface:[MultiTransformer](https://huggingface.co/MultiTransformer) On 🌐Github: [Tonic-AI](https://github.com/tonic-ai) & contribute to🌟 [Build Tonic](https://git.tonic-ai.com/)🤗Big thanks to Yuvi Sharma and all the folks at huggingface for the community grant 🤗
|
12 |
-
"""
|
13 |
|
14 |
# Load model and tokenizer
|
15 |
-
model_name = "Salesforce/xLAM-
|
16 |
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype="auto", trust_remote_code=True)
|
17 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
18 |
|
@@ -133,14 +129,6 @@ with gr.Blocks() as demo:
|
|
133 |
output = gr.Code(label="🎬 xLam :", lines=10, language="json")
|
134 |
|
135 |
submit_button.click(generate_response, inputs=[tools_input, query_input], outputs=output)
|
136 |
-
|
137 |
-
gr.Examples(
|
138 |
-
examples=[
|
139 |
-
[example_tools, "What's the weather like in San Francisco in celsius?"],
|
140 |
-
[example_tools, "Search for the latest news on artificial intelligence"],
|
141 |
-
],
|
142 |
-
inputs=[tools_input, query_input],
|
143 |
-
)
|
144 |
|
145 |
if __name__ == "__main__":
|
146 |
demo.launch()
|
|
|
4 |
import gradio as gr
|
5 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
6 |
|
7 |
+
title = """"""
|
8 |
+
description = """"""
|
|
|
|
|
|
|
|
|
9 |
|
10 |
# Load model and tokenizer
|
11 |
+
model_name = "Salesforce/xLAM-8x7b-r"
|
12 |
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype="auto", trust_remote_code=True)
|
13 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
14 |
|
|
|
129 |
output = gr.Code(label="🎬 xLam :", lines=10, language="json")
|
130 |
|
131 |
submit_button.click(generate_response, inputs=[tools_input, query_input], outputs=output)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
132 |
|
133 |
if __name__ == "__main__":
|
134 |
demo.launch()
|