Update app.py
app.py CHANGED
@@ -4,6 +4,13 @@ import torch
 import gradio as gr
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
+title = """# 🙋🏻♂️ Welcome to Tonic's Salesforce/Xlam-7B-r"""
+description = """
+Large Action Models (LAMs) are advanced large language models designed to enhance decision-making and translate user intentions into executable actions that interact with the world. LAMs autonomously plan and execute tasks to achieve specific goals, serving as the brains of AI agents. They have the potential to automate workflow processes across various domains, making them invaluable for a wide range of applications.
+### Join us :
+🌟TeamTonic🌟 is always making cool demos! Join our active builder's 🛠️community 👻 [![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/GWpVpekp) On 🤗Huggingface:[MultiTransformer](https://huggingface.co/MultiTransformer) On 🌐Github: [Tonic-AI](https://github.com/tonic-ai) & contribute to🌟 [Build Tonic](https://git.tonic-ai.com/)🤗Big thanks to Yuvi Sharma and all the folks at huggingface for the community grant 🤗
+"""
+
 # Load model and tokenizer
 model_name = "Salesforce/xLAM-7b-r"
 model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype="auto", trust_remote_code=True)
@@ -87,7 +94,7 @@ iface = gr.Interface(
     inputs=[
         gr.Textbox(
             label="Available Tools (JSON format)",
-            lines=
+            lines=20,
             value=json.dumps([
                 {
                     "name": "get_weather",
@@ -126,9 +133,9 @@ iface = gr.Interface(
         ),
         gr.Textbox(label="User Query", lines=2, value="What's the weather like in New York in fahrenheit?")
     ],
-    outputs=gr.Textbox(label="Generated Response", lines=
-    title=
-    description=
+    outputs=gr.Textbox(label="Generated Response", lines=5),
+    title=title,
+    description=description,
)
 
 if __name__ == "__main__":
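For orientation, here is a minimal sketch of how the pieces touched by this commit might fit together in app.py. Only the model loading, the new title/description strings, and the gr.Interface wiring come from the hunks above; the handler name generate_response, the chat-template prompt construction, and the abbreviated title/description text are assumptions for illustration, not the Space's actual code.

import json

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Abbreviated here; the full strings are the ones added in this commit.
title = """# 🙋🏻♂️ Welcome to Tonic's Salesforce/Xlam-7B-r"""
description = """Large Action Models (LAMs) translate user intentions into executable actions."""

# Load model and tokenizer (as in the diff above)
model_name = "Salesforce/xLAM-7b-r"
model = AutoModelForCausalLM.from_pretrained(
    model_name, device_map="auto", torch_dtype="auto", trust_remote_code=True
)
tokenizer = AutoTokenizer.from_pretrained(model_name)

def generate_response(tools_json: str, query: str) -> str:
    # Hypothetical handler: the real app.py likely builds an xLAM-specific prompt;
    # this sketch just uses the tokenizer's generic chat template.
    messages = [
        {"role": "system", "content": f"You can call the following tools: {tools_json}"},
        {"role": "user", "content": query},
    ]
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)
    output_ids = model.generate(input_ids, max_new_tokens=512)
    # Decode only the newly generated tokens.
    return tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)

iface = gr.Interface(
    fn=generate_response,
    inputs=[
        gr.Textbox(
            label="Available Tools (JSON format)",
            lines=20,
            value=json.dumps([{"name": "get_weather"}], indent=2),
        ),
        gr.Textbox(label="User Query", lines=2, value="What's the weather like in New York in fahrenheit?"),
    ],
    outputs=gr.Textbox(label="Generated Response", lines=5),
    title=title,
    description=description,
)

if __name__ == "__main__":
    iface.launch()

Run locally with `python app.py`; on Spaces, the same iface.launch() call serves the interface.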