Update Llama 3.1 8B robot planning space with improvements
- README.md  +2 -2
- app.py  +34 -26
- requirements.txt  +1 -2
README.md
CHANGED
@@ -4,7 +4,7 @@ emoji: 🤖
 colorFrom: blue
 colorTo: green
 sdk: gradio
-sdk_version:
+sdk_version: 4.32.2
 app_file: app.py
 pinned: false
 license: llama3.1
@@ -68,7 +68,7 @@ Convert natural language commands into structured task sequences for constructio
 - **Architecture**: Llama 3.1 8B + QLoRA adapters
 - **Quantization**: 4-bit (NF4) with double quantization
 - **Framework**: Transformers + PEFT + BitsAndBytesConfig
-- **Interface**: Gradio
+- **Interface**: Gradio 4.32.2 (stable version)
 - **Hardware**: T4-MEDIUM (16GB VRAM)

 ## ⚡ Performance Notes
app.py
CHANGED
@@ -1,3 +1,4 @@
+
 import gradio as gr
 import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
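The model-loading code is not part of this diff. Based on the stack the README lists (4-bit NF4 with double quantization, Transformers + PEFT + BitsAndBytesConfig), a plausible loading sketch is shown below; the base-model and adapter IDs are placeholders, not values taken from this Space.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

# 4-bit NF4 quantization with double quantization, as described in the README.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.float16,
)

BASE_MODEL = "meta-llama/Llama-3.1-8B-Instruct"      # placeholder, not from this repo
ADAPTER_REPO = "your-username/robot-planning-qlora"  # placeholder, not from this repo

tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)
base_model = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL,
    quantization_config=bnb_config,
    device_map="auto",
)
# Attach the QLoRA adapters on top of the quantized base model.
model = PeftModel.from_pretrained(base_model, ADAPTER_REPO)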
@@ -135,21 +136,24 @@ def chat_interface(message, history, max_tokens, temperature, top_p):
 
     try:
         response = generate_response(message, max_tokens, temperature, top_p)
-        history.append(
+        history.append((message, response))
         return history, ""
     except Exception as chat_error:
         error_msg = f"❌ Chat Error: {str(chat_error)}"
-        history.append(
+        history.append((message, error_msg))
         return history, ""
 
-# Create the Gradio app
+# Create the Gradio app
 with gr.Blocks(
-    title="Robot Task Planning - Llama 3.1 8B",
-    theme=
-    css="
+    title="Robot Task Planning - Llama 3.1 8B",
+    theme=gr.themes.Soft(),
+    css="""
+    .gradio-container {
+        max-width: 1200px;
+        margin: auto;
+    }
+    """
 ) as demo:
-
-    # Title and description
     gr.Markdown("""
     # 🤖 Llama 3.1 8B - Robot Task Planning
 
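The switch to history.append((message, response)) matches the (user, assistant) tuple format that gr.Chatbot uses by default in Gradio 4.x. A self-contained sketch of the handler, with generate_response stubbed because the real implementation sits outside this hunk:

def generate_response(message, max_tokens, temperature, top_p):
    # Stub standing in for the Space's real model call.
    return f"(stub) planned tasks for: {message}"

def chat_interface(message, history, max_tokens, temperature, top_p):
    history = history or []  # sketch-only guard; the Space's own function may differ
    try:
        response = generate_response(message, max_tokens, temperature, top_p)
        history.append((message, response))   # one (user, assistant) pair per turn
        return history, ""                    # second value clears the input textbox
    except Exception as chat_error:
        error_msg = f"❌ Chat Error: {str(chat_error)}"
        history.append((message, error_msg))
        return history, ""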
@@ -164,35 +168,38 @@ with gr.Blocks(
 
     with gr.Row():
         with gr.Column(scale=3):
-            # Chat interface - simplified parameters
             chatbot = gr.Chatbot(
                 label="Task Planning Results",
-                height=500
+                height=500,
+                show_label=True,
+                container=True,
+                bubble_full_width=False,
+                show_copy_button=True
             )
 
-            # Input box - simplified parameters
             msg = gr.Textbox(
                 label="Robot Command",
                 placeholder="Enter robot task command (e.g., 'Deploy Excavator 1 to Soil Area 1')...",
                 lines=2,
-                max_lines=5
+                max_lines=5,
+                show_label=True,
+                container=True
             )
 
-            # Buttons
             with gr.Row():
-                send_btn = gr.Button("🚀 Generate Tasks", variant="primary")
-                clear_btn = gr.Button("🗑️ Clear")
+                send_btn = gr.Button("🚀 Generate Tasks", variant="primary", size="sm")
+                clear_btn = gr.Button("🗑️ Clear", variant="secondary", size="sm")
 
         with gr.Column(scale=1):
             gr.Markdown("### ⚙️ Generation Settings")
 
-            # Parameter controls - simplified configuration
             max_tokens = gr.Slider(
                 minimum=50,
                 maximum=500,
                 value=200,
                 step=10,
-                label="Max Tokens"
+                label="Max Tokens",
+                info="Maximum number of tokens to generate"
             )
 
             temperature = gr.Slider(
@@ -200,7 +207,8 @@
                 maximum=2.0,
                 value=0.7,
                 step=0.1,
-                label="Temperature"
+                label="Temperature",
+                info="Controls randomness (lower = more focused)"
             )
 
             top_p = gr.Slider(
@@ -208,7 +216,8 @@
                 maximum=1.0,
                 value=0.9,
                 step=0.05,
-                label="Top-p"
+                label="Top-p",
+                info="Nucleus sampling threshold"
             )
 
             gr.Markdown("""
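generate_response itself is not shown in this diff; the three sliders whose tooltips are added here typically map onto Transformers generation arguments as in the sketch below. The model and tokenizer are assumed to be the already-loaded quantized model and its tokenizer; this is not the Space's actual implementation.

import torch

def generate_response_sketch(model, tokenizer, message, max_tokens, temperature, top_p):
    # Sketch only: maps the UI sliders onto transformers' generate() kwargs.
    inputs = tokenizer(message, return_tensors="pt").to(model.device)
    with torch.no_grad():
        output_ids = model.generate(
            **inputs,
            max_new_tokens=int(max_tokens),  # "Max Tokens" slider
            do_sample=True,
            temperature=float(temperature),  # "Temperature" slider
            top_p=float(top_p),              # "Top-p" slider
            pad_token_id=tokenizer.eos_token_id,
        )
    # Return only the newly generated tokens, not the echoed prompt.
    new_tokens = output_ids[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)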
@@ -217,29 +226,28 @@
             Loading time: ~3-5 minutes
             """)
 
-    #
-    examples_data = ['Deploy Excavator 1 to Soil Area 1 for excavation.', 'Send Dump Truck 1 to collect material from Excavator 1, then unload at storage area.', 'Move all robots to avoid Puddle 1 after inspection.', 'Deploy multiple excavators to different soil areas simultaneously.', 'Coordinate dump trucks to transport materials from excavation site to storage.', 'Send robot to inspect rock area, then avoid with all other robots if dangerous.', 'Return all robots to start position after completing tasks.', 'Create a sequence: excavate, load, transport, unload, repeat.']
+    # Example conversations
     gr.Examples(
-        examples=
+        examples=['Deploy Excavator 1 to Soil Area 1 for excavation.', 'Send Dump Truck 1 to collect material from Excavator 1, then unload at storage area.', 'Move all robots to avoid Puddle 1 after inspection.', 'Deploy multiple excavators to different soil areas simultaneously.', 'Coordinate dump trucks to transport materials from excavation site to storage.', 'Send robot to inspect rock area, then avoid with all other robots if dangerous.', 'Return all robots to start position after completing tasks.', 'Create a sequence: excavate, load, transport, unload, repeat.'],
         inputs=msg,
         label="💡 Example Robot Commands"
     )
 
-    #
+    # Event handling
     msg.submit(
-
+        chat_interface,
        inputs=[msg, chatbot, max_tokens, temperature, top_p],
        outputs=[chatbot, msg]
     )
 
     send_btn.click(
-
+        chat_interface,
         inputs=[msg, chatbot, max_tokens, temperature, top_p],
         outputs=[chatbot, msg]
     )
 
     clear_btn.click(
-
+        lambda: ([], ""),
         outputs=[chatbot, msg]
     )
 
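The clear button's new callback is a zero-argument lambda: in Gradio 4.x a handler with no declared inputs is called with no arguments, and its return values are mapped positionally onto the outputs list, so ([], "") resets the chat history and empties the textbox. A minimal, runnable sketch of just that wiring:

import gradio as gr

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(label="Task Planning Results")
    msg = gr.Textbox(label="Robot Command")
    clear_btn = gr.Button("🗑️ Clear")
    # ([], "") maps onto (chatbot history, textbox value).
    clear_btn.click(lambda: ([], ""), outputs=[chatbot, msg])

# demo.launch()  # uncomment to serve locally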
requirements.txt
CHANGED
@@ -1,5 +1,4 @@
-gradio==
-pydantic==1.10.12
+gradio==4.32.2
 transformers==4.44.2
 torch==2.1.0
 peft==0.7.1
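Dropping the explicit pydantic pin while pinning gradio==4.32.2 likely lets pip resolve the pydantic 2.x release that Gradio 4.x itself requires, rather than forcing the old 1.10.12 pin against it; the pinned Gradio version also has to match the sdk_version declared in README.md. A small sketch, not part of the Space, for checking the pins against the running environment:

import importlib.metadata

# Expected versions, mirroring requirements.txt after this change.
PINNED = {
    "gradio": "4.32.2",
    "transformers": "4.44.2",
    "torch": "2.1.0",
    "peft": "0.7.1",
}

for package, expected in PINNED.items():
    installed = importlib.metadata.version(package)
    # Note: torch may report a local suffix such as 2.1.0+cu118.
    status = "OK" if installed == expected else f"MISMATCH (expected {expected})"
    print(f"{package}=={installed}  {status}")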