Update app.py
app.py CHANGED
@@ -12,6 +12,7 @@ model = MllamaForConditionalGeneration.from_pretrained(ckpt,
                                              torch_dtype=torch.bfloat16).to("cuda")
 processor = AutoProcessor.from_pretrained(ckpt)
 
+
 @spaces.GPU
 def bot_streaming(message, history, max_new_tokens=250):
 
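For context, here is a minimal sketch of how the top of app.py plausibly looks around this hunk. The imports and the checkpoint name are assumptions, since the diff only shows that some checkpoint `ckpt` is loaded into `MllamaForConditionalGeneration` and `AutoProcessor`; `meta-llama/Llama-3.2-11B-Vision-Instruct` is a hypothetical placeholder.

import time
from threading import Thread

import gradio as gr
import spaces
import torch
from transformers import AutoProcessor, MllamaForConditionalGeneration

# Assumed checkpoint: the diff does not show how `ckpt` is defined.
ckpt = "meta-llama/Llama-3.2-11B-Vision-Instruct"

# Load the vision-language model in bfloat16 and move it to the GPU,
# matching the lines shown in the hunk above.
model = MllamaForConditionalGeneration.from_pretrained(
    ckpt, torch_dtype=torch.bfloat16).to("cuda")
processor = AutoProcessor.from_pretrained(ckpt)

The `@spaces.GPU` decorator comes from the `spaces` package and allocates a ZeroGPU device for the duration of the call on Hugging Face Spaces.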
@@ -66,4 +67,34 @@ def bot_streaming(message, history, max_new_tokens=250):
         buffer += new_text
         generated_text_without_prompt = buffer
         time.sleep(0.01)
-        yield buffer
+        yield buffer
+
+
+demo = gr.ChatInterface(fn=bot_streaming, title="Multimodal Llama", examples=[
+              [{"text": "Which era does this piece belong to? Give details about the era.", "files":["./examples/rococo.jpg"]},
+              200],
+              [{"text": "Where do the droughts happen according to this diagram?", "files":["./examples/weather_events.png"]},
+              250],
+              [{"text": "What happens when you take out white cat from this chain?", "files":["./examples/ai2d_test.jpg"]},
+              250],
+              [{"text": "How long does it take from invoice date to due date? Be short and concise.", "files":["./examples/invoice.png"]},
+              250],
+              [{"text": "Where to find this monument? Can you give me other recommendations around the area?", "files":["./examples/wat_arun.jpg"]},
+              250],
+          ],
+          textbox=gr.MultimodalTextbox(),
+          additional_inputs = [gr.Slider(
+              minimum=10,
+              maximum=500,
+              value=250,
+              step=10,
+              label="Maximum number of new tokens to generate",
+            )
+          ],
+          cache_examples=False,
+          description="Try Multimodal Llama by Meta with transformers in this demo. Upload an image, and start chatting about it, or simply try one of the examples below. To learn more about Llama Vision, visit [our blog post](https://huggingface.co/blog/llama32). ",
+          stop_btn="Stop Generation",
+          fill_height=True,
+          multimodal=True)
+
+demo.launch(debug=True)
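The `buffer += new_text` / `yield buffer` loop in the hunk above is the usual transformers streaming pattern. Below is a minimal sketch of how `bot_streaming` plausibly produces those lines, assuming a `TextIteratorStreamer` feeding off `model.generate` on a background thread; the prompt construction from `history` is a hypothetical simplification and is not shown in the diff. `model`, `processor`, and `spaces` are the objects defined at the top of app.py (see the sketch above).

import time
from threading import Thread

from PIL import Image
from transformers import TextIteratorStreamer

@spaces.GPU
def bot_streaming(message, history, max_new_tokens=250):
    # Hypothetical simplification: the real app also folds `history` into
    # the prompt; here we only handle the latest multimodal message.
    image = Image.open(message["files"][0]).convert("RGB") if message["files"] else None
    content = ([{"type": "image"}] if image is not None else []) + [
        {"type": "text", "text": message["text"]}]
    prompt = processor.apply_chat_template(
        [{"role": "user", "content": content}], add_generation_prompt=True)
    inputs = processor(text=prompt, images=image, return_tensors="pt").to(model.device)

    # Run generation on a worker thread and stream decoded tokens back.
    streamer = TextIteratorStreamer(
        processor.tokenizer, skip_prompt=True, skip_special_tokens=True)
    Thread(target=model.generate,
           kwargs=dict(inputs, streamer=streamer, max_new_tokens=max_new_tokens)).start()

    buffer = ""
    for new_text in streamer:
        buffer += new_text
        time.sleep(0.01)  # small pause so the Gradio UI repaints smoothly
        yield buffer

Note also why each example row in the diff pairs a multimodal message with a number (200 or 250): since `max_new_tokens` is exposed through `additional_inputs`, `gr.ChatInterface` expects every example to supply a value for that slider alongside the message.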