Update app.py

app.py CHANGED
@@ -10,15 +10,6 @@ from transformers import TextIteratorStreamer
 import spaces
 
 
-PLACEHOLDER = """
-<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
-   <img src="https://cdn-uploads.huggingface.co/production/uploads/64ccdc322e592905f922a06e/DDIW0kbWmdOQWwy4XMhwX.png" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55; ">
-   <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">LLaVA-Llama-3-8B</h1>
-   <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Llava-Llama-3-8b is a LLaVA model fine-tuned from Meta-Llama-3-8B-Instruct and CLIP-ViT-Large-patch14-336 with ShareGPT4V-PT and InternVL-SFT by XTuner</p>
-</div>
-"""
-
-
 model_id = "xtuner/llava-llama-3-8b-v1_1-transformers"
 
 processor = AutoProcessor.from_pretrained(model_id)
@@ -36,26 +27,22 @@ model.generation_config.eos_token_id = 128009
 @spaces.GPU
 def bot_streaming(message, history):
     print(message)
-    image = None
+    image = None
     if message["files"]:
-        # message["files"][-1] is a Dict or just a string
         if type(message["files"][-1]) == dict:
             image = message["files"][-1]["path"]
         else:
             image = message["files"][-1]
     else:
-        # if there's no image uploaded for this turn, look for images in the past turns
-        # kept inside tuples, take the last one
         for hist in history:
             if type(hist[0]) == tuple:
                 image = hist[0][0]
-                break
+                break
 
     if image is None:
         image = "ignore.png"
 
     prompt = f"<|start_header_id|>user<|end_header_id|>\n\n<image>\n{message['text']}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
-    # print(f"prompt: {prompt}")
     image = Image.open(image)
     inputs = processor(prompt, image, return_tensors='pt').to(0, torch.float16)
 
@@ -66,25 +53,20 @@ def bot_streaming(message, history):
     thread.start()
 
     text_prompt = f"<|start_header_id|>user<|end_header_id|>\n\n{message['text']}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
-    # print(f"text_prompt: {text_prompt}")
 
     buffer = ""
     time.sleep(0.5)
     for new_text in streamer:
-        # find <|eot_id|> and remove it from the new_text
         if "<|eot_id|>" in new_text:
             new_text = new_text.split("<|eot_id|>")[0]
         buffer += new_text
 
-        # generated_text_without_prompt = buffer[len(text_prompt):]
         generated_text_without_prompt = buffer
-        # print(generated_text_without_prompt)
         time.sleep(0.06)
-        # print(f"new_text: {generated_text_without_prompt}")
         yield generated_text_without_prompt
 
 
-chatbot=gr.Chatbot(
+chatbot=gr.Chatbot(scale=1)
 chat_input = gr.MultimodalTextbox(interactive=True, file_types=["image"], placeholder="Enter message or upload file...", show_label=False)
 with gr.Blocks(fill_height=True, ) as demo:
     gr.ChatInterface(
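Likewise, thread.start() and the streamer loop in bot_streaming rely on generation wiring elided between the second and third hunks. A sketch of that wiring under standard TextIteratorStreamer usage (the argument values are assumptions):

from threading import Thread
from transformers import TextIteratorStreamer

# The streamer yields decoded text chunks as model.generate() produces tokens.
# Special tokens are not skipped here because the loop above strips <|eot_id|> itself.
streamer = TextIteratorStreamer(processor.tokenizer, skip_prompt=True)
generation_kwargs = dict(**inputs, streamer=streamer, max_new_tokens=1024, do_sample=False)

# Run generation on a worker thread so the handler can yield partial text meanwhile.
thread = Thread(target=model.generate, kwargs=generation_kwargs)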
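Finally, the diff cuts off at gr.ChatInterface(. A plausible completion of that block under Gradio 4.x conventions, showing how the new scale=1 on the Chatbot lets it stretch inside fill_height=True (the keyword arguments passed to ChatInterface are assumptions):

import gradio as gr

chatbot = gr.Chatbot(scale=1)  # scale=1 lets the chat area expand to fill the Blocks height
chat_input = gr.MultimodalTextbox(interactive=True, file_types=["image"],
                                  placeholder="Enter message or upload file...", show_label=False)

with gr.Blocks(fill_height=True) as demo:
    gr.ChatInterface(
        fn=bot_streaming,   # generator above; each yield updates the in-progress reply
        chatbot=chatbot,
        textbox=chat_input,
        multimodal=True,    # assumption: lets the interface accept image uploads
    )

demo.queue()
demo.launch()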