Update app.py
app.py CHANGED
@@ -24,7 +24,6 @@ tokenizer, llava_model, image_processor, context_len = load_pretrained_model(
     device=device
 )
 
-
 @spaces.GPU
 def bot_streaming(message, history):
     print(message)
@@ -53,20 +52,39 @@ def bot_streaming(message, history):
     # Generate the prompt for the model
     prompt = message['text']
 
-    #
-
-
-
-
-
-
-
-
-
+    # Use a streamer to generate the output in a streaming fashion
+    streamer = []
+
+    # Define a function to call chat_llava in a separate thread
+    def generate_output():
+        output = chat_llava(
+            args=None,
+            image_file=image,
+            text=prompt,
+            tokenizer=tokenizer,
+            model=llava_model,
+            image_processor=image_processor,
+            context_len=context_len
+        )
+        for new_text in output:
+            streamer.append(new_text)
+
+    # Start the generation in a separate thread
+    thread = Thread(target=generate_output)
+    thread.start()
 
     # Stream the output
     buffer = ""
-
+    while thread.is_alive() or streamer:
+        while streamer:
+            new_text = streamer.pop(0)
+            buffer += new_text
+            yield buffer
+        time.sleep(0.1)
+
+    # Ensure any remaining text is yielded after the thread completes
+    while streamer:
+        new_text = streamer.pop(0)
         buffer += new_text
         yield buffer
 
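For reference, the new code implements a simple producer/consumer pattern: a background thread pushes text chunks into a shared buffer while the generator function drains it and yields progressively longer strings, which is how Gradio streams chat output. Below is a minimal, self-contained sketch of that same pattern; fake_generate() is a hypothetical stand-in for chat_llava, and a queue.Queue replaces the plain list so the consumer can block instead of polling.

import time
from threading import Thread
from queue import Queue

def fake_generate():
    # Hypothetical stand-in for chat_llava: yields text chunks with latency.
    for word in "The quick brown fox jumps over the lazy dog".split():
        time.sleep(0.05)
        yield word + " "

def bot_streaming_sketch():
    chunks = Queue()          # thread-safe replacement for the plain list
    DONE = object()           # sentinel marking the end of generation

    def producer():
        for chunk in fake_generate():
            chunks.put(chunk)
        chunks.put(DONE)      # signal completion so the consumer can stop

    Thread(target=producer, daemon=True).start()

    buffer = ""
    while True:
        chunk = chunks.get()  # blocks until the producer pushes something
        if chunk is DONE:
            break
        buffer += chunk
        yield buffer          # yield the accumulated text so far

if __name__ == "__main__":
    for partial in bot_streaming_sketch():
        print(partial)

The list-based version in the diff also works under CPython, where the GIL makes list.append and list.pop(0) atomic, but it has to busy-wait with time.sleep(0.1) between polls; a blocking Queue.get avoids the polling, and the sentinel makes the final drain loop unnecessary.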