Update app.py
app.py CHANGED
@@ -1,10 +1,9 @@
-from threading import Thread
-from transformers import Qwen2VLForConditionalGeneration, Qwen2VLProcessor, TextIteratorStreamer, AutoProcessor, BatchFeature
-
-from gradio import ChatInterface, Textbox, Slider
 from spaces import GPU
 
+from threading import Thread
+from transformers import Qwen2VLForConditionalGeneration, Qwen2VLProcessor, TextIteratorStreamer, AutoProcessor, BatchFeature
 from qwen_vl_utils import process_vision_info
+from gradio import ChatInterface, Textbox, Slider
 
 model_path = "Pectics/Softie-VL-7B-250123"
 
@@ -34,8 +33,7 @@ def infer(
         temperature=temperature,
         top_p=top_p,
     )
-
-    thread.start()
+    Thread(target=model.generate, kwargs=kwargs).start()
     response = ""
     for token in streamer:
         response += token
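For context, the second hunk sits inside the app's `infer` streaming loop: generation keyword arguments are assembled, `model.generate` is kicked off on a background `Thread`, and the main thread drains a `TextIteratorStreamer` to build up the reply. The sketch below illustrates that pattern end to end; it is not the actual contents of `app.py`. The helper name `stream_generate`, the default generation parameters, and the message-building steps are assumptions based on standard Qwen2-VL usage.

```python
from threading import Thread

from transformers import Qwen2VLForConditionalGeneration, AutoProcessor, TextIteratorStreamer
from qwen_vl_utils import process_vision_info

model_path = "Pectics/Softie-VL-7B-250123"

# Assumed setup: load the model and processor once at import time.
model = Qwen2VLForConditionalGeneration.from_pretrained(
    model_path, torch_dtype="auto", device_map="auto"
)
processor = AutoProcessor.from_pretrained(model_path)


def stream_generate(messages, max_new_tokens=512, temperature=0.7, top_p=0.9):
    """Yield a growing response string (hypothetical helper, not the app's infer)."""
    # Build multimodal inputs the way the Qwen2-VL examples do.
    text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    image_inputs, video_inputs = process_vision_info(messages)
    inputs = processor(
        text=[text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    ).to(model.device)

    # The streamer decodes generated token ids back into text as they arrive.
    streamer = TextIteratorStreamer(
        processor.tokenizer, skip_prompt=True, skip_special_tokens=True
    )
    kwargs = dict(
        **inputs,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
    )

    # generate() blocks, so run it on a background thread and consume the
    # streamer here -- the same pattern as the committed change.
    Thread(target=model.generate, kwargs=kwargs).start()
    response = ""
    for token in streamer:
        response += token
        yield response


# Example Qwen-style message list (illustrative only):
# messages = [{"role": "user", "content": [
#     {"type": "image", "image": "https://example.com/cat.jpg"},
#     {"type": "text", "text": "Describe this picture."},
# ]}]
```

Running `generate` on a worker thread is what lets the `for token in streamer` loop (and, in the real app, the Gradio `ChatInterface` callback) yield partial text while decoding is still in progress; the commit appears to simply inline the thread construction instead of keeping a separate `thread` variable.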