Update
app.py CHANGED

@@ -4,7 +4,7 @@ from os import getenv
 from threading import Thread
 from transformers import Qwen2VLForConditionalGeneration, Qwen2VLProcessor, TextIteratorStreamer, AutoProcessor, BatchFeature
 from qwen_vl_utils import process_vision_info
-from gradio import ChatInterface,
+from gradio import ChatInterface, Slider
 
 model_path = "Pectics/Softie-VL-7B-250123"
 
@@ -47,18 +47,22 @@ def infer(
         yield response
 
 def respond(
-    message: str,
+    message: str | list[object],
     history: list[object],
     max_tokens: int,
     temperature: float,
     top_p: float,
 ):
-    if len(history) == 0 or history[0]['role'] != 'system':
+    print('message', message)
+    print('history', history)
+    if isinstance(message, str) and (len(history) == 0 or history[0]['role'] != 'system'):
         history.insert(0, {"role": "system", "content": """You are Softie, or 小软 in Chinese.
 You are an intelligent assistant developed by the School of Software at Hefei University of Technology.
 You like to chat with people and help them solve problems."""})
-    history.append({"role": "user", "content": message})
-    text_inputs = processor.apply_chat_template(history, tokenize=False, add_generation_prompt=True)
+        history.append({"role": "user", "content": message})
+        text_inputs = processor.apply_chat_template(history, tokenize=False, add_generation_prompt=True)
+    else:
+        text_inputs = processor.apply_chat_template(message, tokenize=False, add_generation_prompt=True)
     image_inputs, video_inputs = process_vision_info(history)
     for response in infer((text_inputs, image_inputs, video_inputs), max_tokens, temperature, top_p):
         yield response
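The diff adds Slider to the gradio import, but the ChatInterface construction that uses it sits outside the changed hunks. A plausible sketch of that wiring, assuming the sliders map onto the max_tokens, temperature, and top_p parameters of respond() in that order (the ranges, defaults, and labels below are hypothetical):

# Hypothetical wiring elsewhere in app.py (not shown in this diff).
# additional_inputs are passed to respond() after (message, history),
# in the order listed, so the three Sliders would feed max_tokens,
# temperature, and top_p. Values here are illustrative only.
demo = ChatInterface(
    respond,
    type="messages",  # history entries are {"role": ..., "content": ...} dicts, as respond() expects
    additional_inputs=[
        Slider(minimum=1, maximum=2048, value=512, step=1, label="Max tokens"),
        Slider(minimum=0.0, maximum=2.0, value=0.7, step=0.05, label="Temperature"),
        Slider(minimum=0.0, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
    ],
)
demo.launch()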
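For reference, process_vision_info() from qwen_vl_utils consumes Qwen2-VL-style chat messages, in which content may be a list mixing text, image, and video items; this is why respond() now also accepts message as a list[object]. A minimal illustration (the image URL is a placeholder):

messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": "https://example.com/demo.jpg"},  # placeholder URL
            {"type": "text", "text": "What is in this picture?"},
        ],
    }
]
# Render the chat template to a prompt string, then collect the visual inputs
text_inputs = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
image_inputs, video_inputs = process_vision_info(messages)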
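The body of infer() is also outside this diff. Given the imports it relies on (Thread, TextIteratorStreamer, BatchFeature) and the way respond() drains it, it presumably follows the standard Transformers streaming pattern, roughly as below; this is a sketch assuming `model` and `processor` are the globals loaded from model_path, not the actual implementation:

def infer(inputs, max_tokens: int, temperature: float, top_p: float):
    text_inputs, image_inputs, video_inputs = inputs
    # processor(...) returns a BatchFeature holding input_ids, attention_mask,
    # and the pixel tensors for any images/videos
    model_inputs = processor(
        text=[text_inputs],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    ).to(model.device)
    streamer = TextIteratorStreamer(processor.tokenizer, skip_prompt=True, skip_special_tokens=True)
    # run generate() in a background thread so the streamer can be drained
    # as tokens arrive
    Thread(target=model.generate, kwargs=dict(
        **model_inputs,
        streamer=streamer,
        max_new_tokens=max_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
    )).start()
    response = ""
    for chunk in streamer:
        response += chunk
        yield response  # yield the accumulated text, matching how respond() re-yields it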