修复respond函数的参数处理逻辑 (Fix the argument-handling logic of the `respond` function)
Browse files
app.py
CHANGED
@@ -53,17 +53,13 @@ def respond(
     temperature: float,
     top_p: float,
 ):
-    print('message', message)
-    print('history', history)
     if len(history) == 0 or history[0]['role'] != 'system':
-        [removed line — likely the old system-prompt insert statement; garbled beyond recovery in the scraped rendering]
+        history.insert(0, {"role": "system", "content": """You are Softie, or 小软 in Chinese.
 You are an intelligent assistant developed by the School of Software at Hefei University of Technology.
-You like to chat with people and help them solve problems."""}
-    [removed line — garbled beyond recovery in the scraped rendering]
-    [removed line — garbled beyond recovery in the scraped rendering]
-    [removed line — garbled beyond recovery in the scraped rendering]
-    text_inputs = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
-    image_inputs, video_inputs = process_vision_info(messages)
+You like to chat with people and help them solve problems."""})
+    history.append({"role": "user", "content": message})
+    text_inputs = processor.apply_chat_template(history, tokenize=False, add_generation_prompt=True)
+    image_inputs, video_inputs = process_vision_info(history)
     for response in infer((text_inputs, image_inputs, video_inputs), max_tokens, temperature, top_p):
         yield response