Pectics committed
Commit 98891aa · 1 Parent(s): b934231
Files changed (2)
  1. .gitignore +1 -0
  2. app.py +3 -5
.gitignore ADDED
@@ -0,0 +1 @@
+ test.py
app.py CHANGED
@@ -1,6 +1,5 @@
  from spaces import GPU

- from os import getenv
  from threading import Thread
  from transformers import Qwen2VLForConditionalGeneration, Qwen2VLProcessor, TextIteratorStreamer, AutoProcessor, BatchFeature
  from qwen_vl_utils import process_vision_info
@@ -60,10 +59,9 @@ def respond(
  You are an intelligent assistant developed by the School of Software at Hefei University of Technology.
  You like to chat with people and help them solve problems."""})
  history.append({"role": "user", "content": message})
- text_inputs = processor.apply_chat_template(history, tokenize=False, add_generation_prompt=True)
- else:
- text_inputs = processor.apply_chat_template(message, tokenize=False, add_generation_prompt=True)
- image_inputs, video_inputs = process_vision_info(history)
+ message = history
+ text_inputs = processor.apply_chat_template(message, tokenize=False, add_generation_prompt=True)
+ image_inputs, video_inputs = process_vision_info(message)
  for response in infer((text_inputs, image_inputs, video_inputs), max_tokens, temperature, top_p):
  yield response
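For context on the app.py change: after this commit, apply_chat_template and process_vision_info both read from the same accumulated history list, instead of the template sometimes being applied to the bare incoming message. What follows is a minimal sketch of that input-building path, assuming a Qwen2-VL checkpoint and the content-list message format expected by qwen_vl_utils; the checkpoint name, image path, and sample messages are illustrative placeholders, not code from this repository.

# Minimal sketch (not the repository's code): build text and vision inputs from
# one shared message list, as the revised respond() path does after this commit.
from transformers import AutoProcessor
from qwen_vl_utils import process_vision_info

MODEL_ID = "Qwen/Qwen2-VL-7B-Instruct"  # placeholder; any Qwen2-VL checkpoint with a chat template
processor = AutoProcessor.from_pretrained(MODEL_ID)

# The current user turn is appended to the running history, and the whole list
# then drives both the chat template and the vision-input extraction.
history = [
    {"role": "system", "content": "You are an intelligent assistant ..."},
]
message = {"role": "user", "content": [
    {"type": "image", "image": "path/to/photo.jpg"},  # placeholder image path
    {"type": "text", "text": "What is in this picture?"},
]}
history.append(message)

text_inputs = processor.apply_chat_template(history, tokenize=False, add_generation_prompt=True)
image_inputs, video_inputs = process_vision_info(history)

# The processor combines the templated text with the extracted images/videos
# into model-ready tensors for generation (or a streaming loop).
inputs = processor(
    text=[text_inputs],
    images=image_inputs,
    videos=video_inputs,
    padding=True,
    return_tensors="pt",
)

Because one list now drives both calls, the templated text and the vision inputs cannot drift apart, which is what the added message = history line accomplishes in place of the removed else branch.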