yejunliang23 commited on
Commit
a19e6cc
·
unverified ·
1 Parent(s): f601d69

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -2,7 +2,7 @@ import os
2
  import torch
3
  from threading import Thread
4
  import gradio as gr
5
- from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor,TextIteratorStreamer
6
  from qwen_vl_utils import process_vision_info
7
 
8
  # 3D mesh dependencies
@@ -35,7 +35,7 @@ model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
35
  trust_remote_code=True
36
  )
37
  processor = AutoProcessor.from_pretrained(MODEL_DIR)
38
-
39
 
40
  def chat_qwen_vl(message: str, history: list, temperature: float = 0.1, max_new_tokens: int = 1024):
41
  # —— 原有多模态输入构造 —— #
@@ -62,7 +62,7 @@ def chat_qwen_vl(message: str, history: list, temperature: float = 0.1, max_new_
62
 
63
  # 2. 把 streamer 和生成参数一起传给 model.generate
64
  streamer = TextIteratorStreamer(
65
- tokenizer,
66
  timeout=100.0,
67
  skip_prompt=True,
68
  skip_special_tokens=True
 
2
  import torch
3
  from threading import Thread
4
  import gradio as gr
5
+ from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor,TextIteratorStreamer,AutoTokenizer
6
  from qwen_vl_utils import process_vision_info
7
 
8
  # 3D mesh dependencies
 
35
  trust_remote_code=True
36
  )
37
  processor = AutoProcessor.from_pretrained(MODEL_DIR)
38
+ terminators = [tokenizer.eos_token_id]
39
 
40
  def chat_qwen_vl(message: str, history: list, temperature: float = 0.1, max_new_tokens: int = 1024):
41
  # —— 原有多模态输入构造 —— #
 
62
 
63
  # 2. 把 streamer 和生成参数一起传给 model.generate
64
  streamer = TextIteratorStreamer(
65
+ processor,
66
  timeout=100.0,
67
  skip_prompt=True,
68
  skip_special_tokens=True