Fancy-MLLM committed (verified)
Commit bbdfb03 · 1 Parent(s): 4ed3940

Upload app.py

Files changed (1):
  1. app.py +119 -26
app.py CHANGED
@@ -1,28 +1,29 @@
 import gradio as gr
-from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
+from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor, TextIteratorStreamer
+from threading import Thread
 from qwen_vl_utils import process_vision_info
 import torch
+import time

 # Specify the local cache path for models
-local_path = "Qwen/Qwen2.5-VL-7B-Instruct"
+local_path = "Fancy-MLLM/R1-OneVision/R1-OneVision/R1-OneVison-7B"

 # Load model and processor
 model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
-    local_path, torch_dtype="auto", device_map="auto"
+    local_path, torch_dtype="auto", device_map="cpu"  # load weights onto CPU first
 )
+model.cuda().eval()  # then move the model to the GPU and switch to inference mode

 processor = AutoProcessor.from_pretrained(local_path)

-print("load successfully")
 # Function to process image and text and generate the output
-@torch.inference_mode()
 def generate_output(image, text, button_click=None):  # button_click is unused; the default lets the 2-input handlers below call this function
     # Prepare input data
     messages = [
         {
             "role": "user",
             "content": [
-                {"type": "image", "image": image},
+                {"type": "image", "image": image, "min_pixels": 1003520, "max_pixels": 12845056},
                 {"type": "text", "text": text},
             ],
         }
@@ -30,6 +31,8 @@ def generate_output(image, text, button_click=None):

     # Prepare inputs for the model
     text_input = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+    # print(text_input)
+    # import pdb; pdb.set_trace()
     image_inputs, video_inputs = process_vision_info(messages)
     inputs = processor(
         text=[text_input],
@@ -40,26 +43,116 @@ def generate_output(image, text, button_click=None):
     )
     inputs = inputs.to("cuda")

-    # Generate the output
-    generated_ids = model.generate(**inputs, max_new_tokens=128)
-    generated_ids_trimmed = [
-        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
-    ]
-    output_text = processor.batch_decode(
-        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
+    streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
+    generation_kwargs = dict(
+        **inputs,
+        streamer=streamer,
+        max_new_tokens=4096,
+        top_p=0.001,
+        top_k=1,
+        temperature=0.01,
+        repetition_penalty=1.0,
     )
-    return output_text[0]
+    thread = Thread(target=model.generate, kwargs=generation_kwargs)
+    thread.start()
+    generated_text = ''
+
+    try:
+        for new_text in streamer:
+            generated_text += new_text
+            yield "\u200e" + generated_text  # the source prepends an invisible left-to-right mark (U+200E); made explicit here
+            # print(f"Current text: {generated_text}")  # debug output
+            # yield generated_text  # yield the raw text directly
+    except Exception as e:
+        print(f"Error: {e}")
+        yield f"Error occurred: {str(e)}"

-# Create Gradio interface
-iface = gr.Interface(
-    fn=generate_output,
-    inputs=[
-        gr.Image(type="pil", label="Upload Image"),
-        gr.Textbox(lines=2, placeholder="Enter a question related to the image", label="Input Text"),
-
-    ],
-    outputs=gr.Textbox(label="Model Output"),
-)
+Css = """
+#output-markdown {
+    overflow-y: auto;
+    white-space: pre-wrap;
+    word-wrap: break-word;
+}
+
+#output-markdown .math {
+    overflow-x: auto;
+    max-width: 100%;
+}
+.markdown-text {
+    white-space: pre-wrap;
+    word-wrap: break-word;
+}
+#qwen-md .katex-display { display: inline; }
+#qwen-md .katex-display>.katex { display: inline; }
+#qwen-md .katex-display>.katex>.katex-html { display: inline; }
+"""
+
+with gr.Blocks(css=Css) as demo:
+    gr.HTML("""<center><font size=8>🦖 R1-Onevision Demo</font></center>""")

-# Launch the Gradio interface
-iface.launch()
+    with gr.Row():
+        with gr.Column():
+            input_image = gr.Image(type="pil", label="Upload")  # no trailing comma: this must be the component itself, not a tuple
+            input_text = gr.Textbox(label="Input your question")
+            with gr.Row():
+                with gr.Column():
+                    clear_btn = gr.ClearButton([input_image, input_text])
+                with gr.Column():
+                    submit_btn = gr.Button("Submit", variant="primary")
+
+            gr.Examples(
+                examples=[
+                    ["20250208-205626.jpeg", "How many plums (see the picture) weigh as much as an apple?"],
+                    ["38.jpg", "Each of the digits 2, 3, 4 and 5 will be placed in a square. Then there will be two numbers, which will be added together. What is the biggest number that they could make?"],
+                    ["64.jpg", "Four of the numbers 1, 3, 4, 5 and 7 are written into the boxes so that the calculation is correct.\nWhich number was not used?"],
+                ],
+                inputs=[input_image, input_text],
+                label="Example Inputs"
+            )
+        with gr.Column():
+            output_text = gr.Markdown(
+                label="Generated Response",
+                max_height="80vh",
+                min_height="50vh",
+                container=True,
+                latex_delimiters=[{
+                    "left": "\\(",
+                    "right": "\\)",
+                    "display": True
+                }, {
+                    "left": "\\begin{equation}",
+                    "right": "\\end{equation}",
+                    "display": True
+                }, {
+                    "left": "\\begin{align}",
+                    "right": "\\end{align}",
+                    "display": True
+                }, {
+                    "left": "\\begin{alignat}",
+                    "right": "\\end{alignat}",
+                    "display": True
+                }, {
+                    "left": "\\begin{gather}",
+                    "right": "\\end{gather}",
+                    "display": True
+                }, {
+                    "left": "\\begin{CD}",
+                    "right": "\\end{CD}",
+                    "display": True
+                }, {
+                    "left": "\\[",
+                    "right": "\\]",
+                    "display": True
+                }],
+                elem_id="qwen-md")
+
+
+
+    submit_btn.click(
+        fn=generate_output,
+        inputs=[input_image, input_text],
+        outputs=output_text,
+        queue=True
+    )
+    demo.launch(share=True)
+
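
Note: the core change in this commit replaces a blocking model.generate + batch_decode call with token-by-token streaming, and the sampling settings (top_k=1, top_p=0.001, temperature=0.01) make decoding effectively greedy. Below is a minimal standalone sketch of the same TextIteratorStreamer pattern; the small text-only model name is an assumed stand-in for illustration, not this repo's model:

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_name = "Qwen/Qwen2.5-0.5B-Instruct"  # assumed stand-in model, small enough to run anywhere
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto")

inputs = tokenizer("Explain streaming generation in one sentence.", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

# model.generate blocks until generation finishes, so it runs on a worker
# thread while the main thread drains the streamer as tokens are produced.
thread = Thread(target=model.generate,
                kwargs=dict(**inputs, streamer=streamer, max_new_tokens=64))
thread.start()

# Iterating the streamer yields decoded text chunks as they become available.
for chunk in streamer:
    print(chunk, end="", flush=True)
thread.join()

In app.py the loop yields the accumulated text instead of printing it, which is what lets the gr.Markdown output re-render progressively as the generator runs.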
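
The min_pixels / max_pixels entries added to the image message bound the image's resolution before it reaches the vision encoder: 1003520 = 1280 × 28 × 28 and 12845056 = 16384 × 28 × 28, i.e. between 1,280 and 16,384 of the model's 28×28 visual patches. Below is a rough sketch of that resize arithmetic, modelled on (not copied from) what qwen_vl_utils does when it processes the message:

import math

def fit_pixel_budget(h: int, w: int,
                     min_pixels: int = 1003520,   # 1280 * 28 * 28
                     max_pixels: int = 12845056,  # 16384 * 28 * 28
                     factor: int = 28):
    # Scale the image uniformly so its area lands inside the pixel budget,
    # then align each side to the 28-pixel patch grid.
    if h * w > max_pixels:
        scale = math.sqrt(h * w / max_pixels)         # shrink
        h = math.floor(h / scale / factor) * factor   # flooring keeps area <= max_pixels
        w = math.floor(w / scale / factor) * factor
    elif h * w < min_pixels:
        scale = math.sqrt(min_pixels / (h * w))       # enlarge
        h = math.ceil(h * scale / factor) * factor    # ceiling keeps area >= min_pixels
        w = math.ceil(w * scale / factor) * factor
    else:
        h = round(h / factor) * factor                # already in budget: just align
        w = round(w / factor) * factor
    return max(factor, h), max(factor, w)

# An 8000x6000 photo (48 MP) exceeds the cap and is scaled down:
print(fit_pixel_budget(8000, 6000))  # (4116, 3080), about 12.7 MP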