Update README.md #16
by hiko1999 · opened

README.md CHANGED

@@ -2,494 +2,114 @@
license: apache-2.0
language:
- en
pipeline_tag: image-text-to-text
tags:
- multimodal
library_name: transformers
---

-# Qwen2-VL-2B-Instruct

-## Introduction

-<img src="https://qianwen-res.oss-accelerate-overseas.aliyuncs.com/Qwen2-VL/qwen2_vl.jpg" width="80%"/>
-<p>

-<p>

-## Evaluation

-| Benchmark | InternVL2-2B | MiniCPM-V 2.0 | Qwen2-VL-2B |
-| :--- | :---: | :---: | :---: |
-| MMMU<sub>val</sub> | 36.3 | 38.2 | **41.1** |
-| DocVQA<sub>test</sub> | 86.9 | - | **90.1** |
-| InfoVQA<sub>test</sub> | 58.9 | - | **65.5** |
-| ChartQA<sub>test</sub> | **76.2** | - | 73.5 |
-| TextVQA<sub>val</sub> | 73.4 | - | **79.7** |
-| OCRBench | 781 | 605 | **794** |
-| MTVQA | - | - | **20.0** |
-| VCR<sub>en easy</sub> | - | - | **81.45** |
-| VCR<sub>zh easy</sub> | - | - | **46.16** |
-| RealWorldQA | 57.3 | 55.8 | **62.9** |
-| MME<sub>sum</sub> | **1876.8** | 1808.6 | 1872.0 |
-| MMBench-EN<sub>test</sub> | 73.2 | 69.1 | **74.9** |
-| MMBench-CN<sub>test</sub> | 70.9 | 66.5 | **73.5** |
-| MMBench-V1.1<sub>test</sub> | 69.6 | 65.8 | **72.2** |
-| MMT-Bench<sub>test</sub> | - | - | **54.5** |
-| MMStar | **49.8** | 39.1 | 48.0 |
-| MMVet<sub>GPT-4-Turbo</sub> | 39.7 | 41.0 | **49.5** |
-| HallBench<sub>avg</sub> | 38.0 | 36.1 | **41.7** |
-| MathVista<sub>testmini</sub> | **46.0** | 39.8 | 43.0 |
-| MathVision | - | - | **12.4** |

-| Benchmark | Qwen2-VL-2B |
-| :--- | :---: |
-| MVBench | **63.2** |
-| PerceptionTest<sub>test</sub> | **53.9** |
-| EgoSchema<sub>test</sub> | **54.9** |
-| Video-MME<sub>wo/w subs</sub> | **55.6**/**60.4** |

-## Requirements
-The code of Qwen2-VL has been in the latest Hugging Face transformers, and we advise you to build from source with `pip install git+https://github.com/huggingface/transformers`, or you might encounter the following error:
-```
-KeyError: 'qwen2_vl'
-```

-## Quickstart
-We offer a toolkit to help you handle various types of visual input more conveniently. This includes base64, URLs, and interleaved images and videos. You can install it using the following command:

-```bash
-pip install qwen-vl-utils
-```

-Here we show a code snippet to show you how to use the chat model with `transformers` and `qwen_vl_utils`:

-```python
-from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor
-from qwen_vl_utils import process_vision_info
-
-# default: Load the model on the available device(s)
-model = Qwen2VLForConditionalGeneration.from_pretrained(
-    "Qwen/Qwen2-VL-2B-Instruct", torch_dtype="auto", device_map="auto"
-)
-
-# We recommend enabling flash_attention_2 for better acceleration and memory saving, especially in multi-image and video scenarios.
-# model = Qwen2VLForConditionalGeneration.from_pretrained(
-#     "Qwen/Qwen2-VL-2B-Instruct",
-#     torch_dtype=torch.bfloat16,
-#     attn_implementation="flash_attention_2",
-#     device_map="auto",
-# )
-
-# default processor
-processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct")
-
-# The default range for the number of visual tokens per image in the model is 4-16384. You can set min_pixels and max_pixels according to your needs, such as a token count range of 256-1280, to balance speed and memory usage.
-# min_pixels = 256*28*28
-# max_pixels = 1280*28*28
-# processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct", min_pixels=min_pixels, max_pixels=max_pixels)
-
-messages = [
-    {
-        "role": "user",
-        "content": [
-            {
-                "type": "image",
-                "image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg",
-            },
-            {"type": "text", "text": "Describe this image."},
-        ],
-    }
-]
-
-# Preparation for inference
-text = processor.apply_chat_template(
-    messages, tokenize=False, add_generation_prompt=True
-)
-image_inputs, video_inputs = process_vision_info(messages)
-inputs = processor(
-    text=[text],
-    images=image_inputs,
-    videos=video_inputs,
-    padding=True,
-    return_tensors="pt",
-)
-inputs = inputs.to("cuda")
-
-# Inference: Generation of the output
-generated_ids = model.generate(**inputs, max_new_tokens=128)
-generated_ids_trimmed = [
-    out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
-]
-output_text = processor.batch_decode(
-    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
-)
-print(output_text)
-```
-<details>
-<summary>Without qwen_vl_utils</summary>
-
-```python
-from PIL import Image
-import requests
-import torch
-from torchvision import io
-from typing import Dict
-from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor
-
-# Load the model in half-precision on the available device(s)
-model = Qwen2VLForConditionalGeneration.from_pretrained(
-    "Qwen/Qwen2-VL-2B-Instruct", torch_dtype="auto", device_map="auto"
-)
-processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct")
-
-# Image
-url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg"
-image = Image.open(requests.get(url, stream=True).raw)
-
-conversation = [
-    {
-        "role": "user",
-        "content": [
-            {
-                "type": "image",
-            },
-            {"type": "text", "text": "Describe this image."},
-        ],
-    }
-]
-
-# Preprocess the inputs
-text_prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
-# Expected output: '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>Describe this image.<|im_end|>\n<|im_start|>assistant\n'
-
-inputs = processor(
-    text=[text_prompt], images=[image], padding=True, return_tensors="pt"
-)
-inputs = inputs.to("cuda")
-
-# Inference: Generation of the output
-output_ids = model.generate(**inputs, max_new_tokens=128)
-generated_ids = [
-    output_ids[len(input_ids) :]
-    for input_ids, output_ids in zip(inputs.input_ids, output_ids)
-]
-output_text = processor.batch_decode(
-    generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
-)
-print(output_text)
-```
-</details>
-
-<details>
-<summary>Multi image inference</summary>
-
-```python
-# Messages containing multiple images and a text query
-messages = [
-    {
-        "role": "user",
-        "content": [
-            {"type": "image", "image": "file:///path/to/image1.jpg"},
-            {"type": "image", "image": "file:///path/to/image2.jpg"},
-            {"type": "text", "text": "Identify the similarities between these images."},
-        ],
-    }
-]
-
-# Preparation for inference
-text = processor.apply_chat_template(
-    messages, tokenize=False, add_generation_prompt=True
-)
-image_inputs, video_inputs = process_vision_info(messages)
-inputs = processor(
-    text=[text],
-    images=image_inputs,
-    videos=video_inputs,
-    padding=True,
-    return_tensors="pt",
-)
-inputs = inputs.to("cuda")
-
-# Inference
-generated_ids = model.generate(**inputs, max_new_tokens=128)
-generated_ids_trimmed = [
-    out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
-]
-output_text = processor.batch_decode(
-    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
-)
-print(output_text)
-```
-</details>
-
-<details>
-<summary>Video inference</summary>
-
-```python
-# Messages containing a list of images as a video and a text query
-messages = [
-    {
-        "role": "user",
-        "content": [
-            {
-                "type": "video",
-                "video": [
-                    "file:///path/to/frame1.jpg",
-                    "file:///path/to/frame2.jpg",
-                    "file:///path/to/frame3.jpg",
-                    "file:///path/to/frame4.jpg",
-                ],
-                "fps": 1.0,
-            },
-            {"type": "text", "text": "Describe this video."},
-        ],
-    }
-]
-# Messages containing a video and a text query
-messages = [
-    {
-        "role": "user",
-        "content": [
-            {
-                "type": "video",
-                "video": "file:///path/to/video1.mp4",
-                "max_pixels": 360 * 420,
-                "fps": 1.0,
-            },
-            {"type": "text", "text": "Describe this video."},
-        ],
-    }
-]
-
-# Preparation for inference
-text = processor.apply_chat_template(
-    messages, tokenize=False, add_generation_prompt=True
-)
-image_inputs, video_inputs = process_vision_info(messages)
-inputs = processor(
-    text=[text],
-    images=image_inputs,
-    videos=video_inputs,
-    padding=True,
-    return_tensors="pt",
-)
-inputs = inputs.to("cuda")
-
-# Inference
-generated_ids = model.generate(**inputs, max_new_tokens=128)
-generated_ids_trimmed = [
-    out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
-]
-output_text = processor.batch_decode(
-    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
-)
-print(output_text)
-```
-</details>
-
-<details>
-<summary>Batch inference</summary>
-
-```python
-# Sample messages for batch inference
-messages1 = [
-    {
-        "role": "user",
-        "content": [
-            {"type": "image", "image": "file:///path/to/image1.jpg"},
-            {"type": "image", "image": "file:///path/to/image2.jpg"},
-            {"type": "text", "text": "What are the common elements in these pictures?"},
-        ],
-    }
-]
-messages2 = [
-    {"role": "system", "content": "You are a helpful assistant."},
-    {"role": "user", "content": "Who are you?"},
-]
-# Combine messages for batch processing
-messages = [messages1, messages2]
-
-# Preparation for batch inference
-texts = [
-    processor.apply_chat_template(msg, tokenize=False, add_generation_prompt=True)
-    for msg in messages
-]
-image_inputs, video_inputs = process_vision_info(messages)
-inputs = processor(
-    text=texts,
-    images=image_inputs,
-    videos=video_inputs,
-    padding=True,
-    return_tensors="pt",
-)
-inputs = inputs.to("cuda")
-
-# Batch Inference
-generated_ids = model.generate(**inputs, max_new_tokens=128)
-generated_ids_trimmed = [
-    out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
-]
-output_texts = processor.batch_decode(
-    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
-)
-print(output_texts)
-```
-</details>
-
-### More Usage Tips
-
-For input images, we support local files, base64, and URLs. For videos, we currently only support local files.
-
-```python
-# You can directly insert a local file path, a URL, or a base64-encoded image into the position where you want in the text.
-## Local file path
-messages = [
-    {
-        "role": "user",
-        "content": [
-            {"type": "image", "image": "file:///path/to/your/image.jpg"},
-            {"type": "text", "text": "Describe this image."},
-        ],
-    }
-]
-## Image URL
-messages = [
-    {
-        "role": "user",
-        "content": [
-            {"type": "image", "image": "http://path/to/your/image.jpg"},
-            {"type": "text", "text": "Describe this image."},
-        ],
-    }
-]
-## Base64 encoded image
-messages = [
-    {
-        "role": "user",
-        "content": [
-            {"type": "image", "image": "data:image;base64,/9j/..."},
-            {"type": "text", "text": "Describe this image."},
-        ],
-    }
-]
-```
-#### Image Resolution for performance boost
-
-The model supports a wide range of resolution inputs. By default, it uses the native resolution for input, but higher resolutions can enhance performance at the cost of more computation. Users can set the minimum and maximum number of pixels to achieve an optimal configuration for their needs, such as a token count range of 256-1280, to balance speed and memory usage.
-
-```python
-min_pixels = 256 * 28 * 28
-max_pixels = 1280 * 28 * 28
-processor = AutoProcessor.from_pretrained(
-    "Qwen/Qwen2-VL-2B-Instruct", min_pixels=min_pixels, max_pixels=max_pixels
-)
-```
-
-Besides, we provide two methods for fine-grained control over the image size input to the model:
-
-1. Define min_pixels and max_pixels: Images will be resized to maintain their aspect ratio within the range of min_pixels and max_pixels.
-
-2. Specify exact dimensions: Directly set `resized_height` and `resized_width`. These values will be rounded to the nearest multiple of 28.
-
-```python
-# resized_height and resized_width
-messages = [
-    {
-        "role": "user",
-        "content": [
-            {
-                "type": "image",
-                "image": "file:///path/to/your/image.jpg",
-                "resized_height": 280,
-                "resized_width": 420,
-            },
-            {"type": "text", "text": "Describe this image."},
-        ],
-    }
-]
-# min_pixels and max_pixels
-messages = [
-    {
-        "role": "user",
-        "content": [
-            {
-                "type": "image",
-                "image": "file:///path/to/your/image.jpg",
-                "min_pixels": 50176,
-                "max_pixels": 50176,
-            },
-            {"type": "text", "text": "Describe this image."},
-        ],
-    }
-]
-```
-
-## Limitations
-
-While Qwen2-VL is applicable to a wide range of visual tasks, it is equally important to understand its limitations. Here are some known restrictions:
-
-1. Lack of Audio Support: The current model does **not comprehend audio information** within videos.
-2. Data timeliness: Our image dataset is **updated until June 2023**, and information subsequent to this date may not be covered.
-3. Constraints in Individuals and Intellectual Property (IP): The model's capacity to recognize specific individuals or IPs is limited, potentially failing to comprehensively cover all well-known personalities or brands.
-4. Limited Capacity for Complex Instructions: When faced with intricate multi-step instructions, the model's understanding and execution capabilities require enhancement.
-5. Insufficient Counting Accuracy: Particularly in complex scenes, the accuracy of object counting is not high, necessitating further improvements.
-6. Weak Spatial Reasoning Skills: Especially in 3D spaces, the model's inference of object positional relationships is inadequate, making it difficult to precisely judge the relative positions of objects.
-
-These limitations serve as ongoing directions for model optimization and improvement, and we are committed to continually enhancing the model's performance and scope of application.
-
-## Citation
-
-If you find our work helpful, feel free to give us a cite.
-
-```
-@article{Qwen2VL,
-  title={Qwen2-VL: Enhancing Vision-Language Model's Perception of the World at Any Resolution},
-  author={Wang, Peng and Bai, Shuai and Tan, Sinan and Wang, Shijie and Fan, Zhihao and Bai, Jinze and Chen, Keqin and Liu, Xuejing and Wang, Jialin and Ge, Wenbin and Fan, Yang and Dang, Kai and Du, Mengfei and Ren, Xuancheng and Men, Rui and Liu, Dayiheng and Zhou, Chang and Zhou, Jingren and Lin, Junyang},
-  journal={arXiv preprint arXiv:2409.12191},
-  year={2024}
-}
-
-@article{Qwen-VL,
-  title={Qwen-VL: A Versatile Vision-Language Model for Understanding, Localization, Text Reading, and Beyond},
-  author={Bai, Jinze and Bai, Shuai and Yang, Shusheng and Wang, Shijie and Tan, Sinan and Wang, Peng and Lin, Junyang and Zhou, Chang and Zhou, Jingren},
-  journal={arXiv preprint arXiv:2308.12966},
-  year={2023}
-}
-```

license: apache-2.0
language:
- en
+- zh
+- ja
pipeline_tag: image-text-to-text
tags:
+- fire
+- wildfire
- multimodal
library_name: transformers
---

+# Qwen2-Wildfire-2B-Instruct

+## 1. 模型概述 / Model Overview / モデル概要

+**中文:**
+Qwen2-Wildfire-2B 提供了对野火火灾场景的深度识别,可以提供比其他模型更深入的场景细节信息。相较于其他深度学习模型,它能够为消防工作提供更多有效的信息;由于模型体量为 2B,将来也可以方便地搭载到移动设备上。我们的模型能够识别以下八大关键要素:
+- 火灾类型与环境描述
+- 火焰特性(颜色、高度、强度)
+- 烟雾特性(颜色、密度、扩散方向)
+- 火行为(传播速度、运动模式)
+- 受影响区域描述(火灾覆盖范围、涉及的植被或结构)
+- 现场应对措施(消防员、车辆、设备的可见性)
+- 环境因素(周围地形、风速、温度)
+- 图像质量(拍摄时间、角度、分辨率)

+训练数据集由 4135 张高质量图片构成,标注后经人工检查,用于微调 Qwen 的 Qwen2-VL-2B-Instruct 模型,取得了很好的效果。

+**English:**
+Qwen2-Wildfire-2B provides deep recognition of wildfire scenes, offering more detailed scene information than other models. Compared with other deep learning models, it supplies more actionable information for firefighting, and at 2B parameters it can conveniently be deployed on mobile devices in the future. The model recognizes eight key elements:
+- Fire type and environmental description
+- Flame characteristics (color, height, intensity)
+- Smoke characteristics (color, density, spread direction)
+- Fire behavior (spread speed, movement pattern)
+- Affected area description (fire coverage, vegetation or structures involved)
+- On-site response measures (visibility of firefighters, vehicles, equipment)
+- Environmental factors (terrain, wind speed, temperature)
+- Image quality (time of capture, angle, resolution)

+The training dataset consists of 4,135 high-quality images, manually checked after labeling, and was used to fine-tune Qwen's Qwen2-VL-2B-Instruct model, yielding excellent results.

+**日本語:**
+Qwen2-Wildfire-2Bは、他のモデルよりも詳細な野火災害シーンの深い認識を提供し、消防活動に有効な情報を提供します。2Bサイズのため、将来的にモバイルデバイスへの搭載も可能です。モデルは次の8つの主要な要素を認識できます:
+- 火災の種類と環境の説明
+- 炎の特性(色、高さ、強度)
+- 煙の特性(色、密度、拡散方向)
+- 火の動作(伝播速度、動きのパターン)
+- 影響を受けた地域の説明(火災範囲、関与する植生や構造物)
+- 現場の対応(消防士、車両、機器の視認性)
+- 環境要因(地形、風速、温度)
+- 画像の品質(撮影時間、角度、解像度)

+4135枚の高品質な画像からなるデータセットを使用し、ラベリング後に手作業で確認したうえで、Qwen2-VL-2B-Instructモデルの微調整に使用して非常に良好な結果を得ました。
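
A minimal usage sketch (not part of the original card): it assumes the fine-tuned checkpoint keeps the Qwen2-VL-2B-Instruct interface (`Qwen2VLForConditionalGeneration`, `AutoProcessor`, `qwen_vl_utils`); the repo id `hiko1999/Qwen2-Wildfire-2B-Instruct` and the image path are placeholders, not names confirmed by this card.

```python
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
from qwen_vl_utils import process_vision_info

model_id = "hiko1999/Qwen2-Wildfire-2B-Instruct"  # placeholder repo id, not confirmed by this card

model = Qwen2VLForConditionalGeneration.from_pretrained(
    model_id, torch_dtype="auto", device_map="auto"
)
processor = AutoProcessor.from_pretrained(model_id)

# Ask the model to report the eight key elements listed above.
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": "file:///path/to/wildfire.jpg"},  # hypothetical path
            {
                "type": "text",
                "text": (
                    "Describe this wildfire scene: fire type and environment, flame "
                    "characteristics, smoke characteristics, fire behavior, affected area, "
                    "on-site response, environmental factors, and image quality."
                ),
            },
        ],
    }
]

# Same preprocessing/generation flow as documented for the base Qwen2-VL-2B-Instruct model.
text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=[text], images=image_inputs, videos=video_inputs, padding=True, return_tensors="pt"
).to(model.device)

generated_ids = model.generate(**inputs, max_new_tokens=256)
trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
print(processor.batch_decode(trimmed, skip_special_tokens=True)[0])
```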

+## 2. 开发者信息 / Developer Information / 開発者情報

+**中文:**
+我现在(2024年10月10日,星期四)是神奈川大学的修士二年级学生,隶属于电气电子情报学科。我的所有研究都由我所在的情报学部能登研究室支持;没有教授和老师的大力支持,我无法拥有微调模型所需的硬件设备。我从大四开始一直研究火灾识别,期间发表了两篇国际会议论文。此次的 Qwen2-Wildfire-2B 是我最新的研究成果,研究周期长达7个月,所有工作均由我独立完成,希望通过开源为火灾识别领域做出贡献。如果有机构希望将模型用于商业用途,请务必附上我的信息;研究不易,请理解。如果有防灾机构对我的研究感兴趣,请联系:[email protected]。

+**English:**
+As of Thursday, October 10, 2024, I am a second-year master's student at Kanagawa University, in the Department of Electrical and Electronic Information. My research is supported by the Noto Lab at the School of Informatics; without the support of my professors and mentors, I would not have had access to the hardware needed to fine-tune this model. I have been studying wildfire recognition since my senior year and have published two international conference papers. Qwen2-Wildfire-2B is my latest research achievement, completed over the course of seven months, with all of the work done by me, and I hope to contribute to the field of wildfire recognition by open-sourcing this research. If any organization is interested in using this model for commercial purposes, please attribute my work accordingly. Research is difficult, and I appreciate your understanding. If any disaster-prevention organization is interested in my research, please contact: [email protected].

+**日本語:**
+2024年10月10日(木曜日)現在、私は神奈川大学の修士2年生であり、電気電子情報学科に所属しています。私の研究は、情報学部能登研究室のサポートを受けています。教授や指導者の大きなサポートがなければ、モデルの微調整に必要なハードウェアを入手することはできませんでした。大学4年生から火災認識の研究を行い、国際会議で2本の論文を発表しました。Qwen2-Wildfire-2Bは私の最新の研究成果であり、7ヶ月間にわたってすべての作業を一人で完了しました。火災認識分野への貢献を目指し、この研究をオープンソースにしました。このモデルを商業利用したい場合は、必ず私の情報を明記してください。研究は難しいものであり、ご理解いただければ幸いです。防災に関心のある機関は、[email protected]までご連絡ください。

+## 3. 模型基础 / Base Model / モデルの基礎

+- `Qwen2-VL-2B-Instruct`

+## 4. 用途和限制 / Uses and Limitations / 用途と制限

+**中文:**
+- 用于有效识别野火场景中的各种要素。

+**English:**
+- Used for effective recognition of various elements in wildfire scenes.

+**日本語:**
+- 野火シーンのさまざまな要素を効果的に認識するために使用されます。
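
For screening many images (for example, frames from a monitoring camera), the batch recipe documented for the base Qwen2-VL-2B-Instruct model should carry over unchanged, again assuming the checkpoint keeps the base interface; `model` and `processor` are the objects loaded in the usage sketch above, and the file paths are placeholders.

```python
# Batch screening sketch; reuses `model` and `processor` from the usage sketch above.
frames = ["file:///data/cam/frame_0001.jpg", "file:///data/cam/frame_0002.jpg"]  # placeholder paths

batch = [
    [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": path},
                {"type": "text", "text": "Is there visible fire or smoke? Describe the scene."},
            ],
        }
    ]
    for path in frames
]

texts = [
    processor.apply_chat_template(msgs, tokenize=False, add_generation_prompt=True)
    for msgs in batch
]
image_inputs, video_inputs = process_vision_info(batch)
inputs = processor(
    text=texts, images=image_inputs, videos=video_inputs, padding=True, return_tensors="pt"
).to(model.device)

generated_ids = model.generate(**inputs, max_new_tokens=128)
trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
print(processor.batch_decode(trimmed, skip_special_tokens=True))
```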

+## 5. 训练细节 / Training Details / トレーニングの詳細

+**中文:**
+训练数据集由我们自建的 4135 张高质量图片构成,打标后由人工检查数据的准确性,随后用于微调 Qwen 的 Qwen2-VL-2B-Instruct 模型,效果非常理想。

+**English:**
+The training dataset consists of 4,135 high-quality images that we built ourselves, manually checked for accuracy after labeling. These data were used to fine-tune Qwen's Qwen2-VL-2B-Instruct model, yielding excellent results.

+**日本語:**
+トレーニングデータセットは、自前で作成した4135枚の高品質な画像で構成され、ラベリング後に手作業でデータの正確さを確認しました。その後、QwenのQwen2-VL-2B-Instructモデルの微調整に使用され、非常に良好な結果が得られました。
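
The card does not publish the dataset or its annotation schema. Purely as an illustration of what a single fine-tuning record for a Qwen2-VL-style chat model often looks like (an image reference plus a user/assistant exchange covering the eight elements), here is a hypothetical example; every field name, path, and caption below is invented and is not taken from the actual 4,135-image dataset.

```python
# Hypothetical training record; all values are illustrative, not from the real dataset.
sample = {
    "image": "images/fire_0001.jpg",  # hypothetical path
    "conversations": [
        {
            "role": "user",
            "content": "Describe the fire type and environment, flame and smoke characteristics, "
                       "fire behavior, affected area, on-site response, environmental factors, "
                       "and image quality.",
        },
        {
            "role": "assistant",
            "content": "Surface wildfire in dry grassland; orange flames roughly 2 m high; "
                       "dense grey smoke drifting to the northeast; no firefighters visible; "
                       "daytime aerial photo of moderate resolution.",
        },
    ],
}
```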

+## 6. 技术细节 / Technical Details / 技術的な詳細

+**中文:**
+- 微调设备:RTX A5000 24GB。

+**English:**
+- Fine-tuning hardware: RTX A5000 with 24 GB of memory.

+**日本語:**
+- 微調整デバイス:RTX A5000 24GB。
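
The card lists only the fine-tuning GPU. For inference on similarly sized hardware, the memory-saving options documented for the base Qwen2-VL-2B-Instruct model (bfloat16 weights, FlashAttention-2, and a `min_pixels`/`max_pixels` budget on the processor) should apply unchanged, assuming the checkpoint keeps the base interface; the repo id below is again a placeholder.

```python
import torch
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor

model_id = "hiko1999/Qwen2-Wildfire-2B-Instruct"  # placeholder repo id

# Options inherited from the base Qwen2-VL-2B-Instruct card: bfloat16 weights,
# FlashAttention-2, and a visual-token budget to bound memory per image.
model = Qwen2VLForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",  # requires the flash-attn package
    device_map="auto",
)
processor = AutoProcessor.from_pretrained(
    model_id,
    min_pixels=256 * 28 * 28,   # lower bound on visual tokens per image
    max_pixels=1280 * 28 * 28,  # upper bound; reduce further to save memory on a 24 GB GPU
)
```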

+## 7. 联系信息 / Contact Information / 連絡先情報

+**中文:**
+Qwen2-Wildfire-2B 是我最新的研究成果,研究周期长达7个月,所有工作均由我独立完成。如果有机构希望将模型用于商业用途,请务必附上我的信息;研究不易,请理解。如果有防灾机构对我的研究感兴趣,请联系:[email protected]。

+**English:**
+Qwen2-Wildfire-2B is my latest research achievement, completed over the course of seven months; all of the work was done by me. If any organization is interested in using this model for commercial purposes, please attribute my work accordingly. Research is difficult, and I appreciate your understanding. If any disaster-prevention organization is interested in my research, please contact: [email protected].

+**日本語:**
+Qwen2-Wildfire-2Bは私の最新の研究成果であり、7ヶ月間かけてすべての作業を一人で完了しました。商業目的でこのモデルを使用したい場合は、必ず私の情報を明記してください。研究は困難であるため、理解していただければ幸いです。防災機関が私の研究に興味がある場合は、[email protected]までご連絡ください。