Upload folder using huggingface_hub
- README.md +28 -91
- modeling_intern_vit.py +6 -12
README.md
CHANGED
@@ -62,6 +62,8 @@ InternVL 2.0 is a multimodal large language model series, featuring models of various sizes.
 | MathVista<sub>testmini</sub> | 58.1 | 57.7 | 53.5 | 59.4 |
 | OpenCompass<sub>avg</sub> | 63.5 | 64.4 | 61.7 | 66.4 |
 
+- For more details and evaluation reproduction, please refer to our [Evaluation Guide](https://internvl.readthedocs.io/en/latest/internvl2.0/evaluation.html).
+
 - We simultaneously use InternVL and VLMEvalKit repositories for model evaluation. Specifically, the results reported for DocVQA, ChartQA, InfoVQA, TextVQA, MME, AI2D, MMBench, CCBench, MMVet, and SEED-Image were tested using the InternVL repository. OCRBench, RealWorldQA, HallBench, and MathVista were evaluated using the VLMEvalKit.
 
 - For MMMU, we report both the original scores (left side: evaluated using the InternVL codebase for InternVL series models, and sourced from technical reports or webpages for other models) and the VLMEvalKit scores (right side: collected from the OpenCompass leaderboard).
@@ -291,7 +293,7 @@ tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)
 
 # set the max number of tiles in `max_num`
 pixel_values = load_image('./examples/image1.jpg', max_num=12).to(torch.bfloat16).cuda()
-generation_config = dict(max_new_tokens=1024, do_sample=False)
+generation_config = dict(max_new_tokens=1024, do_sample=True)
 
 # pure-text conversation (纯文本对话)
 question = 'Hello, who are you?'
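For reference, the `generation_config` changed in this hunk is consumed by the chat helper used throughout the surrounding README example; a minimal sketch of that call, assuming the `model`, `tokenizer`, and `pixel_values` objects defined earlier in that example:

```python
# Sketch: how the changed generation_config is consumed downstream.
# `model`, `tokenizer`, and `pixel_values` come from the surrounding example.
generation_config = dict(max_new_tokens=1024, do_sample=True)

# single-image single-round conversation
question = '<image>\nPlease describe the image shortly.'
response = model.chat(tokenizer, pixel_values, question, generation_config)
print(f'User: {question}\nAssistant: {response}')
```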
@@ -443,7 +445,7 @@ for new_text in streamer:
 
 ## Finetune
 
-SWIFT from ModelScope community has supported the fine-tuning (Image/Video) of InternVL
+Many repositories now support fine-tuning of the InternVL series models, including [InternVL](https://github.com/OpenGVLab/InternVL), [SWIFT](https://github.com/modelscope/ms-swift), [XTurner](https://github.com/InternLM/xtuner), and others. Please refer to their documentation for more details on fine-tuning.
 
 ## Deployment
 
@@ -452,7 +454,7 @@ SWIFT from ModelScope community has supported the fine-tuning (Image/Video) of InternVL
 
 LMDeploy is a toolkit for compressing, deploying, and serving LLM, developed by the MMRazor and MMDeploy teams.
 
 ```sh
-pip install lmdeploy
+pip install lmdeploy==0.5.3
 ```
 
 LMDeploy abstracts the complex inference process of multi-modal Vision-Language Models (VLM) into an easy-to-use pipeline, similar to the Large Language Model (LLM) inference pipeline.
@@ -460,16 +462,12 @@ LMDeploy abstracts the complex inference process of multi-modal Vision-Language Models (VLM) into an easy-to-use pipeline, similar to the Large Language Model (LLM) inference pipeline.
 
 #### A 'Hello, world' example
 
 ```python
-from lmdeploy import pipeline, TurbomindEngineConfig, ChatTemplateConfig
+from lmdeploy import pipeline, TurbomindEngineConfig
 from lmdeploy.vl import load_image
 
 model = 'OpenGVLab/InternVL2-26B'
-system_prompt = '我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。'
 image = load_image('https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/tests/data/tiger.jpeg')
-chat_template_config = ChatTemplateConfig('internvl-internlm2')
-chat_template_config.meta_instruction = system_prompt
-pipe = pipeline(model, chat_template_config=chat_template_config,
-                backend_config=TurbomindEngineConfig(session_len=8192))
+pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=8192))
 response = pipe(('describe this image', image))
 print(response.text)
 ```
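The new example drops the manual chat-template setup, presumably because recent lmdeploy releases resolve the chat template from the model automatically. If a custom system prompt is still wanted, the removed pattern should continue to work; a hedged sketch, assuming lmdeploy 0.5.x's `ChatTemplateConfig` with the `meta_instruction` field used in the removed lines:

```python
# Sketch: keeping a custom system prompt with an explicit chat template,
# mirroring the removed lines. Assumes lmdeploy 0.5.x.
from lmdeploy import pipeline, TurbomindEngineConfig, ChatTemplateConfig

chat_template_config = ChatTemplateConfig('internvl-internlm2')
chat_template_config.meta_instruction = 'You are InternVL, a multimodal assistant.'  # custom system prompt
pipe = pipeline('OpenGVLab/InternVL2-26B',
                chat_template_config=chat_template_config,
                backend_config=TurbomindEngineConfig(session_len=8192))
```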
@@ -483,16 +481,12 @@ When dealing with multiple images, you can put them all in one list. Keep in mind that multiple images will lead to a higher number of input tokens, and as a result, the size of the context window typically needs to be increased.
 
 > Warning: Due to the scarcity of multi-image conversation data, the performance on multi-image tasks may be unstable, and it may require multiple attempts to achieve satisfactory results.
 
 ```python
-from lmdeploy import pipeline, TurbomindEngineConfig, ChatTemplateConfig
+from lmdeploy import pipeline, TurbomindEngineConfig
 from lmdeploy.vl import load_image
 from lmdeploy.vl.constants import IMAGE_TOKEN
 
 model = 'OpenGVLab/InternVL2-26B'
-
-chat_template_config = ChatTemplateConfig('internvl-internlm2')
-chat_template_config.meta_instruction = system_prompt
-pipe = pipeline(model, chat_template_config=chat_template_config,
-                backend_config=TurbomindEngineConfig(session_len=8192))
+pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=8192))
 
 image_urls=[
 'https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/demo/resources/human-pose.jpg',
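The unchanged lines that follow this hunk (visible verbatim in the matching Chinese hunk further down) pair the prompt with both images via explicit image tokens:

```python
# Unchanged continuation of this example: tag each image with IMAGE_TOKEN
# so the two images can be referenced separately in the prompt.
images = [load_image(img_url) for img_url in image_urls]
response = pipe((f'Image-1: {IMAGE_TOKEN}\nImage-2: {IMAGE_TOKEN}\ndescribe these two images', images))
print(response.text)
```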
@@ -510,15 +504,11 @@ print(response.text)
 
 Conducting inference with batch prompts is quite straightforward; just place them within a list structure:
 
 ```python
-from lmdeploy import pipeline, TurbomindEngineConfig, ChatTemplateConfig
+from lmdeploy import pipeline, TurbomindEngineConfig
 from lmdeploy.vl import load_image
 
 model = 'OpenGVLab/InternVL2-26B'
-
-chat_template_config = ChatTemplateConfig('internvl-internlm2')
-chat_template_config.meta_instruction = system_prompt
-pipe = pipeline(model, chat_template_config=chat_template_config,
-                backend_config=TurbomindEngineConfig(session_len=8192))
+pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=8192))
 
 image_urls=[
 "https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/demo/resources/human-pose.jpg",
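For context, batching simply pairs each prompt with its image in one list; a minimal sketch of the continuation, assuming the pipeline accepts a list of `(prompt, image)` tuples as in the full README:

```python
# Sketch: one (prompt, image) tuple per request; the pipeline returns one
# response per tuple. Assumes `pipe`, `load_image`, and `image_urls` above.
prompts = [('describe this image', load_image(url)) for url in image_urls]
responses = pipe(prompts)
print([r.text for r in responses])
```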
@@ -534,15 +524,11 @@ print(response)
 
 There are two ways to do the multi-turn conversations with the pipeline. One is to construct messages according to the format of OpenAI and use the method introduced above; the other is to use the `pipeline.chat` interface.
 
 ```python
-from lmdeploy import pipeline, TurbomindEngineConfig, ChatTemplateConfig, GenerationConfig
+from lmdeploy import pipeline, TurbomindEngineConfig, GenerationConfig
 from lmdeploy.vl import load_image
 
 model = 'OpenGVLab/InternVL2-26B'
-
-chat_template_config = ChatTemplateConfig('internvl-internlm2')
-chat_template_config.meta_instruction = system_prompt
-pipe = pipeline(model, chat_template_config=chat_template_config,
-                backend_config=TurbomindEngineConfig(session_len=8192))
+pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=8192))
 
 image = load_image('https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/demo/resources/human-pose.jpg')
 gen_config = GenerationConfig(top_k=40, top_p=0.8, temperature=0.8)
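The multi-turn flow continues below this hunk unchanged, driving the conversation through `pipeline.chat`, whose returned session carries the history (the `print(sess.response.text)` in the next hunk header comes from it); a sketch, assuming that session-passing interface:

```python
# Sketch: pipe.chat returns a session; pass it back to continue the dialogue.
sess = pipe.chat(('describe this image', image), gen_config=gen_config)
print(sess.response.text)
sess = pipe.chat('What is the woman doing?', session=sess, gen_config=gen_config)
print(sess.response.text)
```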
@@ -554,20 +540,10 @@ print(sess.response.text)
 
 #### Service
 
-To deploy InternVL2 as an API, please configure the chat template config first. Create the following JSON file `chat_template.json`.
-
-```json
-{
-    "model_name":"internvl-internlm2",
-    "meta_instruction":"我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。",
-    "stop_words":["<|im_start|>", "<|im_end|>"]
-}
-```
-
 LMDeploy's `api_server` enables models to be easily packed into services with a single command. The provided RESTful APIs are compatible with OpenAI's interfaces. Below is an example of service startup:
 
 ```shell
-lmdeploy serve api_server OpenGVLab/InternVL2-26B --backend turbomind --server-port 23333
+lmdeploy serve api_server OpenGVLab/InternVL2-26B --backend turbomind --server-port 23333
 ```
 
 To use the OpenAI-style interface, you need to install OpenAI:
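After installing the `openai` package, the server started above can be queried through its OpenAI-compatible REST API; a minimal sketch, assuming the default port 23333 from the command in this hunk (the `client.chat.completions.create` call matches the unchanged lines referenced in the next hunk header):

```python
# Sketch: query the lmdeploy api_server via the OpenAI SDK.
from openai import OpenAI

client = OpenAI(api_key='YOUR_API_KEY',  # any placeholder works for a local server
                base_url='http://0.0.0.0:23333/v1')
model_name = client.models.list().data[0].id
response = client.chat.completions.create(
    model=model_name,
    messages=[{
        'role': 'user',
        'content': [
            {'type': 'text', 'text': 'describe this image'},
            {'type': 'image_url',
             'image_url': {'url': 'https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/tests/data/tiger.jpeg'}},
        ],
    }],
    temperature=0.8,
    top_p=0.8)
print(response)
```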
@@ -604,14 +580,6 @@ response = client.chat.completions.create(
 print(response)
 ```
 
-### vLLM
-
-TODO
-
-### Ollama
-
-TODO
-
 ## License
 
 This project is released under the MIT license, while InternLM2 is licensed under the Apache-2.0 license.
@@ -684,6 +652,8 @@ InternVL 2.0 是一个多模态大语言模型系列,包含各种规模的模型。
 | MathVista<sub>testmini</sub> | 58.1 | 57.7 | 53.5 | 59.4 |
 | OpenCompass<sub>avg</sub> | 63.5 | 64.4 | 61.7 | 66.4 |
 
+- 关于更多的细节以及评测复现,请看我们的[评测指南](https://internvl.readthedocs.io/en/latest/internvl2.0/evaluation.html)。
+
 - 我们同时使用 InternVL 和 VLMEvalKit 仓库进行模型评估。具体来说,DocVQA、ChartQA、InfoVQA、TextVQA、MME、AI2D、MMBench、CCBench、MMVet 和 SEED-Image 的结果是使用 InternVL 仓库测试的。OCRBench、RealWorldQA、HallBench 和 MathVista 是使用 VLMEvalKit 进行评估的。
 
 - 对于MMMU,我们报告了原始分数(左侧:InternVL系列模型使用InternVL代码库评测,其他模型的分数来自其技术报告或网页)和VLMEvalKit分数(右侧:从OpenCompass排行榜收集)。
@@ -742,7 +712,7 @@ InternVL 2.0 是一个多模态大语言模型系列,包含各种规模的模型。
 
 ## 微调
 
-
+许多仓库现在都支持 InternVL 系列模型的微调,包括 [InternVL](https://github.com/OpenGVLab/InternVL)、[SWIFT](https://github.com/modelscope/ms-swift)、[XTurner](https://github.com/InternLM/xtuner) 等。请参阅它们的文档以获取更多微调细节。
 
 ## 部署
 
@@ -751,7 +721,7 @@ InternVL 2.0 是一个多模态大语言模型系列,包含各种规模的模型。
 
 LMDeploy 是由 MMRazor 和 MMDeploy 团队开发的用于压缩、部署和服务大语言模型(LLM)的工具包。
 
 ```sh
-pip install lmdeploy
+pip install lmdeploy==0.5.3
 ```
 
 LMDeploy 将多模态视觉-语言模型(VLM)的复杂推理过程抽象为一个易于使用的管道,类似于大语言模型(LLM)的推理管道。
@@ -759,16 +729,12 @@ LMDeploy 将多模态视觉-语言模型(VLM)的复杂推理过程抽象为一个易于使用的管道,类似于大语言模型(LLM)的推理管道。
 
 #### 一个“你好,世界”示例
 
 ```python
-from lmdeploy import pipeline, TurbomindEngineConfig, ChatTemplateConfig
+from lmdeploy import pipeline, TurbomindEngineConfig
 from lmdeploy.vl import load_image
 
 model = 'OpenGVLab/InternVL2-26B'
-system_prompt = '我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。'
 image = load_image('https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/tests/data/tiger.jpeg')
-chat_template_config = ChatTemplateConfig('internvl-internlm2')
-chat_template_config.meta_instruction = system_prompt
-pipe = pipeline(model, chat_template_config=chat_template_config,
-                backend_config=TurbomindEngineConfig(session_len=8192))
+pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=8192))
 response = pipe(('describe this image', image))
 print(response.text)
 ```
@@ -780,16 +746,12 @@ print(response.text)
 
 在处理多张图像时,可以将它们全部放入一个列表中。请注意,多张图像会导致输入 token 数量增加,因此通常需要增加上下文窗口的大小。
 
 ```python
-from lmdeploy import pipeline, TurbomindEngineConfig, ChatTemplateConfig
+from lmdeploy import pipeline, TurbomindEngineConfig
 from lmdeploy.vl import load_image
 from lmdeploy.vl.constants import IMAGE_TOKEN
 
 model = 'OpenGVLab/InternVL2-26B'
-
-chat_template_config = ChatTemplateConfig('internvl-internlm2')
-chat_template_config.meta_instruction = system_prompt
-pipe = pipeline(model, chat_template_config=chat_template_config,
-                backend_config=TurbomindEngineConfig(session_len=8192))
+pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=8192))
 
 image_urls=[
 'https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/demo/resources/human-pose.jpg',
@@ -797,6 +759,7 @@ image_urls=[
 ]
 
 images = [load_image(img_url) for img_url in image_urls]
+# Numbering images improves multi-image conversations
 response = pipe((f'Image-1: {IMAGE_TOKEN}\nImage-2: {IMAGE_TOKEN}\ndescribe these two images', images))
 print(response.text)
 ```
@@ -806,15 +769,11 @@ print(response.text)
 
 使用批量Prompt进行推理非常简单;只需将它们放在一个列表结构中:
 
 ```python
-from lmdeploy import pipeline, TurbomindEngineConfig, ChatTemplateConfig
+from lmdeploy import pipeline, TurbomindEngineConfig
 from lmdeploy.vl import load_image
 
 model = 'OpenGVLab/InternVL2-26B'
-
-chat_template_config = ChatTemplateConfig('internvl-internlm2')
-chat_template_config.meta_instruction = system_prompt
-pipe = pipeline(model, chat_template_config=chat_template_config,
-                backend_config=TurbomindEngineConfig(session_len=8192))
+pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=8192))
 
 image_urls=[
 "https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/demo/resources/human-pose.jpg",
@@ -830,15 +789,11 @@ print(response)
 
 使用管道进行多轮对话有两种方法。一种是根据 OpenAI 的格式构建消息并使用上述方法,另一种是使用 `pipeline.chat` 接口。
 
 ```python
-from lmdeploy import pipeline, TurbomindEngineConfig, ChatTemplateConfig, GenerationConfig
+from lmdeploy import pipeline, TurbomindEngineConfig, GenerationConfig
 from lmdeploy.vl import load_image
 
 model = 'OpenGVLab/InternVL2-26B'
-
-chat_template_config = ChatTemplateConfig('internvl-internlm2')
-chat_template_config.meta_instruction = system_prompt
-pipe = pipeline(model, chat_template_config=chat_template_config,
-                backend_config=TurbomindEngineConfig(session_len=8192))
+pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=8192))
 
 image = load_image('https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/demo/resources/human-pose.jpg')
 gen_config = GenerationConfig(top_k=40, top_p=0.8, temperature=0.8)
@@ -850,20 +805,10 @@ print(sess.response.text)
 
 #### API部署
 
-为了将InternVL2部署成API,请先配置聊天模板配置文件。创建如下的 JSON 文件 `chat_template.json`。
-
-```json
-{
-    "model_name":"internvl-internlm2",
-    "meta_instruction":"我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。",
-    "stop_words":["<|im_start|>", "<|im_end|>"]
-}
-```
-
 LMDeploy 的 `api_server` 使模型能够通过一个命令轻松打包成服务。提供的 RESTful API 与 OpenAI 的接口兼容。以下是服务启动的示例:
 
 ```shell
-lmdeploy serve api_server OpenGVLab/InternVL2-26B --backend turbomind --server-port 23333
+lmdeploy serve api_server OpenGVLab/InternVL2-26B --backend turbomind --server-port 23333
 ```
 
 为了使用OpenAI风格的API接口,您需要安装OpenAI:
@@ -900,14 +845,6 @@ response = client.chat.completions.create(
 print(response)
 ```
 
-### vLLM
-
-TODO
-
-### Ollama
-
-TODO
-
 ## 开源许可证
 
 该项目采用 MIT 许可证发布,而 InternLM2 则采用 Apache-2.0 许可证。
modeling_intern_vit.py
CHANGED
@@ -20,18 +20,12 @@ from transformers.utils import logging
 from .configuration_intern_vit import InternVisionConfig
 
 try:
-    try:  # v1
-        from flash_attn.flash_attn_interface import \
-            flash_attn_unpadded_qkvpacked_func
-    except:  # v2
-        from flash_attn.flash_attn_interface import \
-            flash_attn_varlen_qkvpacked_func as flash_attn_unpadded_qkvpacked_func
-
     from flash_attn.bert_padding import pad_input, unpad_input
-
+    from flash_attn.flash_attn_interface import \
+        flash_attn_varlen_qkvpacked_func
     has_flash_attn = True
 except:
-    print('FlashAttention is not installed.')
+    print('FlashAttention2 is not installed.')
    has_flash_attn = False
 
 logger = logging.get_logger(__name__)
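The deleted nested try/except handled the FlashAttention v1-to-v2 rename (`flash_attn_unpadded_qkvpacked_func` became `flash_attn_varlen_qkvpacked_func`); after this change the module requires v2. If both versions had to be supported, inverting the removed fallback is one option; a sketch:

```python
# Sketch: prefer the FlashAttention v2 name, fall back to the v1 alias.
try:
    from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func
except ImportError:  # FlashAttention v1 only
    from flash_attn.flash_attn_interface import \
        flash_attn_unpadded_qkvpacked_func as flash_attn_varlen_qkvpacked_func
```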
@@ -74,7 +68,7 @@ class FlashAttention(nn.Module):
                 max_s = seqlen
                 cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
                                           device=qkv.device)
-                output = flash_attn_unpadded_qkvpacked_func(
+                output = flash_attn_varlen_qkvpacked_func(
                     qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                     softmax_scale=self.softmax_scale, causal=causal
                 )
@@ -84,7 +78,7 @@ class FlashAttention(nn.Module):
                 x = rearrange(qkv, 'b s three h d -> b s (three h d)')
                 x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask)
                 x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
-                output_unpad = flash_attn_unpadded_qkvpacked_func(
+                output_unpad = flash_attn_varlen_qkvpacked_func(
                     x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                     softmax_scale=self.softmax_scale, causal=causal
                 )
@@ -93,7 +87,7 @@ class FlashAttention(nn.Module):
                                    'b s (h d) -> b s h d', h=nheads)
         else:
             assert max_s is not None
-            output = flash_attn_unpadded_qkvpacked_func(
+            output = flash_attn_varlen_qkvpacked_func(
                 qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                 softmax_scale=self.softmax_scale, causal=causal
             )
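For readers unfamiliar with the varlen API: the function takes QKV packed along a flattened token axis plus cumulative sequence lengths, exactly as the hunks above construct them; a self-contained sketch of the calling convention, assuming flash-attn v2 on a CUDA device:

```python
# Sketch of the flash_attn_varlen_qkvpacked_func calling convention.
import torch
from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func

batch_size, seqlen, nheads, headdim = 2, 16, 4, 64
# QKV packed along the token axis: (total_tokens, 3, heads, head_dim).
qkv = torch.randn(batch_size * seqlen, 3, nheads, headdim,
                  dtype=torch.float16, device='cuda')
# cu_seqlens[i] marks where sequence i starts in the packed token axis.
cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen,
                          dtype=torch.int32, device='cuda')
output = flash_attn_varlen_qkvpacked_func(
    qkv, cu_seqlens, seqlen, 0.0,  # dropout_p = 0.0 for inference
    softmax_scale=None, causal=False
)
print(output.shape)  # (total_tokens, nheads, headdim)
```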