czczup committed
Commit 746114f
1 Parent(s): b038051

Upload folder using huggingface_hub

Files changed (1)
  1. modeling_internvl_chat.py +2 -1
modeling_internvl_chat.py CHANGED
@@ -17,7 +17,7 @@ from transformers.utils import ModelOutput, logging
 
 from .configuration_internvl_chat import InternVLChatConfig
 from .conversation import get_conv_template
-from .modeling_intern_vit import InternVisionModel
+from .modeling_intern_vit import InternVisionModel, has_flash_attn
 
 logger = logging.get_logger(__name__)
 
@@ -48,6 +48,7 @@ class InternVLChatModel(PreTrainedModel):
         self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
         self.downsample_ratio = config.downsample_ratio
         self.ps_version = config.ps_version
+        use_flash_attn = use_flash_attn if has_flash_attn else False
         config.vision_config.use_flash_attn = True if use_flash_attn else False
         config.llm_config._attn_implementation = 'flash_attention_2' if use_flash_attn else 'eager'
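The added line forces `use_flash_attn` to `False` whenever `has_flash_attn` is falsy, so the model falls back to eager attention instead of failing later when the optional `flash_attn` package is not installed. The diff imports `has_flash_attn` from `modeling_intern_vit` but does not show its definition; below is a minimal sketch of the conventional guarded-import pattern such a flag is usually derived from. The probed symbol and the exact exception handling are assumptions, not necessarily what `modeling_intern_vit.py` actually does.

```python
# Hypothetical sketch of how modeling_intern_vit.py could define has_flash_attn:
# probe the optional flash_attn package at import time and record whether it
# is usable. (Assumption: the real module may probe different symbols.)
try:
    from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func  # noqa: F401
    has_flash_attn = True
except ImportError:
    # flash_attn is not installed (or is broken); callers downgrade gracefully.
    has_flash_attn = False
```

With this guard in place, constructing the model with `use_flash_attn=True` on a machine without flash-attn quietly selects the `'eager'` attention implementation for both the vision tower and the LLM, rather than raising an ImportError at runtime.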