import copy
import os
from typing import Union

from transformers import LlamaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)


class InternVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`InternVisionModel`]. It is used to
    instantiate a vision encoder according to the specified arguments, defining the model architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            Number of color channels in the input images (e.g., 3 for RGB).
        patch_size (`int`, *optional*, defaults to 14):
            The size (resolution) of each patch.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        qkv_bias (`bool`, *optional*, defaults to `False`):
            Whether to add a bias to the queries, keys and values in the self-attention layers.
        hidden_size (`int`, *optional*, defaults to 3200):
            Dimensionality of the encoder layers and the pooler layer.
        num_attention_heads (`int`, *optional*, defaults to 25):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 12800):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        qk_normalization (`bool`, *optional*, defaults to `True`):
            Whether to normalize the queries and keys in the self-attention layers.
        num_hidden_layers (`int`, *optional*, defaults to 48):
            Number of hidden layers in the Transformer encoder.
        use_flash_attn (`bool`, *optional*, defaults to `True`):
            Whether to use the flash attention mechanism.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the layer normalization layers.
        dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            Dropout rate for stochastic depth.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 0.1):
            A factor for layer scale.
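
    Example (a minimal usage sketch; the values shown simply echo the defaults declared in `__init__` below):

    ```python
    >>> config = InternVisionConfig()
    >>> (config.hidden_size, config.num_hidden_layers, config.patch_size)
    (3200, 48, 14)
    ```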
""" |
|
|
|
model_type = 'intern_vit_6b' |
|
|
|
def __init__( |
|
self, |
|
num_channels=3, |
|
patch_size=14, |
|
image_size=224, |
|
qkv_bias=False, |
|
hidden_size=3200, |
|
num_attention_heads=25, |
|
intermediate_size=12800, |
|
qk_normalization=True, |
|
num_hidden_layers=48, |
|
use_flash_attn=True, |
|
hidden_act='gelu', |
|
layer_norm_eps=1e-6, |
|
dropout=0.0, |
|
drop_path_rate=0.0, |
|
attention_dropout=0.0, |
|
initializer_range=0.02, |
|
initializer_factor=0.1, |
|
**kwargs, |
|
): |
|
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.drop_path_rate = drop_path_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
        self.qk_normalization = qk_normalization
        self.use_flash_attn = use_flash_attn

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
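
        # When loading from a full InternVLChatConfig checkpoint, the vision settings are nested under the
        # 'vision_config' key; unwrap them so this vision-only config is built from the right fields.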
        if 'vision_config' in config_dict:
            config_dict = config_dict['vision_config']

        if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )

        return cls.from_dict(config_dict, **kwargs)


class InternVLChatConfig(PretrainedConfig):
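    r"""
    Configuration class for the composite InternVL chat model. It stores an [`InternVisionConfig`] for the vision
    encoder and a `LlamaConfig` for the language model, alongside the model-level options kept below (e.g.
    `use_backbone_lora`, `select_layer`, `downsample_ratio`, `template`). Both sub-configs may be passed as plain
    dicts, as loaded from a checkpoint's JSON.

    Example (a minimal usage sketch; the tiny layer counts are illustrative overrides, not recommended settings):

    ```python
    >>> config = InternVLChatConfig(
    ...     vision_config={'num_hidden_layers': 2},
    ...     llm_config={'num_hidden_layers': 2},
    ... )
    >>> config.vision_config.num_hidden_layers
    2
    ```
    """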
    model_type = 'internvl_chat'
    is_composition = True

    def __init__(
            self,
            vision_config=None,
            llm_config=None,
            use_backbone_lora=0,
            use_llm_lora=0,
            pad2square=False,
            select_layer=-4,
            force_image_size=None,
            downsample_ratio=0.5,
            template=None,
            **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. Initializing the InternVisionConfig with default values.')

        if llm_config is None:
            llm_config = {}
            logger.info('llm_config is None. Initializing the LlamaConfig with default values.')
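        # Expand the (possibly dict-valued) sub-configs into their config classes.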
        self.vision_config = InternVisionConfig(**vision_config)
        self.llm_config = LlamaConfig(**llm_config)
        self.use_backbone_lora = use_backbone_lora
        self.use_llm_lora = use_llm_lora
        self.pad2square = pad2square
        self.select_layer = select_layer
        self.force_image_size = force_image_size
        self.downsample_ratio = downsample_ratio
        self.template = template

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Overrides the default [`~PretrainedConfig.to_dict`].

        Returns:
            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
        """
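        # Start from a copy of the instance attributes, then replace the nested config objects with their own
        # dict serializations so the result is plain, JSON-serializable data.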
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['llm_config'] = self.llm_config.to_dict()
        output['model_type'] = self.__class__.model_type
        output['use_backbone_lora'] = self.use_backbone_lora
        output['use_llm_lora'] = self.use_llm_lora
        output['pad2square'] = self.pad2square
        output['select_layer'] = self.select_layer
        output['force_image_size'] = self.force_image_size
        output['downsample_ratio'] = self.downsample_ratio
        output['template'] = self.template

        return output