# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OWL-ViT model configuration"""

import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union


if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class OwlViTTextConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of an [`OwlViTTextModel`]. It is used to instantiate an
    OWL-ViT text encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the OWL-ViT
    [google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 49408):
            Vocabulary size of the OWL-ViT text model. Defines the number of different tokens that can be represented
            by the `input_ids` passed when calling [`OwlViTTextModel`].
        hidden_size (`int`, *optional*, defaults to 512):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 2048):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        max_position_embeddings (`int`, *optional*, defaults to 16):
            The maximum sequence length that this model might ever be used with. OWL-ViT pads and truncates its text
            queries to 16 tokens, hence the small default.
        hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).
        pad_token_id (`int`, *optional*, defaults to 0):
            The id of the padding token in the input sequences.
        bos_token_id (`int`, *optional*, defaults to 49406):
            The id of the beginning-of-sequence token in the input sequences.
        eos_token_id (`int`, *optional*, defaults to 49407):
            The id of the end-of-sequence token in the input sequences.

    Example:

    ```python
    >>> from transformers import OwlViTTextConfig, OwlViTTextModel

    >>> # Initializing an OwlViTTextConfig with google/owlvit-base-patch32 style defaults
    >>> configuration = OwlViTTextConfig()

    >>> # Initializing an OwlViTTextModel (with random weights) from that configuration
    >>> model = OwlViTTextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
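
    >>> # The text sub-config can also be loaded straight from a full OWL-ViT
    >>> # checkpoint; this class's `from_pretrained` extracts the nested `text_config`.
    >>> configuration = OwlViTTextConfig.from_pretrained("google/owlvit-base-patch32")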
```""" | |
model_type = "owlvit_text_model" | |
def __init__( | |
self, | |
vocab_size=49408, | |
hidden_size=512, | |
intermediate_size=2048, | |
num_hidden_layers=12, | |
num_attention_heads=8, | |
max_position_embeddings=16, | |
hidden_act="quick_gelu", | |
layer_norm_eps=1e-5, | |
attention_dropout=0.0, | |
initializer_range=0.02, | |
initializer_factor=1.0, | |
pad_token_id=0, | |
bos_token_id=49406, | |
eos_token_id=49407, | |
**kwargs, | |
): | |
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) | |
self.vocab_size = vocab_size | |
self.hidden_size = hidden_size | |
self.intermediate_size = intermediate_size | |
self.num_hidden_layers = num_hidden_layers | |
self.num_attention_heads = num_attention_heads | |
self.max_position_embeddings = max_position_embeddings | |
self.hidden_act = hidden_act | |
self.layer_norm_eps = layer_norm_eps | |
self.attention_dropout = attention_dropout | |
self.initializer_range = initializer_range | |
self.initializer_factor = initializer_factor | |

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class OwlViTVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of an [`OwlViTVisionModel`]. It is used to instantiate
    an OWL-ViT image encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the OWL-ViT
    [google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input images.
        image_size (`int`, *optional*, defaults to 768):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 32):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).

    Example:

    ```python
    >>> from transformers import OwlViTVisionConfig, OwlViTVisionModel

    >>> # Initializing an OwlViTVisionConfig with google/owlvit-base-patch32 style defaults
    >>> configuration = OwlViTVisionConfig()

    >>> # Initializing an OwlViTVisionModel (with random weights) from that configuration
    >>> model = OwlViTVisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
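
    >>> # The vision sub-config can also be loaded straight from a full OWL-ViT
    >>> # checkpoint; this class's `from_pretrained` extracts the nested `vision_config`.
    >>> configuration = OwlViTVisionConfig.from_pretrained("google/owlvit-base-patch32")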
```""" | |
model_type = "owlvit_vision_model" | |
def __init__( | |
self, | |
hidden_size=768, | |
intermediate_size=3072, | |
num_hidden_layers=12, | |
num_attention_heads=12, | |
num_channels=3, | |
image_size=768, | |
patch_size=32, | |
hidden_act="quick_gelu", | |
layer_norm_eps=1e-5, | |
attention_dropout=0.0, | |
initializer_range=0.02, | |
initializer_factor=1.0, | |
**kwargs, | |
): | |
super().__init__(**kwargs) | |
self.hidden_size = hidden_size | |
self.intermediate_size = intermediate_size | |
self.num_hidden_layers = num_hidden_layers | |
self.num_attention_heads = num_attention_heads | |
self.num_channels = num_channels | |
self.image_size = image_size | |
self.patch_size = patch_size | |
self.hidden_act = hidden_act | |
self.layer_norm_eps = layer_norm_eps | |
self.attention_dropout = attention_dropout | |
self.initializer_range = initializer_range | |
self.initializer_factor = initializer_factor | |

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class OwlViTConfig(PretrainedConfig):
    r"""
    [`OwlViTConfig`] is the configuration class to store the configuration of an [`OwlViTModel`]. It is used to
    instantiate an OWL-ViT model according to the specified arguments, defining the text model and vision model
    configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the OWL-ViT
    [google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`OwlViTTextConfig`].
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`OwlViTVisionConfig`].
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and vision projection layers.
        logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
            The initial value of the *logit_scale* parameter (2.6592 = ln(1 / 0.07), the temperature initialization
            inherited from CLIP). Default is used as per the original OWL-ViT implementation.
        return_dict (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return a dictionary. If `False`, returns a tuple.
        kwargs (*optional*):
            Dictionary of keyword arguments.
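
    Example:

    ```python
    >>> from transformers import OwlViTConfig, OwlViTModel

    >>> # An illustrative example mirroring the sub-config examples above;
    >>> # `OwlViTModel` is the full text+vision model this config drives.
    >>> configuration = OwlViTConfig()

    >>> # Initializing an OwlViTModel (with random weights) from that configuration
    >>> model = OwlViTModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # The full config can also be loaded from a pretrained checkpoint
    >>> configuration = OwlViTConfig.from_pretrained("google/owlvit-base-patch32")
    ```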
""" | |
model_type = "owlvit" | |
def __init__( | |
self, | |
text_config=None, | |
vision_config=None, | |
projection_dim=512, | |
logit_scale_init_value=2.6592, | |
return_dict=True, | |
**kwargs, | |
): | |
super().__init__(**kwargs) | |
if text_config is None: | |
text_config = {} | |
logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.") | |
if vision_config is None: | |
vision_config = {} | |
logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.") | |
self.text_config = OwlViTTextConfig(**text_config) | |
self.vision_config = OwlViTVisionConfig(**vision_config) | |
self.projection_dim = projection_dim | |
self.logit_scale_init_value = logit_scale_init_value | |
self.return_dict = return_dict | |
self.initializer_factor = 1.0 | |

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        r"""
        Instantiate an [`OwlViTConfig`] (or a derived class) from OWL-ViT text model and OWL-ViT vision model
        configuration dictionaries.

        Returns:
            [`OwlViTConfig`]: An instance of a configuration object
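
        Example:

        ```python
        >>> from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig

        >>> # An illustrative sketch: compose a full config from sub-config dicts
        >>> # (this method expects plain dicts, hence the `to_dict()` calls).
        >>> text_config = OwlViTTextConfig().to_dict()
        >>> vision_config = OwlViTVisionConfig().to_dict()
        >>> config = OwlViTConfig.from_text_vision_configs(text_config, vision_config)
        ```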
""" | |
config_dict = {} | |
config_dict["text_config"] = text_config | |
config_dict["vision_config"] = vision_config | |
return cls.from_dict(config_dict, **kwargs) | |


class OwlViTOnnxConfig(OnnxConfig):
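    """
    ONNX export configuration for OWL-ViT: declares the model's input/output
    names and dynamic axes for the `transformers.onnx` export pipeline.

    Example (an illustrative sketch, assuming the legacy `transformers.onnx`
    export helper that this class plugs into; newer releases route ONNX export
    through Optimum instead, so verify against your installed version):

    ```python
    >>> from pathlib import Path
    >>> from transformers import OwlViTModel, OwlViTProcessor
    >>> from transformers.models.owlvit.configuration_owlvit import OwlViTOnnxConfig
    >>> from transformers.onnx import export

    >>> model = OwlViTModel.from_pretrained("google/owlvit-base-patch32")
    >>> processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    >>> onnx_config = OwlViTOnnxConfig(model.config)

    >>> # export returns the matched input and output names of the traced graph
    >>> onnx_inputs, onnx_outputs = export(
    ...     processor, model, onnx_config, onnx_config.default_onnx_opset, Path("owlvit.onnx")
    ... )
    ```
    """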

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        # Build text dummies with the tokenizer and image dummies with the image
        # processor, then merge them into a single feed dict for tracing.
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14