# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ELECTRA model configuration"""

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class ElectraConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`ElectraModel`] or a [`TFElectraModel`]. It is
    used to instantiate an ELECTRA model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the ELECTRA
    [google/electra-small-discriminator](https://huggingface.co/google/electra-small-discriminator) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the ELECTRA model. Defines the number of different tokens that can be represented by
            the `input_ids` passed when calling [`ElectraModel`] or [`TFElectraModel`].
        embedding_size (`int`, *optional*, defaults to 128):
            Dimensionality of the token embeddings. When this differs from `hidden_size`, the embeddings are
            projected up to `hidden_size` before entering the encoder.
        hidden_size (`int`, *optional*, defaults to 256):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 4):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 1024):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something
            large just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`ElectraModel`] or [`TFElectraModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        summary_type (`str`, *optional*, defaults to `"first"`):
            Argument used when doing sequence summary. Used in the sequence classification and multiple choice
            models. Has to be one of the following options:

            - `"last"`: Take the last token hidden state (like XLNet).
            - `"first"`: Take the first token hidden state (like BERT).
            - `"mean"`: Take the mean of all tokens hidden states.
            - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
            - `"attn"`: Not implemented now, use multi-head attention.
        summary_use_proj (`bool`, *optional*, defaults to `True`):
            Argument used when doing sequence summary. Used in the sequence classification and multiple choice
            models. Whether or not to add a projection after the vector extraction.
        summary_activation (`str`, *optional*):
            Argument used when doing sequence summary. Used in the sequence classification and multiple choice
            models. Pass `"gelu"` for a gelu activation to the output; any other value will result in no activation.
        summary_last_dropout (`float`, *optional*, defaults to 0.1):
            Argument used when doing sequence summary. Used in the sequence classification and multiple choice
            models. The dropout ratio to be used after the projection and activation.
        position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
            Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, or `"relative_key_query"`.
            `"absolute"` gives standard learned absolute position embeddings. For more information on
            `"relative_key"`, please refer to
            [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
            For more information on `"relative_key_query"`, please refer to *Method 4* in
            [Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        classifier_dropout (`float`, *optional*):
            The dropout ratio for the classification head. If unset, the classification models fall back to
            `hidden_dropout_prob`.

    Examples:

    ```python
    >>> from transformers import ElectraConfig, ElectraModel

    >>> # Initializing an ELECTRA google/electra-small-discriminator style configuration
    >>> configuration = ElectraConfig()

    >>> # Initializing a model (with random weights) from the google/electra-small-discriminator style configuration
    >>> model = ElectraModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
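
    >>> # A hedged sketch (illustrative values, not an official checkpoint): any of the
    >>> # arguments documented above can be overridden at construction, e.g. a wider model
    >>> # using relative position embeddings
    >>> custom_configuration = ElectraConfig(
    ...     hidden_size=512,
    ...     num_attention_heads=8,
    ...     intermediate_size=2048,
    ...     position_embedding_type="relative_key",
    ... )
    >>> custom_configuration.hidden_size
    512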
    ```"""

    model_type = "electra"

    def __init__(
        self,
        vocab_size=30522,
        embedding_size=128,
        hidden_size=256,
        num_hidden_layers=12,
        num_attention_heads=4,
        intermediate_size=1024,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        summary_type="first",
        summary_use_proj=True,
        summary_activation="gelu",
        summary_last_dropout=0.1,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class ElectraOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice tasks add a "choice" axis between the batch and sequence axes.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        # All three model inputs share the same dynamic axes for ONNX export.
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
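

# A minimal usage sketch, not part of the upstream module: it assumes an installed
# `transformers` build that still ships the ONNX export configs, and imports the classes by
# their public paths so the relative imports above resolve through the package.
if __name__ == "__main__":
    from transformers import ElectraConfig as _ElectraConfig
    from transformers.models.electra.configuration_electra import ElectraOnnxConfig as _ElectraOnnxConfig

    # Defaults mirror google/electra-small-discriminator: 12 layers, hidden size 256, 4 heads.
    config = _ElectraConfig()
    print(config.num_hidden_layers, config.hidden_size, config.num_attention_heads)

    # The ONNX config exposes the dynamic axes used when exporting the model to ONNX.
    onnx_config = _ElectraOnnxConfig(config, task="default")
    print(onnx_config.inputs)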