# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pop2Piano model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class Pop2PianoConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Pop2PianoForConditionalGeneration`]. It is used
    to instantiate a Pop2PianoForConditionalGeneration model according to the specified arguments, defining the model
    architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the
    Pop2Piano [sweetcocoa/pop2piano](https://huggingface.co/sweetcocoa/pop2piano) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Arguments:
        vocab_size (`int`, *optional*, defaults to 2400):
            Vocabulary size of the `Pop2PianoForConditionalGeneration` model. Defines the number of different tokens
            that can be represented by the `input_ids` passed when calling [`Pop2PianoForConditionalGeneration`].
        composer_vocab_size (`int`, *optional*, defaults to 21):
            Denotes the number of composers.
        d_model (`int`, *optional*, defaults to 512):
            Size of the encoder layers and the pooler layer.
        d_kv (`int`, *optional*, defaults to 64):
            Size of the key, query, value projections per attention head. The `inner_dim` of the projection layer will
            be defined as `num_heads * d_kv`.
        d_ff (`int`, *optional*, defaults to 2048):
            Size of the intermediate feed forward layer in each `Pop2PianoBlock`.
        num_layers (`int`, *optional*, defaults to 6):
            Number of hidden layers in the Transformer encoder.
        num_decoder_layers (`int`, *optional*):
            Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set.
        num_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        relative_attention_num_buckets (`int`, *optional*, defaults to 32):
            The number of buckets to use for each attention layer.
        relative_attention_max_distance (`int`, *optional*, defaults to 128):
            The maximum distance of the longer sequences for the bucket separation.
        dropout_rate (`float`, *optional*, defaults to 0.1):
            The ratio for all dropout layers.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the layer normalization layers.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization
            testing).
        feed_forward_proj (`string`, *optional*, defaults to `"gated-gelu"`):
            Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        dense_act_fn (`string`, *optional*, defaults to `"relu"`):
            Type of Activation Function to be used in `Pop2PianoDenseActDense` and in `Pop2PianoDenseGatedActDense`.
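
    Example (a minimal usage sketch of this configuration class, following the usual `transformers` config pattern):

    ```python
    >>> from transformers import Pop2PianoConfig, Pop2PianoForConditionalGeneration

    >>> # Initialize a Pop2PianoConfig with the default hyperparameters
    >>> # (the defaults mirror the sweetcocoa/pop2piano checkpoint, as noted above)
    >>> configuration = Pop2PianoConfig()

    >>> # Initialize a model (with random weights) from that configuration
    >>> model = Pop2PianoForConditionalGeneration(configuration)

    >>> # Access the configuration back from the model
    >>> configuration = model.config
    ```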
""" | |
model_type = "pop2piano" | |
keys_to_ignore_at_inference = ["past_key_values"] | |
def __init__( | |
self, | |
vocab_size=2400, | |
composer_vocab_size=21, | |
d_model=512, | |
d_kv=64, | |
d_ff=2048, | |
num_layers=6, | |
num_decoder_layers=None, | |
num_heads=8, | |
relative_attention_num_buckets=32, | |
relative_attention_max_distance=128, | |
dropout_rate=0.1, | |
layer_norm_epsilon=1e-6, | |
initializer_factor=1.0, | |
feed_forward_proj="gated-gelu", # noqa | |
is_encoder_decoder=True, | |
use_cache=True, | |
pad_token_id=0, | |
eos_token_id=1, | |
dense_act_fn="relu", | |
**kwargs, | |
): | |
        self.vocab_size = vocab_size
        self.composer_vocab_size = composer_vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.dense_act_fn = dense_act_fn
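        # A "gated-*" value for `feed_forward_proj` (e.g. the default "gated-gelu") selects the gated
        # feed-forward variant (`Pop2PianoDenseGatedActDense`); `dense_act_fn` supplies its inner activation.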
        self.is_gated_act = self.feed_forward_proj.split("-")[0] == "gated"
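        # Common attribute names expected elsewhere in the library, aliased to the argument names used above.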
        self.hidden_size = self.d_model
        self.num_attention_heads = num_heads
        self.num_hidden_layers = num_layers

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )