onkarpandit-g42 committed on
Commit
f42b5a3
1 Parent(s): 3d5812e

Upload configuration_jais.py with huggingface_hub

Files changed (1)
  1. configuration_jais.py +196 -0
configuration_jais.py ADDED
@@ -0,0 +1,196 @@
+ # coding=utf-8
+ # Copyright 2023 The OpenAI Team Authors and HuggingFace Inc. team.
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ # Copyright 2023 Cerebras Systems.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ JAIS configuration"""
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+ class JAISConfig(PretrainedConfig):
+     """
+     This is the configuration class to store the configuration of a [`JAISModel`]. It is used to instantiate a JAIS
+     model according to the specified arguments, defining the model architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 50257):
+             Vocabulary size of the JAIS model. Defines the number of different tokens that can be represented by the
+             `inputs_ids` passed when calling [`JAISModel`].
+         n_positions (`int`, *optional*, defaults to 1024):
+             The maximum sequence length that this model might ever be used with. Typically set this to something large
+             just in case (e.g., 512 or 1024 or 2048).
+         n_embd (`int`, *optional*, defaults to 768):
+             Dimensionality of the embeddings and hidden states.
+         n_layer (`int`, *optional*, defaults to 12):
+             Number of hidden layers in the Transformer encoder.
+         n_head (`int`, *optional*, defaults to 12):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         n_inner (`int`, *optional*, defaults to None):
+             Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd
+         activation_function (`str`, *optional*, defaults to `"gelu_new"`):
+             Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new", "swiglu"]`.
+         resid_pdrop (`float`, *optional*, defaults to 0.1):
+             The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+         embd_pdrop (`float`, *optional*, defaults to 0.1):
+             The dropout ratio for the embeddings.
+         attn_pdrop (`float`, *optional*, defaults to 0.1):
+             The dropout ratio for the attention.
+         layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
+             The epsilon to use in the layer normalization layers.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         scale_attn_weights (`bool`, *optional*, defaults to `True`):
+             Scale attention weights by dividing by sqrt(hidden_size).
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models).
+         scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`):
+             Whether to additionally scale attention weights by `1 / (layer_idx + 1)`.
+         reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`):
+             Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention
+             dot-product/softmax to float() when training with mixed precision.
+         position_embedding_type (`str`, *optional*, defaults to `"learned"`):
+             Positional embedding can be either `"alibi"` or `"learned"`.
+         mup_width_scale (`float`, *optional*, defaults to 1.0):
+             muP parameter to scale learning rate and initializers. Calculated as (`d_model,0 / d_model`), where
+             `d_model` is the model's width and `d_model,0` is the proxy model's width.
+         mup_embeddings_scale (`float`, *optional*, defaults to 1.0):
+             muP parameter to scale token and position embeddings.
+         mup_output_alpha (`float`, *optional*, defaults to 1.0):
+             muP parameter to scale output logits (`output_logits_scale = mup_output_alpha * mup_width_scale`).
+         mup_scale_qk_dot_by_d (`bool`, *optional*, defaults to `False`):
+             Scale attention weights by dividing by hidden_size instead of sqrt(hidden_size). Requires
+             `scale_attn_weights` to be set to `True` as well.
+         alibi_scaling (`Dict`, *optional*):
+             Dictionary containing the scaling configuration for ALiBi embeddings. Currently only supports linear
+             scaling strategy. Can specify either the scaling `factor` (must be a float greater than 1) for fixed scaling
+             or `train_seq_len` for dynamic scaling on input samples with sequence length > `train_seq_len`. The expected
+             formats are `{"type": strategy name, "factor": scaling factor}` or
+             `{"type": strategy name, "train_seq_len": training sequence length}`.
+
+     Example:
+
+     ```python
+     >>> from transformers import JAISConfig, JAISModel
+
+     >>> # Initializing a JAIS configuration
+     >>> configuration = JAISConfig()
+
+     >>> # Initializing a model (with random weights) from the configuration
+     >>> model = JAISModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "jais"
+     keys_to_ignore_at_inference = ["past_key_values"]
+     attribute_map = {
+         "hidden_size": "n_embd",
+         "max_position_embeddings": "n_positions",
+         "num_attention_heads": "n_head",
+         "num_hidden_layers": "n_layer",
+     }
+
+     def __init__(
+         self,
+         vocab_size=50257,
+         n_positions=1024,
+         n_embd=768,
+         n_layer=12,
+         n_head=12,
+         n_inner=None,
+         activation_function="gelu_new",
+         resid_pdrop=0.1,
+         embd_pdrop=0.1,
+         attn_pdrop=0.1,
+         layer_norm_epsilon=1e-5,
+         initializer_range=0.02,
+         scale_attn_weights=True,
+         use_cache=True,
+         bos_token_id=50256,
+         eos_token_id=50256,
+         scale_attn_by_inverse_layer_idx=False,
+         reorder_and_upcast_attn=False,
+         position_embedding_type="learned",
+         mup_width_scale=1.0,
+         mup_embeddings_scale=1.0,
+         mup_output_alpha=1.0,
+         mup_scale_qk_dot_by_d=False,
+         alibi_scaling=None,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.n_positions = n_positions
+         self.n_embd = n_embd
+         self.n_layer = n_layer
+         self.n_head = n_head
+         self.n_inner = n_inner
+         self.activation_function = activation_function
+         self.resid_pdrop = resid_pdrop
+         self.embd_pdrop = embd_pdrop
+         self.attn_pdrop = attn_pdrop
+         self.layer_norm_epsilon = layer_norm_epsilon
+         self.initializer_range = initializer_range
+         self.scale_attn_weights = scale_attn_weights
+         self.use_cache = use_cache
+         self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
+         self.reorder_and_upcast_attn = reorder_and_upcast_attn
+
+         self.bos_token_id = bos_token_id
+         self.eos_token_id = eos_token_id
+
+         self.position_embedding_type = position_embedding_type
+         self.mup_width_scale = mup_width_scale
+         self.mup_embeddings_scale = mup_embeddings_scale
+         self.mup_output_alpha = mup_output_alpha
+         self.mup_scale_qk_dot_by_d = mup_scale_qk_dot_by_d
+
+         self.alibi_scaling = alibi_scaling
+         self._alibi_scaling_validation()
+
+         super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+
+     def _alibi_scaling_validation(self):
+         """
+         Validate the `alibi_scaling` configuration.
+         """
+         if self.alibi_scaling is None:
+             return
+
+         if not isinstance(self.alibi_scaling, dict) or len(self.alibi_scaling) != 2:
+             raise ValueError(
+                 "`alibi_scaling` must be a dictionary with two fields, `type` and `factor` or `type` and `train_seq_len`, "
+                 f"got {self.alibi_scaling}"
+             )
+         alibi_scaling_type = self.alibi_scaling.get("type", None)
+         alibi_scaling_factor = self.alibi_scaling.get("factor", None)
+         alibi_dynamic_scaling = self.alibi_scaling.get("train_seq_len", None)
+         if alibi_scaling_type is None or alibi_scaling_type != "linear":
+             raise ValueError(
+                 f"`alibi_scaling`'s type field must be 'linear', got {alibi_scaling_type}"
+             )
+         if alibi_scaling_factor is not None:
+             if not isinstance(alibi_scaling_factor, float) or alibi_scaling_factor <= 1.0:
+                 raise ValueError(f"`alibi_scaling`'s factor field must be a float > 1.0, got {alibi_scaling_factor}")
+         if alibi_dynamic_scaling is not None:
+             if not isinstance(alibi_dynamic_scaling, int) or alibi_dynamic_scaling <= 1:
+                 raise ValueError(f"`alibi_scaling`'s `train_seq_len` field must be an integer > 1, got {alibi_dynamic_scaling}")
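The `alibi_scaling` dictionary described in the docstring is checked by `_alibi_scaling_validation` every time a config is constructed. A minimal sketch of what that validation accepts and rejects, assuming the module is imported directly from the working directory (the numeric values are illustrative only, not taken from any released JAIS checkpoint):

```python
from configuration_jais import JAISConfig

# Fixed linear scaling: "factor" must be a float strictly greater than 1.0.
config = JAISConfig(
    position_embedding_type="alibi",
    alibi_scaling={"type": "linear", "factor": 2.0},
)

# Dynamic linear scaling: "train_seq_len" must be an integer strictly greater than 1.
config = JAISConfig(
    position_embedding_type="alibi",
    alibi_scaling={"type": "linear", "train_seq_len": 2048},
)

# Anything else is rejected, e.g. an integer factor instead of a float:
try:
    JAISConfig(alibi_scaling={"type": "linear", "factor": 2})
except ValueError as err:
    print(err)  # `alibi_scaling`'s factor field must be a float > 1.0, got 2
```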
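Once the file sits in a model repository, the config would typically be resolved through `AutoConfig` with remote code enabled. This is a sketch under that assumption: the repository id is a placeholder, and it presumes the repo's config.json routes `AutoConfig` to `configuration_jais.JAISConfig` via an `auto_map` entry.

```python
from transformers import AutoConfig

# Placeholder repo id; trust_remote_code is required so the custom JAISConfig class is used.
config = AutoConfig.from_pretrained("some-org/some-jais-model", trust_remote_code=True)

print(config.model_type)               # "jais"
print(config.hidden_size)              # alias for n_embd via attribute_map
print(config.max_position_embeddings)  # alias for n_positions
```

The `attribute_map` defined in the class is what lets the generic `hidden_size` and `max_position_embeddings` names resolve to `n_embd` and `n_positions`.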