
shunxing1234 committed
Commit 0c129fb
Parent: bf47f92

Upload configuration_aquila.py

Files changed (1)
  1. configuration_aquila.py +119 -0
configuration_aquila.py ADDED
@@ -0,0 +1,119 @@
# coding=utf-8
# Copyright 2023 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Aquila model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AQUILA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/Aquila-7B": "https://huggingface.co/BAAI/Aquila-7B/resolve/main/config.json",
}


class AquilaConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`AquilaModel`]. It is used to instantiate an
    Aquila model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the Aquila-7B.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the Aquila model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`AquilaModel`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings

    Example:

    ```python
    >>> from transformers import AquilaModel, AquilaConfig

    >>> # Initializing an Aquila aquila-7b style configuration
    >>> configuration = AquilaConfig()

    >>> # Initializing a model from the aquila-7b style configuration
    >>> model = AquilaModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "aquila"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
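A minimal usage sketch (not part of the commit): since the uploaded file keeps the relative imports from the transformers source tree (`from ...configuration_utils import ...`), the snippet below assumes those have been adjusted to absolute imports (e.g. `from transformers.configuration_utils import PretrainedConfig`) so that `configuration_aquila.py` can be imported standalone. The hyperparameter values and the output directory are illustrative, not those of an official checkpoint.

```python
from configuration_aquila import AquilaConfig  # assumes the file is importable locally

# Instantiate a smaller-than-7B variant by overriding a few of the defaults defined above.
config = AquilaConfig(
    hidden_size=2048,
    num_hidden_layers=24,
    num_attention_heads=16,
)

# PretrainedConfig provides JSON (de)serialization out of the box.
config.save_pretrained("./aquila-small")  # writes ./aquila-small/config.json
reloaded = AquilaConfig.from_pretrained("./aquila-small")
assert reloaded.hidden_size == 2048 and reloaded.model_type == "aquila"
```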