ehartford committed on
Commit 6e24c54
1 Parent(s): 88eb5b2

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete list.
Files changed (50)
  1. config.json +48 -0
  2. configuration_hunyuan.py +206 -0
  3. generation_config.json +10 -0
  4. hy.tiktoken +0 -0
  5. model-00001-of-00159.safetensors +3 -0
  6. model-00002-of-00159.safetensors +3 -0
  7. model-00003-of-00159.safetensors +3 -0
  8. model-00004-of-00159.safetensors +3 -0
  9. model-00005-of-00159.safetensors +3 -0
  10. model-00006-of-00159.safetensors +3 -0
  11. model-00008-of-00159.safetensors +3 -0
  12. model-00009-of-00159.safetensors +3 -0
  13. model-00010-of-00159.safetensors +3 -0
  14. model-00011-of-00159.safetensors +3 -0
  15. model-00013-of-00159.safetensors +3 -0
  16. model-00014-of-00159.safetensors +3 -0
  17. model-00015-of-00159.safetensors +3 -0
  18. model-00016-of-00159.safetensors +3 -0
  19. model-00017-of-00159.safetensors +3 -0
  20. model-00018-of-00159.safetensors +3 -0
  21. model-00019-of-00159.safetensors +3 -0
  22. model-00020-of-00159.safetensors +3 -0
  23. model-00021-of-00159.safetensors +3 -0
  24. model-00022-of-00159.safetensors +3 -0
  25. model-00024-of-00159.safetensors +3 -0
  26. model-00025-of-00159.safetensors +3 -0
  27. model-00026-of-00159.safetensors +3 -0
  28. model-00027-of-00159.safetensors +3 -0
  29. model-00028-of-00159.safetensors +3 -0
  30. model-00029-of-00159.safetensors +3 -0
  31. model-00030-of-00159.safetensors +3 -0
  32. model-00031-of-00159.safetensors +3 -0
  33. model-00032-of-00159.safetensors +3 -0
  34. model-00033-of-00159.safetensors +3 -0
  35. model-00034-of-00159.safetensors +3 -0
  36. model-00035-of-00159.safetensors +3 -0
  37. model-00036-of-00159.safetensors +3 -0
  38. model-00037-of-00159.safetensors +3 -0
  39. model-00038-of-00159.safetensors +3 -0
  40. model-00039-of-00159.safetensors +3 -0
  41. model-00040-of-00159.safetensors +3 -0
  42. model-00041-of-00159.safetensors +3 -0
  43. model-00042-of-00159.safetensors +3 -0
  44. model-00045-of-00159.safetensors +3 -0
  45. model-00046-of-00159.safetensors +3 -0
  46. model-00047-of-00159.safetensors +3 -0
  47. model-00048-of-00159.safetensors +3 -0
  48. model-00049-of-00159.safetensors +3 -0
  49. model-00051-of-00159.safetensors +3 -0
  50. model-00052-of-00159.safetensors +3 -0
config.json ADDED
@@ -0,0 +1,48 @@
+{
+  "_name_or_path": "/workspace/models/tencent/Hunyuan-A52B-Pretrain",
+  "architectures": [
+    "HunYuanForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "auto_map": {
+    "AutoConfig": "configuration_hunyuan.HunYuanConfig",
+    "AutoModel": "modeling_hunyuan.HunyuanModel",
+    "AutoModelForCausalLM": "modeling_hunyuan.HunYuanForCausalLM"
+  },
+  "bos_token_id": 1,
+  "capacity_factor": 1.0,
+  "cla_share_factor": 2,
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 6400,
+  "initializer_range": 0.02,
+  "intermediate_size": 18304,
+  "max_position_embeddings": 262144,
+  "model_type": "hunyuan",
+  "moe_drop_tokens": false,
+  "moe_random_routing_dropped_token": false,
+  "moe_topk": 1,
+  "num_attention_heads": 80,
+  "num_experts": 16,
+  "num_hidden_layers": 64,
+  "num_key_value_heads": 8,
+  "num_shared_expert": 1,
+  "pad_token_id": 0,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": {
+    "alpha": 100000.0,
+    "factor": 1.0,
+    "type": "dynamic"
+  },
+  "rope_theta": 10000.0,
+  "tie_word_embeddings": true,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.46.2",
+  "use_cache": true,
+  "use_cla": true,
+  "use_mixed_mlp_moe": true,
+  "use_qk_norm": true,
+  "vocab_size": 128512
+}
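
Since `auto_map` in config.json points at custom classes shipped in this repo (configuration_hunyuan.HunYuanConfig, modeling_hunyuan.HunYuanForCausalLM), loading the checkpoint through transformers requires trust_remote_code=True. A minimal sketch, assuming a local checkout of this repo at a placeholder path:

    from transformers import AutoConfig

    # Placeholder path: any local directory containing this commit's files.
    config = AutoConfig.from_pretrained("./Hunyuan-A52B", trust_remote_code=True)

    # A few of the values set in config.json above:
    print(config.model_type)                    # "hunyuan"
    print(config.num_experts, config.moe_topk)  # 16, 1 (16 routed experts, top-1 routing)
    print(config.max_position_embeddings)       # 262144
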
configuration_hunyuan.py ADDED
@@ -0,0 +1,206 @@
+# Copyright (C) 2024 THL A29 Limited, a Tencent company. All rights reserved.
+#
+# Licensed under the TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://github.com/Tencent/Tencent-Hunyuan-Large/blob/main/License.docx
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""HunYuan model configuration"""
+
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class HunYuanConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`HunYuanModel`]. It is used to instantiate a
+    HunYuan model according to the specified arguments, defining the model architecture. Instantiating a
+    configuration with the defaults will yield a configuration similar to that of HunYuan-7B.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 290943):
+            Vocabulary size of the HunYuan model. Defines the number of different tokens that can be represented
+            by the `inputs_ids` passed when calling [`HunYuanModel`].
+        hidden_size (`int`, *optional*, defaults to 4096):
+            Dimension of the hidden representations.
+        intermediate_size (`int`, *optional*, defaults to 11008):
+            Dimension of the MLP representations.
+        num_hidden_layers (`int`, *optional*, defaults to 32):
+            Number of hidden layers in the Transformer decoder.
+        num_attention_heads (`int`, *optional*, defaults to 32):
+            Number of attention heads for each attention layer in the Transformer decoder.
+        num_key_value_heads (`int`, *optional*):
+            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
+            constructed by meanpooling all the original heads within that group. For more details check out [this
+            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
+            `num_attention_heads`.
+        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+            The non-linear activation function (function or string) in the decoder.
+        max_position_embeddings (`int`, *optional*, defaults to 2048):
+            The maximum sequence length that this model might ever be used with.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
+            The epsilon used by the rms normalization layers.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models). Only
+            relevant if `config.is_decoder=True`.
+        pad_token_id (`int`, *optional*):
+            Padding token id.
+        bos_token_id (`int`, *optional*, defaults to 1):
+            Beginning of stream token id.
+        eos_token_id (`int`, *optional*, defaults to 2):
+            End of stream token id.
+        pretraining_tp (`int`, *optional*, defaults to 1):
+            Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
+            document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This
+            value is necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
+            issue](https://github.com/pytorch/pytorch/issues/76232).
+        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+            Whether to tie weight embeddings.
+        rope_theta (`float`, *optional*, defaults to 10000.0):
+            The base period of the RoPE embeddings.
+        rope_scaling (`Dict`, *optional*):
+            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two
+            scaling strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The
+            expected format is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't
+            update `max_position_embeddings` to the expected new maximum. See the following thread for more
+            information on how these scaling strategies behave:
+            https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This
+            is an experimental feature, subject to breaking API changes in future versions.
+        attention_bias (`bool`, *optional*, defaults to `False`):
+            Whether to use a bias in the query, key, value and output projection layers during self-attention.
+        attention_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for the attention probabilities.
+        use_qk_norm (`bool`, *optional*, defaults to `False`):
+            Whether to apply normalization to the query and key states in attention.
+        use_cla (`bool`, *optional*, defaults to `False`):
+            Whether to use Cross-Layer Attention (CLA) in attention.
+        cla_share_factor (`int`, *optional*, defaults to 1):
+            The share factor of CLA, i.e. how many adjacent layers share one set of key/value states.
+    """
+
+    model_type = "hunyuan"
+    keys_to_ignore_at_inference = ["past_key_values"]
+
+    def __init__(
+        self,
+        vocab_size=290943,
+        hidden_size=4096,
+        intermediate_size=11008,
+        num_hidden_layers=32,
+        num_attention_heads=32,
+        num_key_value_heads=None,
+        hidden_act="silu",
+        max_position_embeddings=2048,
+        initializer_range=0.02,
+        rms_norm_eps=1e-5,
+        use_cache=True,
+        pad_token_id=0,
+        bos_token_id=1,
+        eos_token_id=2,
+        pretraining_tp=1,
+        tie_word_embeddings=False,
+        rope_theta=10000.0,
+        rope_scaling=None,
+        attention_bias=False,
+        attention_dropout=0.0,
+        use_qk_norm=False,
+        use_cla=False,
+        cla_share_factor=1,
+        num_experts=1,
+        use_mixed_mlp_moe=False,
+        num_shared_expert=1,
+        moe_topk=1,
+        capacity_factor=1.0,
+        moe_drop_tokens=False,
+        moe_random_routing_dropped_token=False,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.max_position_embeddings = max_position_embeddings
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.num_experts = num_experts
+        self.use_mixed_mlp_moe = use_mixed_mlp_moe
+        self.num_shared_expert = num_shared_expert
+        self.moe_topk = moe_topk
+        self.capacity_factor = capacity_factor
+        self.moe_drop_tokens = moe_drop_tokens
+        self.moe_random_routing_dropped_token = moe_random_routing_dropped_token
+
+        # for backward compatibility
+        if num_key_value_heads is None:
+            num_key_value_heads = num_attention_heads
+
+        self.num_key_value_heads = num_key_value_heads
+        self.hidden_act = hidden_act
+        self.initializer_range = initializer_range
+        self.rms_norm_eps = rms_norm_eps
+        self.pretraining_tp = pretraining_tp
+        self.use_cache = use_cache
+        self.rope_theta = rope_theta
+        self.rope_scaling = rope_scaling
+        # self._rope_scaling_validation()  # TODO: Need validation?
+        self.attention_bias = attention_bias
+        self.attention_dropout = attention_dropout
+        self.use_qk_norm = use_qk_norm
+        self.use_cla = use_cla
+        self.cla_share_factor = cla_share_factor
+
+        super().__init__(
+            pad_token_id=pad_token_id,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs,
+        )
+
+    def _rope_scaling_validation(self):
+        """
+        Validate the `rope_scaling` configuration.
+        """
+        if self.rope_scaling is None:
+            return
+
+        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+            raise ValueError(
+                "`rope_scaling` must be a dictionary with two fields, `type` and `factor` or `type` and `alpha`, "
+                f"got {self.rope_scaling}"
+            )
+        rope_scaling_type = self.rope_scaling.get("type", None)
+        rope_scaling_factor = self.rope_scaling.get("factor", None)
+        rope_scaling_alpha = self.rope_scaling.get("alpha", None)
+        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
+            raise ValueError(
+                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+            )
+        if rope_scaling_factor is None and rope_scaling_alpha is None:
+            raise ValueError("`rope_scaling` must have either a `factor` or an `alpha` field, got neither")
+        if rope_scaling_factor is not None:
+            if not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
+                raise ValueError(f"`rope_scaling`'s factor field must be a float > 1.0, got {rope_scaling_factor}")
+        if rope_scaling_alpha is not None:
+            if not isinstance(rope_scaling_alpha, float) or rope_scaling_alpha <= 1.0:
+                raise ValueError(f"`rope_scaling`'s alpha field must be a float > 1.0, got {rope_scaling_alpha}")
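
For illustration, a minimal sketch of instantiating this class directly with the values committed in config.json above (assuming only that configuration_hunyuan.py from this commit is on the import path):

    from configuration_hunyuan import HunYuanConfig

    # Key values from config.json above; remaining arguments keep their defaults.
    config = HunYuanConfig(
        hidden_size=6400,
        intermediate_size=18304,
        num_hidden_layers=64,
        num_attention_heads=80,
        num_key_value_heads=8,
        num_experts=16,
        use_mixed_mlp_moe=True,
        num_shared_expert=1,
        moe_topk=1,
        use_cla=True,
        cla_share_factor=2,
        use_qk_norm=True,
        rope_scaling={"type": "dynamic", "alpha": 100000.0},
        vocab_size=128512,
    )

    # With exactly two fields ("type" plus an "alpha" > 1.0) the validator passes.
    config._rope_scaling_validation()

Note that config.json above actually ships three rope_scaling fields ("alpha", "factor", "type"), which this validator would reject (it requires exactly two), and a "factor" of 1.0, which fails the > 1.0 check; that is consistent with the validation call being left commented out in __init__.
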
generation_config.json ADDED
@@ -0,0 +1,10 @@
+{
+  "do_sample": true,
+  "eos_token_id": 127957,
+  "pad_token_id": 127961,
+  "repetition_penalty": 1.05,
+  "temperature": 0.7,
+  "top_k": 20,
+  "top_p": 0.6,
+  "transformers_version": "4.46.2"
+}
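
These sampling defaults are picked up automatically by model.generate() once the repo is loaded; the file can also be inspected on its own. A minimal sketch, with the same placeholder local path as above:

    from transformers import GenerationConfig

    gen = GenerationConfig.from_pretrained("./Hunyuan-A52B")
    print(gen.do_sample, gen.temperature, gen.top_k, gen.top_p)  # True 0.7 20 0.6

Note that eos_token_id here (127957) differs from the one in config.json (2); at generation time, the generation config's value takes precedence.
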
hy.tiktoken ADDED
The diff for this file is too large to render; see the raw diff.
 
model-00001-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d02636c2a0611eeb6f282f5efa1acdb1486ffee1022de6b5fad192346f0b5917
+size 4871375680
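
Each shard diff here is a three-line Git LFS pointer rather than the weights themselves: the actual ~4.9 GB shard is stored in LFS, addressed by the SHA-256 in `oid` and the byte count in `size`. A minimal sketch of verifying a downloaded shard against its pointer (the file name and hash are from the diff above; the hashing loop is generic):

    import hashlib

    def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
        # Stream in 1 MiB chunks so a ~5 GB shard never has to fit in memory.
        h = hashlib.sha256()
        with open(path, "rb") as f:
            while chunk := f.read(chunk_size):
                h.update(chunk)
        return h.hexdigest()

    expected = "d02636c2a0611eeb6f282f5efa1acdb1486ffee1022de6b5fad192346f0b5917"
    assert sha256_of("model-00001-of-00159.safetensors") == expected
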
model-00002-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10b5f27568e7dbe15d29a80ce34f8b01714faa86dd8d06fa6d2cc95edd353e50
+size 4920117840
model-00003-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:48e79be63c81f8180365ab11cc8772dada06ad8ed4ce17a24007e48651f40d62
+size 4850102848
model-00004-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4e8bf7e16d2ef0173cddfcb07e3e71569b9283881ada06d6e664060573ba0d0f
+size 4920117840
model-00005-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6b36d2dae790b037ca622cb21432490f3c0e512bcd202e86d58de0b7d2d18ed2
+size 4920117848
model-00006-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6519488ed8f45cf928a768df6a8f8867363e4139ca523511e6c1de018e071994
+size 4866487064
model-00008-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1678d8ff2d75ae34f6b41a2dada3f72b63ea4e5e3643c00670706c47eebbce70
+size 4850102848
model-00009-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1d55ac6de17622290bd5445f7b2f0a7a3eeb24b1d570d9603686f9ab771dfa1d
+size 4920117840
model-00010-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:126fddeb816435b51f234f8ca861a19cf1b05d1e6122b56e38da255bed5fcabc
+size 4920117848
model-00011-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19e253f7d1ec38bb5d33a03f70acd6dc31669714bfa6364fcc293a2da54d2143
+size 4866487064
model-00013-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f0cf37d56746270caec157f4c3febb94d3ec726c6296faea42d99aa4debc4b2a
+size 4850102848
model-00014-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afefb05f3a84eac0d82a37f6ac47273b458177954fac4d5ef98a939f4232f28e
+size 4920117840
model-00015-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27842489e53a3e0723c411f71a93411167ff129ac77dbd266925a71ad953ec92
+size 4920117848
model-00016-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7a2360e13de1bb1b9980039da104eab43c68e72a3e708c195523b693f9bf937
+size 4866487064
model-00017-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb5aff00577bbe01ae95d30e6ff9c275856be367a2525c1bb1f39a0cf6f1f592
+size 4920117840
model-00018-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:edad410289d28d8a6912db4a52bb79848b0902b6f3aa9598407b698d4b83540b
+size 4850102840
model-00019-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:64c3985c21672f43ad87159f2f75774c75477950397b80a3f73b491bba1e656b
+size 4920117840
model-00020-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d07714f9490cd72c75a71d30ad558746f5e4ee36df39327884f177aa314d8299
+size 4920117856
model-00021-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:89be2376178aea70bc336b88cb53a70cdb9eb9f8732466f6c4f4142eb870f43f
+size 4866487064
model-00022-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:85e23b2631f02e8bc0be0c7eb536b7f6e2d17cfc0e1cb75188218adaf063ec7a
+size 4920117840
model-00024-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a08ee2553b01097b89624d86f25dbf4d888c424cff6754e56661a6d44d7c9e97
+size 4920117840
model-00025-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f81fe9fa4f747d72247a9b6f569ef61add1d6d85055313193f7e0ecb1e641411
+size 4920117856
model-00026-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2b9c098d4c01699cd966fdac5c76bbc3d82a07055458639e5331b1faddee7121
+size 4866487096
model-00027-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:30513fbaf6767f6cb2d1b555f65d49ad45371a6afd9ebcc8db06a925b59db5c9
+size 4920117864
model-00028-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bb311dbf2b233de5a86ae4c0d53aa584f7b074ef5d954d44231746d57050b62d
+size 4850102872
model-00029-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e70528b1bf9188c0485a1b57db4e2ca0dc109a4dc9bd3811fee745615403fb9
+size 4920117856
model-00030-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc8e5db8d423d677cdf99aeeac15d4b9d47aedab26b45144d3039b66d4414699
+size 4920117872
model-00031-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca1ab3d7e8e86f681290cb270f9139bc1c6fed295364bd522458bdcd5963f32a
+size 4866487088
model-00032-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:496d47eaf12c9643b0da6293a4df694b7422840a9f55b939f27e8d49ed7e2b4b
+size 4920117864
model-00033-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6aba9dbdf5fe9ae55416d29807907f802d9276bda9359e972130ec60f193e1af
+size 4850102864
model-00034-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c343efd544b3f9ebc1e220f3eb5f29af0f58ae592f48ef20e02bfad44873911
+size 4920117856
model-00035-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f1b53bec63864c2faa7b99a3947d1710dffe9cd97cece7806bbfea2d802c2668
+size 4920143696
model-00036-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a2d8f143056b83132f171ec4e02f556cf8d6f1235beba66fb8becdfbf677af0f
+size 4866461264
model-00037-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b0f289e0de9e790212f3e8efcded434655a8955b6eb7ddfd8b6bc6690689ed9
+size 4920117864
model-00038-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:83aee07c96abcfc0f26aa76f853fad283b6f640788a742897836b0b38d7bab05
+size 4850102872
model-00039-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:73f6a7b0b57b91ed16ce89938a0c038f7189fecf12bbbb25cbb8daeeaa8df018
+size 4920117856
model-00040-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:284088f9f03f4fab5299e9ea3ffa479ac9014ca974f163527dace0fdda5bbeec
+size 4866077400
model-00041-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab19efe2653a69b8758a33bff1cdab58b3de9ecadbc83f7280c3dab788d80f91
+size 4920527560
model-00042-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fdad0e9a96981b164110cf52e1cc8ec4ad3af7af1028804e3d8eb902e561b269
+size 4920117864
model-00045-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:85790bfa1ded2537ea24204d9eed0bfb541f65547c867ce5d96ac7da1361588e
+size 4866077408
model-00046-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af71d7dd615efa8c7f84864c68e47448b679c6c1f6ca8831487b2a5ebb66f558
+size 4920527560
model-00047-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:20d01936d0a198755cfb75674cc19c232853fb0a52454558ab78e0100000bac5
+size 4920117872
model-00048-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:125adec47d4a073c03a06b393d5ccd7cbf7aa0ec9399392e4c5d715eeffaddbb
+size 4850102864
model-00049-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee8c3b7bb81fc4a28fa8108ccc19dfb73a884e184ae35cee5113a625fa239245
+size 4920117856
model-00051-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:32efc58f4bee4ed54d03f32649bbdb7fa0f228c312e7b8cb5d2b670ba48fa4d5
+size 4920527560
model-00052-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09bee42b7e41e8ca8b0802fbb4f3a404f7c8630b2f6708b3044b89905daa00b4
+size 4920117872