diff --git a/README.md b/README.md
index 30ed713e264505fbce49bcee131beb2f01543cb9..c80bcf46d1a800e7137d0504e804adcb89e8bf2b 100644
--- a/README.md
+++ b/README.md
@@ -164,13 +164,12 @@ tele_ai@chinatelecom.cn,提交《TeleChat模型社区许可协议》要求的
 如需引用我们的工作,请使用如下 reference:
 ```
-@misc{wang2025technicalreporttelechat2telechat25,
- title={Technical Report of TeleChat2, TeleChat2.5 and T1},
- author={Zihan Wang and Xinzhang Liu and Yitong Yao and Chao Wang and Yu Zhao and Zhihao Yang and Wenmin Deng and Kaipeng Jia and Jiaxin Peng and Yuyao Huang and Sishi Xiong and Zhuo Jiang and Kaidong Yu and Xiaohui Hu and Fubei Yao and Ruiyu Fang and Zhuoru Jiang and Ruiting Song and Qiyi Xie and Rui Xue and Xuewei He and Yanlei Xue and Zhu Yuan and Zhaoxi Zhang and Zilu Huang and Shiquan Wang and Xin Wang and Hanming Wu and Mingyuan Wang and Xufeng Zhan and Yuhan Sun and Zhaohu Xing and Yuhao Jiang and Bingkai Yang and Shuangyong Song and Yongxiang Li and Zhongjiang He and Xuelong Li},
- year={2025},
- eprint={2507.18013},
+@misc{wang2024telechat,
+ title={TeleChat Technical Report},
+ author={Zihan Wang and Xinzhang Liu and Shixuan Liu and Yitong Yao and Yuyao Huang and Zhongjiang He and Xuelong Li and Yongxiang Li and Zhonghao Che and Zhaoxi Zhang and Yan Wang and Xin Wang and Luwen Pu and Huihan Xu and Ruiyu Fang and Yu Zhao and Jie Zhang and Xiaomeng Huang and Zhilong Lu and Jiaxin Peng and Wenjun Zheng and Shiquan Wang and Bingkai Yang and Xuewei he and Zhuoru Jiang and Qiyi Xie and Yanhan Zhang and Zhongqiu Li and Lingling Shi and Weiwei Fu and Yin Zhang and Zilu Huang and Sishi Xiong and Yuxiang Zhang and Chao Wang and Shuangyong Song},
+ year={2024},
+ eprint={2401.03804},
 archivePrefix={arXiv},
- primaryClass={cs.CL},
- url={https://arxiv.org/abs/2507.18013},
+ primaryClass={cs.CL}
 }
 ```
\ No newline at end of file
diff --git a/config.json b/config.json
deleted file mode 100644
index b4a8f0d278960843c5b09435d38aefeaceb8bdbb..0000000000000000000000000000000000000000
--- a/config.json
+++ /dev/null
@@ -1,40 +0,0 @@
-{
- "apply_residual_connection_post_layernorm": false,
- "architectures": [
- "TelechatForCausalLM"
- ],
- "auto_map": {
- "AutoConfig": "configuration_telechat.TelechatConfig",
- "AutoModelForCausalLM": "modeling_telechat.TelechatForCausalLM"
- },
- "attention_dropout": 0.0,
- "attention_softmax_in_fp32": true,
- "bias_dropout_fusion": true,
- "bos_token_id": 1,
- "eos_token_id": 2,
- "hidden_dropout": 0.0,
- "hidden_size": 8192,
- "initializer_range": 0.02,
- "layer_norm_epsilon": 1e-05,
- "masked_softmax_fusion": true,
- "model_type": "telechat",
- "n_head": 64,
- "n_inner": null,
- "num_key_value_heads": 8,
- "n_layer": 96,
- "pad_token_id": 3,
- "pretraining_tp": 2,
- "skip_bias_add": false,
- "skip_bias_add_qkv": false,
- "slow_but_exact": false,
- "unk_token_id": 0,
- "use_cache": true,
- "vocab_size": 131072,
- "ffn_hidden_size": 40960,
- "flash_attn":true,
- "tie_word_embeddings":false,
- "training_seqlen":8192,
- "base_seqlen":8192,
- "seq_length": 8192
-}
-
diff --git a/configuration.json b/configuration.json
deleted file mode 100644
index 9fbcf957f0338085ab0d7245f17f9fd1a1b64a65..0000000000000000000000000000000000000000
--- a/configuration.json
+++ /dev/null
@@ -1 +0,0 @@
-{"task":"text-generation"}
\ No newline at end of file
diff --git a/configuration_telechat.py b/configuration_telechat.py
deleted file mode 100644
index 6c6169db242f100ed18215302d25dc375e7e5033..0000000000000000000000000000000000000000
--- a/configuration_telechat.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# coding=utf-8
-# Copyright 2022 the Big Science Workshop and HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-""" Telechat configuration"""
-
-from packaging import version
-from collections import OrderedDict
-from transformers.utils import is_torch_available, logging
-from transformers.configuration_utils import PretrainedConfig
-from typing import TYPE_CHECKING, Any, List, Mapping, Optional
-
-logger = logging.get_logger(__name__)
-
-class TelechatConfig(PretrainedConfig):
-    """
-    Args:
-        vocab_size (`int`, *optional*, defaults to 160256): Vocabulary size of the Telechat model.
-        hidden_size (`int`, *optional*, defaults to 4096): Dimensionality of the embeddings and hidden states.
-        ffn_hidden_size (`int`, *optional*, defaults to 12288): Dimensionality of the feed-forward hidden states.
-        n_layer (`int`, *optional*, defaults to 30): Number of hidden layers in the Transformer
-        n_head (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer.
-        layer_norm_epsilon (`float`, *optional*, defaults to 1e-5): The epsilon to use in the layer normalization layers.
-        initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
-        apply_residual_connection_post_layernorm (`bool`, *optional*, defaults to `False`): If enabled, use the layer norm of the hidden states as the residual in the transformer blocks
-        hidden_dropout (`float`, *optional*, defaults to 0.0): Dropout rate of the dropout function on the bias dropout.
-        attention_dropout (`float`, *optional*, defaults to 0.0): Dropout rate applied to the attention probs
-        use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions.
-        training_seqlen (`int`, *optional*, defaults to 8192): Sequence length during last finetuning.
-        logn (`bool`, *optional*, defaults to `True`): Whether or not to use logN during extrapolation.
-        embed_layernorm (`bool`, *optional*, defaults to `True`): Whether or not to use embedding layernorm.
-
-    """
-
-    model_type = "telechat"
-    keys_to_ignore_at_inference = ["past_key_values"]
-    attribute_map = {
-        "num_hidden_layers": "n_layer",
-        "num_attention_heads": "n_head",
-    }
-
-    def __init__(
-        self,
-        vocab_size=160256,
-        hidden_size=4096,
-        n_layer=30,
-        n_head=32,
-        layer_norm_epsilon=1e-5,
-        initializer_range=0.02,
-        use_cache=True,
-        bos_token_id=1,
-        eos_token_id=2,
-        apply_residual_connection_post_layernorm=False,
-        hidden_dropout=0.0,
-        attention_dropout=0.0,
-        ffn_hidden_size=12288,
-        training_seqlen = 8192,
-        logn = True,
-        embed_layernorm = False,
-        **kwargs,
-    ):
-        self.vocab_size = vocab_size
-        n_embed = kwargs.pop("n_embed", None)
-        self.hidden_size = hidden_size if n_embed is None else n_embed
-        self.n_layer = n_layer
-        self.n_head = n_head
-        self.layer_norm_epsilon = layer_norm_epsilon
-        self.initializer_range = initializer_range
-        self.use_cache = use_cache
-        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
-        self.hidden_dropout = hidden_dropout
-        self.attention_dropout = attention_dropout
-        self.bos_token_id = bos_token_id
-        self.eos_token_id = eos_token_id
-        self.logn = logn
-        self.ffn_hidden_size = ffn_hidden_size
-        self.training_seqlen = training_seqlen
-        self.embed_layernorm = embed_layernorm
-        self.num_key_value_heads= kwargs.pop("num_key_value_heads", None)
-
-
-        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
-
diff --git a/generation_config.json b/generation_config.json
deleted file mode 100644
index 67fd08a69f94fd250bd5c50c8905f64e9297ecb8..0000000000000000000000000000000000000000
--- a/generation_config.json
+++ /dev/null
@@ -1,14 +0,0 @@
-{
- "max_length": 8192,
- "do_sample": false,
- "use_cache": true,
- "temperature": 0.3,
- "top_k": 5,
- "top_p": 0.85,
- "repetition_penalty": 1.03,
- "pad_token_id": 3,
- "bos_token_id": 1,
- "eos_token_id": 2,
- "user_token_id": 4,
- "bot_token_id": 5
-}
diff --git a/generation_utils.py b/generation_utils.py
deleted file mode 100644
index 82410f2eeb3e8ef64f995d7786f2da4419c0f0e7..0000000000000000000000000000000000000000
--- a/generation_utils.py
+++ /dev/null
@@ -1,162 +0,0 @@
-from typing import Optional
-from collections import deque
-from queue import Queue
-import copy
-
-
-class History:
-
-    def __init__(self, tokenizer, history):
-        '''
-        init from a list of dict
-        '''
-        # use deque to meet some special situation
-        self.input_history = deque()
-        self.tokenizer = tokenizer
-        if history:
-            self._transfer_from_list(history)
-
-    def _transfer_from_list(self, history):
-        for message in history:
-            content = message.get("content")
-            # the token result may not be equal to the result model gen
-            message.update(self.tokenizer(content))
-            self.input_history.append(message)
-
-    def append(self, message):
-        content = message.get("content")
-        if "input_ids" not in message or "attention_mask" not in message:
-            message.update(self.tokenizer(content))
-        self.input_history.append(message)
-
-    def append_left(self, message):
-        content = message.get("content")
-        if "input_ids" not in message or "attention_mask" not in message:
-            message.update(self.tokenizer(content))
-        self.input_history.appendleft(message)
-
-    def pop(self):
-        x = self.input_history.pop()
-        return x
-
-    def pop_left(self):
-        x = self.input_history.pop_left()
-        return x
-
-    def update(self, message):
-        self.input_history.pop()
-        self.append(message)
-
-    def __len__(self):
-        return self.input_history.__len__()
-
-    def __str__(self):
-        return self.input_history.__str__()
-
-    def __copy__(self):
-        new_instance = type(self)(self.tokenizer, [])
-        new_instance.input_history = copy.copy(self.input_history)
-        return new_instance
-
-    def __deepcopy__(self, memodict={}):
-        new_instance = type(self)(self.tokenizer, [])
-        new_instance.input_history = copy.deepcopy(self.input_history)
-        return new_instance
-
-
-class TelechatIterTextStreamer:
-    """
-    With reference to the TextIterStreamers in transformers, we have rewritten this class
-    """
-
-    def __init__(
-        self, tokenizer, history: History = None, skip_prompt: bool = False, timeout: Optional[float] = None,
-        **decode_kwargs
-    ):
-
-        self.tokenizer = tokenizer
-        self.history = history
-        self.skip_prompt = skip_prompt
-        self.timeout = timeout
-        self.decode_kwargs = decode_kwargs
-
-        self.text_queue = Queue()
-        self.cache_time = 0
-        self.text_until = ""
-        self.token_until = []
-        self.stop_signal = None
-        self.next_tokens_are_prompt = True
-
-        self.history.append({"role": "bot", "content": self.text_until})
-
-    def put(self, value):
-        """
-        put printable text into queue
-        """
-        if len(value.shape) > 1 and value.shape[0] > 1:
-            raise ValueError("TextStreamer only supports batch size 1")
-        elif len(value.shape) > 1:
-            value = value[0]
-
-        if self.skip_prompt and self.next_tokens_are_prompt:
-            self.next_tokens_are_prompt = False
-            return
-
-        if value[-1] == self.tokenizer.eos_token_id:
-            return
-
-        # there may be some smart way to decode.
-        self.token_until.extend(value.tolist())
-        text = self.tokenizer.decode(self.token_until, **self.decode_kwargs)
-
-
-        if self._is_printable(text) or self.cache_time >= 6:
-            output_text = text[len(self.text_until):]
-            self.text_until = text
-
-        else:
-            self.cache_time+=1
-            return
-
-        self.on_finalized_text(output_text)
-
-    def end(self):
-        """Flushes any remaining cache and prints a newline to stdout."""
-        # Flush the cache, if it exists
-        text = self.tokenizer.decode(self.token_until, **self.decode_kwargs)
-        output_text = text[len(self.text_until):]
-        self.text_until = text
-        self.on_finalized_text(output_text, stream_end=True)
-        self.clear_cache()
-
-    def clear_cache(self):
-        self.cache_time = 0
-        self.token_until = []
-        self.text_until = ""
-        self.history = None
-        self.next_tokens_are_prompt = True
-
-    def on_finalized_text(self, text: str, stream_end: bool = False):
-        """Put the text tuple in the queue."""
-        self.history.update({"role": "bot", "content": self.text_until, "input_ids": self.token_until,
-                             "attention_mask": [1] * len(self.token_until)})
-        self.text_queue.put((text, self.history), timeout=self.timeout)
-        if stream_end:
-            self.text_queue.put((self.stop_signal, self.history), timeout=self.timeout)
-
-    @staticmethod
-    def _is_printable(cp):
-        """Checks whether tokens can be decoded or not"""
-        if "�" in cp:
-            return False
-        return True
-
-    def __iter__(self):
-        return self
-
-    def __next__(self):
-        value_now, history_until = self.text_queue.get(timeout=self.timeout)
-        if value_now == self.stop_signal:
-            raise StopIteration()
-        else:
-            return value_now, history_until
\ No newline at end of file
diff --git a/model-00001-of-00050.safetensors b/model-00001-of-00050.safetensors
deleted file mode 100644
index 426d53ab14b3b78d70fd6986e3708f0b9b51388d..0000000000000000000000000000000000000000
--- a/model-00001-of-00050.safetensors
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:50a4549e91bf98dcb213f6455f92440454fdd012cd0369e0fdb2f019c83fe4c7
-size 8925611272
diff --git a/model-00002-of-00050.safetensors b/model-00002-of-00050.safetensors
deleted file mode 100644
index a7eda89723d05ca4657a914a798299392cf46f20..0000000000000000000000000000000000000000 --- a/model-00002-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:548f67873b1f538bf325590f30dbe267600356ccd1bef73c1f1cc7a2902c2aa7 -size 9261287712 diff --git a/model-00003-of-00050.safetensors b/model-00003-of-00050.safetensors deleted file mode 100644 index 9da07177d5e2d3d3bf9862fb05de0e0d78370ad3..0000000000000000000000000000000000000000 --- a/model-00003-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e855d3528914e522dcad5348c2229dc2e88fa55e87416f7d5106437d3679a8d3 -size 9261287712 diff --git a/model-00004-of-00050.safetensors b/model-00004-of-00050.safetensors deleted file mode 100644 index 0464ad79419900ee9bcf6af5db3290d2a546feba..0000000000000000000000000000000000000000 --- a/model-00004-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:509b7084b8dcb308c7c7d22fa988e805b6f6947e356526f60cccdd40ceb2a11d -size 9261287712 diff --git a/model-00005-of-00050.safetensors b/model-00005-of-00050.safetensors deleted file mode 100644 index 8b7c32a147d32e433942d3f2a714b791b44459ea..0000000000000000000000000000000000000000 --- a/model-00005-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:bb044a2e88e6e7a0817459f7f409eeb5abd00836346f69f8ebfd9d0c47028ebf -size 9261287712 diff --git a/model-00006-of-00050.safetensors b/model-00006-of-00050.safetensors deleted file mode 100644 index 011d2601c9ed9767d5723c0213147e7fa9499a08..0000000000000000000000000000000000000000 --- a/model-00006-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a3e410d45493b53ba09cdb8d368ecad07df8c1db96a58d928cd7045c8ed7a48f -size 9261287728 diff --git a/model-00007-of-00050.safetensors b/model-00007-of-00050.safetensors deleted file mode 100644 index 5a5b58baa52f37d9c9be321568bc79faeacce13a..0000000000000000000000000000000000000000 --- a/model-00007-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b9cdcab0486ef69e5eedbb99b08f0b55c918a83f227194cb8b8937f010c950a7 -size 9261287736 diff --git a/model-00008-of-00050.safetensors b/model-00008-of-00050.safetensors deleted file mode 100644 index e4982fc575729421b355442381ea2880be3bc04d..0000000000000000000000000000000000000000 --- a/model-00008-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f2a7cf122475960568ea086f9b98b2283dbf9344dda2720726a52bff7f31edbc -size 9261287736 diff --git a/model-00009-of-00050.safetensors b/model-00009-of-00050.safetensors deleted file mode 100644 index e9517e6ccc3ca3e12848e779f565fc39a4f920be..0000000000000000000000000000000000000000 --- a/model-00009-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:cc5d19f0add344ac71fb3855ca8f04e588fd4d5263e05709ec4dd668ebcf8ee4 -size 9261287736 diff --git a/model-00010-of-00050.safetensors b/model-00010-of-00050.safetensors deleted file mode 100644 index f04889e5eb226789f9e74c0df9161d4251a1b6b2..0000000000000000000000000000000000000000 --- a/model-00010-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5d9763dc2004ea25ca751d271f92a1cce636f92c6991077ea391788428cfc6c1 -size 9261287736 diff --git 
a/model-00011-of-00050.safetensors b/model-00011-of-00050.safetensors deleted file mode 100644 index 1e1e2df0ad9ffaec520448de27d3ad7376eb5044..0000000000000000000000000000000000000000 --- a/model-00011-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ca7e26c5ed4c0fedec09a4cceef1ac7e03d037c921c18d1df0b782ca4fe7b71b -size 9261287736 diff --git a/model-00012-of-00050.safetensors b/model-00012-of-00050.safetensors deleted file mode 100644 index d51d04ce2d8564fada56337ec00df57e6953487d..0000000000000000000000000000000000000000 --- a/model-00012-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e0390b0e55acc47cebac2bc81e57e1e7d3f02f72a0cad2b01b5165caf976ab0b -size 9261287736 diff --git a/model-00013-of-00050.safetensors b/model-00013-of-00050.safetensors deleted file mode 100644 index 3ae3f60416daf48143e336694ea18a9fed769c22..0000000000000000000000000000000000000000 --- a/model-00013-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:bbb3ccd22fc048c59e67dab8df54264acf53d7d62fc98fb49ffeafe7ea181739 -size 9261287736 diff --git a/model-00014-of-00050.safetensors b/model-00014-of-00050.safetensors deleted file mode 100644 index 9892baad14cfb948ec60e2f3b547ad273de6dac5..0000000000000000000000000000000000000000 --- a/model-00014-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1e2c53dae773a55d42d3265d0d85ae02c8f6f70543a90d923823cab68d44d5ce -size 9261287736 diff --git a/model-00015-of-00050.safetensors b/model-00015-of-00050.safetensors deleted file mode 100644 index 28f47ab7f8e76554e4295337e8b372e7be705908..0000000000000000000000000000000000000000 --- a/model-00015-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b813a2adab5ff776a06986e60d22e917c596122c417a7d6ae1930b8ed5d61a4f -size 9261287736 diff --git a/model-00016-of-00050.safetensors b/model-00016-of-00050.safetensors deleted file mode 100644 index 1533631fbb8cfb1c1bf074a2af340150acc69a97..0000000000000000000000000000000000000000 --- a/model-00016-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:33976aac9f40e0ff430435fe0295ba1f4443dc4b78233c077de95453eff51d01 -size 9261287736 diff --git a/model-00017-of-00050.safetensors b/model-00017-of-00050.safetensors deleted file mode 100644 index 4d003d91f97e4c6b1703c4b015ca9c8665e159d8..0000000000000000000000000000000000000000 --- a/model-00017-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:096850a1c187e804c370956830ef94d20731f79ee1078fa0613604c93015d305 -size 9261287736 diff --git a/model-00018-of-00050.safetensors b/model-00018-of-00050.safetensors deleted file mode 100644 index 87b81a764935743e9e0fb9fb2d34a543af72ef87..0000000000000000000000000000000000000000 --- a/model-00018-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b7ed47b73f1171ee30de651924ab58aaa7088037b9f96203be23b4b2514dfb27 -size 9261287736 diff --git a/model-00019-of-00050.safetensors b/model-00019-of-00050.safetensors deleted file mode 100644 index d4db36b8ff81c6314f40c883e0059f70864c1065..0000000000000000000000000000000000000000 --- a/model-00019-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:fe1e87f96e41804e06c966605e6b00e0a9c6b51c504ee5e22dfdb28e90d1308f -size 9261287736 diff --git a/model-00020-of-00050.safetensors b/model-00020-of-00050.safetensors deleted file mode 100644 index 18711a652f15f28872d05e599c10bc6e53fe0b6b..0000000000000000000000000000000000000000 --- a/model-00020-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:93bc200e651f67d4910170de85235af15b3228b23192afb5c32d5126daaaf996 -size 9261287736 diff --git a/model-00021-of-00050.safetensors b/model-00021-of-00050.safetensors deleted file mode 100644 index 5a1112a2baba2f36f507b60ede9c8a05ddd5fd37..0000000000000000000000000000000000000000 --- a/model-00021-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:645df22333bb21b8cef574bdadcaecaf0efcf43d3e747a0b0c0d462740d0b7c4 -size 9261287736 diff --git a/model-00022-of-00050.safetensors b/model-00022-of-00050.safetensors deleted file mode 100644 index 5ff162d27c9d68bbbe2399757e925e52907a3582..0000000000000000000000000000000000000000 --- a/model-00022-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:3a288d0dafad8ca11e21c869a31d62057f6bc13036934e306cb68cd7fdd5861f -size 9261287736 diff --git a/model-00023-of-00050.safetensors b/model-00023-of-00050.safetensors deleted file mode 100644 index 0c367f7dbe88ec4f307e559496cca9d517825059..0000000000000000000000000000000000000000 --- a/model-00023-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:94e110e13a08711bfd02cbb3d20e32baecd675f2593a8ae66ba1fcf039e34073 -size 9261287736 diff --git a/model-00024-of-00050.safetensors b/model-00024-of-00050.safetensors deleted file mode 100644 index 72909e4735f5e9b59848cec7d4bfa2920275e8c1..0000000000000000000000000000000000000000 --- a/model-00024-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:56894143f2c2a3ff2af49e7f3ec2af9929ff8d95fe7e8e63b709280ba4babe52 -size 9261287736 diff --git a/model-00025-of-00050.safetensors b/model-00025-of-00050.safetensors deleted file mode 100644 index 75fb1501f0c277e5b899d9d90a32c5f23a98c881..0000000000000000000000000000000000000000 --- a/model-00025-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:72d4aee125d0a41ceb2afc164c78d25b9edc89cf845da5f91e9c60b743b2f3a7 -size 9261287736 diff --git a/model-00026-of-00050.safetensors b/model-00026-of-00050.safetensors deleted file mode 100644 index 51d9835113b524c45ee2fafa75800b4f8627551f..0000000000000000000000000000000000000000 --- a/model-00026-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:72fdd61ca5c065b386092a0701f6fe8ed7ae8d99b0eaba87e50e60acff058876 -size 9261287736 diff --git a/model-00027-of-00050.safetensors b/model-00027-of-00050.safetensors deleted file mode 100644 index 4392013dc21137991c5a8a5deab5000abadf7db4..0000000000000000000000000000000000000000 --- a/model-00027-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:764f995de7385ab9c14e7393ee6adc73b557c1a182efdcbbf143c3fe22f353c3 -size 9261287736 diff --git a/model-00028-of-00050.safetensors b/model-00028-of-00050.safetensors deleted file mode 100644 index 19149dc27dc0cc7bb6e261e9684389015aed22ac..0000000000000000000000000000000000000000 --- a/model-00028-of-00050.safetensors +++ /dev/null 
@@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ba9bd63f6e97a17e03b467629d9a4bafd581da1b9445b134423c75d5aa409c08 -size 9261287736 diff --git a/model-00029-of-00050.safetensors b/model-00029-of-00050.safetensors deleted file mode 100644 index 59260a818ad863f170b8ca7e7742f71b46a1fc52..0000000000000000000000000000000000000000 --- a/model-00029-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:21c3f8b212fef1678419b95693121290430930dd2930bd531aa7a3faa40fac41 -size 9261287736 diff --git a/model-00030-of-00050.safetensors b/model-00030-of-00050.safetensors deleted file mode 100644 index a2e8070dff10c5e27fa805247542da801756b9c9..0000000000000000000000000000000000000000 --- a/model-00030-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b861e871ac1d1ae14b6fcadaefe876c45d694f715eb91d68bacf114c2c426adf -size 9261287736 diff --git a/model-00031-of-00050.safetensors b/model-00031-of-00050.safetensors deleted file mode 100644 index 8e8ee1d2cd088bf29429505bc34949f8c1751be0..0000000000000000000000000000000000000000 --- a/model-00031-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:495ecf892bce401191ce699032b3d95b957d935987ee462df76de2b5928c1127 -size 9261287736 diff --git a/model-00032-of-00050.safetensors b/model-00032-of-00050.safetensors deleted file mode 100644 index f6c1ed3f2979c4d960f167af198cded2596d9020..0000000000000000000000000000000000000000 --- a/model-00032-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2613dc2c31320d4c300e260aac5a313dc183b8fdd140f6b3aea338667126ffae -size 9261287736 diff --git a/model-00033-of-00050.safetensors b/model-00033-of-00050.safetensors deleted file mode 100644 index db920738a5a9dcdb89c0b025e89363aa48f96ecf..0000000000000000000000000000000000000000 --- a/model-00033-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:67348068c452d63d6ea33f468f810157993b1431c823fc4fc68d1e4c7cf1b653 -size 9261287736 diff --git a/model-00034-of-00050.safetensors b/model-00034-of-00050.safetensors deleted file mode 100644 index f5dd0b2830c789a0af78e05c7b0bff71d3b963ed..0000000000000000000000000000000000000000 --- a/model-00034-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e28056f9391f029ddea9743445dbf763c265feb31a184ba73a7e8de91280d981 -size 9261287736 diff --git a/model-00035-of-00050.safetensors b/model-00035-of-00050.safetensors deleted file mode 100644 index 76839e63506bc6c1209ebc20111182ca8944174f..0000000000000000000000000000000000000000 --- a/model-00035-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7c966308e58af945da780ddf143e6f436f21a8019ddb1824200822ac8ecf72c8 -size 9261287736 diff --git a/model-00036-of-00050.safetensors b/model-00036-of-00050.safetensors deleted file mode 100644 index bc7027d7c1398ca59cf58c9724c8dc0a43536196..0000000000000000000000000000000000000000 --- a/model-00036-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:998076e5d150bea2630b24af7bf1da793a951f5c5b9ddfeb7b9400eb1188eea6 -size 9261287736 diff --git a/model-00037-of-00050.safetensors b/model-00037-of-00050.safetensors deleted file mode 100644 index 
5ee15196b1d3982741e117b6c8a57bebfca0fb6f..0000000000000000000000000000000000000000 --- a/model-00037-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1facd7663e7db14ec43876cc727f3d8e75b40bb025cb7a25fb7d2f9a60de68f9 -size 9261287736 diff --git a/model-00038-of-00050.safetensors b/model-00038-of-00050.safetensors deleted file mode 100644 index 3582e0a48dc68a40fd9b79163495df571f7031fa..0000000000000000000000000000000000000000 --- a/model-00038-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c06aafaf597e562ebce7672650df8ae3fcabf22977c4d47004f817ca4f8fe536 -size 9261287736 diff --git a/model-00039-of-00050.safetensors b/model-00039-of-00050.safetensors deleted file mode 100644 index 1d299b60693e3ee1cc1745162624fcef38d8981a..0000000000000000000000000000000000000000 --- a/model-00039-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d3bda7f14ffa81266bd9ec07e05e9c3bc16726f8fcd75281d4af4002508bfceb -size 9261287736 diff --git a/model-00040-of-00050.safetensors b/model-00040-of-00050.safetensors deleted file mode 100644 index 4aa7642b76fcea370ed65104c83e33d6ac03aedd..0000000000000000000000000000000000000000 --- a/model-00040-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:bf19d0fbf517593390ccc10b1c00c1410b54286ca5c651c1fe3ee8de7b02c4d1 -size 9261287736 diff --git a/model-00041-of-00050.safetensors b/model-00041-of-00050.safetensors deleted file mode 100644 index 701adb43db72cc034547664107939802c16c7e7d..0000000000000000000000000000000000000000 --- a/model-00041-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f98f5ea063046592be25906b7dc84f0096e67f104f1bcd847858ee778ba4e1eb -size 9261287736 diff --git a/model-00042-of-00050.safetensors b/model-00042-of-00050.safetensors deleted file mode 100644 index dc00db6eb6f59f07a92b232674153abaf18d4f29..0000000000000000000000000000000000000000 --- a/model-00042-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:033ba66aecb88ed950b1c49bd2c6f6d5e5173ed4882b22ff189f4caef542ec9f -size 9261287736 diff --git a/model-00043-of-00050.safetensors b/model-00043-of-00050.safetensors deleted file mode 100644 index 621342bbdd38b2a3c23bf85c703e3654994aac63..0000000000000000000000000000000000000000 --- a/model-00043-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5a46195067f9f791eee221c2195d30136f0f8eb723dd0c1674d68c6c2409fa68 -size 9261287736 diff --git a/model-00044-of-00050.safetensors b/model-00044-of-00050.safetensors deleted file mode 100644 index a7601a75167bc353c3914ef3f277d135995ab62e..0000000000000000000000000000000000000000 --- a/model-00044-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d5becd94d0a4e65b2b0496f21ca679ba4b171ffb32851eabd15af17004bda901 -size 9261287736 diff --git a/model-00045-of-00050.safetensors b/model-00045-of-00050.safetensors deleted file mode 100644 index 2e27e51423a18e1ce3a30dee2b8fe446255e1056..0000000000000000000000000000000000000000 --- a/model-00045-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:460bd2d228da729c1ef006c65582a8f8d709c748a731f56ff6129ec8beb7cd57 -size 9261287736 diff --git a/model-00046-of-00050.safetensors 
b/model-00046-of-00050.safetensors deleted file mode 100644 index aa2e89240ef87d36b2e42111b0d2bb517462fd95..0000000000000000000000000000000000000000 --- a/model-00046-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:008d1a031198ab127c27761d605accc0fd48f1d872afb3e0711613b1722cc14f -size 9261287736 diff --git a/model-00047-of-00050.safetensors b/model-00047-of-00050.safetensors deleted file mode 100644 index 77897c6cebb6b7dae751ee810dbeb1058f67da96..0000000000000000000000000000000000000000 --- a/model-00047-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d7f43eceff211a0f79ac9315091a2afc585a10b0aaee46c1b2caa0ca57aaffc4 -size 9261287736 diff --git a/model-00048-of-00050.safetensors b/model-00048-of-00050.safetensors deleted file mode 100644 index adcf5beed7177079df6f99c86dc8b2b294337f39..0000000000000000000000000000000000000000 --- a/model-00048-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c15f954ccbe5a57682184600a4f55fd6c5eb0fe98d18d0ad96ae48c48a2f4e3c -size 9261287736 diff --git a/model-00049-of-00050.safetensors b/model-00049-of-00050.safetensors deleted file mode 100644 index 313d23061828e08fe3682020e1063fd46df66d0c..0000000000000000000000000000000000000000 --- a/model-00049-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:9ec2e464fb26c94e5862f52d3b5bcf239e01e8903e587fb1bd416ef2051a225f -size 4630643872 diff --git a/model-00050-of-00050.safetensors b/model-00050-of-00050.safetensors deleted file mode 100644 index cf4495d91a3fc94f49e8a4091294b5728f15e1ab..0000000000000000000000000000000000000000 --- a/model-00050-of-00050.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:0dcbfe5b08c5190c2a12bd635d9a23e53aca6d1f1dabc42bae7566d1b121db78 -size 4295000288 diff --git a/model.safetensors.index.json b/model.safetensors.index.json deleted file mode 100644 index 6bc2a4c59294fc86fac4dccaab02b0f4d50196f3..0000000000000000000000000000000000000000 --- a/model.safetensors.index.json +++ /dev/null @@ -1 +0,0 @@ -{"metadata": {"total_size": 453131665408}, "weight_map": {"transformer.word_embeddings.weight": "model-00001-of-00050.safetensors", "transformer.h.0.input_layernorm.weight": "model-00001-of-00050.safetensors", "transformer.h.0.post_attention_layernorm.weight": "model-00001-of-00050.safetensors", "transformer.h.0.self_attention.dense.weight": "model-00001-of-00050.safetensors", "transformer.h.0.self_attention.dense.bias": "model-00001-of-00050.safetensors", "transformer.h.0.self_attention.query.weight": "model-00001-of-00050.safetensors", "transformer.h.0.self_attention.key_value.weight": "model-00001-of-00050.safetensors", "transformer.h.0.mlp.gate_proj.weight": "model-00001-of-00050.safetensors", "transformer.h.0.mlp.down_proj.weight": "model-00001-of-00050.safetensors", "transformer.h.0.mlp.down_proj.bias": "model-00001-of-00050.safetensors", "transformer.h.0.mlp.up_proj.weight": "model-00001-of-00050.safetensors", "transformer.h.1.input_layernorm.weight": "model-00002-of-00050.safetensors", "transformer.h.1.post_attention_layernorm.weight": "model-00002-of-00050.safetensors", "transformer.h.1.self_attention.dense.weight": "model-00002-of-00050.safetensors", "transformer.h.1.self_attention.dense.bias": "model-00002-of-00050.safetensors", "transformer.h.1.self_attention.query.weight": 
"model-00002-of-00050.safetensors", "transformer.h.1.self_attention.key_value.weight": "model-00002-of-00050.safetensors", "transformer.h.1.mlp.gate_proj.weight": "model-00002-of-00050.safetensors", "transformer.h.1.mlp.down_proj.weight": "model-00002-of-00050.safetensors", "transformer.h.1.mlp.down_proj.bias": "model-00002-of-00050.safetensors", "transformer.h.1.mlp.up_proj.weight": "model-00002-of-00050.safetensors", "transformer.h.2.input_layernorm.weight": "model-00002-of-00050.safetensors", "transformer.h.2.post_attention_layernorm.weight": "model-00002-of-00050.safetensors", "transformer.h.2.self_attention.dense.weight": "model-00002-of-00050.safetensors", "transformer.h.2.self_attention.dense.bias": "model-00002-of-00050.safetensors", "transformer.h.2.self_attention.query.weight": "model-00002-of-00050.safetensors", "transformer.h.2.self_attention.key_value.weight": "model-00002-of-00050.safetensors", "transformer.h.2.mlp.gate_proj.weight": "model-00002-of-00050.safetensors", "transformer.h.2.mlp.down_proj.weight": "model-00002-of-00050.safetensors", "transformer.h.2.mlp.down_proj.bias": "model-00002-of-00050.safetensors", "transformer.h.2.mlp.up_proj.weight": "model-00002-of-00050.safetensors", "transformer.h.3.input_layernorm.weight": "model-00003-of-00050.safetensors", "transformer.h.3.post_attention_layernorm.weight": "model-00003-of-00050.safetensors", "transformer.h.3.self_attention.dense.weight": "model-00003-of-00050.safetensors", "transformer.h.3.self_attention.dense.bias": "model-00003-of-00050.safetensors", "transformer.h.3.self_attention.query.weight": "model-00003-of-00050.safetensors", "transformer.h.3.self_attention.key_value.weight": "model-00003-of-00050.safetensors", "transformer.h.3.mlp.gate_proj.weight": "model-00003-of-00050.safetensors", "transformer.h.3.mlp.down_proj.weight": "model-00003-of-00050.safetensors", "transformer.h.3.mlp.down_proj.bias": "model-00003-of-00050.safetensors", "transformer.h.3.mlp.up_proj.weight": "model-00003-of-00050.safetensors", "transformer.h.4.input_layernorm.weight": "model-00003-of-00050.safetensors", "transformer.h.4.post_attention_layernorm.weight": "model-00003-of-00050.safetensors", "transformer.h.4.self_attention.dense.weight": "model-00003-of-00050.safetensors", "transformer.h.4.self_attention.dense.bias": "model-00003-of-00050.safetensors", "transformer.h.4.self_attention.query.weight": "model-00003-of-00050.safetensors", "transformer.h.4.self_attention.key_value.weight": "model-00003-of-00050.safetensors", "transformer.h.4.mlp.gate_proj.weight": "model-00003-of-00050.safetensors", "transformer.h.4.mlp.down_proj.weight": "model-00003-of-00050.safetensors", "transformer.h.4.mlp.down_proj.bias": "model-00003-of-00050.safetensors", "transformer.h.4.mlp.up_proj.weight": "model-00003-of-00050.safetensors", "transformer.h.5.input_layernorm.weight": "model-00004-of-00050.safetensors", "transformer.h.5.post_attention_layernorm.weight": "model-00004-of-00050.safetensors", "transformer.h.5.self_attention.dense.weight": "model-00004-of-00050.safetensors", "transformer.h.5.self_attention.dense.bias": "model-00004-of-00050.safetensors", "transformer.h.5.self_attention.query.weight": "model-00004-of-00050.safetensors", "transformer.h.5.self_attention.key_value.weight": "model-00004-of-00050.safetensors", "transformer.h.5.mlp.gate_proj.weight": "model-00004-of-00050.safetensors", "transformer.h.5.mlp.down_proj.weight": "model-00004-of-00050.safetensors", "transformer.h.5.mlp.down_proj.bias": "model-00004-of-00050.safetensors", 
"transformer.h.5.mlp.up_proj.weight": "model-00004-of-00050.safetensors", "transformer.h.6.input_layernorm.weight": "model-00004-of-00050.safetensors", "transformer.h.6.post_attention_layernorm.weight": "model-00004-of-00050.safetensors", "transformer.h.6.self_attention.dense.weight": "model-00004-of-00050.safetensors", "transformer.h.6.self_attention.dense.bias": "model-00004-of-00050.safetensors", "transformer.h.6.self_attention.query.weight": "model-00004-of-00050.safetensors", "transformer.h.6.self_attention.key_value.weight": "model-00004-of-00050.safetensors", "transformer.h.6.mlp.gate_proj.weight": "model-00004-of-00050.safetensors", "transformer.h.6.mlp.down_proj.weight": "model-00004-of-00050.safetensors", "transformer.h.6.mlp.down_proj.bias": "model-00004-of-00050.safetensors", "transformer.h.6.mlp.up_proj.weight": "model-00004-of-00050.safetensors", "transformer.h.7.input_layernorm.weight": "model-00005-of-00050.safetensors", "transformer.h.7.post_attention_layernorm.weight": "model-00005-of-00050.safetensors", "transformer.h.7.self_attention.dense.weight": "model-00005-of-00050.safetensors", "transformer.h.7.self_attention.dense.bias": "model-00005-of-00050.safetensors", "transformer.h.7.self_attention.query.weight": "model-00005-of-00050.safetensors", "transformer.h.7.self_attention.key_value.weight": "model-00005-of-00050.safetensors", "transformer.h.7.mlp.gate_proj.weight": "model-00005-of-00050.safetensors", "transformer.h.7.mlp.down_proj.weight": "model-00005-of-00050.safetensors", "transformer.h.7.mlp.down_proj.bias": "model-00005-of-00050.safetensors", "transformer.h.7.mlp.up_proj.weight": "model-00005-of-00050.safetensors", "transformer.h.8.input_layernorm.weight": "model-00005-of-00050.safetensors", "transformer.h.8.post_attention_layernorm.weight": "model-00005-of-00050.safetensors", "transformer.h.8.self_attention.dense.weight": "model-00005-of-00050.safetensors", "transformer.h.8.self_attention.dense.bias": "model-00005-of-00050.safetensors", "transformer.h.8.self_attention.query.weight": "model-00005-of-00050.safetensors", "transformer.h.8.self_attention.key_value.weight": "model-00005-of-00050.safetensors", "transformer.h.8.mlp.gate_proj.weight": "model-00005-of-00050.safetensors", "transformer.h.8.mlp.down_proj.weight": "model-00005-of-00050.safetensors", "transformer.h.8.mlp.down_proj.bias": "model-00005-of-00050.safetensors", "transformer.h.8.mlp.up_proj.weight": "model-00005-of-00050.safetensors", "transformer.h.9.input_layernorm.weight": "model-00006-of-00050.safetensors", "transformer.h.9.post_attention_layernorm.weight": "model-00006-of-00050.safetensors", "transformer.h.9.self_attention.dense.weight": "model-00006-of-00050.safetensors", "transformer.h.9.self_attention.dense.bias": "model-00006-of-00050.safetensors", "transformer.h.9.self_attention.query.weight": "model-00006-of-00050.safetensors", "transformer.h.9.self_attention.key_value.weight": "model-00006-of-00050.safetensors", "transformer.h.9.mlp.gate_proj.weight": "model-00006-of-00050.safetensors", "transformer.h.9.mlp.down_proj.weight": "model-00006-of-00050.safetensors", "transformer.h.9.mlp.down_proj.bias": "model-00006-of-00050.safetensors", "transformer.h.9.mlp.up_proj.weight": "model-00006-of-00050.safetensors", "transformer.h.10.input_layernorm.weight": "model-00006-of-00050.safetensors", "transformer.h.10.post_attention_layernorm.weight": "model-00006-of-00050.safetensors", "transformer.h.10.self_attention.dense.weight": "model-00006-of-00050.safetensors", 
"transformer.h.10.self_attention.dense.bias": "model-00006-of-00050.safetensors", "transformer.h.10.self_attention.query.weight": "model-00006-of-00050.safetensors", "transformer.h.10.self_attention.key_value.weight": "model-00006-of-00050.safetensors", "transformer.h.10.mlp.gate_proj.weight": "model-00006-of-00050.safetensors", "transformer.h.10.mlp.down_proj.weight": "model-00006-of-00050.safetensors", "transformer.h.10.mlp.down_proj.bias": "model-00006-of-00050.safetensors", "transformer.h.10.mlp.up_proj.weight": "model-00006-of-00050.safetensors", "transformer.h.11.input_layernorm.weight": "model-00007-of-00050.safetensors", "transformer.h.11.post_attention_layernorm.weight": "model-00007-of-00050.safetensors", "transformer.h.11.self_attention.dense.weight": "model-00007-of-00050.safetensors", "transformer.h.11.self_attention.dense.bias": "model-00007-of-00050.safetensors", "transformer.h.11.self_attention.query.weight": "model-00007-of-00050.safetensors", "transformer.h.11.self_attention.key_value.weight": "model-00007-of-00050.safetensors", "transformer.h.11.mlp.gate_proj.weight": "model-00007-of-00050.safetensors", "transformer.h.11.mlp.down_proj.weight": "model-00007-of-00050.safetensors", "transformer.h.11.mlp.down_proj.bias": "model-00007-of-00050.safetensors", "transformer.h.11.mlp.up_proj.weight": "model-00007-of-00050.safetensors", "transformer.h.12.input_layernorm.weight": "model-00007-of-00050.safetensors", "transformer.h.12.post_attention_layernorm.weight": "model-00007-of-00050.safetensors", "transformer.h.12.self_attention.dense.weight": "model-00007-of-00050.safetensors", "transformer.h.12.self_attention.dense.bias": "model-00007-of-00050.safetensors", "transformer.h.12.self_attention.query.weight": "model-00007-of-00050.safetensors", "transformer.h.12.self_attention.key_value.weight": "model-00007-of-00050.safetensors", "transformer.h.12.mlp.gate_proj.weight": "model-00007-of-00050.safetensors", "transformer.h.12.mlp.down_proj.weight": "model-00007-of-00050.safetensors", "transformer.h.12.mlp.down_proj.bias": "model-00007-of-00050.safetensors", "transformer.h.12.mlp.up_proj.weight": "model-00007-of-00050.safetensors", "transformer.h.13.input_layernorm.weight": "model-00008-of-00050.safetensors", "transformer.h.13.post_attention_layernorm.weight": "model-00008-of-00050.safetensors", "transformer.h.13.self_attention.dense.weight": "model-00008-of-00050.safetensors", "transformer.h.13.self_attention.dense.bias": "model-00008-of-00050.safetensors", "transformer.h.13.self_attention.query.weight": "model-00008-of-00050.safetensors", "transformer.h.13.self_attention.key_value.weight": "model-00008-of-00050.safetensors", "transformer.h.13.mlp.gate_proj.weight": "model-00008-of-00050.safetensors", "transformer.h.13.mlp.down_proj.weight": "model-00008-of-00050.safetensors", "transformer.h.13.mlp.down_proj.bias": "model-00008-of-00050.safetensors", "transformer.h.13.mlp.up_proj.weight": "model-00008-of-00050.safetensors", "transformer.h.14.input_layernorm.weight": "model-00008-of-00050.safetensors", "transformer.h.14.post_attention_layernorm.weight": "model-00008-of-00050.safetensors", "transformer.h.14.self_attention.dense.weight": "model-00008-of-00050.safetensors", "transformer.h.14.self_attention.dense.bias": "model-00008-of-00050.safetensors", "transformer.h.14.self_attention.query.weight": "model-00008-of-00050.safetensors", "transformer.h.14.self_attention.key_value.weight": "model-00008-of-00050.safetensors", "transformer.h.14.mlp.gate_proj.weight": 
"model-00008-of-00050.safetensors", "transformer.h.14.mlp.down_proj.weight": "model-00008-of-00050.safetensors", "transformer.h.14.mlp.down_proj.bias": "model-00008-of-00050.safetensors", "transformer.h.14.mlp.up_proj.weight": "model-00008-of-00050.safetensors", "transformer.h.15.input_layernorm.weight": "model-00009-of-00050.safetensors", "transformer.h.15.post_attention_layernorm.weight": "model-00009-of-00050.safetensors", "transformer.h.15.self_attention.dense.weight": "model-00009-of-00050.safetensors", "transformer.h.15.self_attention.dense.bias": "model-00009-of-00050.safetensors", "transformer.h.15.self_attention.query.weight": "model-00009-of-00050.safetensors", "transformer.h.15.self_attention.key_value.weight": "model-00009-of-00050.safetensors", "transformer.h.15.mlp.gate_proj.weight": "model-00009-of-00050.safetensors", "transformer.h.15.mlp.down_proj.weight": "model-00009-of-00050.safetensors", "transformer.h.15.mlp.down_proj.bias": "model-00009-of-00050.safetensors", "transformer.h.15.mlp.up_proj.weight": "model-00009-of-00050.safetensors", "transformer.h.16.input_layernorm.weight": "model-00009-of-00050.safetensors", "transformer.h.16.post_attention_layernorm.weight": "model-00009-of-00050.safetensors", "transformer.h.16.self_attention.dense.weight": "model-00009-of-00050.safetensors", "transformer.h.16.self_attention.dense.bias": "model-00009-of-00050.safetensors", "transformer.h.16.self_attention.query.weight": "model-00009-of-00050.safetensors", "transformer.h.16.self_attention.key_value.weight": "model-00009-of-00050.safetensors", "transformer.h.16.mlp.gate_proj.weight": "model-00009-of-00050.safetensors", "transformer.h.16.mlp.down_proj.weight": "model-00009-of-00050.safetensors", "transformer.h.16.mlp.down_proj.bias": "model-00009-of-00050.safetensors", "transformer.h.16.mlp.up_proj.weight": "model-00009-of-00050.safetensors", "transformer.h.17.input_layernorm.weight": "model-00010-of-00050.safetensors", "transformer.h.17.post_attention_layernorm.weight": "model-00010-of-00050.safetensors", "transformer.h.17.self_attention.dense.weight": "model-00010-of-00050.safetensors", "transformer.h.17.self_attention.dense.bias": "model-00010-of-00050.safetensors", "transformer.h.17.self_attention.query.weight": "model-00010-of-00050.safetensors", "transformer.h.17.self_attention.key_value.weight": "model-00010-of-00050.safetensors", "transformer.h.17.mlp.gate_proj.weight": "model-00010-of-00050.safetensors", "transformer.h.17.mlp.down_proj.weight": "model-00010-of-00050.safetensors", "transformer.h.17.mlp.down_proj.bias": "model-00010-of-00050.safetensors", "transformer.h.17.mlp.up_proj.weight": "model-00010-of-00050.safetensors", "transformer.h.18.input_layernorm.weight": "model-00010-of-00050.safetensors", "transformer.h.18.post_attention_layernorm.weight": "model-00010-of-00050.safetensors", "transformer.h.18.self_attention.dense.weight": "model-00010-of-00050.safetensors", "transformer.h.18.self_attention.dense.bias": "model-00010-of-00050.safetensors", "transformer.h.18.self_attention.query.weight": "model-00010-of-00050.safetensors", "transformer.h.18.self_attention.key_value.weight": "model-00010-of-00050.safetensors", "transformer.h.18.mlp.gate_proj.weight": "model-00010-of-00050.safetensors", "transformer.h.18.mlp.down_proj.weight": "model-00010-of-00050.safetensors", "transformer.h.18.mlp.down_proj.bias": "model-00010-of-00050.safetensors", "transformer.h.18.mlp.up_proj.weight": "model-00010-of-00050.safetensors", "transformer.h.19.input_layernorm.weight": 
"model-00011-of-00050.safetensors", "transformer.h.19.post_attention_layernorm.weight": "model-00011-of-00050.safetensors", "transformer.h.19.self_attention.dense.weight": "model-00011-of-00050.safetensors", "transformer.h.19.self_attention.dense.bias": "model-00011-of-00050.safetensors", "transformer.h.19.self_attention.query.weight": "model-00011-of-00050.safetensors", "transformer.h.19.self_attention.key_value.weight": "model-00011-of-00050.safetensors", "transformer.h.19.mlp.gate_proj.weight": "model-00011-of-00050.safetensors", "transformer.h.19.mlp.down_proj.weight": "model-00011-of-00050.safetensors", "transformer.h.19.mlp.down_proj.bias": "model-00011-of-00050.safetensors", "transformer.h.19.mlp.up_proj.weight": "model-00011-of-00050.safetensors", "transformer.h.20.input_layernorm.weight": "model-00011-of-00050.safetensors", "transformer.h.20.post_attention_layernorm.weight": "model-00011-of-00050.safetensors", "transformer.h.20.self_attention.dense.weight": "model-00011-of-00050.safetensors", "transformer.h.20.self_attention.dense.bias": "model-00011-of-00050.safetensors", "transformer.h.20.self_attention.query.weight": "model-00011-of-00050.safetensors", "transformer.h.20.self_attention.key_value.weight": "model-00011-of-00050.safetensors", "transformer.h.20.mlp.gate_proj.weight": "model-00011-of-00050.safetensors", "transformer.h.20.mlp.down_proj.weight": "model-00011-of-00050.safetensors", "transformer.h.20.mlp.down_proj.bias": "model-00011-of-00050.safetensors", "transformer.h.20.mlp.up_proj.weight": "model-00011-of-00050.safetensors", "transformer.h.21.input_layernorm.weight": "model-00012-of-00050.safetensors", "transformer.h.21.post_attention_layernorm.weight": "model-00012-of-00050.safetensors", "transformer.h.21.self_attention.dense.weight": "model-00012-of-00050.safetensors", "transformer.h.21.self_attention.dense.bias": "model-00012-of-00050.safetensors", "transformer.h.21.self_attention.query.weight": "model-00012-of-00050.safetensors", "transformer.h.21.self_attention.key_value.weight": "model-00012-of-00050.safetensors", "transformer.h.21.mlp.gate_proj.weight": "model-00012-of-00050.safetensors", "transformer.h.21.mlp.down_proj.weight": "model-00012-of-00050.safetensors", "transformer.h.21.mlp.down_proj.bias": "model-00012-of-00050.safetensors", "transformer.h.21.mlp.up_proj.weight": "model-00012-of-00050.safetensors", "transformer.h.22.input_layernorm.weight": "model-00012-of-00050.safetensors", "transformer.h.22.post_attention_layernorm.weight": "model-00012-of-00050.safetensors", "transformer.h.22.self_attention.dense.weight": "model-00012-of-00050.safetensors", "transformer.h.22.self_attention.dense.bias": "model-00012-of-00050.safetensors", "transformer.h.22.self_attention.query.weight": "model-00012-of-00050.safetensors", "transformer.h.22.self_attention.key_value.weight": "model-00012-of-00050.safetensors", "transformer.h.22.mlp.gate_proj.weight": "model-00012-of-00050.safetensors", "transformer.h.22.mlp.down_proj.weight": "model-00012-of-00050.safetensors", "transformer.h.22.mlp.down_proj.bias": "model-00012-of-00050.safetensors", "transformer.h.22.mlp.up_proj.weight": "model-00012-of-00050.safetensors", "transformer.h.23.input_layernorm.weight": "model-00013-of-00050.safetensors", "transformer.h.23.post_attention_layernorm.weight": "model-00013-of-00050.safetensors", "transformer.h.23.self_attention.dense.weight": "model-00013-of-00050.safetensors", "transformer.h.23.self_attention.dense.bias": "model-00013-of-00050.safetensors", 
"transformer.h.23.self_attention.query.weight": "model-00013-of-00050.safetensors", "transformer.h.23.self_attention.key_value.weight": "model-00013-of-00050.safetensors", "transformer.h.23.mlp.gate_proj.weight": "model-00013-of-00050.safetensors", "transformer.h.23.mlp.down_proj.weight": "model-00013-of-00050.safetensors", "transformer.h.23.mlp.down_proj.bias": "model-00013-of-00050.safetensors", "transformer.h.23.mlp.up_proj.weight": "model-00013-of-00050.safetensors", "transformer.h.24.input_layernorm.weight": "model-00013-of-00050.safetensors", "transformer.h.24.post_attention_layernorm.weight": "model-00013-of-00050.safetensors", "transformer.h.24.self_attention.dense.weight": "model-00013-of-00050.safetensors", "transformer.h.24.self_attention.dense.bias": "model-00013-of-00050.safetensors", "transformer.h.24.self_attention.query.weight": "model-00013-of-00050.safetensors", "transformer.h.24.self_attention.key_value.weight": "model-00013-of-00050.safetensors", "transformer.h.24.mlp.gate_proj.weight": "model-00013-of-00050.safetensors", "transformer.h.24.mlp.down_proj.weight": "model-00013-of-00050.safetensors", "transformer.h.24.mlp.down_proj.bias": "model-00013-of-00050.safetensors", "transformer.h.24.mlp.up_proj.weight": "model-00013-of-00050.safetensors", "transformer.h.25.input_layernorm.weight": "model-00014-of-00050.safetensors", "transformer.h.25.post_attention_layernorm.weight": "model-00014-of-00050.safetensors", "transformer.h.25.self_attention.dense.weight": "model-00014-of-00050.safetensors", "transformer.h.25.self_attention.dense.bias": "model-00014-of-00050.safetensors", "transformer.h.25.self_attention.query.weight": "model-00014-of-00050.safetensors", "transformer.h.25.self_attention.key_value.weight": "model-00014-of-00050.safetensors", "transformer.h.25.mlp.gate_proj.weight": "model-00014-of-00050.safetensors", "transformer.h.25.mlp.down_proj.weight": "model-00014-of-00050.safetensors", "transformer.h.25.mlp.down_proj.bias": "model-00014-of-00050.safetensors", "transformer.h.25.mlp.up_proj.weight": "model-00014-of-00050.safetensors", "transformer.h.26.input_layernorm.weight": "model-00014-of-00050.safetensors", "transformer.h.26.post_attention_layernorm.weight": "model-00014-of-00050.safetensors", "transformer.h.26.self_attention.dense.weight": "model-00014-of-00050.safetensors", "transformer.h.26.self_attention.dense.bias": "model-00014-of-00050.safetensors", "transformer.h.26.self_attention.query.weight": "model-00014-of-00050.safetensors", "transformer.h.26.self_attention.key_value.weight": "model-00014-of-00050.safetensors", "transformer.h.26.mlp.gate_proj.weight": "model-00014-of-00050.safetensors", "transformer.h.26.mlp.down_proj.weight": "model-00014-of-00050.safetensors", "transformer.h.26.mlp.down_proj.bias": "model-00014-of-00050.safetensors", "transformer.h.26.mlp.up_proj.weight": "model-00014-of-00050.safetensors", "transformer.h.27.input_layernorm.weight": "model-00015-of-00050.safetensors", "transformer.h.27.post_attention_layernorm.weight": "model-00015-of-00050.safetensors", "transformer.h.27.self_attention.dense.weight": "model-00015-of-00050.safetensors", "transformer.h.27.self_attention.dense.bias": "model-00015-of-00050.safetensors", "transformer.h.27.self_attention.query.weight": "model-00015-of-00050.safetensors", "transformer.h.27.self_attention.key_value.weight": "model-00015-of-00050.safetensors", "transformer.h.27.mlp.gate_proj.weight": "model-00015-of-00050.safetensors", "transformer.h.27.mlp.down_proj.weight": 
"model-00015-of-00050.safetensors", "transformer.h.27.mlp.down_proj.bias": "model-00015-of-00050.safetensors", "transformer.h.27.mlp.up_proj.weight": "model-00015-of-00050.safetensors", "transformer.h.28.input_layernorm.weight": "model-00015-of-00050.safetensors", "transformer.h.28.post_attention_layernorm.weight": "model-00015-of-00050.safetensors", "transformer.h.28.self_attention.dense.weight": "model-00015-of-00050.safetensors", "transformer.h.28.self_attention.dense.bias": "model-00015-of-00050.safetensors", "transformer.h.28.self_attention.query.weight": "model-00015-of-00050.safetensors", "transformer.h.28.self_attention.key_value.weight": "model-00015-of-00050.safetensors", "transformer.h.28.mlp.gate_proj.weight": "model-00015-of-00050.safetensors", "transformer.h.28.mlp.down_proj.weight": "model-00015-of-00050.safetensors", "transformer.h.28.mlp.down_proj.bias": "model-00015-of-00050.safetensors", "transformer.h.28.mlp.up_proj.weight": "model-00015-of-00050.safetensors", "transformer.h.29.input_layernorm.weight": "model-00016-of-00050.safetensors", "transformer.h.29.post_attention_layernorm.weight": "model-00016-of-00050.safetensors", "transformer.h.29.self_attention.dense.weight": "model-00016-of-00050.safetensors", "transformer.h.29.self_attention.dense.bias": "model-00016-of-00050.safetensors", "transformer.h.29.self_attention.query.weight": "model-00016-of-00050.safetensors", "transformer.h.29.self_attention.key_value.weight": "model-00016-of-00050.safetensors", "transformer.h.29.mlp.gate_proj.weight": "model-00016-of-00050.safetensors", "transformer.h.29.mlp.down_proj.weight": "model-00016-of-00050.safetensors", "transformer.h.29.mlp.down_proj.bias": "model-00016-of-00050.safetensors", "transformer.h.29.mlp.up_proj.weight": "model-00016-of-00050.safetensors", "transformer.h.30.input_layernorm.weight": "model-00016-of-00050.safetensors", "transformer.h.30.post_attention_layernorm.weight": "model-00016-of-00050.safetensors", "transformer.h.30.self_attention.dense.weight": "model-00016-of-00050.safetensors", "transformer.h.30.self_attention.dense.bias": "model-00016-of-00050.safetensors", "transformer.h.30.self_attention.query.weight": "model-00016-of-00050.safetensors", "transformer.h.30.self_attention.key_value.weight": "model-00016-of-00050.safetensors", "transformer.h.30.mlp.gate_proj.weight": "model-00016-of-00050.safetensors", "transformer.h.30.mlp.down_proj.weight": "model-00016-of-00050.safetensors", "transformer.h.30.mlp.down_proj.bias": "model-00016-of-00050.safetensors", "transformer.h.30.mlp.up_proj.weight": "model-00016-of-00050.safetensors", "transformer.h.31.input_layernorm.weight": "model-00017-of-00050.safetensors", "transformer.h.31.post_attention_layernorm.weight": "model-00017-of-00050.safetensors", "transformer.h.31.self_attention.dense.weight": "model-00017-of-00050.safetensors", "transformer.h.31.self_attention.dense.bias": "model-00017-of-00050.safetensors", "transformer.h.31.self_attention.query.weight": "model-00017-of-00050.safetensors", "transformer.h.31.self_attention.key_value.weight": "model-00017-of-00050.safetensors", "transformer.h.31.mlp.gate_proj.weight": "model-00017-of-00050.safetensors", "transformer.h.31.mlp.down_proj.weight": "model-00017-of-00050.safetensors", "transformer.h.31.mlp.down_proj.bias": "model-00017-of-00050.safetensors", "transformer.h.31.mlp.up_proj.weight": "model-00017-of-00050.safetensors", "transformer.h.32.input_layernorm.weight": "model-00017-of-00050.safetensors", "transformer.h.32.post_attention_layernorm.weight": 
"model-00017-of-00050.safetensors", "transformer.h.32.self_attention.dense.weight": "model-00017-of-00050.safetensors", "transformer.h.32.self_attention.dense.bias": "model-00017-of-00050.safetensors", "transformer.h.32.self_attention.query.weight": "model-00017-of-00050.safetensors", "transformer.h.32.self_attention.key_value.weight": "model-00017-of-00050.safetensors", "transformer.h.32.mlp.gate_proj.weight": "model-00017-of-00050.safetensors", "transformer.h.32.mlp.down_proj.weight": "model-00017-of-00050.safetensors", "transformer.h.32.mlp.down_proj.bias": "model-00017-of-00050.safetensors", "transformer.h.32.mlp.up_proj.weight": "model-00017-of-00050.safetensors", "transformer.h.33.input_layernorm.weight": "model-00018-of-00050.safetensors", "transformer.h.33.post_attention_layernorm.weight": "model-00018-of-00050.safetensors", "transformer.h.33.self_attention.dense.weight": "model-00018-of-00050.safetensors", "transformer.h.33.self_attention.dense.bias": "model-00018-of-00050.safetensors", "transformer.h.33.self_attention.query.weight": "model-00018-of-00050.safetensors", "transformer.h.33.self_attention.key_value.weight": "model-00018-of-00050.safetensors", "transformer.h.33.mlp.gate_proj.weight": "model-00018-of-00050.safetensors", "transformer.h.33.mlp.down_proj.weight": "model-00018-of-00050.safetensors", "transformer.h.33.mlp.down_proj.bias": "model-00018-of-00050.safetensors", "transformer.h.33.mlp.up_proj.weight": "model-00018-of-00050.safetensors", "transformer.h.34.input_layernorm.weight": "model-00018-of-00050.safetensors", "transformer.h.34.post_attention_layernorm.weight": "model-00018-of-00050.safetensors", "transformer.h.34.self_attention.dense.weight": "model-00018-of-00050.safetensors", "transformer.h.34.self_attention.dense.bias": "model-00018-of-00050.safetensors", "transformer.h.34.self_attention.query.weight": "model-00018-of-00050.safetensors", "transformer.h.34.self_attention.key_value.weight": "model-00018-of-00050.safetensors", "transformer.h.34.mlp.gate_proj.weight": "model-00018-of-00050.safetensors", "transformer.h.34.mlp.down_proj.weight": "model-00018-of-00050.safetensors", "transformer.h.34.mlp.down_proj.bias": "model-00018-of-00050.safetensors", "transformer.h.34.mlp.up_proj.weight": "model-00018-of-00050.safetensors", "transformer.h.35.input_layernorm.weight": "model-00019-of-00050.safetensors", "transformer.h.35.post_attention_layernorm.weight": "model-00019-of-00050.safetensors", "transformer.h.35.self_attention.dense.weight": "model-00019-of-00050.safetensors", "transformer.h.35.self_attention.dense.bias": "model-00019-of-00050.safetensors", "transformer.h.35.self_attention.query.weight": "model-00019-of-00050.safetensors", "transformer.h.35.self_attention.key_value.weight": "model-00019-of-00050.safetensors", "transformer.h.35.mlp.gate_proj.weight": "model-00019-of-00050.safetensors", "transformer.h.35.mlp.down_proj.weight": "model-00019-of-00050.safetensors", "transformer.h.35.mlp.down_proj.bias": "model-00019-of-00050.safetensors", "transformer.h.35.mlp.up_proj.weight": "model-00019-of-00050.safetensors", "transformer.h.36.input_layernorm.weight": "model-00019-of-00050.safetensors", "transformer.h.36.post_attention_layernorm.weight": "model-00019-of-00050.safetensors", "transformer.h.36.self_attention.dense.weight": "model-00019-of-00050.safetensors", "transformer.h.36.self_attention.dense.bias": "model-00019-of-00050.safetensors", "transformer.h.36.self_attention.query.weight": "model-00019-of-00050.safetensors", 
"transformer.h.36.self_attention.key_value.weight": "model-00019-of-00050.safetensors", "transformer.h.36.mlp.gate_proj.weight": "model-00019-of-00050.safetensors", "transformer.h.36.mlp.down_proj.weight": "model-00019-of-00050.safetensors", "transformer.h.36.mlp.down_proj.bias": "model-00019-of-00050.safetensors", "transformer.h.36.mlp.up_proj.weight": "model-00019-of-00050.safetensors", "transformer.h.37.input_layernorm.weight": "model-00020-of-00050.safetensors", "transformer.h.37.post_attention_layernorm.weight": "model-00020-of-00050.safetensors", "transformer.h.37.self_attention.dense.weight": "model-00020-of-00050.safetensors", "transformer.h.37.self_attention.dense.bias": "model-00020-of-00050.safetensors", "transformer.h.37.self_attention.query.weight": "model-00020-of-00050.safetensors", "transformer.h.37.self_attention.key_value.weight": "model-00020-of-00050.safetensors", "transformer.h.37.mlp.gate_proj.weight": "model-00020-of-00050.safetensors", "transformer.h.37.mlp.down_proj.weight": "model-00020-of-00050.safetensors", "transformer.h.37.mlp.down_proj.bias": "model-00020-of-00050.safetensors", "transformer.h.37.mlp.up_proj.weight": "model-00020-of-00050.safetensors", "transformer.h.38.input_layernorm.weight": "model-00020-of-00050.safetensors", "transformer.h.38.post_attention_layernorm.weight": "model-00020-of-00050.safetensors", "transformer.h.38.self_attention.dense.weight": "model-00020-of-00050.safetensors", "transformer.h.38.self_attention.dense.bias": "model-00020-of-00050.safetensors", "transformer.h.38.self_attention.query.weight": "model-00020-of-00050.safetensors", "transformer.h.38.self_attention.key_value.weight": "model-00020-of-00050.safetensors", "transformer.h.38.mlp.gate_proj.weight": "model-00020-of-00050.safetensors", "transformer.h.38.mlp.down_proj.weight": "model-00020-of-00050.safetensors", "transformer.h.38.mlp.down_proj.bias": "model-00020-of-00050.safetensors", "transformer.h.38.mlp.up_proj.weight": "model-00020-of-00050.safetensors", "transformer.h.39.input_layernorm.weight": "model-00021-of-00050.safetensors", "transformer.h.39.post_attention_layernorm.weight": "model-00021-of-00050.safetensors", "transformer.h.39.self_attention.dense.weight": "model-00021-of-00050.safetensors", "transformer.h.39.self_attention.dense.bias": "model-00021-of-00050.safetensors", "transformer.h.39.self_attention.query.weight": "model-00021-of-00050.safetensors", "transformer.h.39.self_attention.key_value.weight": "model-00021-of-00050.safetensors", "transformer.h.39.mlp.gate_proj.weight": "model-00021-of-00050.safetensors", "transformer.h.39.mlp.down_proj.weight": "model-00021-of-00050.safetensors", "transformer.h.39.mlp.down_proj.bias": "model-00021-of-00050.safetensors", "transformer.h.39.mlp.up_proj.weight": "model-00021-of-00050.safetensors", "transformer.h.40.input_layernorm.weight": "model-00021-of-00050.safetensors", "transformer.h.40.post_attention_layernorm.weight": "model-00021-of-00050.safetensors", "transformer.h.40.self_attention.dense.weight": "model-00021-of-00050.safetensors", "transformer.h.40.self_attention.dense.bias": "model-00021-of-00050.safetensors", "transformer.h.40.self_attention.query.weight": "model-00021-of-00050.safetensors", "transformer.h.40.self_attention.key_value.weight": "model-00021-of-00050.safetensors", "transformer.h.40.mlp.gate_proj.weight": "model-00021-of-00050.safetensors", "transformer.h.40.mlp.down_proj.weight": "model-00021-of-00050.safetensors", "transformer.h.40.mlp.down_proj.bias": "model-00021-of-00050.safetensors", 
"transformer.h.40.mlp.up_proj.weight": "model-00021-of-00050.safetensors", "transformer.h.41.input_layernorm.weight": "model-00022-of-00050.safetensors", "transformer.h.41.post_attention_layernorm.weight": "model-00022-of-00050.safetensors", "transformer.h.41.self_attention.dense.weight": "model-00022-of-00050.safetensors", "transformer.h.41.self_attention.dense.bias": "model-00022-of-00050.safetensors", "transformer.h.41.self_attention.query.weight": "model-00022-of-00050.safetensors", "transformer.h.41.self_attention.key_value.weight": "model-00022-of-00050.safetensors", "transformer.h.41.mlp.gate_proj.weight": "model-00022-of-00050.safetensors", "transformer.h.41.mlp.down_proj.weight": "model-00022-of-00050.safetensors", "transformer.h.41.mlp.down_proj.bias": "model-00022-of-00050.safetensors", "transformer.h.41.mlp.up_proj.weight": "model-00022-of-00050.safetensors", "transformer.h.42.input_layernorm.weight": "model-00022-of-00050.safetensors", "transformer.h.42.post_attention_layernorm.weight": "model-00022-of-00050.safetensors", "transformer.h.42.self_attention.dense.weight": "model-00022-of-00050.safetensors", "transformer.h.42.self_attention.dense.bias": "model-00022-of-00050.safetensors", "transformer.h.42.self_attention.query.weight": "model-00022-of-00050.safetensors", "transformer.h.42.self_attention.key_value.weight": "model-00022-of-00050.safetensors", "transformer.h.42.mlp.gate_proj.weight": "model-00022-of-00050.safetensors", "transformer.h.42.mlp.down_proj.weight": "model-00022-of-00050.safetensors", "transformer.h.42.mlp.down_proj.bias": "model-00022-of-00050.safetensors", "transformer.h.42.mlp.up_proj.weight": "model-00022-of-00050.safetensors", "transformer.h.43.input_layernorm.weight": "model-00023-of-00050.safetensors", "transformer.h.43.post_attention_layernorm.weight": "model-00023-of-00050.safetensors", "transformer.h.43.self_attention.dense.weight": "model-00023-of-00050.safetensors", "transformer.h.43.self_attention.dense.bias": "model-00023-of-00050.safetensors", "transformer.h.43.self_attention.query.weight": "model-00023-of-00050.safetensors", "transformer.h.43.self_attention.key_value.weight": "model-00023-of-00050.safetensors", "transformer.h.43.mlp.gate_proj.weight": "model-00023-of-00050.safetensors", "transformer.h.43.mlp.down_proj.weight": "model-00023-of-00050.safetensors", "transformer.h.43.mlp.down_proj.bias": "model-00023-of-00050.safetensors", "transformer.h.43.mlp.up_proj.weight": "model-00023-of-00050.safetensors", "transformer.h.44.input_layernorm.weight": "model-00023-of-00050.safetensors", "transformer.h.44.post_attention_layernorm.weight": "model-00023-of-00050.safetensors", "transformer.h.44.self_attention.dense.weight": "model-00023-of-00050.safetensors", "transformer.h.44.self_attention.dense.bias": "model-00023-of-00050.safetensors", "transformer.h.44.self_attention.query.weight": "model-00023-of-00050.safetensors", "transformer.h.44.self_attention.key_value.weight": "model-00023-of-00050.safetensors", "transformer.h.44.mlp.gate_proj.weight": "model-00023-of-00050.safetensors", "transformer.h.44.mlp.down_proj.weight": "model-00023-of-00050.safetensors", "transformer.h.44.mlp.down_proj.bias": "model-00023-of-00050.safetensors", "transformer.h.44.mlp.up_proj.weight": "model-00023-of-00050.safetensors", "transformer.h.45.input_layernorm.weight": "model-00024-of-00050.safetensors", "transformer.h.45.post_attention_layernorm.weight": "model-00024-of-00050.safetensors", "transformer.h.45.self_attention.dense.weight": 
"model-00024-of-00050.safetensors", "transformer.h.45.self_attention.dense.bias": "model-00024-of-00050.safetensors", "transformer.h.45.self_attention.query.weight": "model-00024-of-00050.safetensors", "transformer.h.45.self_attention.key_value.weight": "model-00024-of-00050.safetensors", "transformer.h.45.mlp.gate_proj.weight": "model-00024-of-00050.safetensors", "transformer.h.45.mlp.down_proj.weight": "model-00024-of-00050.safetensors", "transformer.h.45.mlp.down_proj.bias": "model-00024-of-00050.safetensors", "transformer.h.45.mlp.up_proj.weight": "model-00024-of-00050.safetensors", "transformer.h.46.input_layernorm.weight": "model-00024-of-00050.safetensors", "transformer.h.46.post_attention_layernorm.weight": "model-00024-of-00050.safetensors", "transformer.h.46.self_attention.dense.weight": "model-00024-of-00050.safetensors", "transformer.h.46.self_attention.dense.bias": "model-00024-of-00050.safetensors", "transformer.h.46.self_attention.query.weight": "model-00024-of-00050.safetensors", "transformer.h.46.self_attention.key_value.weight": "model-00024-of-00050.safetensors", "transformer.h.46.mlp.gate_proj.weight": "model-00024-of-00050.safetensors", "transformer.h.46.mlp.down_proj.weight": "model-00024-of-00050.safetensors", "transformer.h.46.mlp.down_proj.bias": "model-00024-of-00050.safetensors", "transformer.h.46.mlp.up_proj.weight": "model-00024-of-00050.safetensors", "transformer.h.47.input_layernorm.weight": "model-00025-of-00050.safetensors", "transformer.h.47.post_attention_layernorm.weight": "model-00025-of-00050.safetensors", "transformer.h.47.self_attention.dense.weight": "model-00025-of-00050.safetensors", "transformer.h.47.self_attention.dense.bias": "model-00025-of-00050.safetensors", "transformer.h.47.self_attention.query.weight": "model-00025-of-00050.safetensors", "transformer.h.47.self_attention.key_value.weight": "model-00025-of-00050.safetensors", "transformer.h.47.mlp.gate_proj.weight": "model-00025-of-00050.safetensors", "transformer.h.47.mlp.down_proj.weight": "model-00025-of-00050.safetensors", "transformer.h.47.mlp.down_proj.bias": "model-00025-of-00050.safetensors", "transformer.h.47.mlp.up_proj.weight": "model-00025-of-00050.safetensors", "transformer.h.48.input_layernorm.weight": "model-00025-of-00050.safetensors", "transformer.h.48.post_attention_layernorm.weight": "model-00025-of-00050.safetensors", "transformer.h.48.self_attention.dense.weight": "model-00025-of-00050.safetensors", "transformer.h.48.self_attention.dense.bias": "model-00025-of-00050.safetensors", "transformer.h.48.self_attention.query.weight": "model-00025-of-00050.safetensors", "transformer.h.48.self_attention.key_value.weight": "model-00025-of-00050.safetensors", "transformer.h.48.mlp.gate_proj.weight": "model-00025-of-00050.safetensors", "transformer.h.48.mlp.down_proj.weight": "model-00025-of-00050.safetensors", "transformer.h.48.mlp.down_proj.bias": "model-00025-of-00050.safetensors", "transformer.h.48.mlp.up_proj.weight": "model-00025-of-00050.safetensors", "transformer.h.49.input_layernorm.weight": "model-00026-of-00050.safetensors", "transformer.h.49.post_attention_layernorm.weight": "model-00026-of-00050.safetensors", "transformer.h.49.self_attention.dense.weight": "model-00026-of-00050.safetensors", "transformer.h.49.self_attention.dense.bias": "model-00026-of-00050.safetensors", "transformer.h.49.self_attention.query.weight": "model-00026-of-00050.safetensors", "transformer.h.49.self_attention.key_value.weight": "model-00026-of-00050.safetensors", 
"transformer.h.49.mlp.gate_proj.weight": "model-00026-of-00050.safetensors", "transformer.h.49.mlp.down_proj.weight": "model-00026-of-00050.safetensors", "transformer.h.49.mlp.down_proj.bias": "model-00026-of-00050.safetensors", "transformer.h.49.mlp.up_proj.weight": "model-00026-of-00050.safetensors", "transformer.h.50.input_layernorm.weight": "model-00026-of-00050.safetensors", "transformer.h.50.post_attention_layernorm.weight": "model-00026-of-00050.safetensors", "transformer.h.50.self_attention.dense.weight": "model-00026-of-00050.safetensors", "transformer.h.50.self_attention.dense.bias": "model-00026-of-00050.safetensors", "transformer.h.50.self_attention.query.weight": "model-00026-of-00050.safetensors", "transformer.h.50.self_attention.key_value.weight": "model-00026-of-00050.safetensors", "transformer.h.50.mlp.gate_proj.weight": "model-00026-of-00050.safetensors", "transformer.h.50.mlp.down_proj.weight": "model-00026-of-00050.safetensors", "transformer.h.50.mlp.down_proj.bias": "model-00026-of-00050.safetensors", "transformer.h.50.mlp.up_proj.weight": "model-00026-of-00050.safetensors", "transformer.h.51.input_layernorm.weight": "model-00027-of-00050.safetensors", "transformer.h.51.post_attention_layernorm.weight": "model-00027-of-00050.safetensors", "transformer.h.51.self_attention.dense.weight": "model-00027-of-00050.safetensors", "transformer.h.51.self_attention.dense.bias": "model-00027-of-00050.safetensors", "transformer.h.51.self_attention.query.weight": "model-00027-of-00050.safetensors", "transformer.h.51.self_attention.key_value.weight": "model-00027-of-00050.safetensors", "transformer.h.51.mlp.gate_proj.weight": "model-00027-of-00050.safetensors", "transformer.h.51.mlp.down_proj.weight": "model-00027-of-00050.safetensors", "transformer.h.51.mlp.down_proj.bias": "model-00027-of-00050.safetensors", "transformer.h.51.mlp.up_proj.weight": "model-00027-of-00050.safetensors", "transformer.h.52.input_layernorm.weight": "model-00027-of-00050.safetensors", "transformer.h.52.post_attention_layernorm.weight": "model-00027-of-00050.safetensors", "transformer.h.52.self_attention.dense.weight": "model-00027-of-00050.safetensors", "transformer.h.52.self_attention.dense.bias": "model-00027-of-00050.safetensors", "transformer.h.52.self_attention.query.weight": "model-00027-of-00050.safetensors", "transformer.h.52.self_attention.key_value.weight": "model-00027-of-00050.safetensors", "transformer.h.52.mlp.gate_proj.weight": "model-00027-of-00050.safetensors", "transformer.h.52.mlp.down_proj.weight": "model-00027-of-00050.safetensors", "transformer.h.52.mlp.down_proj.bias": "model-00027-of-00050.safetensors", "transformer.h.52.mlp.up_proj.weight": "model-00027-of-00050.safetensors", "transformer.h.53.input_layernorm.weight": "model-00028-of-00050.safetensors", "transformer.h.53.post_attention_layernorm.weight": "model-00028-of-00050.safetensors", "transformer.h.53.self_attention.dense.weight": "model-00028-of-00050.safetensors", "transformer.h.53.self_attention.dense.bias": "model-00028-of-00050.safetensors", "transformer.h.53.self_attention.query.weight": "model-00028-of-00050.safetensors", "transformer.h.53.self_attention.key_value.weight": "model-00028-of-00050.safetensors", "transformer.h.53.mlp.gate_proj.weight": "model-00028-of-00050.safetensors", "transformer.h.53.mlp.down_proj.weight": "model-00028-of-00050.safetensors", "transformer.h.53.mlp.down_proj.bias": "model-00028-of-00050.safetensors", "transformer.h.53.mlp.up_proj.weight": "model-00028-of-00050.safetensors", 
"transformer.h.54.input_layernorm.weight": "model-00028-of-00050.safetensors", "transformer.h.54.post_attention_layernorm.weight": "model-00028-of-00050.safetensors", "transformer.h.54.self_attention.dense.weight": "model-00028-of-00050.safetensors", "transformer.h.54.self_attention.dense.bias": "model-00028-of-00050.safetensors", "transformer.h.54.self_attention.query.weight": "model-00028-of-00050.safetensors", "transformer.h.54.self_attention.key_value.weight": "model-00028-of-00050.safetensors", "transformer.h.54.mlp.gate_proj.weight": "model-00028-of-00050.safetensors", "transformer.h.54.mlp.down_proj.weight": "model-00028-of-00050.safetensors", "transformer.h.54.mlp.down_proj.bias": "model-00028-of-00050.safetensors", "transformer.h.54.mlp.up_proj.weight": "model-00028-of-00050.safetensors", "transformer.h.55.input_layernorm.weight": "model-00029-of-00050.safetensors", "transformer.h.55.post_attention_layernorm.weight": "model-00029-of-00050.safetensors", "transformer.h.55.self_attention.dense.weight": "model-00029-of-00050.safetensors", "transformer.h.55.self_attention.dense.bias": "model-00029-of-00050.safetensors", "transformer.h.55.self_attention.query.weight": "model-00029-of-00050.safetensors", "transformer.h.55.self_attention.key_value.weight": "model-00029-of-00050.safetensors", "transformer.h.55.mlp.gate_proj.weight": "model-00029-of-00050.safetensors", "transformer.h.55.mlp.down_proj.weight": "model-00029-of-00050.safetensors", "transformer.h.55.mlp.down_proj.bias": "model-00029-of-00050.safetensors", "transformer.h.55.mlp.up_proj.weight": "model-00029-of-00050.safetensors", "transformer.h.56.input_layernorm.weight": "model-00029-of-00050.safetensors", "transformer.h.56.post_attention_layernorm.weight": "model-00029-of-00050.safetensors", "transformer.h.56.self_attention.dense.weight": "model-00029-of-00050.safetensors", "transformer.h.56.self_attention.dense.bias": "model-00029-of-00050.safetensors", "transformer.h.56.self_attention.query.weight": "model-00029-of-00050.safetensors", "transformer.h.56.self_attention.key_value.weight": "model-00029-of-00050.safetensors", "transformer.h.56.mlp.gate_proj.weight": "model-00029-of-00050.safetensors", "transformer.h.56.mlp.down_proj.weight": "model-00029-of-00050.safetensors", "transformer.h.56.mlp.down_proj.bias": "model-00029-of-00050.safetensors", "transformer.h.56.mlp.up_proj.weight": "model-00029-of-00050.safetensors", "transformer.h.57.input_layernorm.weight": "model-00030-of-00050.safetensors", "transformer.h.57.post_attention_layernorm.weight": "model-00030-of-00050.safetensors", "transformer.h.57.self_attention.dense.weight": "model-00030-of-00050.safetensors", "transformer.h.57.self_attention.dense.bias": "model-00030-of-00050.safetensors", "transformer.h.57.self_attention.query.weight": "model-00030-of-00050.safetensors", "transformer.h.57.self_attention.key_value.weight": "model-00030-of-00050.safetensors", "transformer.h.57.mlp.gate_proj.weight": "model-00030-of-00050.safetensors", "transformer.h.57.mlp.down_proj.weight": "model-00030-of-00050.safetensors", "transformer.h.57.mlp.down_proj.bias": "model-00030-of-00050.safetensors", "transformer.h.57.mlp.up_proj.weight": "model-00030-of-00050.safetensors", "transformer.h.58.input_layernorm.weight": "model-00030-of-00050.safetensors", "transformer.h.58.post_attention_layernorm.weight": "model-00030-of-00050.safetensors", "transformer.h.58.self_attention.dense.weight": "model-00030-of-00050.safetensors", "transformer.h.58.self_attention.dense.bias": 
"model-00030-of-00050.safetensors", "transformer.h.58.self_attention.query.weight": "model-00030-of-00050.safetensors", "transformer.h.58.self_attention.key_value.weight": "model-00030-of-00050.safetensors", "transformer.h.58.mlp.gate_proj.weight": "model-00030-of-00050.safetensors", "transformer.h.58.mlp.down_proj.weight": "model-00030-of-00050.safetensors", "transformer.h.58.mlp.down_proj.bias": "model-00030-of-00050.safetensors", "transformer.h.58.mlp.up_proj.weight": "model-00030-of-00050.safetensors", "transformer.h.59.input_layernorm.weight": "model-00031-of-00050.safetensors", "transformer.h.59.post_attention_layernorm.weight": "model-00031-of-00050.safetensors", "transformer.h.59.self_attention.dense.weight": "model-00031-of-00050.safetensors", "transformer.h.59.self_attention.dense.bias": "model-00031-of-00050.safetensors", "transformer.h.59.self_attention.query.weight": "model-00031-of-00050.safetensors", "transformer.h.59.self_attention.key_value.weight": "model-00031-of-00050.safetensors", "transformer.h.59.mlp.gate_proj.weight": "model-00031-of-00050.safetensors", "transformer.h.59.mlp.down_proj.weight": "model-00031-of-00050.safetensors", "transformer.h.59.mlp.down_proj.bias": "model-00031-of-00050.safetensors", "transformer.h.59.mlp.up_proj.weight": "model-00031-of-00050.safetensors", "transformer.h.60.input_layernorm.weight": "model-00031-of-00050.safetensors", "transformer.h.60.post_attention_layernorm.weight": "model-00031-of-00050.safetensors", "transformer.h.60.self_attention.dense.weight": "model-00031-of-00050.safetensors", "transformer.h.60.self_attention.dense.bias": "model-00031-of-00050.safetensors", "transformer.h.60.self_attention.query.weight": "model-00031-of-00050.safetensors", "transformer.h.60.self_attention.key_value.weight": "model-00031-of-00050.safetensors", "transformer.h.60.mlp.gate_proj.weight": "model-00031-of-00050.safetensors", "transformer.h.60.mlp.down_proj.weight": "model-00031-of-00050.safetensors", "transformer.h.60.mlp.down_proj.bias": "model-00031-of-00050.safetensors", "transformer.h.60.mlp.up_proj.weight": "model-00031-of-00050.safetensors", "transformer.h.61.input_layernorm.weight": "model-00032-of-00050.safetensors", "transformer.h.61.post_attention_layernorm.weight": "model-00032-of-00050.safetensors", "transformer.h.61.self_attention.dense.weight": "model-00032-of-00050.safetensors", "transformer.h.61.self_attention.dense.bias": "model-00032-of-00050.safetensors", "transformer.h.61.self_attention.query.weight": "model-00032-of-00050.safetensors", "transformer.h.61.self_attention.key_value.weight": "model-00032-of-00050.safetensors", "transformer.h.61.mlp.gate_proj.weight": "model-00032-of-00050.safetensors", "transformer.h.61.mlp.down_proj.weight": "model-00032-of-00050.safetensors", "transformer.h.61.mlp.down_proj.bias": "model-00032-of-00050.safetensors", "transformer.h.61.mlp.up_proj.weight": "model-00032-of-00050.safetensors", "transformer.h.62.input_layernorm.weight": "model-00032-of-00050.safetensors", "transformer.h.62.post_attention_layernorm.weight": "model-00032-of-00050.safetensors", "transformer.h.62.self_attention.dense.weight": "model-00032-of-00050.safetensors", "transformer.h.62.self_attention.dense.bias": "model-00032-of-00050.safetensors", "transformer.h.62.self_attention.query.weight": "model-00032-of-00050.safetensors", "transformer.h.62.self_attention.key_value.weight": "model-00032-of-00050.safetensors", "transformer.h.62.mlp.gate_proj.weight": "model-00032-of-00050.safetensors", 
"transformer.h.62.mlp.down_proj.weight": "model-00032-of-00050.safetensors", "transformer.h.62.mlp.down_proj.bias": "model-00032-of-00050.safetensors", "transformer.h.62.mlp.up_proj.weight": "model-00032-of-00050.safetensors", "transformer.h.63.input_layernorm.weight": "model-00033-of-00050.safetensors", "transformer.h.63.post_attention_layernorm.weight": "model-00033-of-00050.safetensors", "transformer.h.63.self_attention.dense.weight": "model-00033-of-00050.safetensors", "transformer.h.63.self_attention.dense.bias": "model-00033-of-00050.safetensors", "transformer.h.63.self_attention.query.weight": "model-00033-of-00050.safetensors", "transformer.h.63.self_attention.key_value.weight": "model-00033-of-00050.safetensors", "transformer.h.63.mlp.gate_proj.weight": "model-00033-of-00050.safetensors", "transformer.h.63.mlp.down_proj.weight": "model-00033-of-00050.safetensors", "transformer.h.63.mlp.down_proj.bias": "model-00033-of-00050.safetensors", "transformer.h.63.mlp.up_proj.weight": "model-00033-of-00050.safetensors", "transformer.h.64.input_layernorm.weight": "model-00033-of-00050.safetensors", "transformer.h.64.post_attention_layernorm.weight": "model-00033-of-00050.safetensors", "transformer.h.64.self_attention.dense.weight": "model-00033-of-00050.safetensors", "transformer.h.64.self_attention.dense.bias": "model-00033-of-00050.safetensors", "transformer.h.64.self_attention.query.weight": "model-00033-of-00050.safetensors", "transformer.h.64.self_attention.key_value.weight": "model-00033-of-00050.safetensors", "transformer.h.64.mlp.gate_proj.weight": "model-00033-of-00050.safetensors", "transformer.h.64.mlp.down_proj.weight": "model-00033-of-00050.safetensors", "transformer.h.64.mlp.down_proj.bias": "model-00033-of-00050.safetensors", "transformer.h.64.mlp.up_proj.weight": "model-00033-of-00050.safetensors", "transformer.h.65.input_layernorm.weight": "model-00034-of-00050.safetensors", "transformer.h.65.post_attention_layernorm.weight": "model-00034-of-00050.safetensors", "transformer.h.65.self_attention.dense.weight": "model-00034-of-00050.safetensors", "transformer.h.65.self_attention.dense.bias": "model-00034-of-00050.safetensors", "transformer.h.65.self_attention.query.weight": "model-00034-of-00050.safetensors", "transformer.h.65.self_attention.key_value.weight": "model-00034-of-00050.safetensors", "transformer.h.65.mlp.gate_proj.weight": "model-00034-of-00050.safetensors", "transformer.h.65.mlp.down_proj.weight": "model-00034-of-00050.safetensors", "transformer.h.65.mlp.down_proj.bias": "model-00034-of-00050.safetensors", "transformer.h.65.mlp.up_proj.weight": "model-00034-of-00050.safetensors", "transformer.h.66.input_layernorm.weight": "model-00034-of-00050.safetensors", "transformer.h.66.post_attention_layernorm.weight": "model-00034-of-00050.safetensors", "transformer.h.66.self_attention.dense.weight": "model-00034-of-00050.safetensors", "transformer.h.66.self_attention.dense.bias": "model-00034-of-00050.safetensors", "transformer.h.66.self_attention.query.weight": "model-00034-of-00050.safetensors", "transformer.h.66.self_attention.key_value.weight": "model-00034-of-00050.safetensors", "transformer.h.66.mlp.gate_proj.weight": "model-00034-of-00050.safetensors", "transformer.h.66.mlp.down_proj.weight": "model-00034-of-00050.safetensors", "transformer.h.66.mlp.down_proj.bias": "model-00034-of-00050.safetensors", "transformer.h.66.mlp.up_proj.weight": "model-00034-of-00050.safetensors", "transformer.h.67.input_layernorm.weight": "model-00035-of-00050.safetensors", 
"transformer.h.67.post_attention_layernorm.weight": "model-00035-of-00050.safetensors", "transformer.h.67.self_attention.dense.weight": "model-00035-of-00050.safetensors", "transformer.h.67.self_attention.dense.bias": "model-00035-of-00050.safetensors", "transformer.h.67.self_attention.query.weight": "model-00035-of-00050.safetensors", "transformer.h.67.self_attention.key_value.weight": "model-00035-of-00050.safetensors", "transformer.h.67.mlp.gate_proj.weight": "model-00035-of-00050.safetensors", "transformer.h.67.mlp.down_proj.weight": "model-00035-of-00050.safetensors", "transformer.h.67.mlp.down_proj.bias": "model-00035-of-00050.safetensors", "transformer.h.67.mlp.up_proj.weight": "model-00035-of-00050.safetensors", "transformer.h.68.input_layernorm.weight": "model-00035-of-00050.safetensors", "transformer.h.68.post_attention_layernorm.weight": "model-00035-of-00050.safetensors", "transformer.h.68.self_attention.dense.weight": "model-00035-of-00050.safetensors", "transformer.h.68.self_attention.dense.bias": "model-00035-of-00050.safetensors", "transformer.h.68.self_attention.query.weight": "model-00035-of-00050.safetensors", "transformer.h.68.self_attention.key_value.weight": "model-00035-of-00050.safetensors", "transformer.h.68.mlp.gate_proj.weight": "model-00035-of-00050.safetensors", "transformer.h.68.mlp.down_proj.weight": "model-00035-of-00050.safetensors", "transformer.h.68.mlp.down_proj.bias": "model-00035-of-00050.safetensors", "transformer.h.68.mlp.up_proj.weight": "model-00035-of-00050.safetensors", "transformer.h.69.input_layernorm.weight": "model-00036-of-00050.safetensors", "transformer.h.69.post_attention_layernorm.weight": "model-00036-of-00050.safetensors", "transformer.h.69.self_attention.dense.weight": "model-00036-of-00050.safetensors", "transformer.h.69.self_attention.dense.bias": "model-00036-of-00050.safetensors", "transformer.h.69.self_attention.query.weight": "model-00036-of-00050.safetensors", "transformer.h.69.self_attention.key_value.weight": "model-00036-of-00050.safetensors", "transformer.h.69.mlp.gate_proj.weight": "model-00036-of-00050.safetensors", "transformer.h.69.mlp.down_proj.weight": "model-00036-of-00050.safetensors", "transformer.h.69.mlp.down_proj.bias": "model-00036-of-00050.safetensors", "transformer.h.69.mlp.up_proj.weight": "model-00036-of-00050.safetensors", "transformer.h.70.input_layernorm.weight": "model-00036-of-00050.safetensors", "transformer.h.70.post_attention_layernorm.weight": "model-00036-of-00050.safetensors", "transformer.h.70.self_attention.dense.weight": "model-00036-of-00050.safetensors", "transformer.h.70.self_attention.dense.bias": "model-00036-of-00050.safetensors", "transformer.h.70.self_attention.query.weight": "model-00036-of-00050.safetensors", "transformer.h.70.self_attention.key_value.weight": "model-00036-of-00050.safetensors", "transformer.h.70.mlp.gate_proj.weight": "model-00036-of-00050.safetensors", "transformer.h.70.mlp.down_proj.weight": "model-00036-of-00050.safetensors", "transformer.h.70.mlp.down_proj.bias": "model-00036-of-00050.safetensors", "transformer.h.70.mlp.up_proj.weight": "model-00036-of-00050.safetensors", "transformer.h.71.input_layernorm.weight": "model-00037-of-00050.safetensors", "transformer.h.71.post_attention_layernorm.weight": "model-00037-of-00050.safetensors", "transformer.h.71.self_attention.dense.weight": "model-00037-of-00050.safetensors", "transformer.h.71.self_attention.dense.bias": "model-00037-of-00050.safetensors", "transformer.h.71.self_attention.query.weight": 
"model-00037-of-00050.safetensors", "transformer.h.71.self_attention.key_value.weight": "model-00037-of-00050.safetensors", "transformer.h.71.mlp.gate_proj.weight": "model-00037-of-00050.safetensors", "transformer.h.71.mlp.down_proj.weight": "model-00037-of-00050.safetensors", "transformer.h.71.mlp.down_proj.bias": "model-00037-of-00050.safetensors", "transformer.h.71.mlp.up_proj.weight": "model-00037-of-00050.safetensors", "transformer.h.72.input_layernorm.weight": "model-00037-of-00050.safetensors", "transformer.h.72.post_attention_layernorm.weight": "model-00037-of-00050.safetensors", "transformer.h.72.self_attention.dense.weight": "model-00037-of-00050.safetensors", "transformer.h.72.self_attention.dense.bias": "model-00037-of-00050.safetensors", "transformer.h.72.self_attention.query.weight": "model-00037-of-00050.safetensors", "transformer.h.72.self_attention.key_value.weight": "model-00037-of-00050.safetensors", "transformer.h.72.mlp.gate_proj.weight": "model-00037-of-00050.safetensors", "transformer.h.72.mlp.down_proj.weight": "model-00037-of-00050.safetensors", "transformer.h.72.mlp.down_proj.bias": "model-00037-of-00050.safetensors", "transformer.h.72.mlp.up_proj.weight": "model-00037-of-00050.safetensors", "transformer.h.73.input_layernorm.weight": "model-00038-of-00050.safetensors", "transformer.h.73.post_attention_layernorm.weight": "model-00038-of-00050.safetensors", "transformer.h.73.self_attention.dense.weight": "model-00038-of-00050.safetensors", "transformer.h.73.self_attention.dense.bias": "model-00038-of-00050.safetensors", "transformer.h.73.self_attention.query.weight": "model-00038-of-00050.safetensors", "transformer.h.73.self_attention.key_value.weight": "model-00038-of-00050.safetensors", "transformer.h.73.mlp.gate_proj.weight": "model-00038-of-00050.safetensors", "transformer.h.73.mlp.down_proj.weight": "model-00038-of-00050.safetensors", "transformer.h.73.mlp.down_proj.bias": "model-00038-of-00050.safetensors", "transformer.h.73.mlp.up_proj.weight": "model-00038-of-00050.safetensors", "transformer.h.74.input_layernorm.weight": "model-00038-of-00050.safetensors", "transformer.h.74.post_attention_layernorm.weight": "model-00038-of-00050.safetensors", "transformer.h.74.self_attention.dense.weight": "model-00038-of-00050.safetensors", "transformer.h.74.self_attention.dense.bias": "model-00038-of-00050.safetensors", "transformer.h.74.self_attention.query.weight": "model-00038-of-00050.safetensors", "transformer.h.74.self_attention.key_value.weight": "model-00038-of-00050.safetensors", "transformer.h.74.mlp.gate_proj.weight": "model-00038-of-00050.safetensors", "transformer.h.74.mlp.down_proj.weight": "model-00038-of-00050.safetensors", "transformer.h.74.mlp.down_proj.bias": "model-00038-of-00050.safetensors", "transformer.h.74.mlp.up_proj.weight": "model-00038-of-00050.safetensors", "transformer.h.75.input_layernorm.weight": "model-00039-of-00050.safetensors", "transformer.h.75.post_attention_layernorm.weight": "model-00039-of-00050.safetensors", "transformer.h.75.self_attention.dense.weight": "model-00039-of-00050.safetensors", "transformer.h.75.self_attention.dense.bias": "model-00039-of-00050.safetensors", "transformer.h.75.self_attention.query.weight": "model-00039-of-00050.safetensors", "transformer.h.75.self_attention.key_value.weight": "model-00039-of-00050.safetensors", "transformer.h.75.mlp.gate_proj.weight": "model-00039-of-00050.safetensors", "transformer.h.75.mlp.down_proj.weight": "model-00039-of-00050.safetensors", "transformer.h.75.mlp.down_proj.bias": 
"model-00039-of-00050.safetensors", "transformer.h.75.mlp.up_proj.weight": "model-00039-of-00050.safetensors", "transformer.h.76.input_layernorm.weight": "model-00039-of-00050.safetensors", "transformer.h.76.post_attention_layernorm.weight": "model-00039-of-00050.safetensors", "transformer.h.76.self_attention.dense.weight": "model-00039-of-00050.safetensors", "transformer.h.76.self_attention.dense.bias": "model-00039-of-00050.safetensors", "transformer.h.76.self_attention.query.weight": "model-00039-of-00050.safetensors", "transformer.h.76.self_attention.key_value.weight": "model-00039-of-00050.safetensors", "transformer.h.76.mlp.gate_proj.weight": "model-00039-of-00050.safetensors", "transformer.h.76.mlp.down_proj.weight": "model-00039-of-00050.safetensors", "transformer.h.76.mlp.down_proj.bias": "model-00039-of-00050.safetensors", "transformer.h.76.mlp.up_proj.weight": "model-00039-of-00050.safetensors", "transformer.h.77.input_layernorm.weight": "model-00040-of-00050.safetensors", "transformer.h.77.post_attention_layernorm.weight": "model-00040-of-00050.safetensors", "transformer.h.77.self_attention.dense.weight": "model-00040-of-00050.safetensors", "transformer.h.77.self_attention.dense.bias": "model-00040-of-00050.safetensors", "transformer.h.77.self_attention.query.weight": "model-00040-of-00050.safetensors", "transformer.h.77.self_attention.key_value.weight": "model-00040-of-00050.safetensors", "transformer.h.77.mlp.gate_proj.weight": "model-00040-of-00050.safetensors", "transformer.h.77.mlp.down_proj.weight": "model-00040-of-00050.safetensors", "transformer.h.77.mlp.down_proj.bias": "model-00040-of-00050.safetensors", "transformer.h.77.mlp.up_proj.weight": "model-00040-of-00050.safetensors", "transformer.h.78.input_layernorm.weight": "model-00040-of-00050.safetensors", "transformer.h.78.post_attention_layernorm.weight": "model-00040-of-00050.safetensors", "transformer.h.78.self_attention.dense.weight": "model-00040-of-00050.safetensors", "transformer.h.78.self_attention.dense.bias": "model-00040-of-00050.safetensors", "transformer.h.78.self_attention.query.weight": "model-00040-of-00050.safetensors", "transformer.h.78.self_attention.key_value.weight": "model-00040-of-00050.safetensors", "transformer.h.78.mlp.gate_proj.weight": "model-00040-of-00050.safetensors", "transformer.h.78.mlp.down_proj.weight": "model-00040-of-00050.safetensors", "transformer.h.78.mlp.down_proj.bias": "model-00040-of-00050.safetensors", "transformer.h.78.mlp.up_proj.weight": "model-00040-of-00050.safetensors", "transformer.h.79.input_layernorm.weight": "model-00041-of-00050.safetensors", "transformer.h.79.post_attention_layernorm.weight": "model-00041-of-00050.safetensors", "transformer.h.79.self_attention.dense.weight": "model-00041-of-00050.safetensors", "transformer.h.79.self_attention.dense.bias": "model-00041-of-00050.safetensors", "transformer.h.79.self_attention.query.weight": "model-00041-of-00050.safetensors", "transformer.h.79.self_attention.key_value.weight": "model-00041-of-00050.safetensors", "transformer.h.79.mlp.gate_proj.weight": "model-00041-of-00050.safetensors", "transformer.h.79.mlp.down_proj.weight": "model-00041-of-00050.safetensors", "transformer.h.79.mlp.down_proj.bias": "model-00041-of-00050.safetensors", "transformer.h.79.mlp.up_proj.weight": "model-00041-of-00050.safetensors", "transformer.h.80.input_layernorm.weight": "model-00041-of-00050.safetensors", "transformer.h.80.post_attention_layernorm.weight": "model-00041-of-00050.safetensors", 
"transformer.h.80.self_attention.dense.weight": "model-00041-of-00050.safetensors", "transformer.h.80.self_attention.dense.bias": "model-00041-of-00050.safetensors", "transformer.h.80.self_attention.query.weight": "model-00041-of-00050.safetensors", "transformer.h.80.self_attention.key_value.weight": "model-00041-of-00050.safetensors", "transformer.h.80.mlp.gate_proj.weight": "model-00041-of-00050.safetensors", "transformer.h.80.mlp.down_proj.weight": "model-00041-of-00050.safetensors", "transformer.h.80.mlp.down_proj.bias": "model-00041-of-00050.safetensors", "transformer.h.80.mlp.up_proj.weight": "model-00041-of-00050.safetensors", "transformer.h.81.input_layernorm.weight": "model-00042-of-00050.safetensors", "transformer.h.81.post_attention_layernorm.weight": "model-00042-of-00050.safetensors", "transformer.h.81.self_attention.dense.weight": "model-00042-of-00050.safetensors", "transformer.h.81.self_attention.dense.bias": "model-00042-of-00050.safetensors", "transformer.h.81.self_attention.query.weight": "model-00042-of-00050.safetensors", "transformer.h.81.self_attention.key_value.weight": "model-00042-of-00050.safetensors", "transformer.h.81.mlp.gate_proj.weight": "model-00042-of-00050.safetensors", "transformer.h.81.mlp.down_proj.weight": "model-00042-of-00050.safetensors", "transformer.h.81.mlp.down_proj.bias": "model-00042-of-00050.safetensors", "transformer.h.81.mlp.up_proj.weight": "model-00042-of-00050.safetensors", "transformer.h.82.input_layernorm.weight": "model-00042-of-00050.safetensors", "transformer.h.82.post_attention_layernorm.weight": "model-00042-of-00050.safetensors", "transformer.h.82.self_attention.dense.weight": "model-00042-of-00050.safetensors", "transformer.h.82.self_attention.dense.bias": "model-00042-of-00050.safetensors", "transformer.h.82.self_attention.query.weight": "model-00042-of-00050.safetensors", "transformer.h.82.self_attention.key_value.weight": "model-00042-of-00050.safetensors", "transformer.h.82.mlp.gate_proj.weight": "model-00042-of-00050.safetensors", "transformer.h.82.mlp.down_proj.weight": "model-00042-of-00050.safetensors", "transformer.h.82.mlp.down_proj.bias": "model-00042-of-00050.safetensors", "transformer.h.82.mlp.up_proj.weight": "model-00042-of-00050.safetensors", "transformer.h.83.input_layernorm.weight": "model-00043-of-00050.safetensors", "transformer.h.83.post_attention_layernorm.weight": "model-00043-of-00050.safetensors", "transformer.h.83.self_attention.dense.weight": "model-00043-of-00050.safetensors", "transformer.h.83.self_attention.dense.bias": "model-00043-of-00050.safetensors", "transformer.h.83.self_attention.query.weight": "model-00043-of-00050.safetensors", "transformer.h.83.self_attention.key_value.weight": "model-00043-of-00050.safetensors", "transformer.h.83.mlp.gate_proj.weight": "model-00043-of-00050.safetensors", "transformer.h.83.mlp.down_proj.weight": "model-00043-of-00050.safetensors", "transformer.h.83.mlp.down_proj.bias": "model-00043-of-00050.safetensors", "transformer.h.83.mlp.up_proj.weight": "model-00043-of-00050.safetensors", "transformer.h.84.input_layernorm.weight": "model-00043-of-00050.safetensors", "transformer.h.84.post_attention_layernorm.weight": "model-00043-of-00050.safetensors", "transformer.h.84.self_attention.dense.weight": "model-00043-of-00050.safetensors", "transformer.h.84.self_attention.dense.bias": "model-00043-of-00050.safetensors", "transformer.h.84.self_attention.query.weight": "model-00043-of-00050.safetensors", "transformer.h.84.self_attention.key_value.weight": 
"model-00043-of-00050.safetensors", "transformer.h.84.mlp.gate_proj.weight": "model-00043-of-00050.safetensors", "transformer.h.84.mlp.down_proj.weight": "model-00043-of-00050.safetensors", "transformer.h.84.mlp.down_proj.bias": "model-00043-of-00050.safetensors", "transformer.h.84.mlp.up_proj.weight": "model-00043-of-00050.safetensors", "transformer.h.85.input_layernorm.weight": "model-00044-of-00050.safetensors", "transformer.h.85.post_attention_layernorm.weight": "model-00044-of-00050.safetensors", "transformer.h.85.self_attention.dense.weight": "model-00044-of-00050.safetensors", "transformer.h.85.self_attention.dense.bias": "model-00044-of-00050.safetensors", "transformer.h.85.self_attention.query.weight": "model-00044-of-00050.safetensors", "transformer.h.85.self_attention.key_value.weight": "model-00044-of-00050.safetensors", "transformer.h.85.mlp.gate_proj.weight": "model-00044-of-00050.safetensors", "transformer.h.85.mlp.down_proj.weight": "model-00044-of-00050.safetensors", "transformer.h.85.mlp.down_proj.bias": "model-00044-of-00050.safetensors", "transformer.h.85.mlp.up_proj.weight": "model-00044-of-00050.safetensors", "transformer.h.86.input_layernorm.weight": "model-00044-of-00050.safetensors", "transformer.h.86.post_attention_layernorm.weight": "model-00044-of-00050.safetensors", "transformer.h.86.self_attention.dense.weight": "model-00044-of-00050.safetensors", "transformer.h.86.self_attention.dense.bias": "model-00044-of-00050.safetensors", "transformer.h.86.self_attention.query.weight": "model-00044-of-00050.safetensors", "transformer.h.86.self_attention.key_value.weight": "model-00044-of-00050.safetensors", "transformer.h.86.mlp.gate_proj.weight": "model-00044-of-00050.safetensors", "transformer.h.86.mlp.down_proj.weight": "model-00044-of-00050.safetensors", "transformer.h.86.mlp.down_proj.bias": "model-00044-of-00050.safetensors", "transformer.h.86.mlp.up_proj.weight": "model-00044-of-00050.safetensors", "transformer.h.87.input_layernorm.weight": "model-00045-of-00050.safetensors", "transformer.h.87.post_attention_layernorm.weight": "model-00045-of-00050.safetensors", "transformer.h.87.self_attention.dense.weight": "model-00045-of-00050.safetensors", "transformer.h.87.self_attention.dense.bias": "model-00045-of-00050.safetensors", "transformer.h.87.self_attention.query.weight": "model-00045-of-00050.safetensors", "transformer.h.87.self_attention.key_value.weight": "model-00045-of-00050.safetensors", "transformer.h.87.mlp.gate_proj.weight": "model-00045-of-00050.safetensors", "transformer.h.87.mlp.down_proj.weight": "model-00045-of-00050.safetensors", "transformer.h.87.mlp.down_proj.bias": "model-00045-of-00050.safetensors", "transformer.h.87.mlp.up_proj.weight": "model-00045-of-00050.safetensors", "transformer.h.88.input_layernorm.weight": "model-00045-of-00050.safetensors", "transformer.h.88.post_attention_layernorm.weight": "model-00045-of-00050.safetensors", "transformer.h.88.self_attention.dense.weight": "model-00045-of-00050.safetensors", "transformer.h.88.self_attention.dense.bias": "model-00045-of-00050.safetensors", "transformer.h.88.self_attention.query.weight": "model-00045-of-00050.safetensors", "transformer.h.88.self_attention.key_value.weight": "model-00045-of-00050.safetensors", "transformer.h.88.mlp.gate_proj.weight": "model-00045-of-00050.safetensors", "transformer.h.88.mlp.down_proj.weight": "model-00045-of-00050.safetensors", "transformer.h.88.mlp.down_proj.bias": "model-00045-of-00050.safetensors", "transformer.h.88.mlp.up_proj.weight": 
"model-00045-of-00050.safetensors", "transformer.h.89.input_layernorm.weight": "model-00046-of-00050.safetensors", "transformer.h.89.post_attention_layernorm.weight": "model-00046-of-00050.safetensors", "transformer.h.89.self_attention.dense.weight": "model-00046-of-00050.safetensors", "transformer.h.89.self_attention.dense.bias": "model-00046-of-00050.safetensors", "transformer.h.89.self_attention.query.weight": "model-00046-of-00050.safetensors", "transformer.h.89.self_attention.key_value.weight": "model-00046-of-00050.safetensors", "transformer.h.89.mlp.gate_proj.weight": "model-00046-of-00050.safetensors", "transformer.h.89.mlp.down_proj.weight": "model-00046-of-00050.safetensors", "transformer.h.89.mlp.down_proj.bias": "model-00046-of-00050.safetensors", "transformer.h.89.mlp.up_proj.weight": "model-00046-of-00050.safetensors", "transformer.h.90.input_layernorm.weight": "model-00046-of-00050.safetensors", "transformer.h.90.post_attention_layernorm.weight": "model-00046-of-00050.safetensors", "transformer.h.90.self_attention.dense.weight": "model-00046-of-00050.safetensors", "transformer.h.90.self_attention.dense.bias": "model-00046-of-00050.safetensors", "transformer.h.90.self_attention.query.weight": "model-00046-of-00050.safetensors", "transformer.h.90.self_attention.key_value.weight": "model-00046-of-00050.safetensors", "transformer.h.90.mlp.gate_proj.weight": "model-00046-of-00050.safetensors", "transformer.h.90.mlp.down_proj.weight": "model-00046-of-00050.safetensors", "transformer.h.90.mlp.down_proj.bias": "model-00046-of-00050.safetensors", "transformer.h.90.mlp.up_proj.weight": "model-00046-of-00050.safetensors", "transformer.h.91.input_layernorm.weight": "model-00047-of-00050.safetensors", "transformer.h.91.post_attention_layernorm.weight": "model-00047-of-00050.safetensors", "transformer.h.91.self_attention.dense.weight": "model-00047-of-00050.safetensors", "transformer.h.91.self_attention.dense.bias": "model-00047-of-00050.safetensors", "transformer.h.91.self_attention.query.weight": "model-00047-of-00050.safetensors", "transformer.h.91.self_attention.key_value.weight": "model-00047-of-00050.safetensors", "transformer.h.91.mlp.gate_proj.weight": "model-00047-of-00050.safetensors", "transformer.h.91.mlp.down_proj.weight": "model-00047-of-00050.safetensors", "transformer.h.91.mlp.down_proj.bias": "model-00047-of-00050.safetensors", "transformer.h.91.mlp.up_proj.weight": "model-00047-of-00050.safetensors", "transformer.h.92.input_layernorm.weight": "model-00047-of-00050.safetensors", "transformer.h.92.post_attention_layernorm.weight": "model-00047-of-00050.safetensors", "transformer.h.92.self_attention.dense.weight": "model-00047-of-00050.safetensors", "transformer.h.92.self_attention.dense.bias": "model-00047-of-00050.safetensors", "transformer.h.92.self_attention.query.weight": "model-00047-of-00050.safetensors", "transformer.h.92.self_attention.key_value.weight": "model-00047-of-00050.safetensors", "transformer.h.92.mlp.gate_proj.weight": "model-00047-of-00050.safetensors", "transformer.h.92.mlp.down_proj.weight": "model-00047-of-00050.safetensors", "transformer.h.92.mlp.down_proj.bias": "model-00047-of-00050.safetensors", "transformer.h.92.mlp.up_proj.weight": "model-00047-of-00050.safetensors", "transformer.h.93.input_layernorm.weight": "model-00048-of-00050.safetensors", "transformer.h.93.post_attention_layernorm.weight": "model-00048-of-00050.safetensors", "transformer.h.93.self_attention.dense.weight": "model-00048-of-00050.safetensors", 
"transformer.h.93.self_attention.dense.bias": "model-00048-of-00050.safetensors", "transformer.h.93.self_attention.query.weight": "model-00048-of-00050.safetensors", "transformer.h.93.self_attention.key_value.weight": "model-00048-of-00050.safetensors", "transformer.h.93.mlp.gate_proj.weight": "model-00048-of-00050.safetensors", "transformer.h.93.mlp.down_proj.weight": "model-00048-of-00050.safetensors", "transformer.h.93.mlp.down_proj.bias": "model-00048-of-00050.safetensors", "transformer.h.93.mlp.up_proj.weight": "model-00048-of-00050.safetensors", "transformer.h.94.input_layernorm.weight": "model-00048-of-00050.safetensors", "transformer.h.94.post_attention_layernorm.weight": "model-00048-of-00050.safetensors", "transformer.h.94.self_attention.dense.weight": "model-00048-of-00050.safetensors", "transformer.h.94.self_attention.dense.bias": "model-00048-of-00050.safetensors", "transformer.h.94.self_attention.query.weight": "model-00048-of-00050.safetensors", "transformer.h.94.self_attention.key_value.weight": "model-00048-of-00050.safetensors", "transformer.h.94.mlp.gate_proj.weight": "model-00048-of-00050.safetensors", "transformer.h.94.mlp.down_proj.weight": "model-00048-of-00050.safetensors", "transformer.h.94.mlp.down_proj.bias": "model-00048-of-00050.safetensors", "transformer.h.94.mlp.up_proj.weight": "model-00048-of-00050.safetensors", "transformer.h.95.input_layernorm.weight": "model-00049-of-00050.safetensors", "transformer.h.95.post_attention_layernorm.weight": "model-00049-of-00050.safetensors", "transformer.h.95.self_attention.dense.weight": "model-00049-of-00050.safetensors", "transformer.h.95.self_attention.dense.bias": "model-00049-of-00050.safetensors", "transformer.h.95.self_attention.query.weight": "model-00049-of-00050.safetensors", "transformer.h.95.self_attention.key_value.weight": "model-00049-of-00050.safetensors", "transformer.h.95.mlp.gate_proj.weight": "model-00049-of-00050.safetensors", "transformer.h.95.mlp.down_proj.weight": "model-00049-of-00050.safetensors", "transformer.h.95.mlp.down_proj.bias": "model-00049-of-00050.safetensors", "transformer.h.95.mlp.up_proj.weight": "model-00049-of-00050.safetensors", "transformer.ln_f.weight": "model-00050-of-00050.safetensors", "lm_head.weight": "model-00050-of-00050.safetensors"}} \ No newline at end of file diff --git a/modeling_telechat.py b/modeling_telechat.py deleted file mode 100644 index a7e1c8890288de2ea68b5fb6d3f32248d34a39aa..0000000000000000000000000000000000000000 --- a/modeling_telechat.py +++ /dev/null @@ -1,939 +0,0 @@ -# coding=utf-8 -# Copyright 2022 HuggingFace Inc. team and BigScience workshop. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. - -# Copyright (c) 2021 EleutherAI -# This file is based on code by the authors denoted below and has been modified from its original version. -# -# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -"""PyTorch TELECHAT model.""" - -import warnings -from typing import Optional, Tuple, Union, List, Dict -from threading import Thread - -import torch -import math -import copy -from torch import nn -import torch.utils.checkpoint -from torch.nn import functional as F -from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss -from transformers.modeling_outputs import ( - BaseModelOutputWithPastAndCrossAttentions, - CausalLMOutputWithCrossAttentions -) -from transformers.modeling_utils import PreTrainedModel -from transformers.utils import logging -from transformers import GenerationConfig - -from .configuration_telechat import TelechatConfig -from .generation_utils import History, TelechatIterTextStreamer - -logger = logging.get_logger(__name__) - -_CHECKPOINT_FOR_DOC = "telechat" -_CONFIG_FOR_DOC = "TelechatConfig" - -TELECHAT_PRETRAINED_MODEL_ARCHIVE_LIST = [] - -try: - from einops import rearrange -except ImportError: - rearrange = None - -use_flash_attn = True -try: - from flash_attn.flash_attn_interface import flash_attn_unpadded_func -except ImportError: - try: - from flash_attn.flash_attn_interface import flash_attn_varlen_func as flash_attn_unpadded_func - except ImportError: - flash_attn_unpadded_func = None - - -class RotaryEmbedding(torch.nn.Module): - # Extracted from: https://github.com/EleutherAI/gpt-neox - def __init__(self, dim, config, base=10000, precision=torch.half): - super().__init__() - self.config = config - self.dim = dim - self.base = base - self.inv_freq = 1. 
/ (base ** (torch.arange(0, dim, 2).float().half() / dim)).cuda() - self.max_seq_len_cached = None - self.cos_cached = None - self.sin_cached = None - self.precision = precision - - def get_mscale(self, scale=1): - if scale <= 1: - return 1.0 - return 0.1 * math.log(scale) + 1.0 - - def get_ntk_alpha(self, true_seq_len): - context_value = math.log(true_seq_len / self.config.base_seqlen, 2) + 1 - # ntk_alpha = 2 ** context_value - 1 - ntk_alpha = 2 ** math.ceil(context_value) - 1 - ntk_alpha = max(ntk_alpha, 1) - return ntk_alpha - - def forward(self, x, seq_dim=0, seq_len=None): - if seq_len is None: - seq_len = x.shape[seq_dim] - seq_len = max(seq_len, self.config.training_seqlen) - ntk_alpha = self.get_ntk_alpha(seq_len) - self.mscale = float(self.get_mscale(seq_len / self.config.training_seqlen)) - if True: - base = self.base * ntk_alpha ** (self.dim / (self.dim - 2)) - self.inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2, device=x.device).float() / self.dim)) - self.max_seq_len_cached = seq_len - t = torch.arange(self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype) - freqs = torch.einsum('i,j->ij', t, self.inv_freq) - # Different from paper, but it uses a different permutation in order to obtain the same calculation - emb = torch.cat((freqs, freqs), dim=-1).to(x.device) - if self.precision == torch.bfloat16: - emb = emb.float() - # [sx, 1 (b * np), hn] - self.cos_cached = self.mscale * emb.cos()[:, None, :].half() - self.sin_cached = self.mscale * emb.sin()[:, None, :].half() - if self.precision == torch.bfloat16: - self.cos_cached = self.cos_cached.bfloat16() - self.sin_cached = self.sin_cached.bfloat16() - return self.cos_cached[:seq_len, ...], self.sin_cached[:seq_len, ...] - - -# rotary pos emb helpers: -def rotate_half(x): - x1, x2 = x[..., :x.shape[-1] // 2], x[..., x.shape[-1] // 2:] - return torch.cat((-x2, x1), dim=x1.ndim - 1) # dim=-1 triggers a bug in earlier torch versions - - -def apply_rotary_pos_emb_torch(q, k, cos, sin, offset: int = 0): # jitting fails with bf16 - cos, sin = cos[offset:q.shape[0] + offset, ...], sin[offset:q.shape[0] + offset, ...] - return (q * cos) + (rotate_half(q) * sin), (k * cos) + (rotate_half(k) * sin) - - -class MixedFusedRMSNorm(nn.Module): - # Extracted from https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py - def __init__(self, hidden_size, eps=1e-6): - super().__init__() - self.weight = nn.Parameter(torch.ones(hidden_size)) - self.variance_epsilon = eps - - def forward(self, hidden_states): - input_dtype = hidden_states.dtype - hidden_states = hidden_states.to(torch.float32) - variance = hidden_states.pow(2).mean(-1, keepdim=True) - hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) - return self.weight * hidden_states.to(input_dtype) - - -class FlashSelfAttention(torch.nn.Module): - # Extracted from https://github.com/microsoft/Megatron-DeepSpeed/blob/main/megatron/model/transformer.py - """Implement the scaled dot product attention with softmax. - Arguments - --------- - softmax_scale: The temperature to use for the softmax attention. 
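The `RotaryEmbedding` above is NTK-aware: once the incoming sequence exceeds `training_seqlen`, `get_ntk_alpha` enlarges the rotary base and `get_mscale` rescales the cached cos/sin tables. A small standalone sketch of how those two factors grow with sequence length, assuming a base/training sequence length of 8192 tokens (illustrative, not part of the module):

```python
# Illustrative sketch of the NTK-aware factors computed by RotaryEmbedding
# (assumes base_seqlen = training_seqlen = 8192; values are examples only).
import math

def get_ntk_alpha(true_seq_len: int, base_seqlen: int = 8192) -> float:
    context_value = math.log(true_seq_len / base_seqlen, 2) + 1
    return max(2 ** math.ceil(context_value) - 1, 1)

def get_mscale(scale: float) -> float:
    return 1.0 if scale <= 1 else 0.1 * math.log(scale) + 1.0

for seq_len in (8192, 16384, 32768):
    alpha = get_ntk_alpha(seq_len)
    # the rotary base grows as base * alpha ** (dim / (dim - 2)) before inv_freq is rebuilt
    print(seq_len, alpha, round(get_mscale(seq_len / 8192), 3))
# 8192 -> alpha 1, mscale 1.0 | 16384 -> alpha 3, mscale 1.069 | 32768 -> alpha 7, mscale 1.139
```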
- (default: 1/sqrt(d_keys) where d_keys is computed at - runtime) - attention_dropout: The dropout rate to apply to the attention - (default: 0.0) - """ - - def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0, - device=None, dtype=None): - super().__init__() - assert flash_attn_unpadded_func is not None, ('Please install FlashAttention first, ' - 'e.g., with pip install flash-attn') - assert rearrange is not None, 'Please install einops first, e.g., with pip install einops' - self.causal = causal - self.softmax_scale = softmax_scale - self.dropout_p = attention_dropout - - def forward(self, q, k, v): - """Implements the multihead softmax attention. - Arguments - --------- - q, k, v: The tensor containing the query, key, and value. (B, S, H, D) - """ - assert all((i.dtype in [torch.float16, torch.bfloat16] for i in (q, k, v))) - assert all((i.is_cuda for i in (q, k, v))) - - batch_size, seqlen_q = q.shape[0], q.shape[1] - seqlen_k = k.shape[1] - - q, k, v = [rearrange(x, 'b s ... -> (b s) ...') for x in [q, k, v]] - cu_seqlens_q = torch.arange(0, (batch_size + 1) * seqlen_q, step=seqlen_q, dtype=torch.int32, - device=q.device) - # self.training = False - if self.training: - # during training q,k,v always have same seqlen - assert seqlen_k == seqlen_q - - is_causal = self.causal - cu_seqlens_k = cu_seqlens_q - dropout_p = self.dropout_p - else: - # turn off FA causal mask after first inference autoregressive iteration - # only on first autoregressive step q,k,v have same seqlen - is_causal = seqlen_q == seqlen_k - cu_seqlens_k = torch.arange(0, (batch_size + 1) * seqlen_k, step=seqlen_k, dtype=torch.int32, - device=q.device) - dropout_p = 0 - - output = flash_attn_unpadded_func( - q, k, v, cu_seqlens_q, cu_seqlens_k, seqlen_q, seqlen_k, - dropout_p=dropout_p, - softmax_scale=self.softmax_scale, causal=is_causal - ) - - output = rearrange(output, '(b s) ... -> b s ...', b=batch_size) - return output - - -def _make_causal_mask( - input_ids_shape: torch.Size, device: torch.device, past_key_values_length: int -) -> torch.BoolTensor: - """ - Make causal mask used for self-attention. - """ - batch_size, target_length = input_ids_shape - mask = torch.empty((target_length, target_length + past_key_values_length), dtype=torch.bool, device=device) - # ONNX doesn't support `torch.Tensor.triu` properly, thus we use this workaround - seq_ids = torch.arange(target_length, device=device) - mask[:, past_key_values_length:] = seq_ids[:, None] < seq_ids[None, :] - - if past_key_values_length > 0: - mask[:, :past_key_values_length] = False - - expanded_mask = mask[None, None, :, :].expand(batch_size, 1, target_length, target_length + past_key_values_length) - return expanded_mask - - -def _expand_mask(mask: torch.Tensor, tgt_length: int) -> torch.BoolTensor: - """ - Expands attention_mask from `[batch_size, src_length]` to `[batch_size, 1, tgt_length, src_length]`. 
- """ - batch_size, src_length = mask.shape - tgt_length = tgt_length if tgt_length is not None else src_length - - expanded_mask = ~(mask[:, None, None, :].to(torch.bool)) - return expanded_mask.expand(batch_size, 1, tgt_length, src_length) - - -def dropout_add(x: torch.Tensor, residual: torch.Tensor, prob: float, training: bool) -> torch.Tensor: - """ - Dropout add function - - Args: - x (`torch.tensor`, *required*): - input tensor - residual (`torch.tensor`, *required*): - residual tensor - prob (`float`, *required*): - dropout probability - training (`bool`, *required*): - training mode - """ - out = F.dropout(x, p=prob, training=training) - out = residual + out - return out - - -def telechat_gelu_forward(x: torch.Tensor) -> torch.Tensor: - """ - Custom bias GELU function. Adapted from Megatron-DeepSpeed code. Here we use a simple implementation (inference) to - make the model jitable. - - Args: - x (`torch.tensor`, *required*): - input hidden states - """ - return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))) - - -def telechat_gelu_back(g: torch.Tensor, x: torch.Tensor) -> torch.Tensor: - """ - gradient of tanh approximation of gelu gradient of actual gelu is: 0.5 * (1. + torch.erf(x * 0.70710678)) + - 0.3989423 * x * torch.exp(-0.5 * x * x) - - Args: - g (`torch.tensor`, *required*): - gradient output tensor - x (`torch.tensor`, *required*): - input tensor - """ - x = x[0] # x is a tuple of 1 element, needs to unpack it first - tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)) - # sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243 - ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (1 + tanh_out) - return ff * g - - -class GeLUFunction(torch.autograd.Function): - @staticmethod - def forward(ctx, input: torch.Tensor) -> torch.Tensor: - ctx.save_for_backward(input) - return telechat_gelu_forward(input) - - @staticmethod - def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor: - input = ctx.saved_tensors - tmp = telechat_gelu_back(grad_output, input) - return tmp - - -class TelechatGelu(nn.Module): - """ - TelechatBiasGelu wrapper function that make use of the simple function on inference mode to make the model - torchscriptable and use the autograd function in training mode to get the accurate results of the gradients Partly - copied from Megatron-DeepSpeed code and adapted for our needs - - See here why autograd functions are not torchscriptable: https://github.com/pytorch/pytorch/issues/22329 - """ - - def __init__(self): - super().__init__() - - def forward(self, x: torch.Tensor) -> torch.Tensor: - if self.training: - return GeLUFunction.apply(x) - else: - return telechat_gelu_forward(x) - - -class TelechatAttention(nn.Module): - def __init__(self, config: TelechatConfig, layer_idx): - super().__init__() - self.kv_cache = None - self.layer_idx = layer_idx - - self.hidden_size = config.hidden_size - self.num_heads = config.n_head - self.head_dim = self.hidden_size // self.num_heads - self.split_size = self.hidden_size - self.hidden_dropout = config.hidden_dropout - self.config = config - - if self.head_dim * self.num_heads != self.hidden_size: - raise ValueError( - f"`hidden_size` must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`:" - f" {self.num_heads})." 
-            )
-
-        # Layer-wise attention scaling
-        self.inv_norm_factor = 1.0 / math.sqrt(self.head_dim)
-        self.beta = 1.0
-
-        self.num_key_value_heads = config.num_key_value_heads if config.num_key_value_heads else self.num_heads
-        self.kv_projection_size = self.head_dim * self.num_key_value_heads
-        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
-        self.query = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
-        self.key_value = nn.Linear(self.hidden_size, self.kv_projection_size * 2, bias=False)
-        self.dense = nn.Linear(self.hidden_size, self.hidden_size)
-        self.attention_dropout = nn.Dropout(config.attention_dropout)
-        self.rotary_emb = RotaryEmbedding(self.head_dim, config=config)
-
-        self.core_attention_flash = FlashSelfAttention(
-            causal=True, attention_dropout=config.attention_dropout
-        )
-
-        self.last_key_layer = None
-        # logn_list = [math.log(i, 4096) if i > 4096 else 1 for i in range(1, 32768)]
-        # self.logn_tensor = torch.tensor(logn_list)[None, :, None, None].half().cuda()
-
-    def repeat_kv(self, hidden_states, n_rep):
-        slen, batch, num_key_value_heads_per_partition, head_dim = hidden_states.shape
-        if n_rep == 1:
-            return hidden_states
-        hidden_states = hidden_states[:, :, :, None, :].expand(slen, batch, num_key_value_heads_per_partition, n_rep,
-                                                               head_dim)
-        return hidden_states.reshape(slen, batch, num_key_value_heads_per_partition * n_rep, head_dim)
-
-    def split_tensor_along_last_dim(self,
-                                    tensor: torch.Tensor,
-                                    num_partitions: int,
-                                    contiguous_split_chunks: bool = False,
-                                    ):
-
-        # Get the size and dimension.
-        last_dim = tensor.dim() - 1
-        last_dim_size = tensor.size()[last_dim] // num_partitions
-        # Split.
-        tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
-        # Note: torch.split does not create contiguous tensors by default.
- if contiguous_split_chunks: - return tuple(chunk.contiguous() for chunk in tensor_list) - - return tensor_list - - def _merge_heads(self, x: torch.Tensor) -> torch.Tensor: - batch_size_and_num_heads, seq_length, _ = x.shape - batch_size = batch_size_and_num_heads // self.num_heads - x = x.view(batch_size, self.num_heads, seq_length, self.head_dim) - x = x.permute(0, 2, 1, 3) - return x.reshape(batch_size, seq_length, self.num_heads * self.head_dim) - - def forward( - self, - hidden_states: torch.Tensor, - residual: torch.Tensor, - attention_mask: torch.Tensor, - layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, - use_cache: bool = False, - output_attentions: bool = False, - ): - hidden_states = hidden_states.transpose(1, 0) - query_layer = self.query(hidden_states) - new_tensor_shape = query_layer.size()[:-1] + \ - (self.num_heads, - self.head_dim) - query_layer = query_layer.view(*new_tensor_shape) - - mixed_kv_layer = self.key_value(hidden_states) - new_tensor_shape = mixed_kv_layer.size()[:-1] + \ - (self.num_key_value_heads, - 2 * self.head_dim) - mixed_kv_layer = mixed_kv_layer.view(*new_tensor_shape) - (key_layer, value_layer) = self.split_tensor_along_last_dim(mixed_kv_layer, 2) - - output_size = (query_layer.size(1), - query_layer.size(2), - query_layer.size(0), - key_layer.size(0), - key_layer.size(2) - ) - - query_layer = query_layer.view(output_size[2], output_size[0] * output_size[1], -1) - key_layer = key_layer.view(output_size[3], output_size[0] * output_size[4], -1) - - apply_rotary_fn = apply_rotary_pos_emb_torch - - seq_len = key_layer.shape[0] - offset = 0 - - if use_cache and layer_past != None: - past_key, past_value = layer_past - offset = past_key.shape[0] - seq_len += offset - - cos, sin = self.rotary_emb(value_layer, seq_len=seq_len) - - query_layer, key_layer = apply_rotary_fn(query_layer, key_layer, cos, sin, offset=offset) - if use_cache: - if layer_past != None: - past_key, past_value = layer_past - key_layer = torch.cat((past_key, key_layer[-1, ...].unsqueeze(0)), dim=0) - value_layer = torch.cat((past_value, value_layer[-1, ...].unsqueeze(0)), dim=0) - layer_past = key_layer, value_layer - - s_value, bz, kv_head, dim = value_layer.shape - s_key = key_layer.shape[0] - s_query = query_layer.shape[0] - q_head = output_size[1] - - query_layer = query_layer.reshape((s_query, bz, q_head, dim)) - key_layer = key_layer.reshape((s_key, bz, kv_head, dim)) - - key_layer = self.repeat_kv(key_layer, self.num_key_value_groups) - value_layer = self.repeat_kv(value_layer, self.num_key_value_groups) - - if self.config.flash_attn: - q, k, v = [rearrange(x, 's b ... 
-> b s ...').contiguous() for x in - (query_layer, key_layer, value_layer)] - context_layer = self.core_attention_flash(q, k, v) - context_layer = rearrange(context_layer, 'b s h d -> b s (h d)').contiguous() - else: - ##[sq, b, np, hn] -> [sq, b * np, hn] - query_layer = query_layer.reshape(s_query, bz * self.num_heads, dim) - # [sk, b, np, hn] -> [sk, b * np, hn] - key_layer = key_layer.reshape(s_key, bz * self.num_heads, dim) - matmul_result = self.inv_norm_factor * torch.einsum('bik,bkj->bij', query_layer.transpose(0, 1), - key_layer.transpose(0, 1).transpose(1, 2)) - - attention_scores = matmul_result.view(bz, self.num_heads, s_query, s_key) - - input_dtype = attention_scores.dtype - if input_dtype == torch.float16: - attention_scores = attention_scores.to(torch.float) - attn_weights = torch.masked_fill(attention_scores, attention_mask, torch.finfo(attention_scores.dtype).min) - attention_probs = F.softmax(attn_weights, dim=-1).to(input_dtype) ##dtype = torch.float32 - attention_probs = self.attention_dropout(attention_probs) - attention_probs_reshaped = attention_probs.view(bz * self.num_heads, s_query, s_key) - - value_layer = value_layer.reshape(s_key, bz * self.num_heads, dim) - context_layer = torch.bmm(attention_probs_reshaped, value_layer.transpose(0, 1)) - context_layer = self._merge_heads(context_layer) - output_tensor = self.dense(context_layer) - - output_tensor = dropout_add(output_tensor, residual, self.hidden_dropout, self.training) - present = None - outputs = (output_tensor, present) - if output_attentions: - outputs += (attention_probs,) - - return output_tensor, layer_past - - -class TelechatMLP(nn.Module): - def __init__(self, config: TelechatConfig): - super().__init__() - hidden_size = config.hidden_size - self.gate_proj = nn.Linear(hidden_size, config.ffn_hidden_size, bias=False) - self.up_proj = nn.Linear(hidden_size, config.ffn_hidden_size, bias=False) - self.down_proj = nn.Linear(config.ffn_hidden_size, hidden_size, bias=True) - self.hidden_dropout = config.hidden_dropout - - def forward(self, hidden_states: torch.Tensor, residual: torch.Tensor) -> torch.Tensor: - intermediate_output = self.down_proj(F.silu(self.gate_proj(hidden_states)) * self.up_proj(hidden_states)) - output = dropout_add(intermediate_output, residual, self.hidden_dropout, self.training) - return output - - -class TelechatBlock(nn.Module): - def __init__(self, config: TelechatConfig, layer_idx): - super().__init__() - hidden_size = config.hidden_size - - self.input_layernorm = MixedFusedRMSNorm(hidden_size, eps=config.layer_norm_epsilon) - self.num_heads = config.n_head - self.layer_idx = layer_idx - self.self_attention = TelechatAttention(config, layer_idx) - self.post_attention_layernorm = MixedFusedRMSNorm(hidden_size, eps=config.layer_norm_epsilon) - - self.mlp = TelechatMLP(config) - - self.apply_residual_connection_post_layernorm = config.apply_residual_connection_post_layernorm - self.hidden_dropout = config.hidden_dropout - - def forward( - self, - hidden_states: torch.Tensor, - attention_mask: torch.Tensor, - layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, - use_cache: bool = False, - output_attentions: bool = False, - ): - layernorm_output = self.input_layernorm(hidden_states) - if self.apply_residual_connection_post_layernorm: - residual = layernorm_output - else: - residual = hidden_states - - attn_outputs = self.self_attention( - layernorm_output, - residual, - layer_past=layer_past, - attention_mask=attention_mask, - use_cache=use_cache, - 
output_attentions=output_attentions, - ) - - attention_output = attn_outputs[0] - outputs = attn_outputs[1:] - layernorm_output = self.post_attention_layernorm(attention_output) - - if self.apply_residual_connection_post_layernorm: - residual = layernorm_output - else: - residual = attention_output - output = self.mlp(layernorm_output, residual) - - if use_cache: - outputs = (output,) + outputs - else: - outputs = (output,) + outputs[1:] - - return outputs - - -class TelechatPreTrainedModel(PreTrainedModel): - config_class = TelechatConfig - base_model_prefix = "transformer" - supports_gradient_checkpointing = True - _no_split_modules = ["TelechatBlock"] - _skip_keys_device_placement = "past_key_values" - - def __init__(self, *inputs, **kwargs): - super().__init__(*inputs, **kwargs) - - def _init_weights(self, module: nn.Module): - """Initialize the weights.""" - if isinstance(module, nn.Linear): - module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) - if module.bias is not None: - module.bias.data.zero_() - - elif isinstance(module, nn.Embedding): - module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) - if module.padding_idx is not None: - module.weight.data[module.padding_idx].zero_() - - elif isinstance(module, LayerNorm): - module.bias.data.zero_() - module.weight.data.fill_(1.0) - - def _set_gradient_checkpointing(self, module: nn.Module, value: bool = False): - if isinstance(module, TelechatModel): - module.gradient_checkpointing = value - - -class TelechatModel(TelechatPreTrainedModel): - def __init__(self, config: TelechatConfig): - super().__init__(config) - - self.embed_dim = config.hidden_size - self.num_heads = config.n_head - self.config = config - self.word_embeddings = nn.Embedding(config.vocab_size, self.embed_dim) - if self.config.embed_layernorm: - self.word_embeddings_layernorm = MixedFusedRMSNorm(self.embed_dim, eps=config.layer_norm_epsilon) - - self.h = nn.ModuleList([TelechatBlock(config, _) for _ in range(config.num_hidden_layers)]) - self.ln_f = MixedFusedRMSNorm(self.embed_dim, eps=config.layer_norm_epsilon) - self.gradient_checkpointing = False - self.post_init() - - def get_input_embeddings(self): - return self.word_embeddings - - def _prepare_attn_mask( - self, attention_mask: torch.Tensor, input_shape: Tuple[int, int], past_key_values_length: int - ) -> torch.BoolTensor: - combined_attention_mask = None - device = attention_mask.device - _, src_length = input_shape - - if src_length > 1: - combined_attention_mask = _make_causal_mask( - input_shape, device=device, past_key_values_length=past_key_values_length - ) - expanded_attn_mask = _expand_mask(attention_mask, tgt_length=src_length) - combined_attention_mask = ( - expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask | combined_attention_mask - ) - - return combined_attention_mask - - def set_input_embeddings(self, new_embeddings: torch.Tensor): - self.word_embeddings = new_embeddings - - def forward( - self, - input_ids: Optional[torch.LongTensor] = None, - past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, - attention_mask: Optional[torch.Tensor] = None, - inputs_embeds: Optional[torch.LongTensor] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - **deprecated_arguments, - ) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]: - - output_attentions = output_attentions 
if output_attentions is not None else self.config.output_attentions
-        output_hidden_states = (
-            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
-        )
-        use_cache = use_cache if use_cache is not None else self.config.use_cache
-        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
-        if input_ids is not None:
-            batch_size, seq_length = input_ids.shape
-        elif inputs_embeds is not None:
-            batch_size, seq_length, _ = inputs_embeds.shape
-
-        if past_key_values is None:
-            past_key_values = tuple([None] * len(self.h))
-        if inputs_embeds is None:
-            inputs_embeds = self.word_embeddings(input_ids)
-        hidden_states = inputs_embeds
-        if self.config.embed_layernorm:
-            hidden_states = self.word_embeddings_layernorm(inputs_embeds)
-
-        presents = () if use_cache else None
-        all_self_attentions = () if output_attentions else None
-        all_hidden_states = () if output_hidden_states else None
-
-        if self.gradient_checkpointing and self.training:
-            if use_cache:
-                use_cache = False
-
-        seq_length_with_past = seq_length
-        past_key_values_length = 0
-        if past_key_values[0] is not None:
-            past_key_values_length = past_key_values[0][0].shape[2]
-            seq_length_with_past = seq_length_with_past + past_key_values_length
-        if attention_mask is None:
-            attention_mask = torch.ones((batch_size, seq_length_with_past), device=hidden_states.device)
-        else:
-            attention_mask = attention_mask.to(hidden_states.device)
-        causal_mask = self._prepare_attn_mask(
-            attention_mask,
-            input_shape=(batch_size, seq_length),
-            past_key_values_length=past_key_values_length,
-        )
-
-        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
-            if output_hidden_states:
-                all_hidden_states = all_hidden_states + (hidden_states,)
-
-            if self.gradient_checkpointing and self.training:
-
-                def create_custom_forward(module):
-                    def custom_forward(*inputs):
-                        # None for past_key_value
-                        return module(*inputs, use_cache=use_cache, output_attentions=output_attentions)
-
-                    return custom_forward
-
-                outputs = torch.utils.checkpoint.checkpoint(
-                    create_custom_forward(block),
-                    hidden_states,
-                    causal_mask,
-                    layer_past,
-                )
-            else:
-                outputs = block(
-                    hidden_states,
-                    layer_past=layer_past,
-                    attention_mask=causal_mask,
-                    use_cache=use_cache,
-                    output_attentions=output_attentions,
-                )
-
-            hidden_states = outputs[0]
-            if use_cache is True:
-                presents = presents + (outputs[1],)
-
-            if output_attentions:
-                all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
-        hidden_states = self.ln_f(hidden_states)
-        if output_hidden_states:
-            all_hidden_states = all_hidden_states + (hidden_states,)
-        if not return_dict:
-            return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
-        return BaseModelOutputWithPastAndCrossAttentions(
-            last_hidden_state=hidden_states,
-            past_key_values=presents,
-            hidden_states=all_hidden_states,
-            attentions=all_self_attentions,
-        )
-
-
-class TelechatForCausalLM(TelechatPreTrainedModel):
-    # _tied_weights_keys = ["lm_head.weight"]
-    _keys_to_ignore_on_load_missing = [r"lm_head.weight"]
-
-    def __init__(self, config: TelechatConfig):
-        super().__init__(config)
-        self.transformer = TelechatModel(config)
-        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
-        self.post_init()
-
-    def get_output_embeddings(self):
-        return self.lm_head
-
-    def set_output_embeddings(self, new_embeddings: torch.Tensor):
-        self.lm_head = new_embeddings
-
-    def prepare_inputs_for_generation(
-        self,
-        input_ids: torch.LongTensor,
-        past_key_values: Optional[torch.Tensor] = None,
-        attention_mask: Optional[torch.Tensor] = None,
-        inputs_embeds: Optional[torch.Tensor] = None,
-        **kwargs,
-    ) -> dict:
-        if past_key_values:
-            input_ids = input_ids[:, -1].unsqueeze(-1)
-        if inputs_embeds is not None and past_key_values is None:
-            model_inputs = {"inputs_embeds": inputs_embeds}
-        else:
-            model_inputs = {"input_ids": input_ids}
-
-        model_inputs.update(
-            {
-                "past_key_values": past_key_values,
-                "use_cache": kwargs.get("use_cache"),
-                "attention_mask": attention_mask,
-            }
-        )
-        return model_inputs
-
-    def forward(
-        self,
-        input_ids: Optional[torch.LongTensor] = None,
-        past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
-        attention_mask: Optional[torch.Tensor] = None,
-        inputs_embeds: Optional[torch.Tensor] = None,
-        labels: Optional[torch.Tensor] = None,
-        use_cache: Optional[bool] = None,
-        output_attentions: Optional[bool] = None,
-        output_hidden_states: Optional[bool] = None,
-        return_dict: Optional[bool] = None,
-        **deprecated_arguments,
-    ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
-
-        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
-        transformer_outputs = self.transformer(
-            input_ids,
-            past_key_values=past_key_values,
-            attention_mask=attention_mask,
-            inputs_embeds=inputs_embeds,
-            use_cache=use_cache,
-            output_attentions=output_attentions,
-            output_hidden_states=output_hidden_states,
-            return_dict=return_dict,
-        )
-        hidden_states = transformer_outputs[0]
-        lm_logits = self.lm_head(hidden_states)
-
-        loss = None
-        if labels is not None:
-            labels = labels.to(lm_logits.device)
-            shift_logits = lm_logits[..., :-1, :].contiguous()
-            shift_labels = labels[..., 1:].contiguous()
-            batch_size, seq_length, vocab_size = shift_logits.shape
-            loss_fct = CrossEntropyLoss()
-            loss = loss_fct(
-                shift_logits.view(batch_size * seq_length, vocab_size), shift_labels.view(batch_size * seq_length)
-            )
-
-        if not return_dict:
-            output = (lm_logits,) + transformer_outputs[1:]
-            return ((loss,) + output) if loss is not None else output
-
-        return CausalLMOutputWithCrossAttentions(
-            loss=loss,
-            logits=lm_logits,
-            past_key_values=transformer_outputs.past_key_values,
-            hidden_states=transformer_outputs.hidden_states,
-            attentions=transformer_outputs.attentions,
-        )
-
-    def chat(self, tokenizer, question: str = '', history: Union[List[Dict], History] = None, stream: bool = False,
-             generation_config: Optional[GenerationConfig] = None, **kwargs):
-        """
-        Args:
-            tokenizer: the TeleChat tokenizer
-            question: the question for the model to answer in this turn
-            history: the conversation history used to build the model input
-            stream: whether to stream the reply token by token or return the full text at the end
-            generation_config: configuration for generation
-            **kwargs: extra args that update the generation config or are passed to the model forward
-        """
-        generation_config = generation_config or self.generation_config
-        if not generation_config:
-            logger.error("generation_config is None")
-            raise ValueError("generation_config must not be None")
-        if not question:
-            logger.error("question is empty")
-            raise ValueError("question must not be empty")
-        if history is None:
-            history = []
-
-        # update and validate the generation config before building the inputs
-        generation_config = copy.deepcopy(generation_config)
-        user_id = generation_config.user_token_id
-        bot_id = generation_config.bot_token_id
-        model_kwargs = generation_config.update(**kwargs)
-        generation_config.validate()
-
-        # convert a plain list of messages to a History object
-        if not isinstance(history, History):
-            history = History(tokenizer, history)
-
-        inputs = self.build_inputs_for_chat(tokenizer, question, history, generation_config, user_id, bot_id)
-        history.append({"role": "user", "content": question})
-        if stream:
-            streamer = TelechatIterTextStreamer(tokenizer, history, skip_prompt=True)
-            Thread(target=self.generate, kwargs=dict(
-                inputs=inputs.to(self.device), streamer=streamer,
-                generation_config=generation_config, **model_kwargs
-            )).start()
-            return streamer
-        else:
-            outputs = self.generate(inputs.to(self.device), generation_config=generation_config, **model_kwargs)
-            response = tokenizer.decode(outputs[0][len(inputs[0]):-1])
-            history.append({"role": "bot", "content": response})
-            return response, history
-
-    def build_inputs_for_chat(self, tokenizer, question, history, generation_config, usr_id, bot_id):
-        """
-        Check the history and build the model inputs.
-        """
-        # tokenize the question first
-        q_token = tokenizer(question)
-        qa_history = copy.deepcopy(history)
-
-        # maximum length available for building the inputs
-        model_max_length = self.config.seq_length
-        build_max_length = max(0, model_max_length - generation_config.max_new_tokens - 1) \
-            if generation_config.max_new_tokens else max(0, generation_config.max_length)
-        if build_max_length < 3:
-            logger.warning("The model cannot satisfy the requested input length. Please check the generation config.")
-            raise ValueError("The model cannot satisfy the requested input length. Please check the generation config.")
-
-        # truncate from the left
-        input_tokens = [usr_id] + q_token["input_ids"][-build_max_length + 1:] + [bot_id]
-        length = len(input_tokens)
-
-        while len(qa_history) != 0:
-            message = qa_history.pop()
-            if message["role"] == "user":
-                tokens = [usr_id] + message["input_ids"]
-            elif message["role"] == "bot":
-                tokens = [bot_id] + message["input_ids"] + [generation_config.eos_token_id]
-            else:
-                tokens = []
-            if len(tokens) + length >= build_max_length:
-                break
-            else:
-                input_tokens = tokens + input_tokens
-
-        input_tokens = [generation_config.bos_token_id] + input_tokens
-
-        return torch.tensor([input_tokens], dtype=torch.int64)
diff --git a/tokenization_telechat.py b/tokenization_telechat.py
deleted file mode 100644
index 6ac4fb87adaa33ad7850e7964157b9f5b335b435..0000000000000000000000000000000000000000
--- a/tokenization_telechat.py
+++ /dev/null
@@ -1,220 +0,0 @@
-import os
-from shutil import copyfile
-from typing import Any, Dict, List, Optional, Tuple
-import sentencepiece as spm
-from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
-from transformers.utils import logging
-
-logger = logging.get_logger(__name__)
-
-VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
-
-# TODO: refresh this map once download URLs are available on Hugging Face
-PRETRAINED_VOCAB_FILES_MAP = {
-    "vocab_file": {},
-    "tokenizer_file": {},
-}
-
-
-class 
TelechatTokenizer(PreTrainedTokenizer): - - vocab_files_names = VOCAB_FILES_NAMES - pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP - model_input_names = ["input_ids", "attention_mask"] - - def __init__( - self, - vocab_file, - unk_token="", - bos_token="<_start>", - eos_token="<_end>", - pad_token="<_pad>", - sp_model_kwargs: Optional[Dict[str, Any]] = None, - add_bos_token=True, - add_eos_token=False, - clean_up_tokenization_spaces=False, - **kwargs, - ): - self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs - bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token - eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token - unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token - pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token - self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) - self.sp_model.Load(vocab_file) - super().__init__( - bos_token=bos_token, - eos_token=eos_token, - unk_token=unk_token, - pad_token=pad_token, - add_bos_token=add_bos_token, - add_eos_token=add_eos_token, - sp_model_kwargs=self.sp_model_kwargs, - clean_up_tokenization_spaces=clean_up_tokenization_spaces, - **kwargs, - ) - self.vocab_file = vocab_file - self.add_bos_token = add_bos_token - self.add_eos_token = add_eos_token - - - def __getstate__(self): - state = self.__dict__.copy() - state["sp_model"] = None - return state - - def __setstate__(self, d): - self.__dict__ = d - self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) - self.sp_model.Load(self.vocab_file) - - @property - def vocab_size(self): - """Returns vocab size""" - return self.sp_model.get_piece_size() - - def get_vocab(self): - """Returns vocab as a dict""" - vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} - vocab.update(self.added_tokens_encoder) - return vocab - - def _tokenize(self, text): - """Returns a tokenized string.""" - return self.sp_model.encode(text, out_type=str) - - def _convert_token_to_id(self, token): - """Converts a token (str) in an id using the vocab.""" - return self.sp_model.piece_to_id(token) - - def _convert_id_to_token(self, index): - """Converts an index (integer) in a token (str) using the vocab.""" - token = self.sp_model.IdToPiece(index) - return token - - def convert_tokens_to_string(self, tokens): - """Converts a sequence of tokens (string) in a single string.""" - current_sub_tokens = [] - out_string = "" - prev_is_special = False - for i, token in enumerate(tokens): - # make sure that special tokens are not decoded using sentencepiece model - if token in self.all_special_tokens: - if not prev_is_special and i != 0: - out_string += " " - out_string += self.sp_model.decode(current_sub_tokens) + token - prev_is_special = True - current_sub_tokens = [] - else: - current_sub_tokens.append(token) - prev_is_special = False - out_string += self.sp_model.decode(current_sub_tokens) - return out_string - - def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]: - """ - Save the vocabulary and special tokens file to a directory. - - Args: - save_directory (`str`): - The directory in which to save the vocabulary. - - Returns: - `Tuple(str)`: Paths to the files saved. 
- """ - if not os.path.isdir(save_directory): - logger.error(f"Vocabulary path ({save_directory}) should be a directory") - return - out_vocab_file = os.path.join( - save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] - ) - - if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): - copyfile(self.vocab_file, out_vocab_file) - elif not os.path.isfile(self.vocab_file): - with open(out_vocab_file, "wb") as fi: - content_spiece_model = self.sp_model.serialized_model_proto() - fi.write(content_spiece_model) - - return (out_vocab_file,) - - def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): - bos_token_id = [self.bos_token_id] if self.add_bos_token else [] - eos_token_id = [self.eos_token_id] if self.add_eos_token else [] - - output = bos_token_id + token_ids_0 + eos_token_id - - if token_ids_1 is not None: - output = output + bos_token_id + token_ids_1 + eos_token_id - - return output - - def get_special_tokens_mask( - self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False - ) -> List[int]: - """ - Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding - special tokens using the tokenizer `prepare_for_model` method. - - Args: - token_ids_0 (`List[int]`): - List of IDs. - token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - already_has_special_tokens (`bool`, *optional*, defaults to `False`): - Whether or not the token list is already formatted with special tokens for the model. - - Returns: - `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. - """ - if already_has_special_tokens: - return super().get_special_tokens_mask( - token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True - ) - - bos_token_id = [1] if self.add_bos_token else [] - eos_token_id = [1] if self.add_eos_token else [] - - if token_ids_1 is None: - return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id - return ( - bos_token_id - + ([0] * len(token_ids_0)) - + eos_token_id - + bos_token_id - + ([0] * len(token_ids_1)) - + eos_token_id - ) - - def create_token_type_ids_from_sequences( - self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None - ) -> List[int]: - """ - Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT - sequence pair mask has the following format: - - ``` - 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 - | first sequence | second sequence | - ``` - - if token_ids_1 is None, only returns the first portion of the mask (0s). - - Args: - token_ids_0 (`List[int]`): - List of ids. - token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - - Returns: - `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). 
- """ - bos_token_id = [self.bos_token_id] if self.add_bos_token else [] - eos_token_id = [self.eos_token_id] if self.add_eos_token else [] - - output = [0] * len(bos_token_id + token_ids_0 + eos_token_id) - - if token_ids_1 is not None: - output += [1] * len(bos_token_id + token_ids_1 + eos_token_id) - - return output diff --git a/tokenizer.model b/tokenizer.model deleted file mode 100644 index cd47d1356749ba43803322ca2ca295c2c776b036..0000000000000000000000000000000000000000 --- a/tokenizer.model +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:db947024849f75ec4bd9af5d4c84fa71e96a26971eb353a70acd66194fc7a69b -size 2197489 diff --git a/tokenizer_config.json b/tokenizer_config.json deleted file mode 100644 index 8e8c261678ad76df6865abca3d3250e2ba3c04a7..0000000000000000000000000000000000000000 --- a/tokenizer_config.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "tokenizer_class": "TelechatTokenizer", - "auto_map": { - "AutoTokenizer": [ - "tokenization_telechat.TelechatTokenizer", - null - ] - }, - "add_bos_token": false, - "add_eos_token": false, - "use_fast": false, - "clean_up_tokenization_spaces": false, - "eos_token": { - "__type": "AddedToken", - "content": "<_end>", - "lstrip": false, - "normalized": true, - "rstrip": false, - "single_word": true - }, - "model_max_length": 100000000, - "sp_model_kwargs": {}, - "pad_token": { - "__type": "AddedToken", - "content": "<_pad>", - "lstrip": false, - "normalized": true, - "rstrip": false, - "single_word": true - }, - "unk_token": { - "__type": "AddedToken", - "content": "<_end>", - "lstrip": false, - "normalized": true, - "rstrip": false, - "single_word": true - } -}