diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..20fd87b81ab9df45db170cd77ed541641411bbe9 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,56 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00011-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00035-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00047-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00008-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00009-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00021-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00034-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00052-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00010-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00016-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00024-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00040-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00033-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00036-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00038-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00046-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00026-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00048-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00015-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00014-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00004-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00025-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00050-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00019-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00027-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00028-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00045-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00003-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00006-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00017-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00039-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00041-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00002-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00005-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00037-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00042-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00007-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00023-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00051-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00053-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00043-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00044-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00001-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00013-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00022-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00020-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00031-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00032-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00049-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00012-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00030-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00018-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00029-of-00053.bin filter=lfs diff=lfs merge=lfs -text
diff --git a/config.json b/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..ce719472ee890c62928ac723e660f760165a4eb2
--- /dev/null
+++ b/config.json
@@ -0,0 +1 @@
+{"architectures": ["SkyworkForCausalLM"], "auto_map": {"AutoConfig": "configuration_skywork_moe.SkyworkMoeConfig", "AutoModelForCausalLM": "modeling_skywork_moe.SkyworkForCausalLM"}, "model_type": "skywork", "vocab_size": 65532, "bos_token_id": 1, "eos_token_id": 2, "pad_token_id": 0, "hidden_act": "silu", "hidden_size": 4608, "initializer_range": 0.01, "intermediate_size": 12288, "max_position_embeddings": 8192, "num_attention_heads": 36, "num_key_value_heads": 36, "num_hidden_layers": 52, "num_experts": [16], "moe_use_skywork_gating": false, "moe_2layer_gate": false, "moe_use_logits_norm": true, "moe_gate_norm_std": 1.0, "moe_feature_no_mul_topk": true, "sliding_window": null, "moe_expert_interval": 1, "rms_norm_eps": 1e-06, "rotary_percent": 1.0, "tie_word_embeddings": false, "torch_dtype": "bfloat16", "use_cache": true, "transformers_version": "4.40.1", "rope_theta": 10000}
diff --git a/configuration_skywork_moe.py b/configuration_skywork_moe.py
new file mode 100644
index 0000000000000000000000000000000000000000..125dfbd7a4a35db994f81a4e8031e35140bb7e52
--- /dev/null
+++ b/configuration_skywork_moe.py
@@ -0,0 +1,106 @@
+# Copyright (c) SkyworkAI and the HuggingFace Inc. team. All rights reserved.
+# This code is built upon Huggingface's transformers repository.
+
+
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+SKYWORK_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+
+
+class SkyworkMoeConfig(PretrainedConfig):
+
+ model_type = "skywork"
+ keys_to_ignore_at_inference = ["past_key_values"]
+
+ def __init__(
+ self,
+ vocab_size=32000,
+ hidden_size=4096,
+ intermediate_size=11008,
+ num_hidden_layers=32,
+ num_attention_heads=32,
+ num_key_value_heads=None,
+ hidden_act="silu",
+ max_position_embeddings=2048,
+ initializer_range=0.02,
+ rms_norm_eps=1e-6,
+ use_cache=True,
+ pad_token_id=None,
+ bos_token_id=1,
+ eos_token_id=2,
+ pretraining_tp=1,
+ tie_word_embeddings=False,
+ rope_theta=10000.0,
+ rope_scaling=None,
+ num_experts=[32],
+ moe_expert_interval=1,
+ moe_use_skywork_gating=False,
+ moe_2layer_gate=True,
+ moe_use_logits_norm=False,
+ moe_gate_norm_std=1.0,
+ moe_feature_no_mul_topk=False,
+        sliding_window=None,
+        **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+
+ # for backward compatibility
+ if num_key_value_heads is None:
+ num_key_value_heads = num_attention_heads
+
+ self.num_key_value_heads = num_key_value_heads
+ self.hidden_act = hidden_act
+ self.initializer_range = initializer_range
+ self.rms_norm_eps = rms_norm_eps
+ self.pretraining_tp = pretraining_tp
+ self.use_cache = use_cache
+ self.rope_theta = rope_theta
+ self.rope_scaling = rope_scaling
+ self._rope_scaling_validation()
+ self.num_experts = num_experts
+ self.moe_expert_interval = moe_expert_interval
+ self.moe_use_skywork_gating = moe_use_skywork_gating
+ self.moe_2layer_gate = moe_2layer_gate
+ self.moe_use_logits_norm = moe_use_logits_norm
+ self.moe_gate_norm_std = moe_gate_norm_std
+ self.moe_feature_no_mul_topk = moe_feature_no_mul_topk
+ self.sliding_window = sliding_window
+
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ tie_word_embeddings=tie_word_embeddings,
+ **kwargs,
+ )
+
+ def _rope_scaling_validation(self):
+ """
+ Validate the `rope_scaling` configuration.
+ """
+ if self.rope_scaling is None:
+ return
+
+ if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+ raise ValueError(
+ "`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, "
+ f"got {self.rope_scaling}"
+ )
+ rope_scaling_type = self.rope_scaling.get("type", None)
+ rope_scaling_factor = self.rope_scaling.get("factor", None)
+ if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic", "ntk"]:
+ raise ValueError(
+ f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+ )
+ if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
+ raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
diff --git a/generation_config.json b/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f824ac627297f6b60c61172c100139f913676d74
--- /dev/null
+++ b/generation_config.json
@@ -0,0 +1 @@
+{"_from_model_config": true, "bos_token_id": 1, "eos_token_id": 2, "pad_token_id": 0, "transformers_version": "4.40.1"}
\ No newline at end of file
diff --git a/modeling_skywork_moe.py b/modeling_skywork_moe.py
new file mode 100644
index 0000000000000000000000000000000000000000..523c1e9d9bdbd6a4634346779c5fa2a3c4aaa332
--- /dev/null
+++ b/modeling_skywork_moe.py
@@ -0,0 +1,1604 @@
+# coding=utf-8
+# Copyright 2023 Mistral AI and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch Skywork model."""
+import inspect
+import math
+import warnings
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.nn.functional as F
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from transformers.activations import ACT2FN
+from transformers.cache_utils import Cache, DynamicCache
+from transformers.modeling_attn_mask_utils import (
+ _prepare_4d_causal_attention_mask,
+ _prepare_4d_causal_attention_mask_for_sdpa,
+)
+from transformers.modeling_outputs import (
+ MoeCausalLMOutputWithPast,
+ MoeModelOutputWithPast,
+ SequenceClassifierOutputWithPast,
+)
+from transformers.modeling_utils import PreTrainedModel
+from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_13
+from transformers.utils import (
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_flash_attn_2_available,
+ is_flash_attn_greater_or_equal_2_10,
+ logging,
+ replace_return_docstrings,
+)
+from transformers.utils.import_utils import is_torch_fx_available
+from .configuration_skywork_moe import SkyworkMoeConfig
+
+
+if is_flash_attn_2_available():
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
+
+ _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)
+
+# This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph.
+# It means that the function will not be traced through and simply appear as a node in the graph.
+if is_torch_fx_available():
+ if not is_torch_greater_or_equal_than_1_13:
+ import torch.fx
+
+ _prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask)
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "SkyworkMoeConfig"
+
+
+def load_balancing_loss_func(gate_logits: Tuple[torch.Tensor, ...], num_experts: int = None, top_k=2) -> float:
+ r"""
+ Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
+
+ See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss
+ function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
+ experts is too unbalanced.
+
+ Args:
+        gate_logits (Union[`torch.Tensor`, Tuple[torch.Tensor]]):
+ Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
+ shape [batch_size X sequence_length, num_experts].
+ num_experts (`int`, *optional*):
+ Number of experts
+
+ Returns:
+ The auxiliary loss.
+ """
+ if gate_logits is None or not isinstance(gate_logits, tuple):
+ return 0
+
+    compute_device = gate_logits[0].device
+    concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
+
+ routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
+
+ _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
+
+    # flatten so that each top-k selection is counted as its own token (shape [top_k * batch_size * sequence_length])
+ selected_experts = selected_experts.reshape(-1)
+
+ expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
+ expert_mask = torch.max(expert_mask, dim=-2).values
+
+ # Compute the percentage of tokens routed to each experts
+ tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
+
+ # Compute the average probability of routing to these experts
+ router_prob_per_expert = torch.mean(routing_weights, dim=0)
+
+ overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(-1))
+ return overall_loss * num_experts
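+# Shape walk-through (illustrative): with L layers of gate logits over B*S tokens
+# and E experts, `concatenated_gate_logits` is [L*B*S, E]. The loss couples an
+# indicator of which experts appear among the top-k selections with the mean
+# router probability per expert, scaled by E, so routing that concentrates
+# tokens on a few experts is penalized relative to balanced routing.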
+
+
+# Copied from transformers.models.llama.modeling_llama._get_unpad_data
+def _get_unpad_data(attention_mask):
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
+    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
+ return (
+ indices,
+ cu_seqlens,
+ max_seqlen_in_batch,
+ )
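+# Example (illustrative): for attention_mask [[1, 1, 0], [1, 1, 1]] the per-row
+# lengths are [2, 3], `indices` selects the five non-padding positions of the
+# flattened mask, cu_seqlens is [0, 2, 5] and max_seqlen_in_batch is 3.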
+
+
+# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Skywork
+class SkyworkRMSNorm(nn.Module):
+ def __init__(self, hidden_size, eps=1e-6):
+ """
+ SkyworkRMSNorm is equivalent to T5LayerNorm
+ """
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(hidden_size))
+ self.variance_epsilon = eps
+
+ def forward(self, hidden_states):
+ input_dtype = hidden_states.dtype
+ hidden_states = hidden_states.to(torch.float32)
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+ return self.weight * hidden_states.to(input_dtype)
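+# In formula form (for reference): SkyworkRMSNorm(x) = weight * x / sqrt(mean(x**2, dim=-1) + eps),
+# with the statistics computed in float32 and the result cast back to the input dtype.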
+
+
+# Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->Skywork
+class SkyworkRotaryEmbedding(nn.Module):
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
+ super().__init__()
+
+ self.dim = dim
+ self.max_position_embeddings = max_position_embeddings
+ self.base = base
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
+
+ # Build here to make `torch.jit.trace` work.
+ self._set_cos_sin_cache(
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
+ )
+
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
+ self.max_seq_len_cached = seq_len
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
+
+ freqs = torch.outer(t, self.inv_freq)
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
+ emb = torch.cat((freqs, freqs), dim=-1)
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
+
+ def forward(self, x, seq_len=None):
+ # x: [bs, num_attention_heads, seq_len, head_size]
+ if seq_len > self.max_seq_len_cached:
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
+
+ return (
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
+ )
+
+
+# Copied from transformers.models.llama.modeling_llama.rotate_half
+def rotate_half(x):
+ """Rotates half the hidden dims of the input."""
+ x1 = x[..., : x.shape[-1] // 2]
+ x2 = x[..., x.shape[-1] // 2 :]
+ return torch.cat((-x2, x1), dim=-1)
+
+
+# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
+def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
+ """Applies Rotary Position Embedding to the query and key tensors.
+
+ Args:
+ q (`torch.Tensor`): The query tensor.
+ k (`torch.Tensor`): The key tensor.
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
+ position_ids (`torch.Tensor`):
+ The position indices of the tokens corresponding to the query and key tensors. For example, this can be
+ used to pass offsetted position ids when working with a KV-cache.
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+ Returns:
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
+ """
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
+ q_embed = (q * cos) + (rotate_half(q) * sin)
+ k_embed = (k * cos) + (rotate_half(k) * sin)
+ return q_embed, k_embed
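+# Usage sketch (illustrative), mirroring the call pattern in SkyworkAttention.forward below:
+#   cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
+#   query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+# where q and k have shape [bsz, num_heads, seq_len, head_dim] and unsqueeze_dim=1 broadcasts over heads.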
+
+
+# Copied from transformers.models.llama.modeling_llama.repeat_kv
+def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+ """
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+ """
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+ if n_rep == 1:
+ return hidden_states
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
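+# Example (illustrative): with n_rep=3, a key/value tensor of shape [B, 2, S, D]
+# becomes [B, 6, S, D]. With this checkpoint's config.json (num_key_value_heads ==
+# num_attention_heads == 36), n_rep is 1 and the input is returned unchanged.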
+
+
+# Copied from transformers.models.mistral.modeling_mistral.MistralAttention with Mistral->Skywork
+class SkyworkAttention(nn.Module):
+ """
+ Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
+ and "Generating Long Sequences with Sparse Transformers".
+ """
+
+ def __init__(self, config: SkyworkMoeConfig, layer_idx: Optional[int] = None):
+ super().__init__()
+ self.config = config
+ self.layer_idx = layer_idx
+ if layer_idx is None:
+ logger.warning_once(
+ f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
+ "to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
+ "when creating this class."
+ )
+
+ self.hidden_size = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.head_dim = self.hidden_size // self.num_heads
+ self.num_key_value_heads = config.num_key_value_heads
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
+ self.max_position_embeddings = config.max_position_embeddings
+ self.rope_theta = config.rope_theta
+ self.is_causal = True
+        self.attention_dropout = 0.0  # note: this port targets inference only, so attention dropout is fixed at 0.
+
+ if (self.head_dim * self.num_heads) != self.hidden_size:
+ raise ValueError(
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
+ f" and `num_heads`: {self.num_heads})."
+ )
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
+
+ self.rotary_emb = SkyworkRotaryEmbedding(
+ self.head_dim,
+ max_position_embeddings=self.max_position_embeddings,
+ base=self.rope_theta,
+ )
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ **kwargs,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ if "padding_mask" in kwargs:
+ warnings.warn(
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
+ )
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ kv_seq_len = key_states.shape[-2]
+ if past_key_value is not None:
+ if self.layer_idx is None:
+ raise ValueError(
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
+ "with a layer index."
+ )
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+
+ if past_key_value is not None:
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ # repeat k/v heads if n_kv_heads < n_heads
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
+
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
+
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
+ )
+
+ attn_weights = attn_weights + attention_mask
+
+ # upcast attention to fp32
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
+ attn_output = torch.matmul(attn_weights, value_states)
+
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.transpose(1, 2).contiguous()
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
+
+ attn_output = self.o_proj(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights, past_key_value
+
+
+# Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2 with Mistral->Skywork
+class SkyworkFlashAttention2(SkyworkAttention):
+ """
+    Skywork flash attention module. This module inherits from `SkyworkAttention`, as the weights of the module stay
+    untouched. The only required change is in the forward pass, where it needs to correctly call the public API of
+    flash attention and deal with padding tokens in case the input contains any.
+ """
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
+        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, which became the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ **kwargs,
+ ):
+ if "padding_mask" in kwargs:
+ warnings.warn(
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
+ )
+
+ # overwrite attention_mask with padding_mask
+ attention_mask = kwargs.pop("padding_mask")
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ kv_seq_len = key_states.shape[-2]
+ if past_key_value is not None:
+ if self.layer_idx is None:
+ raise ValueError(
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
+ "with a layer index."
+ )
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
+
+ # Because the input can be padded, the absolute sequence length depends on the max position id.
+ rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1
+ cos, sin = self.rotary_emb(value_states, seq_len=rotary_seq_len)
+
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+
+ use_sliding_windows = (
+ _flash_supports_window_size
+ and getattr(self.config, "sliding_window", None) is not None
+ and kv_seq_len > self.config.sliding_window
+ )
+
+ if not _flash_supports_window_size:
+ logger.warning_once(
+ "The current flash attention version does not support sliding window attention, for a more memory efficient implementation"
+ " make sure to upgrade flash-attn library."
+ )
+
+ if past_key_value is not None:
+            # Activate slicing cache only if the config has a `sliding_window` attribute
+ cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
+ if (
+ getattr(self.config, "sliding_window", None) is not None
+ and kv_seq_len > self.config.sliding_window
+ and cache_has_contents
+ ):
+ slicing_tokens = 1 - self.config.sliding_window
+
+ past_key = past_key_value[self.layer_idx][0]
+ past_value = past_key_value[self.layer_idx][1]
+
+ past_key = past_key[:, :, slicing_tokens:, :].contiguous()
+ past_value = past_value[:, :, slicing_tokens:, :].contiguous()
+
+ if past_key.shape[-2] != self.config.sliding_window - 1:
+ raise ValueError(
+ f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
+ f" {past_key.shape}"
+ )
+
+ if attention_mask is not None:
+ attention_mask = attention_mask[:, slicing_tokens:]
+ attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)
+
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ # repeat k/v heads if n_kv_heads < n_heads
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
+ dropout_rate = 0.0 if not self.training else self.attention_dropout
+
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
+ # therefore the input hidden states gets silently casted in float32. Hence, we need
+ # cast them back in float16 just to be sure everything works as expected.
+ input_dtype = query_states.dtype
+ if input_dtype == torch.float32:
+ if torch.is_autocast_enabled():
+ target_dtype = torch.get_autocast_gpu_dtype()
+ # Handle the case where the model is quantized
+ elif hasattr(self.config, "_pre_quantization_dtype"):
+ target_dtype = self.config._pre_quantization_dtype
+ else:
+ target_dtype = self.q_proj.weight.dtype
+
+ logger.warning_once(
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
+ f" {target_dtype}."
+ )
+
+ query_states = query_states.to(target_dtype)
+ key_states = key_states.to(target_dtype)
+ value_states = value_states.to(target_dtype)
+
+        # Reshape to the expected shape for Flash Attention
+ query_states = query_states.transpose(1, 2)
+ key_states = key_states.transpose(1, 2)
+ value_states = value_states.transpose(1, 2)
+
+ attn_output = self._flash_attention_forward(
+ query_states,
+ key_states,
+ value_states,
+ attention_mask,
+ q_len,
+ dropout=dropout_rate,
+ use_sliding_windows=use_sliding_windows,
+ )
+
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
+ attn_output = self.o_proj(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights, past_key_value
+
+ def _flash_attention_forward(
+ self,
+ query_states,
+ key_states,
+ value_states,
+ attention_mask,
+ query_length,
+ dropout=0.0,
+ softmax_scale=None,
+ use_sliding_windows=False,
+ ):
+ """
+        Calls the forward method of Flash Attention. If the input hidden states contain at least one padding token,
+        the input is first unpadded, the attention scores are computed, and the final attention scores are padded back.
+
+ Args:
+ query_states (`torch.Tensor`):
+ Input query states to be passed to Flash Attention API
+ key_states (`torch.Tensor`):
+ Input key states to be passed to Flash Attention API
+ value_states (`torch.Tensor`):
+ Input value states to be passed to Flash Attention API
+ attention_mask (`torch.Tensor`):
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
+ position of padding tokens and 1 for the position of non-padding tokens.
+            dropout (`float`, *optional*):
+                Attention dropout probability.
+            softmax_scale (`float`, *optional*):
+                The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim).
+ use_sliding_windows (`bool`, *optional*):
+ Whether to activate sliding window attention.
+ """
+ if not self._flash_attn_uses_top_left_mask:
+ causal = self.is_causal
+ else:
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
+ causal = self.is_causal and query_length != 1
+
+ # Contains at least one padding token in the sequence
+ if attention_mask is not None:
+ batch_size = query_states.shape[0]
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
+ query_states, key_states, value_states, attention_mask, query_length
+ )
+
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
+
+ if not use_sliding_windows:
+ attn_output_unpad = flash_attn_varlen_func(
+ query_states,
+ key_states,
+ value_states,
+ cu_seqlens_q=cu_seqlens_q,
+ cu_seqlens_k=cu_seqlens_k,
+ max_seqlen_q=max_seqlen_in_batch_q,
+ max_seqlen_k=max_seqlen_in_batch_k,
+ dropout_p=dropout,
+ softmax_scale=softmax_scale,
+ causal=causal,
+ )
+ else:
+ attn_output_unpad = flash_attn_varlen_func(
+ query_states,
+ key_states,
+ value_states,
+ cu_seqlens_q=cu_seqlens_q,
+ cu_seqlens_k=cu_seqlens_k,
+ max_seqlen_q=max_seqlen_in_batch_q,
+ max_seqlen_k=max_seqlen_in_batch_k,
+ dropout_p=dropout,
+ softmax_scale=softmax_scale,
+ causal=causal,
+ window_size=(self.config.sliding_window, self.config.sliding_window),
+ )
+
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
+ else:
+ if not use_sliding_windows:
+ attn_output = flash_attn_func(
+ query_states,
+ key_states,
+ value_states,
+ dropout,
+ softmax_scale=softmax_scale,
+ causal=causal,
+ )
+ else:
+ attn_output = flash_attn_func(
+ query_states,
+ key_states,
+ value_states,
+ dropout,
+ softmax_scale=softmax_scale,
+ causal=causal,
+ window_size=(self.config.sliding_window, self.config.sliding_window),
+ )
+
+ return attn_output
+
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
+ batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape
+
+ # On the first iteration we need to properly re-create the padding mask
+ # by slicing it on the proper place
+ if kv_seq_len != attention_mask.shape[-1]:
+ attention_mask_num_tokens = attention_mask.shape[-1]
+ attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]
+
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
+
+ key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
+ value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
+
+ if query_length == kv_seq_len:
+ query_layer = index_first_axis(
+ query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
+ )
+ cu_seqlens_q = cu_seqlens_k
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
+ indices_q = indices_k
+ elif query_length == 1:
+ max_seqlen_in_batch_q = 1
+ cu_seqlens_q = torch.arange(
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
+ ) # There is a memcpy here, that is very bad.
+ indices_q = cu_seqlens_q[:-1]
+ query_layer = query_layer.squeeze(1)
+ else:
+ # The -q_len: slice assumes left padding.
+ attention_mask = attention_mask[:, -query_length:]
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
+
+ return (
+ query_layer,
+ key_layer,
+ value_layer,
+ indices_q,
+ (cu_seqlens_q, cu_seqlens_k),
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
+ )
+
+
+# Copied from transformers.models.llama.modeling_llama.LlamaSdpaAttention with Llama->Skywork
+class SkyworkSdpaAttention(SkyworkAttention):
+ """
+    Skywork attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
+    `SkyworkAttention`, as the weights of the module stay untouched. The only changes are in the forward pass, to
+    adapt to the SDPA API.
+ """
+
+ # Adapted from SkyworkAttention.forward
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ if output_attentions:
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
+ logger.warning_once(
+ "SkyworkModel is using SkyworkSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
+ )
+ return super().forward(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ kv_seq_len = key_states.shape[-2]
+ if past_key_value is not None:
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
+
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+
+ if past_key_value is not None:
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
+ )
+
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
+ if query_states.device.type == "cuda" and attention_mask is not None:
+ query_states = query_states.contiguous()
+ key_states = key_states.contiguous()
+ value_states = value_states.contiguous()
+
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
+ query_states,
+ key_states,
+ value_states,
+ attn_mask=attention_mask,
+ dropout_p=self.attention_dropout if self.training else 0.0,
+ # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
+ is_causal=self.is_causal and attention_mask is None and q_len > 1,
+ )
+
+ attn_output = attn_output.transpose(1, 2).contiguous()
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
+
+ attn_output = self.o_proj(attn_output)
+
+ return attn_output, None, past_key_value
+
+
+SKYWORK_ATTENTION_CLASSES = {
+ "eager": SkyworkAttention,
+ "flash_attention_2": SkyworkFlashAttention2,
+ "sdpa": SkyworkSdpaAttention,
+}
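+# The class is picked via `config._attn_implementation`; e.g. (illustrative)
+# passing attn_implementation="flash_attention_2" to from_pretrained routes every
+# decoder layer through SkyworkFlashAttention2, while the default resolution
+# falls back to "sdpa" or "eager" depending on the environment.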
+
+
+class SkyworkBlockSparseTop2MLP(nn.Module):
+ def __init__(self, config: SkyworkMoeConfig):
+ super().__init__()
+ self.ffn_dim = config.intermediate_size
+ self.hidden_dim = config.hidden_size
+
+ self.w1 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
+ self.w2 = nn.Linear(self.ffn_dim, self.hidden_dim, bias=False)
+ self.w3 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
+
+ self.act_fn = ACT2FN[config.hidden_act]
+
+ def forward(self, hidden_states):
+ current_hidden_states = self.act_fn(self.w1(hidden_states)) * self.w3(hidden_states)
+ current_hidden_states = self.w2(current_hidden_states)
+ return current_hidden_states
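+# Equivalent formula (for reference): expert(x) = w2(silu(w1(x)) * w3(x)), the
+# gated SwiGLU feed-forward, with ffn_dim = config.intermediate_size (12288 in
+# the shipped config.json) and act_fn = silu per config.hidden_act.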
+
+MOE_TOP_K = 2
+
+class SkyworkSparseMoeBlock(nn.Module):
+ """
+    This implementation is strictly equivalent to standard MoE with full
+    capacity (no dropped tokens). It's faster since it formulates MoE
+    operations in terms of block-sparse operations to accommodate imbalanced
+    assignments of tokens to experts, whereas standard MoE either (1) drops
+    tokens at the cost of reduced performance or (2) sets the capacity factor
+    to the number of experts and thus wastes computation and memory on padding.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ self.hidden_dim = config.hidden_size
+ self.ffn_dim = config.intermediate_size
+ self.num_experts = config.num_experts[0]
+ self.top_k = MOE_TOP_K
+ self.moe_use_skywork_gating = config.moe_use_skywork_gating
+ self.moe_use_logits_norm = config.moe_use_logits_norm
+ self.moe_gate_norm_std = config.moe_gate_norm_std
+ self.moe_feature_no_mul_topk = config.moe_feature_no_mul_topk
+
+ # gating
+ if config.moe_2layer_gate:
+ self.gate = torch.nn.Sequential(
+ nn.Linear(self.hidden_dim, self.num_experts * 8, bias=False).float(),
+ torch.nn.Tanh(),
+ nn.Linear(self.num_experts * 8, self.num_experts, bias=False).float()).float()
+ else:
+ self.gate = nn.Linear(self.hidden_dim, self.num_experts, bias=False)
+
+        self.experts = nn.ModuleList([SkyworkBlockSparseTop2MLP(config) for _ in range(self.num_experts)])
+
+    def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+        """Route each token to its top-2 experts and combine their weighted outputs."""
+ batch_size, sequence_length, hidden_dim = hidden_states.shape
+ hidden_states = hidden_states.view(-1, hidden_dim)
+
+ if isinstance(self.gate, torch.nn.Linear):
+ if self.gate.weight.dtype != torch.float32:
+ self.gate = self.gate.float()
+ setattr(self.gate.weight, 'router', True)
+ else:
+ if self.gate[0].weight.dtype != torch.float32:
+ self.gate = self.gate.float()
+ setattr(self.gate[0].weight, "router", True)
+ setattr(self.gate[2].weight, "router", True)
+ hidden_states_fp32 = hidden_states.float()
+ # router_logits: (batch * sequence_length, n_experts)
+ router_logits = self.gate(hidden_states_fp32)
+ if not (self.moe_use_skywork_gating or self.moe_feature_no_mul_topk):
+ router_logits *= self.top_k
+
+ if self.moe_use_skywork_gating:
+ if self.moe_use_logits_norm:
+ target_std = self.moe_gate_norm_std
+ logits_std = router_logits.std(dim=1, keepdim=True)
+ router_logits = router_logits / (logits_std / target_std)
+ routing_weights, selected_experts = torch.topk(router_logits, k=self.top_k, dim=1)
+ routing_weights = F.softmax(routing_weights, dim=1)
+ else:
+ target_std = self.moe_gate_norm_std
+ if self.moe_use_logits_norm:
+ logits_std = router_logits.std(dim=1, keepdim=True)
+ routing_weights = F.softmax(router_logits / (logits_std / target_std), dim=1)
+ else:
+ routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
+
+            routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
+ routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
+
+ # we cast back to the input dtype
+ routing_weights = routing_weights.to(hidden_states.dtype)
+
+ final_hidden_states = torch.zeros(
+ (batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device
+ )
+
+        # One hot encode the selected experts to create an expert mask
+        # this will be used to easily index which expert is going to be solicited
+ expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)
+
+ # Loop over all available experts in the model and perform the computation on each expert
+ for expert_idx in range(self.num_experts):
+ expert_layer = self.experts[expert_idx]
+ idx, top_x = torch.where(expert_mask[expert_idx])
+
+ if top_x.shape[0] == 0:
+ continue
+
+ # in torch it is faster to index using lists than torch tensors
+ top_x_list = top_x.tolist()
+ idx_list = idx.tolist()
+
+ # Index the correct hidden states and compute the expert hidden state for
+ # the current expert. We need to make sure to multiply the output hidden
+ # states by `routing_weights` on the corresponding tokens (top-1 and top-2)
+ current_state = hidden_states[None, top_x_list].reshape(-1, hidden_dim)
+ current_hidden_states = expert_layer(current_state) * routing_weights[top_x_list, idx_list, None]
+
+ # However `index_add_` only support torch tensors for indexing so we'll use
+ # the `top_x` tensor here.
+ final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
+ final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
+ return final_hidden_states, router_logits
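+# Routing recap (illustrative): the gate runs in float32 and produces
+# router_logits of shape [batch * seq, num_experts]; the top MOE_TOP_K (2)
+# experts are kept per token, their weights renormalized to sum to 1 on the
+# default (non-Skywork-gating) path, and index_add_ scatters each expert's
+# weighted output back into the [batch, seq, hidden_dim] result.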
+
+
+class SkyworkDecoderLayer(nn.Module):
+ def __init__(self, config: SkyworkMoeConfig, layer_idx: int):
+ super().__init__()
+ self.hidden_size = config.hidden_size
+
+ self.self_attn = SKYWORK_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)
+
+ self.block_sparse_moe = SkyworkSparseMoeBlock(config)
+ self.input_layernorm = SkyworkRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ self.post_attention_layernorm = SkyworkRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ output_router_logits: Optional[bool] = False,
+ use_cache: Optional[bool] = False,
+ **kwargs,
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ if "padding_mask" in kwargs:
+ warnings.warn(
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
+ )
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
+ `(batch, sequence_length)` where padding elements are indicated by 0.
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_router_logits (`bool`, *optional*):
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
+ should not be returned during inference.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ """
+
+ residual = hidden_states
+
+ hidden_states = self.input_layernorm(hidden_states)
+
+ # Self Attention
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+ hidden_states = residual + hidden_states
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.post_attention_layernorm(hidden_states)
+ hidden_states, router_logits = self.block_sparse_moe(hidden_states)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights,)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ if output_router_logits:
+ outputs += (router_logits,)
+
+ return outputs
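+# Layer recap (for reference): each decoder layer is a pre-norm residual block,
+#   h = h + self_attn(input_layernorm(h))
+#   h = h + block_sparse_moe(post_attention_layernorm(h))
+# with the sparse MoE block standing in for the usual dense MLP.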
+
+
+SKYWORK_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+    etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`SkyworkMoeConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+@add_start_docstrings(
+ "The bare Skywork Model outputting raw hidden-states without any specific head on top.",
+ SKYWORK_START_DOCSTRING,
+)
+# Copied from transformers.models.mistral.modeling_mistral.MistralPreTrainedModel with Mistral->Skywork
+class SkyworkPreTrainedModel(PreTrainedModel):
+ config_class = SkyworkMoeConfig
+ base_model_prefix = "model"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["SkyworkDecoderLayer"]
+ _skip_keys_device_placement = "past_key_values"
+ _supports_flash_attn_2 = True
+ _supports_sdpa = True
+ _supports_cache_class = True
+
+ # def _init_weights(self, module):
+ # std = self.config.initializer_range
+ # if isinstance(module, nn.Linear):
+ # module.weight.data.normal_(mean=0.0, std=std)
+ # if module.bias is not None:
+ # module.bias.data.zero_()
+ # elif isinstance(module, nn.Embedding):
+ # module.weight.data.normal_(mean=0.0, std=std)
+ # if module.padding_idx is not None:
+ # module.weight.data[module.padding_idx].zero_()
+
+
+SKYWORK_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+ information on the default strategy.
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.n_positions - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ output_router_logits (`bool`, *optional*):
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
+ should not be returned during inference.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare Skywork Model outputting raw hidden-states without any specific head on top.",
+ SKYWORK_START_DOCSTRING,
+)
+# Copied from transformers.models.mistral.modeling_mistral.MistralModel with MISTRAL->SKYWORK,Mistral->Skywork
+class SkyworkModel(SkyworkPreTrainedModel):
+ """
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`SkyworkDecoderLayer`]
+
+ Args:
+ config: SkyworkMoeConfig
+ """
+
+ def __init__(self, config: SkyworkMoeConfig):
+ super().__init__(config)
+ self.padding_idx = config.pad_token_id
+ self.vocab_size = config.vocab_size
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+ self.layers = nn.ModuleList(
+ [SkyworkDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+ )
+ self._attn_implementation = config._attn_implementation
+ self.norm = SkyworkRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ # Ignore copy
+ @add_start_docstrings_to_model_forward(SKYWORK_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_router_logits: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, MoeModelOutputWithPast]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_router_logits = (
+ output_router_logits if output_router_logits is not None else False
+ )
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+ elif input_ids is not None:
+ batch_size, seq_length = input_ids.shape
+ elif inputs_embeds is not None:
+ batch_size, seq_length, _ = inputs_embeds.shape
+ else:
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+ past_key_values_length = 0
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ if use_cache:
+ use_legacy_cache = not isinstance(past_key_values, Cache)
+ if use_legacy_cache:
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
+ past_key_values_length = past_key_values.get_usable_length(seq_length)
+
+ if position_ids is None:
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+ position_ids = torch.arange(
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
+ )
+ position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
+ else:
+ position_ids = position_ids.view(-1, seq_length).long()
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids)
+
+ if attention_mask is not None and self._attn_implementation == "flash_attention_2" and use_cache:
+ is_padding_right = attention_mask[:, -1].sum().item() != batch_size
+ if is_padding_right:
+ raise ValueError(
+ "You are attempting to perform batched generation with padding_side='right'"
+ " this may lead to unexpected behaviour for Flash Attention version of Skywork. Make sure to "
+ " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
+ )
+
+ if self._attn_implementation == "flash_attention_2":
+ # 2d mask is passed through the layers
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
+ elif self._attn_implementation == "sdpa" and not output_attentions:
+ # output_attentions=True can not be supported when using SDPA, and we fall back on
+ # the manual implementation that requires a 4D causal mask in all cases.
+ attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
+ attention_mask,
+ (batch_size, seq_length),
+ inputs_embeds,
+ past_key_values_length,
+ )
+ else:
+ # 4d mask is passed through the layers
+ attention_mask = _prepare_4d_causal_attention_mask(
+ attention_mask,
+ (batch_size, seq_length),
+ inputs_embeds,
+ past_key_values_length,
+ sliding_window=self.config.sliding_window,
+ )
+
+ hidden_states = inputs_embeds
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_router_logits = () if output_router_logits else None
+ next_decoder_cache = None
+
+ for decoder_layer in self.layers:
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ position_ids,
+ past_key_values,
+ output_attentions,
+ output_router_logits,
+ use_cache,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_values,
+ output_attentions=output_attentions,
+ output_router_logits=output_router_logits,
+ use_cache=use_cache,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ if output_router_logits:
+ all_router_logits += (layer_outputs[-1],)
+
+ hidden_states = self.norm(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = None
+ if use_cache:
+ next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_router_logits]
+ if v is not None
+ )
+ return MoeModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ router_logits=all_router_logits,
+ )
+
+
+class SkyworkForCausalLM(SkyworkPreTrainedModel):
+ _tied_weights_keys = ["lm_head.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.model = SkyworkModel(config)
+ self.vocab_size = config.vocab_size
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+ self.router_aux_loss_coef = 0.001
+ self.num_experts = config.num_experts[0]
+ self.num_experts_per_tok = MOE_TOP_K
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def set_decoder(self, decoder):
+ self.model = decoder
+
+ def get_decoder(self):
+ return self.model
+
+ @add_start_docstrings_to_model_forward(SKYWORK_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=MoeCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+ # Ignore copy
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_router_logits: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, MoeCausalLMOutputWithPast]:
+ r"""
+ Args:
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, SkyworkForCausalLM
+
+ >>> model = SkyworkForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
+ >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
+
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+ >>> # Generate
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_router_logits = output_router_logits if output_router_logits is not None else False
+
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
+ outputs = self.model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ output_router_logits=output_router_logits,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ logits = self.lm_head(hidden_states)
+ logits = logits.float()
+
+ loss = None
+ if labels is not None:
+ # Shift so that tokens < n predict n
+ shift_logits = logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+ # Flatten the tokens
+ loss_fct = CrossEntropyLoss()
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
+ shift_labels = shift_labels.view(-1)
+ # Enable model parallelism
+ shift_labels = shift_labels.to(shift_logits.device)
+ loss = loss_fct(shift_logits, shift_labels)
+
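+ # The auxiliary load-balancing loss is only computed when router logits were requested;
+ # when labels are also given, it is scaled by `router_aux_loss_coef` (0.001 here) and
+ # added to the language-modeling loss.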
+ aux_loss = None
+ if output_router_logits:
+ aux_loss = load_balancing_loss_func(
+ outputs.router_logits if return_dict else outputs[-1], self.num_experts, self.num_experts_per_tok
+ )
+ if labels is not None:
+ loss += self.router_aux_loss_coef * aux_loss.to(loss.device)  # make sure the aux loss is on the same device
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ if output_router_logits:
+ output = (aux_loss,) + output
+ return (loss,) + output if loss is not None else output
+
+ return MoeCausalLMOutputWithPast(
+ loss=loss,
+ aux_loss=aux_loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ router_logits=outputs.router_logits,
+ )
+
+ def prepare_inputs_for_generation(
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
+ ):
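+ # Called by `generate()` before each decoding step: trims `input_ids` (and, if needed,
+ # `attention_mask`) against the tokens already covered by the cache and assembles the
+ # kwargs for the next forward pass.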
+ # Omit tokens covered by past_key_values
+ if past_key_values is not None:
+ if isinstance(past_key_values, Cache):
+ cache_length = past_key_values.get_seq_length()
+ past_length = past_key_values.seen_tokens
+ max_cache_length = past_key_values.get_max_length()
+ else:
+ cache_length = past_length = past_key_values[0][0].shape[2]
+ max_cache_length = None
+
+ # Keep only the unprocessed tokens:
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing
+ # `inputs_embeds` as input)
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
+ # input_ids based on the past_length.
+ elif past_length < input_ids.shape[1]:
+ input_ids = input_ids[:, past_length:]
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
+
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
+ if (
+ max_cache_length is not None
+ and attention_mask is not None
+ and cache_length + input_ids.shape[1] > max_cache_length
+ ):
+ attention_mask = attention_mask[:, -max_cache_length:]
+
+ position_ids = kwargs.get("position_ids", None)
+ if attention_mask is not None and position_ids is None:
+ # create position_ids on the fly for batch generation
+ position_ids = attention_mask.long().cumsum(-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
+ if past_key_values:
+ position_ids = position_ids[:, -input_ids.shape[1] :]
+
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+ if inputs_embeds is not None and past_key_values is None:
+ model_inputs = {"inputs_embeds": inputs_embeds}
+ else:
+ model_inputs = {"input_ids": input_ids}
+
+ model_inputs.update(
+ {
+ "position_ids": position_ids,
+ "past_key_values": past_key_values,
+ "use_cache": kwargs.get("use_cache"),
+ "attention_mask": attention_mask,
+ }
+ )
+ return model_inputs
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
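+ # Reorders each layer's cached key/value states along the batch dimension so that the
+ # cache follows the beams selected at the current beam-search step.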
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
+
+
+@add_start_docstrings(
+ """
+ The Skywork Model transformer with a sequence classification head on top (linear layer).
+
+ [`SkyworkForSequenceClassification`] uses the last token in order to do the classification, as other causal models
+ (e.g. GPT-2) do.
+
+ Since it does classification on the last token, it needs to know the position of the last token. If a
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
+ each row of the batch).
+ """,
+ SKYWORK_START_DOCSTRING,
+)
+# Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with Llama->Skywork, LLAMA->SKYWORK
+class SkyworkForSequenceClassification(SkyworkPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.model = SkyworkModel(config)
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(SKYWORK_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
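+
+ Example (an illustrative sketch; `PATH_TO_CONVERTED_WEIGHTS` and `PATH_TO_CONVERTED_TOKENIZER`
+ are placeholders, as in the causal-LM example above):
+
+ ```python
+ >>> import torch
+ >>> from transformers import AutoTokenizer
+
+ >>> model = SkyworkForSequenceClassification.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
+ >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
+ >>> inputs = tokenizer("This film was great!", return_tensors="pt")
+ >>> with torch.no_grad():
+ ...     logits = model(**inputs).logits
+ >>> predicted_class_id = logits.argmax(-1).item()
+ ```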
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = transformer_outputs[0]
+ logits = self.score(hidden_states)
+
+ if input_ids is not None:
+ batch_size = input_ids.shape[0]
+ else:
+ batch_size = inputs_embeds.shape[0]
+
+ if self.config.pad_token_id is None and batch_size != 1:
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
+ if self.config.pad_token_id is None:
+ sequence_lengths = -1
+ else:
+ if input_ids is not None:
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
+ sequence_lengths = sequence_lengths.to(logits.device)
+ else:
+ sequence_lengths = -1
+
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
+
+ loss = None
+ if labels is not None:
+ labels = labels.to(logits.device)
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(pooled_logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(pooled_logits, labels)
+ if not return_dict:
+ output = (pooled_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutputWithPast(
+ loss=loss,
+ logits=pooled_logits,
+ past_key_values=transformer_outputs.past_key_values,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
diff --git a/pytorch_model-00001-of-00053.bin b/pytorch_model-00001-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..2e16d086a0e2ca46a94c83edec06be3e92350dc9
--- /dev/null
+++ b/pytorch_model-00001-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a22af93ce5144d22d39fc08896e24066aa3aa4454f929810b962f5bfeeafe48f
+size 5606012214
diff --git a/pytorch_model-00002-of-00053.bin b/pytorch_model-00002-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..e66f24abe60a8ab0b0817560ea400d0833802529
--- /dev/null
+++ b/pytorch_model-00002-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b80c780ce008698f4082cf51fecd1f1a41a33d52b782e21f7d8e011f26c3a3f2
+size 5606012214
diff --git a/pytorch_model-00003-of-00053.bin b/pytorch_model-00003-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..126bfcb4c994d21b6a25031bf4a6e975379566ba
--- /dev/null
+++ b/pytorch_model-00003-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f4a7abe80a53eafa1a7e3231188e56941509f7b72a39c82f1ce24fbb1a15bf0b
+size 5606012214
diff --git a/pytorch_model-00004-of-00053.bin b/pytorch_model-00004-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..c82350601896bbde8d63f9d89162f41caee66cc6
--- /dev/null
+++ b/pytorch_model-00004-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4e6db7100f0932219e3546d27fb5308e6023d9591d4fe9aed8ecb537ba828302
+size 5606012214
diff --git a/pytorch_model-00005-of-00053.bin b/pytorch_model-00005-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..5193447fac13b7f2057f007e182001c9243c02f0
--- /dev/null
+++ b/pytorch_model-00005-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4380526ea334dbe7e58077abad9b70bd88532e51452b67bb375fa57c15a64ffe
+size 5606012214
diff --git a/pytorch_model-00006-of-00053.bin b/pytorch_model-00006-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..439b4261afe8ba5ed54b05de4fa0f148d1ea04eb
--- /dev/null
+++ b/pytorch_model-00006-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f2e4311e8a1659e3c6d09ca70052ac2e425242d7fa6a22cbefde6d08f385bb53
+size 5606012214
diff --git a/pytorch_model-00007-of-00053.bin b/pytorch_model-00007-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..539e07c7e947b20633f73a07a2687ed281b81674
--- /dev/null
+++ b/pytorch_model-00007-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a29e4b9c28c9c2322a6cd5ea6f75cbac3f6b1d5608495f8e3cbdc399aec44dc6
+size 5606012214
diff --git a/pytorch_model-00008-of-00053.bin b/pytorch_model-00008-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..a6cbba69f9bdb7917a9bf1fa2f38ca047846d68f
--- /dev/null
+++ b/pytorch_model-00008-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a0873082ca9658dac968cbc3ad419e3fc7ebbf9ec6e4eb25c96c66ad7c564046
+size 5606012214
diff --git a/pytorch_model-00009-of-00053.bin b/pytorch_model-00009-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..003fe1233ca779836c0f7e84e3f9c84705b0c075
--- /dev/null
+++ b/pytorch_model-00009-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e24d75f23c3f0a105d545d19bb6b9c07dd15a1e96702f4a61d9fd5bf54b6055c
+size 5606012214
diff --git a/pytorch_model-00010-of-00053.bin b/pytorch_model-00010-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..4080c573cb5839e4a820f3163b5dbb243edcab3d
--- /dev/null
+++ b/pytorch_model-00010-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60447e9efe1ef8317158f95be43ce20dd2030db0fb1133ac52109c4fedb2ae3a
+size 5606012214
diff --git a/pytorch_model-00011-of-00053.bin b/pytorch_model-00011-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..a8226458edb7ce89d0433000c56f566b2e82cc6f
--- /dev/null
+++ b/pytorch_model-00011-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20320ec02bf3cec1a0905ed7324deb811fe379c5225eeb4c7423b62f85266be
+size 5606012278
diff --git a/pytorch_model-00012-of-00053.bin b/pytorch_model-00012-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..4745ea7cca0bbb423ba5e9a17d7453293576f72b
--- /dev/null
+++ b/pytorch_model-00012-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:291dcb77667bf3ee689f2670762cf953736280ebf2f6e92f4c7d41594d5820c7
+size 5606012278
diff --git a/pytorch_model-00013-of-00053.bin b/pytorch_model-00013-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..4ff5feb2acd3d062e32eccc67581cb445a37d882
--- /dev/null
+++ b/pytorch_model-00013-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cec07f32ce4b620fdc0489b5bfd226a7bfbb216566c7b20585a0038536f638eb
+size 5606012278
diff --git a/pytorch_model-00014-of-00053.bin b/pytorch_model-00014-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..80d1150aa9b0fd8b6df865f4e1c446893871486e
--- /dev/null
+++ b/pytorch_model-00014-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e498e08211cc8a582c5cc4880fe843961f539c6c3bb9c7594cf2ed1e02725d4
+size 5606012278
diff --git a/pytorch_model-00015-of-00053.bin b/pytorch_model-00015-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..07443f9e60f976e66f0a3a50cd4dfd9ede6187f0
--- /dev/null
+++ b/pytorch_model-00015-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f3c7726513dfe8dc3268c24b8b79f37e9205cb09df3cac30842e29aeb71c3cd8
+size 5606012278
diff --git a/pytorch_model-00016-of-00053.bin b/pytorch_model-00016-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..a399881603ce6d87686d99321f8a99733f6b94a3
--- /dev/null
+++ b/pytorch_model-00016-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11fab816fe9a0ad46d165473b7ed053b18daa40d2ac24d469af03d7741e8a58d
+size 5606012278
diff --git a/pytorch_model-00017-of-00053.bin b/pytorch_model-00017-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..b1ccd530dba9239913b2d4adcbf837c2f97b17a0
--- /dev/null
+++ b/pytorch_model-00017-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8fc71f3a1f8f78a473caad029233cef47d8d8e9df9f3a5c164e0ad2795d085a
+size 5606012278
diff --git a/pytorch_model-00018-of-00053.bin b/pytorch_model-00018-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..7f67a2559a8c07905c777881c93d1b35c9e68695
--- /dev/null
+++ b/pytorch_model-00018-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d44a2ccefc63d9a7c5e2edae37e1e3d75f465bd67379649a121416c3accf651
+size 5606012278
diff --git a/pytorch_model-00019-of-00053.bin b/pytorch_model-00019-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..9fb4d817d886a0a8105aa93292f82c5ae5e6a8b5
--- /dev/null
+++ b/pytorch_model-00019-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:98ea769662b4b35d01528d2cfaa6d4094ae2ca2383027fea90098aa0e05570e8
+size 5606012278
diff --git a/pytorch_model-00020-of-00053.bin b/pytorch_model-00020-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..285b3f6d284debdaee1d69abc18179b7cea06c9c
--- /dev/null
+++ b/pytorch_model-00020-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fbf514e0d46d6f12c97e332e9a51e3a9a6f283d4693a329643eeefc8e1236802
+size 5606012278
diff --git a/pytorch_model-00021-of-00053.bin b/pytorch_model-00021-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..50a0140e92dd8e9005ff4427e5517b0e90ac448e
--- /dev/null
+++ b/pytorch_model-00021-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:286f6441449b7e5bc0d459029f24cbd22c0f4b9143ef56a49c0c99a271175070
+size 5606012278
diff --git a/pytorch_model-00022-of-00053.bin b/pytorch_model-00022-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..c991cc20d62c1d21e058d5074c2564a464354852
--- /dev/null
+++ b/pytorch_model-00022-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d82db0d0271a0fcf77643bf025ffc0f471b5b6962a94065d744d0aef586a0b6a
+size 5606012278
diff --git a/pytorch_model-00023-of-00053.bin b/pytorch_model-00023-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..34ec1a030b6ac3941f93f6b5c5800c1af9c471cb
--- /dev/null
+++ b/pytorch_model-00023-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c6d31b5eec13e0cfc81690cf5a78151552a66bc1442051a48ead2d7cadd3b18b
+size 5606012278
diff --git a/pytorch_model-00024-of-00053.bin b/pytorch_model-00024-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..83f0db37dfb5841133bccd1e52a67562b76980f3
--- /dev/null
+++ b/pytorch_model-00024-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eac3fb50eb1e1496415de23a198918b2a19e28bb6045125f6f48974376494dc4
+size 5606012278
diff --git a/pytorch_model-00025-of-00053.bin b/pytorch_model-00025-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..dd728953b6cf67ed8aadb9999d5832f5ad9f4090
--- /dev/null
+++ b/pytorch_model-00025-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a0c5a08f09c724cc6373d4cfa6b2e448040d3c913c149af43d8c7b8beb3730f
+size 5606012278
diff --git a/pytorch_model-00026-of-00053.bin b/pytorch_model-00026-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..93d7838bb3feb6cbb1840441d4a317c8ec6814d7
--- /dev/null
+++ b/pytorch_model-00026-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a37e8a69c9ded8a1c0abefff4d3a4d71dba14252f5def086976b0e4998e6dc2
+size 5606012278
diff --git a/pytorch_model-00027-of-00053.bin b/pytorch_model-00027-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..97d01b908a8ae8e67ec729e1e240878b4e78b9e3
--- /dev/null
+++ b/pytorch_model-00027-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c5b26230f8aff06fcc78d2afc627dfff72bd70ea4861c51338c67ae1509141fb
+size 5606012278
diff --git a/pytorch_model-00028-of-00053.bin b/pytorch_model-00028-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..479ff2c208e8eb94849af164ec8463151721a75a
--- /dev/null
+++ b/pytorch_model-00028-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb1c09a0c2be1d102e0c59c60067bdaae79861a6afba441cdd70a01b0964b84b
+size 5606012278
diff --git a/pytorch_model-00029-of-00053.bin b/pytorch_model-00029-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..273de1680a4392915e95094f6ee995d26cb16f25
--- /dev/null
+++ b/pytorch_model-00029-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:66b0e591ed6a337f7f332d4f8a4f0880a3020d0ed6db1db7f92b51a9fa7a1ca3
+size 5606012278
diff --git a/pytorch_model-00030-of-00053.bin b/pytorch_model-00030-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..a56f9b26921b57e8bb93737bf01b279f4140acb9
--- /dev/null
+++ b/pytorch_model-00030-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bd82d4a5905bb08d96417d4ef91063738f5368ed4053186ab8f44c2baf316612
+size 5606012278
diff --git a/pytorch_model-00031-of-00053.bin b/pytorch_model-00031-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..006d24d898c3aae300a8bf690e66519e6835030a
--- /dev/null
+++ b/pytorch_model-00031-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e16d1611822e21e228afa5ae410eb04456355b9b175333cc1903c16da60f7111
+size 5606012278
diff --git a/pytorch_model-00032-of-00053.bin b/pytorch_model-00032-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..ca99a584c7040203e257df6a6a93ab586b1b2ac3
--- /dev/null
+++ b/pytorch_model-00032-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b92145427a08ca63ac63d3c7d61e6493f5a7b642ff940f2a4ea92e7a02f4a29
+size 5606012278
diff --git a/pytorch_model-00033-of-00053.bin b/pytorch_model-00033-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..621e0da1700a2cb33203c01b2d70ed5943df1927
--- /dev/null
+++ b/pytorch_model-00033-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7d0ef57993eba2ed75d66d9bb49e43bb669eb7dd67e77eddc122f4832cd95e3
+size 5606012278
diff --git a/pytorch_model-00034-of-00053.bin b/pytorch_model-00034-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..e3e1a1b9b6f0705dbfbd976aa770c3008004e992
--- /dev/null
+++ b/pytorch_model-00034-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:272a3cfafea1857cb4d1b6e5fa306f1e3cdb97caf9b287adea8b622c4ac10d09
+size 5606012278
diff --git a/pytorch_model-00035-of-00053.bin b/pytorch_model-00035-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..c346656d1fa4c24f2a44574bae3c1674a2ca986d
--- /dev/null
+++ b/pytorch_model-00035-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4c7f13eae184e4a98193be2d5dd20bfdd636c12987869969829398a684ba2f26
+size 5606012278
diff --git a/pytorch_model-00036-of-00053.bin b/pytorch_model-00036-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..8abe94584a40bdf7a1b9310485674c73ceeba2a7
--- /dev/null
+++ b/pytorch_model-00036-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:33cde811a94ed1e15074439fe145206918d35ca5240ed2768aa79137b52f1ddb
+size 5606012278
diff --git a/pytorch_model-00037-of-00053.bin b/pytorch_model-00037-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..6c6c7f26073b599ecca90aa8f0ab11a400990632
--- /dev/null
+++ b/pytorch_model-00037-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:793c3ba7957c68e1763fe1b0aea616cc05868d555cabf7e4cda8165f5b93f719
+size 5606012278
diff --git a/pytorch_model-00038-of-00053.bin b/pytorch_model-00038-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..58030995099c6670c811941958f722d5da14f1a5
--- /dev/null
+++ b/pytorch_model-00038-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1d3d216014beb54c40db35235bd60285d611a58e6726bd2e9d14a5771310dc66
+size 5606012278
diff --git a/pytorch_model-00039-of-00053.bin b/pytorch_model-00039-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..cdab1fcf8c49bdd2d68c2ef80bfe87a95b529ac0
--- /dev/null
+++ b/pytorch_model-00039-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41e45051d4e883163bed6559aec96ddc95c446050f0b4262bdcb751ed3b3e92e
+size 5606012278
diff --git a/pytorch_model-00040-of-00053.bin b/pytorch_model-00040-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..8902d7c9140e5294085c1fb826e1a139977261ee
--- /dev/null
+++ b/pytorch_model-00040-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19bc57f59a16a571ea5583b90c16172709cd3f37e02ba1799fa4b28c44739f5c
+size 5606012278
diff --git a/pytorch_model-00041-of-00053.bin b/pytorch_model-00041-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..591e1d045888d946b4e303ae30304d30bddddff1
--- /dev/null
+++ b/pytorch_model-00041-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f3d8770e8cb3723537e1c0826009cd6b71d58497974d4f8c8f876dfe1e26dee
+size 5606012278
diff --git a/pytorch_model-00042-of-00053.bin b/pytorch_model-00042-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..60a33d1c9b1dbdfc0a2deb67382c25880d5b1160
--- /dev/null
+++ b/pytorch_model-00042-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c75613dc3754db1c596a6f6477497cbffb6962f22fcbf5ead458cc9f944f76d1
+size 5606012278
diff --git a/pytorch_model-00043-of-00053.bin b/pytorch_model-00043-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..b6feb4ee4c8d6939b56c0f4f99214322870c1823
--- /dev/null
+++ b/pytorch_model-00043-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:138586728295924720ab003140e58b73db608fb11d31fe142fd244dae72c842a
+size 5606012278
diff --git a/pytorch_model-00044-of-00053.bin b/pytorch_model-00044-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..768bdf623727e0b7c423cc76b4d067ba349eaa17
--- /dev/null
+++ b/pytorch_model-00044-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4dc46ef942b8ecb37ef8b1d81ea5ac12c3812bdc7d6f5e7d38a9d4701cb75ef2
+size 5606012278
diff --git a/pytorch_model-00045-of-00053.bin b/pytorch_model-00045-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..ba7bee4de37800c46b018363786c9d26c8d6484d
--- /dev/null
+++ b/pytorch_model-00045-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c68200d9abc50973203368892203dc59b2b19c95011a964e6ff2ece66f9e2c5e
+size 5606012278
diff --git a/pytorch_model-00046-of-00053.bin b/pytorch_model-00046-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..b239a4204fa99deb212975f85168dd9b78d72bbc
--- /dev/null
+++ b/pytorch_model-00046-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e4e8ad5990cc662e2a35388cf697cb293de6d870d24134bf7e701ffcad36313
+size 5606012278
diff --git a/pytorch_model-00047-of-00053.bin b/pytorch_model-00047-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..6168f1b2532f16ca8c17402220a9da86019d010d
--- /dev/null
+++ b/pytorch_model-00047-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0ca4149026f17f198ec893dc3cef540a9058aff3b5d2eb1eec691880588f1261
+size 5606012278
diff --git a/pytorch_model-00048-of-00053.bin b/pytorch_model-00048-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..eff0d927d5af46cacde7cb56459becab61b421e6
--- /dev/null
+++ b/pytorch_model-00048-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4e6b2221341dfc883ea8823abf03d411f270d6a58f3fb44e6c365b8e743816bd
+size 5606012278
diff --git a/pytorch_model-00049-of-00053.bin b/pytorch_model-00049-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..6f572722bde9a03712d0211471a33b65cdf86139
--- /dev/null
+++ b/pytorch_model-00049-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bcfe3ac85fc1a260bf6e78ef75d5765e7cc7c3cb6aa67589dffd9188375abb9b
+size 5606012278
diff --git a/pytorch_model-00050-of-00053.bin b/pytorch_model-00050-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..6b52e3df71db6188a32df82436697c848074ba7c
--- /dev/null
+++ b/pytorch_model-00050-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f9aec6b0414e4d09ecbaa7e473f3306459522fcf1f9f95c673f1526bcb7742b2
+size 5606012278
diff --git a/pytorch_model-00051-of-00053.bin b/pytorch_model-00051-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..1aed3a9e2c9699b6e96190f7b30b9b392a2c83dd
--- /dev/null
+++ b/pytorch_model-00051-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c87980f790ae04e3626f3d71bfde67aa98149b381712545aea6ec8d7d3cd0da
+size 5606012278
diff --git a/pytorch_model-00052-of-00053.bin b/pytorch_model-00052-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..b4807ecc7a094d9a293ae6564c93146f06875746
--- /dev/null
+++ b/pytorch_model-00052-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3650819371731afc44712f474bc1412f1c87f1f0cb14a1db1fbd584e822314f6
+size 5606012278
diff --git a/pytorch_model-00053-of-00053.bin b/pytorch_model-00053-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..df97e654aa4527ea51f839aa16cdc397cd7797d9
--- /dev/null
+++ b/pytorch_model-00053-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67a658f43bdcb145ccb4ef8ecd14aa0a8e7c72f2ccffcff0267b6b2331daf597
+size 1207896999
diff --git a/pytorch_model.bin.index.json b/pytorch_model.bin.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..4934ee2ab567f39c2a30d11deb179ba5f9ddc307
--- /dev/null
+++ b/pytorch_model.bin.index.json
@@ -0,0 +1 @@
+{"metadata": {"total_size": 292719941632}, "weight_map": {"model.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.self_attn.q_proj.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.self_attn.k_proj.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.self_attn.v_proj.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.self_attn.o_proj.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.gate.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00001-of-00053.bin", 
"model.layers.0.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00001-of-00053.bin", "model.layers.1.input_layernorm.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.post_attention_layernorm.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.self_attn.q_proj.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.self_attn.k_proj.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.self_attn.v_proj.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.self_attn.o_proj.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.gate.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.4.w2.weight": 
"pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00002-of-00053.bin", "model.layers.2.input_layernorm.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.post_attention_layernorm.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.self_attn.q_proj.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.self_attn.k_proj.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.self_attn.v_proj.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.self_attn.o_proj.weight": "pytorch_model-00003-of-00053.bin", 
"model.layers.2.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.gate.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00003-of-00053.bin", 
"model.layers.2.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00003-of-00053.bin", "model.layers.3.input_layernorm.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.post_attention_layernorm.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.self_attn.q_proj.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.self_attn.k_proj.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.self_attn.v_proj.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.self_attn.o_proj.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.self_attn.rotary_emb.inv_freq": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.gate.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.6.w2.weight": 
"pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00004-of-00053.bin", "model.layers.4.input_layernorm.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.post_attention_layernorm.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.self_attn.q_proj.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.self_attn.k_proj.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.self_attn.v_proj.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.self_attn.o_proj.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.self_attn.rotary_emb.inv_freq": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.gate.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00005-of-00053.bin", 
"model.layers.4.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.14.w1.weight": 
"pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00005-of-00053.bin", "model.layers.5.input_layernorm.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.post_attention_layernorm.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.self_attn.q_proj.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.self_attn.k_proj.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.self_attn.v_proj.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.self_attn.o_proj.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.self_attn.rotary_emb.inv_freq": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.gate.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00006-of-00053.bin", 
"model.layers.5.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00006-of-00053.bin", "model.layers.6.input_layernorm.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.post_attention_layernorm.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.self_attn.q_proj.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.self_attn.k_proj.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.self_attn.v_proj.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.self_attn.o_proj.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.self_attn.rotary_emb.inv_freq": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.gate.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.3.w1.weight": 
"pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00007-of-00053.bin", "model.layers.7.input_layernorm.weight": 
"pytorch_model-00008-of-00053.bin", "model.layers.7.post_attention_layernorm.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.self_attn.q_proj.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.self_attn.k_proj.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.self_attn.v_proj.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.self_attn.o_proj.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.self_attn.rotary_emb.inv_freq": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.gate.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00008-of-00053.bin", 
"model.layers.7.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00008-of-00053.bin", "model.layers.8.input_layernorm.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.post_attention_layernorm.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.self_attn.q_proj.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.self_attn.k_proj.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.self_attn.v_proj.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.self_attn.o_proj.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.self_attn.rotary_emb.inv_freq": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.gate.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.5.w1.weight": 
"pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00009-of-00053.bin", "model.layers.9.input_layernorm.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.post_attention_layernorm.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.self_attn.q_proj.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.self_attn.k_proj.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.self_attn.v_proj.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.self_attn.o_proj.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.self_attn.rotary_emb.inv_freq": "pytorch_model-00010-of-00053.bin", 
"model.layers.9.block_sparse_moe.gate.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00010-of-00053.bin", 
"model.layers.9.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00010-of-00053.bin", "model.layers.10.input_layernorm.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.post_attention_layernorm.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.self_attn.q_proj.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.self_attn.k_proj.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.self_attn.v_proj.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.self_attn.o_proj.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.self_attn.rotary_emb.inv_freq": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.gate.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00011-of-00053.bin", 
"model.layers.10.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00011-of-00053.bin", "model.layers.11.input_layernorm.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.post_attention_layernorm.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.self_attn.q_proj.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.self_attn.k_proj.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.self_attn.v_proj.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.self_attn.o_proj.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.self_attn.rotary_emb.inv_freq": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.gate.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00012-of-00053.bin", 
"model.layers.11.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00012-of-00053.bin", 
"model.layers.11.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00012-of-00053.bin", "model.layers.12.input_layernorm.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.post_attention_layernorm.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.self_attn.q_proj.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.self_attn.k_proj.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.self_attn.v_proj.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.self_attn.o_proj.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.self_attn.rotary_emb.inv_freq": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.gate.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00013-of-00053.bin", 
"model.layers.12.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00013-of-00053.bin", "model.layers.13.input_layernorm.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.post_attention_layernorm.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.self_attn.q_proj.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.self_attn.k_proj.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.self_attn.v_proj.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.self_attn.o_proj.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.self_attn.rotary_emb.inv_freq": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.gate.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00014-of-00053.bin", 
"model.layers.13.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00014-of-00053.bin", 
"model.layers.13.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00014-of-00053.bin", "model.layers.14.input_layernorm.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.post_attention_layernorm.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.self_attn.q_proj.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.self_attn.k_proj.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.self_attn.v_proj.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.self_attn.o_proj.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.self_attn.rotary_emb.inv_freq": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.gate.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00015-of-00053.bin", 
"model.layers.14.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00015-of-00053.bin", "model.layers.15.input_layernorm.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.post_attention_layernorm.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.self_attn.q_proj.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.self_attn.k_proj.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.self_attn.v_proj.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.self_attn.o_proj.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.self_attn.rotary_emb.inv_freq": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.gate.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00016-of-00053.bin", 
"model.layers.15.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00016-of-00053.bin", "model.layers.16.input_layernorm.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.post_attention_layernorm.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.self_attn.q_proj.weight": 
"pytorch_model-00017-of-00053.bin", "model.layers.16.self_attn.k_proj.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.self_attn.v_proj.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.self_attn.o_proj.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.self_attn.rotary_emb.inv_freq": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.gate.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.11.w1.weight": 
"pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00017-of-00053.bin", "model.layers.17.input_layernorm.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.post_attention_layernorm.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.self_attn.q_proj.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.self_attn.k_proj.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.self_attn.v_proj.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.self_attn.o_proj.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.self_attn.rotary_emb.inv_freq": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.gate.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00018-of-00053.bin", 
"model.layers.17.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00018-of-00053.bin", "model.layers.18.input_layernorm.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.post_attention_layernorm.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.self_attn.q_proj.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.self_attn.k_proj.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.self_attn.v_proj.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.self_attn.o_proj.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.self_attn.rotary_emb.inv_freq": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.gate.weight": "pytorch_model-00019-of-00053.bin", 
"model.layers.18.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00019-of-00053.bin", 
"model.layers.18.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00019-of-00053.bin", "model.layers.19.input_layernorm.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.post_attention_layernorm.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.self_attn.q_proj.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.self_attn.k_proj.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.self_attn.v_proj.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.self_attn.o_proj.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.self_attn.rotary_emb.inv_freq": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.gate.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00020-of-00053.bin", 
"model.layers.19.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00020-of-00053.bin", "model.layers.20.input_layernorm.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.post_attention_layernorm.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.self_attn.q_proj.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.self_attn.k_proj.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.self_attn.v_proj.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.self_attn.o_proj.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.self_attn.rotary_emb.inv_freq": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.gate.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00021-of-00053.bin", 
"model.layers.20.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00021-of-00053.bin", 
"model.layers.20.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00021-of-00053.bin", "model.layers.21.input_layernorm.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.post_attention_layernorm.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.self_attn.q_proj.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.self_attn.k_proj.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.self_attn.v_proj.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.self_attn.o_proj.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.self_attn.rotary_emb.inv_freq": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.gate.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00022-of-00053.bin", 
"model.layers.21.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00022-of-00053.bin", "model.layers.22.input_layernorm.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.post_attention_layernorm.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.self_attn.q_proj.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.self_attn.k_proj.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.self_attn.v_proj.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.self_attn.o_proj.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.self_attn.rotary_emb.inv_freq": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.gate.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00023-of-00053.bin", 
"model.layers.22.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00023-of-00053.bin", 
"model.layers.22.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00023-of-00053.bin", "model.layers.23.input_layernorm.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.post_attention_layernorm.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.self_attn.q_proj.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.self_attn.k_proj.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.self_attn.v_proj.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.self_attn.o_proj.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.self_attn.rotary_emb.inv_freq": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.gate.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00024-of-00053.bin", 
"model.layers.23.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00024-of-00053.bin", "model.layers.24.input_layernorm.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.post_attention_layernorm.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.self_attn.q_proj.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.self_attn.k_proj.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.self_attn.v_proj.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.self_attn.o_proj.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.self_attn.rotary_emb.inv_freq": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.gate.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00025-of-00053.bin", 
"model.layers.24.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00025-of-00053.bin", "model.layers.25.input_layernorm.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.post_attention_layernorm.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.self_attn.q_proj.weight": 
"pytorch_model-00026-of-00053.bin", "model.layers.25.self_attn.k_proj.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.self_attn.v_proj.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.self_attn.o_proj.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.self_attn.rotary_emb.inv_freq": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.gate.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.11.w1.weight": 
"pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00026-of-00053.bin", "model.layers.26.input_layernorm.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.post_attention_layernorm.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.self_attn.q_proj.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.self_attn.k_proj.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.self_attn.v_proj.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.self_attn.o_proj.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.self_attn.rotary_emb.inv_freq": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.gate.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00027-of-00053.bin", 
"model.layers.26.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00027-of-00053.bin", "model.layers.27.input_layernorm.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.post_attention_layernorm.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.self_attn.q_proj.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.self_attn.k_proj.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.self_attn.v_proj.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.self_attn.o_proj.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.self_attn.rotary_emb.inv_freq": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.gate.weight": "pytorch_model-00028-of-00053.bin", 
"model.layers.27.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00028-of-00053.bin", 
"model.layers.27.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00028-of-00053.bin", "model.layers.28.input_layernorm.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.post_attention_layernorm.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.self_attn.q_proj.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.self_attn.k_proj.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.self_attn.v_proj.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.self_attn.o_proj.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.self_attn.rotary_emb.inv_freq": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.gate.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00029-of-00053.bin", 
"model.layers.28.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00029-of-00053.bin", "model.layers.29.input_layernorm.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.post_attention_layernorm.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.self_attn.q_proj.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.self_attn.k_proj.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.self_attn.v_proj.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.self_attn.o_proj.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.self_attn.rotary_emb.inv_freq": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.gate.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00030-of-00053.bin", 
"model.layers.29.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00030-of-00053.bin", 
"model.layers.29.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00030-of-00053.bin", "model.layers.30.input_layernorm.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.post_attention_layernorm.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.self_attn.q_proj.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.self_attn.k_proj.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.self_attn.v_proj.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.self_attn.o_proj.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.self_attn.rotary_emb.inv_freq": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.gate.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00031-of-00053.bin", 
"model.layers.30.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00031-of-00053.bin", "model.layers.31.input_layernorm.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.post_attention_layernorm.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.self_attn.q_proj.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.self_attn.k_proj.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.self_attn.v_proj.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.self_attn.o_proj.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.self_attn.rotary_emb.inv_freq": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.gate.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00032-of-00053.bin", 
"model.layers.31.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00032-of-00053.bin", 
"model.layers.31.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00032-of-00053.bin", "model.layers.32.input_layernorm.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.post_attention_layernorm.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.self_attn.q_proj.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.self_attn.k_proj.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.self_attn.v_proj.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.self_attn.o_proj.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.self_attn.rotary_emb.inv_freq": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.gate.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00033-of-00053.bin", 
"model.layers.32.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00033-of-00053.bin", "model.layers.33.input_layernorm.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.post_attention_layernorm.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.self_attn.q_proj.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.self_attn.k_proj.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.self_attn.v_proj.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.self_attn.o_proj.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.self_attn.rotary_emb.inv_freq": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.gate.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00034-of-00053.bin", 
"model.layers.33.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00034-of-00053.bin", "model.layers.34.input_layernorm.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.post_attention_layernorm.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.self_attn.q_proj.weight": 
"pytorch_model-00035-of-00053.bin", "model.layers.34.self_attn.k_proj.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.self_attn.v_proj.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.self_attn.o_proj.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.self_attn.rotary_emb.inv_freq": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.gate.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.11.w1.weight": 
"pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00035-of-00053.bin", "model.layers.35.input_layernorm.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.post_attention_layernorm.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.self_attn.q_proj.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.self_attn.k_proj.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.self_attn.v_proj.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.self_attn.o_proj.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.self_attn.rotary_emb.inv_freq": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.gate.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00036-of-00053.bin", 
"model.layers.35.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00036-of-00053.bin", "model.layers.36.input_layernorm.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.post_attention_layernorm.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.self_attn.q_proj.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.self_attn.k_proj.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.self_attn.v_proj.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.self_attn.o_proj.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.self_attn.rotary_emb.inv_freq": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.gate.weight": "pytorch_model-00037-of-00053.bin", 
"model.layers.36.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00037-of-00053.bin", 
"model.layers.36.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00037-of-00053.bin", "model.layers.37.input_layernorm.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.post_attention_layernorm.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.self_attn.q_proj.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.self_attn.k_proj.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.self_attn.v_proj.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.self_attn.o_proj.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.self_attn.rotary_emb.inv_freq": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.gate.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00038-of-00053.bin", 
"model.layers.37.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00038-of-00053.bin", "model.layers.38.input_layernorm.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.post_attention_layernorm.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.self_attn.q_proj.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.self_attn.k_proj.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.self_attn.v_proj.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.self_attn.o_proj.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.self_attn.rotary_emb.inv_freq": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.gate.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00039-of-00053.bin", 
"model.layers.38.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00039-of-00053.bin", 
"model.layers.38.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00039-of-00053.bin", "model.layers.39.input_layernorm.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.post_attention_layernorm.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.self_attn.q_proj.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.self_attn.k_proj.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.self_attn.v_proj.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.self_attn.o_proj.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.self_attn.rotary_emb.inv_freq": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.gate.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00040-of-00053.bin", 
"model.layers.39.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00040-of-00053.bin", "model.layers.40.input_layernorm.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.post_attention_layernorm.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.self_attn.q_proj.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.self_attn.k_proj.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.self_attn.v_proj.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.self_attn.o_proj.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.self_attn.rotary_emb.inv_freq": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.gate.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00041-of-00053.bin", 
"model.layers.40.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00041-of-00053.bin", 
"model.layers.40.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00041-of-00053.bin", "model.layers.41.input_layernorm.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.post_attention_layernorm.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.self_attn.q_proj.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.self_attn.k_proj.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.self_attn.v_proj.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.self_attn.o_proj.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.self_attn.rotary_emb.inv_freq": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.gate.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00042-of-00053.bin", 
"model.layers.41.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00042-of-00053.bin", "model.layers.42.input_layernorm.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.post_attention_layernorm.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.self_attn.q_proj.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.self_attn.k_proj.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.self_attn.v_proj.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.self_attn.o_proj.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.self_attn.rotary_emb.inv_freq": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.gate.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00043-of-00053.bin", 
"model.layers.42.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00043-of-00053.bin", "model.layers.43.input_layernorm.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.post_attention_layernorm.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.self_attn.q_proj.weight": 
"pytorch_model-00044-of-00053.bin", "model.layers.43.self_attn.k_proj.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.self_attn.v_proj.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.self_attn.o_proj.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.self_attn.rotary_emb.inv_freq": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.gate.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.11.w1.weight": 
"pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00044-of-00053.bin", "model.layers.44.input_layernorm.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.post_attention_layernorm.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.self_attn.q_proj.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.self_attn.k_proj.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.self_attn.v_proj.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.self_attn.o_proj.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.self_attn.rotary_emb.inv_freq": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.gate.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00045-of-00053.bin", 
"model.layers.44.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00045-of-00053.bin", "model.layers.45.input_layernorm.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.post_attention_layernorm.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.self_attn.q_proj.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.self_attn.k_proj.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.self_attn.v_proj.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.self_attn.o_proj.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.self_attn.rotary_emb.inv_freq": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.gate.weight": "pytorch_model-00046-of-00053.bin", 
"model.layers.45.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00046-of-00053.bin", 
"model.layers.45.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00046-of-00053.bin", "model.layers.46.input_layernorm.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.post_attention_layernorm.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.self_attn.q_proj.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.self_attn.k_proj.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.self_attn.v_proj.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.self_attn.o_proj.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.self_attn.rotary_emb.inv_freq": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.gate.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00047-of-00053.bin", 
"model.layers.46.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00047-of-00053.bin", "model.layers.47.input_layernorm.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.post_attention_layernorm.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.self_attn.q_proj.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.self_attn.k_proj.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.self_attn.v_proj.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.self_attn.o_proj.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.self_attn.rotary_emb.inv_freq": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.gate.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00048-of-00053.bin", 
"model.layers.47.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00048-of-00053.bin", 
"model.layers.47.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00048-of-00053.bin", "model.layers.48.input_layernorm.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.post_attention_layernorm.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.self_attn.q_proj.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.self_attn.k_proj.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.self_attn.v_proj.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.self_attn.o_proj.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.self_attn.rotary_emb.inv_freq": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.gate.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00049-of-00053.bin", 
"model.layers.48.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00049-of-00053.bin", "model.layers.49.input_layernorm.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.post_attention_layernorm.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.self_attn.q_proj.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.self_attn.k_proj.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.self_attn.v_proj.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.self_attn.o_proj.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.self_attn.rotary_emb.inv_freq": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.gate.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00050-of-00053.bin", 
"model.layers.49.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00050-of-00053.bin", 
"model.layers.49.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00050-of-00053.bin", "model.layers.50.input_layernorm.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.post_attention_layernorm.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.self_attn.q_proj.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.self_attn.k_proj.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.self_attn.v_proj.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.self_attn.o_proj.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.self_attn.rotary_emb.inv_freq": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.gate.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00051-of-00053.bin", 
"model.layers.50.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00051-of-00053.bin", "model.layers.51.input_layernorm.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.post_attention_layernorm.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.self_attn.q_proj.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.self_attn.k_proj.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.self_attn.v_proj.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.self_attn.o_proj.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.self_attn.rotary_emb.inv_freq": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.gate.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.3.w1.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.3.w3.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.3.w2.weight": "pytorch_model-00052-of-00053.bin", 
"model.layers.51.block_sparse_moe.experts.4.w1.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.4.w3.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.4.w2.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.5.w1.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.5.w3.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.5.w2.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.6.w1.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.6.w3.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.6.w2.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.7.w1.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.7.w3.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.7.w2.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.8.w1.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.8.w3.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.8.w2.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.9.w1.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.9.w3.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.9.w2.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.10.w1.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.10.w3.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.10.w2.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.11.w1.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.11.w3.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.11.w2.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.12.w1.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.12.w3.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.12.w2.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.13.w1.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.13.w3.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.13.w2.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.14.w1.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.14.w3.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.14.w2.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.15.w1.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.15.w3.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.block_sparse_moe.experts.15.w2.weight": "pytorch_model-00052-of-00053.bin", "model.norm.weight": "pytorch_model-00053-of-00053.bin", "model.embed_tokens.weight": "pytorch_model-00053-of-00053.bin", "lm_head.weight": "pytorch_model-00053-of-00053.bin"}}
\ No newline at end of file
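The map above is the tail of pytorch_model.bin.index.json: each parameter name is keyed to the one shard (of 53) that stores it, which is what lets a loader open only the shards it needs. A minimal sketch of consuming the index by hand, assuming the shards sit next to the JSON (transformers' from_pretrained does this automatically):

    import json
    from collections import defaultdict

    import torch

    with open("pytorch_model.bin.index.json") as f:
        index = json.load(f)

    # Group parameter names by shard so each .bin file is read exactly once.
    shard_to_params = defaultdict(list)
    for param_name, shard_file in index["weight_map"].items():
        shard_to_params[shard_file].append(param_name)

    state_dict = {}
    for shard_file, param_names in shard_to_params.items():
        shard = torch.load(shard_file, map_location="cpu")
        for name in param_names:
            state_dict[name] = shard[name]
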
diff --git a/special_tokens_map.json b/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..ae05f207592fd52613c259d3a02ce868d5967284
--- /dev/null
+++ b/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
\ No newline at end of file
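special_tokens_map.json stores each special token as a serialized AddedToken; the lstrip/rstrip flags say whether whitespace around the token is absorbed into it, and normalized controls whether its content passes through the normalizer. A quick check of what was registered:

    import json

    with open("special_tokens_map.json") as f:
        specials = json.load(f)

    # lstrip/rstrip=False: surrounding whitespace stays in the text rather
    # than being swallowed by the special token.
    for name, token in specials.items():
        print(name, token["content"], token["lstrip"], token["rstrip"])
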
diff --git a/tokenization_skywork.py b/tokenization_skywork.py
new file mode 100644
index 0000000000000000000000000000000000000000..4897a51da746a357961619d8d9d074c5bfc28f4b
--- /dev/null
+++ b/tokenization_skywork.py
@@ -0,0 +1,250 @@
+# Copyright (c) SkyworkAI and the HuggingFace Inc. team. All rights reserved.
+# This code is built upon Huggingface's transformers repository.
+
+"""Tokenization classes for Skywork."""
+import os
+from shutil import copyfile
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
+
+import sentencepiece as spm
+
+from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
+from transformers.utils import logging
+
+if TYPE_CHECKING:
+ from transformers.pipelines.conversational import Conversation
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
+
+
+SPIECE_UNDERLINE = "▁"
+
+B_INST, E_INST = "[INST]", "[/INST]"
+B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
+
+DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure \
+that your responses are socially unbiased and positive in nature.
+
+If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."""
+
+class SkyworkTokenizer(PreTrainedTokenizer):
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ # pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ # max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file,
+ unk_token="<unk>",
+ bos_token="<s>",
+ eos_token="</s>",
+ pad_token=None,
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
+ add_bos_token=True,
+ add_eos_token=False,
+ clean_up_tokenization_spaces=False,
+ legacy=True,
+ **kwargs,
+ ):
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
+ self.legacy = legacy
+ self.vocab_file = vocab_file
+ self.add_bos_token = add_bos_token
+ self.add_eos_token = add_eos_token
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(vocab_file)
+ super().__init__(
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ add_bos_token=add_bos_token,
+ add_eos_token=add_eos_token,
+ additional_special_tokens=["[USER]", "[BOT]", "[SEP]"],
+ sp_model_kwargs=self.sp_model_kwargs,
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+ legacy=legacy,
+ **kwargs,
+ )
+ if legacy:
+ logger.warning_once(
+ f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. "
+ )
+
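+ # Pickling support: SentencePieceProcessor objects are not picklable, so
+ # serialize the model proto in __getstate__ and rebuild it in __setstate__.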
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state["sp_model"] = None
+ state["sp_model_proto"] = self.sp_model.serialized_model_proto()
+ return state
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
+
+ @property
+ def vocab_size(self):
+ """Returns vocab size"""
+ return self.sp_model.get_piece_size()
+
+ def get_vocab(self):
+ """Returns vocab as a dict"""
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.tokenize
+ def tokenize(self, text, **kwargs) -> List[str]:
+ # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
+ # the beginning of the text
+ if not self.legacy:
+ text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
+ return super().tokenize(text, **kwargs)
+
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._tokenize
+ def _tokenize(self, text):
+ if not self.legacy:
+ is_first = text.startswith(SPIECE_UNDERLINE)
+ if is_first:
+ text = text[1:]
+
+ tokens = self.sp_model.encode(text, out_type=str)
+
+ if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
+ tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
+ return tokens
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.sp_model.piece_to_id(token)
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ token = self.sp_model.IdToPiece(index)
+ return token
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ current_sub_tokens = []
+ out_string = ""
+ prev_is_special = False
+ for i, token in enumerate(tokens):
+ # make sure that special tokens are not decoded using sentencepiece model
+ if token in self.all_special_tokens:
+ if not prev_is_special and i != 0:
+ out_string += " "
+ out_string += self.sp_model.decode(current_sub_tokens) + token
+ prev_is_special = True
+ current_sub_tokens = []
+ else:
+ current_sub_tokens.append(token)
+ prev_is_special = False
+ out_string += self.sp_model.decode(current_sub_tokens)
+ return out_string
+
+ def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+ elif not os.path.isfile(self.vocab_file):
+ with open(out_vocab_file, "wb") as fi:
+ content_spiece_model = self.sp_model.serialized_model_proto()
+ fi.write(content_spiece_model)
+
+ return (out_vocab_file,)
+
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+ output = bos_token_id + token_ids_0 + eos_token_id
+
+ if token_ids_1 is not None:
+ output = output + bos_token_id + token_ids_1 + eos_token_id
+
+ return output
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ bos_token_id = [1] if self.add_bos_token else []
+ eos_token_id = [1] if self.add_eos_token else []
+
+ if token_ids_1 is None:
+ return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
+ return (
+ bos_token_id
+ + ([0] * len(token_ids_0))
+ + eos_token_id
+ + bos_token_id
+ + ([0] * len(token_ids_1))
+ + eos_token_id
+ )
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+ output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
+
+ if token_ids_1 is not None:
+ output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
+
+ return output
+
+ def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
+ dialogue = list(conversation.iter_texts())
+ if not all([is_user for is_user, msg in dialogue[::2]]) or not all(
+ [not is_user for is_user, msg in dialogue[1::2]]
+ ):
+ raise ValueError(
+ "The model only supports 'user' and 'assistant' roles, starting with user and alternating (u/a/u/a/u...)"
+ )
+
+ dialog_tokens: List[int] = []
+ if len(conversation.past_user_inputs) > 0:
+ if not conversation.past_user_inputs[0].startswith(B_SYS) or E_SYS not in conversation.past_user_inputs[0]:
+ conversation.past_user_inputs[0] = (
+ B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + conversation.past_user_inputs[0]
+ )
+ elif not dialogue[0][1].startswith(B_SYS) or E_SYS not in dialogue[0][1]:
+ dialogue[0] = (dialogue[0][0], B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + dialogue[0][1])
+
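+ # Render each completed (user, assistant) turn as a Llama-2-style
+ # "<s>[INST] user [/INST] assistant </s>" block, then append the final,
+ # unanswered user message with BOS but no closing EOS.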
+ dialog_tokens += sum(
+ [
+ [self.bos_token_id]
+ + self.encode(
+ f"{B_INST} {(prompt[1]).strip()} {E_INST} {(answer[1]).strip()} ", add_special_tokens=False
+ )
+ + [self.eos_token_id]
+ for prompt, answer in zip(dialogue[::2], dialogue[1::2])
+ ],
+ [],
+ )
+ if not (dialogue[-1][0]):
+ raise ValueError(f"Last message must be from user, got {dialogue[-1]['role']}")
+ dialog_tokens += [self.bos_token_id] + self.encode(
+ f"{B_INST} {(dialogue[-1][1]).strip()} {E_INST}", add_special_tokens=False
+ )
+ return dialog_tokens
\ No newline at end of file
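With the auto_map declared in tokenizer_config.json below, this class loads through AutoTokenizer without installing a package. A usage sketch, assuming the files above sit in the current directory:

    from transformers import AutoTokenizer

    # trust_remote_code lets transformers import SkyworkTokenizer from the
    # tokenization_skywork.py file shipped with the checkpoint.
    tok = AutoTokenizer.from_pretrained("./", trust_remote_code=True)

    ids = tok("Hello, world!").input_ids
    print(ids[0] == tok.bos_token_id)                 # True: add_bos_token=True
    print(tok.decode(ids, skip_special_tokens=True))  # round-trips the text
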
diff --git a/tokenizer.model b/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..decbfe220922d6a38ff52541ef3927b97fb7893e
--- /dev/null
+++ b/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:36ec9a4d6fd7cc78fbb9e4afd89fb04cba0381b08a842ca0b60826073821f594
+size 994250
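tokenizer.model is checked in as a Git LFS pointer: the three lines record only the spec version, the SHA-256 of the real SentencePiece model, and its size (~0.95 MB). After running git lfs pull, the fetched file can be checked against the pointer:

    import hashlib

    # The oid in the pointer is the SHA-256 of the actual file contents.
    with open("tokenizer.model", "rb") as f:
        digest = hashlib.sha256(f.read()).hexdigest()
    print(digest == "36ec9a4d6fd7cc78fbb9e4afd89fb04cba0381b08a842ca0b60826073821f594")
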
diff --git a/tokenizer_config.json b/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..64bf95a90a6a7c95bc69ad1c5d043fbe3e7a1ba0
--- /dev/null
+++ b/tokenizer_config.json
@@ -0,0 +1,40 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "bos_token": {
+ "__type": "AddedToken",
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "clean_up_tokenization_spaces": false,
+ "eos_token": {
+ "__type": "AddedToken",
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "legacy": true,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": null,
+ "sp_model_kwargs": {},
+ "tokenizer_class": "SkyworkTokenizer",
+ "unk_token": {
+ "__type": "AddedToken",
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "auto_map": {
+ "AutoTokenizer": [
+ "tokenization_skywork.SkyworkTokenizer",
+ null
+ ]
+ }
+ }
\ No newline at end of file
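Two details worth noting in this config: model_max_length is transformers' VERY_LARGE_INTEGER sentinel (int(1e30), printed here with float rounding), meaning "no limit recorded", so callers must cap sequence length themselves; and the null second slot in auto_map means only the slow (sentencepiece-backed) tokenizer exists, with no fast counterpart. A truncation sketch (4096 is a placeholder; substitute the model's real context window):

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("./", trust_remote_code=True)

    # model_max_length is effectively unbounded here, so pass max_length
    # explicitly instead of relying on the config.
    enc = tok("some very long document " * 1000, truncation=True, max_length=4096)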