---
library_name: transformers
license: apache-2.0
base_model: Qwen/Qwen2.5-14B
tags:
- generated_from_trainer
model-index:
- name: EVA-Qwen2.5-14B-SFFT-v0.2
  results: []
---
|
|
|
|
|
|
[<img src="https://raw.githubusercontent.com/axolotl-ai-cloud/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/axolotl-ai-cloud/axolotl) |
|
<details><summary>See axolotl config</summary> |
|
|
|
axolotl version: `0.4.1` |
|
```yaml |
|
|
# plugins: |
|
# - axolotl.integrations.spectrum.SpectrumPlugin |
|
|
|
# spectrum_top_fraction: 0.5 |
|
# # Optional if using a pre-scanned model as your base_model. Useful if using a model mirror |
|
# spectrum_model_name: Qwen/Qwen2.5-32B |
|
|
|
base_model: Qwen/Qwen2.5-14B |
|
|
|
load_in_8bit: false |
|
load_in_4bit: false |
|
strict: false |
|
|
|
plugins: |
|
- axolotl.integrations.liger.LigerPlugin |
|
liger_rope: true |
|
liger_rms_norm: true |
|
liger_swiglu: true |
|
liger_fused_linear_cross_entropy: true |
|
|
|
datasets: |
|
- path: datasets/Celeste_Filtered_utf8fix.jsonl |
|
type: sharegpt |
|
- path: datasets/deduped_not_samantha_norefusals.jsonl |
|
type: sharegpt |
|
- path: datasets/deduped_SynthRP-Gens_processed_ShareGPT_converted_cleaned.jsonl |
|
type: sharegpt |
|
- path: datasets/deduped_Synthstruct-Gens_processed_sharegpt_converted_cleaned.jsonl |
|
type: sharegpt |
|
- path: datasets/Gryphe-4o-WP-filtered-sharegpt_utf8fix.jsonl |
|
type: sharegpt |
|
- path: datasets/opus-instruct-22k-no_refusals-filtered_utf8fix.jsonl |
|
type: sharegpt |
|
- path: datasets/Sonnet3-5-charcard-names-filtered-sharegpt_utf8fix.jsonl |
|
type: sharegpt |
|
- path: datasets/SystemChat_subset_filtered_sharegpt_utf8fix.jsonl |
|
type: sharegpt |
|
|
|
chat_template: chatml |
|
shuffle_merged_datasets: true |
|
val_set_size: 0.005 |
|
output_dir: ./EVA-Qwen2.5-14B-SFFT-v0.2 |
|
|
|
sequence_len: 10240 |
|
sample_packing: true |
|
eval_sample_packing: false |
|
pad_to_sequence_len: true |
|
|
|
# adapter: qlora |
|
# lora_model_dir: |
|
# lora_r: 32 |
|
# lora_alpha: 16 |
|
# lora_dropout: 0.05 |
|
# lora_target_linear: true |
|
# peft_use_dora: true |
|
|
|
unfrozen_parameters: |
|
- ^lm_head.weight$ |
|
- ^model.embed_tokens.weight$ |
|
# mlp.down_proj layers |
|
- model.layers.1.mlp.down_proj |
|
- model.layers.35.mlp.down_proj |
|
- model.layers.38.mlp.down_proj |
|
- model.layers.37.mlp.down_proj |
|
- model.layers.36.mlp.down_proj |
|
- model.layers.15.mlp.down_proj |
|
- model.layers.11.mlp.down_proj |
|
- model.layers.12.mlp.down_proj |
|
- model.layers.34.mlp.down_proj |
|
- model.layers.44.mlp.down_proj |
|
- model.layers.45.mlp.down_proj |
|
- model.layers.9.mlp.down_proj |
|
- model.layers.41.mlp.down_proj |
|
- model.layers.33.mlp.down_proj |
|
- model.layers.43.mlp.down_proj |
|
- model.layers.40.mlp.down_proj |
|
- model.layers.13.mlp.down_proj |
|
- model.layers.8.mlp.down_proj |
|
- model.layers.39.mlp.down_proj |
|
- model.layers.10.mlp.down_proj |
|
- model.layers.14.mlp.down_proj |
|
- model.layers.16.mlp.down_proj |
|
- model.layers.31.mlp.down_proj |
|
- model.layers.32.mlp.down_proj |
|
# mlp.gate_proj layers |
|
- model.layers.1.mlp.gate_proj |
|
- model.layers.44.mlp.gate_proj |
|
- model.layers.46.mlp.gate_proj |
|
- model.layers.45.mlp.gate_proj |
|
- model.layers.43.mlp.gate_proj |
|
- model.layers.47.mlp.gate_proj |
|
- model.layers.42.mlp.gate_proj |
|
- model.layers.32.mlp.gate_proj |
|
- model.layers.27.mlp.gate_proj |
|
- model.layers.33.mlp.gate_proj |
|
- model.layers.28.mlp.gate_proj |
|
- model.layers.39.mlp.gate_proj |
|
- model.layers.41.mlp.gate_proj |
|
- model.layers.40.mlp.gate_proj |
|
- model.layers.30.mlp.gate_proj |
|
- model.layers.29.mlp.gate_proj |
|
- model.layers.31.mlp.gate_proj |
|
- model.layers.37.mlp.gate_proj |
|
- model.layers.26.mlp.gate_proj |
|
- model.layers.10.mlp.gate_proj |
|
- model.layers.38.mlp.gate_proj |
|
- model.layers.36.mlp.gate_proj |
|
- model.layers.12.mlp.gate_proj |
|
- model.layers.13.mlp.gate_proj |
|
# mlp.up_proj layers |
|
- model.layers.1.mlp.up_proj |
|
- model.layers.13.mlp.up_proj |
|
- model.layers.11.mlp.up_proj |
|
- model.layers.14.mlp.up_proj |
|
- model.layers.15.mlp.up_proj |
|
- model.layers.12.mlp.up_proj |
|
- model.layers.8.mlp.up_proj |
|
- model.layers.16.mlp.up_proj |
|
- model.layers.9.mlp.up_proj |
|
- model.layers.19.mlp.up_proj |
|
- model.layers.10.mlp.up_proj |
|
- model.layers.7.mlp.up_proj |
|
- model.layers.17.mlp.up_proj |
|
- model.layers.20.mlp.up_proj |
|
- model.layers.21.mlp.up_proj |
|
- model.layers.18.mlp.up_proj |
|
- model.layers.37.mlp.up_proj |
|
- model.layers.38.mlp.up_proj |
|
- model.layers.39.mlp.up_proj |
|
- model.layers.42.mlp.up_proj |
|
- model.layers.41.mlp.up_proj |
|
- model.layers.27.mlp.up_proj |
|
- model.layers.28.mlp.up_proj |
|
- model.layers.36.mlp.up_proj |
|
# self_attn.k_proj layers |
|
- model.layers.47.self_attn.k_proj |
|
- model.layers.39.self_attn.k_proj |
|
- model.layers.41.self_attn.k_proj |
|
- model.layers.37.self_attn.k_proj |
|
- model.layers.35.self_attn.k_proj |
|
- model.layers.44.self_attn.k_proj |
|
- model.layers.38.self_attn.k_proj |
|
- model.layers.14.self_attn.k_proj |
|
- model.layers.7.self_attn.k_proj |
|
- model.layers.12.self_attn.k_proj |
|
- model.layers.11.self_attn.k_proj |
|
- model.layers.32.self_attn.k_proj |
|
- model.layers.10.self_attn.k_proj |
|
- model.layers.8.self_attn.k_proj |
|
- model.layers.6.self_attn.k_proj |
|
- model.layers.9.self_attn.k_proj |
|
- model.layers.45.self_attn.k_proj |
|
- model.layers.42.self_attn.k_proj |
|
- model.layers.40.self_attn.k_proj |
|
- model.layers.5.self_attn.k_proj |
|
- model.layers.0.self_attn.k_proj |
|
- model.layers.33.self_attn.k_proj |
|
- model.layers.34.self_attn.k_proj |
|
- model.layers.13.self_attn.k_proj |
|
# self_attn.o_proj layers |
|
- model.layers.12.self_attn.o_proj |
|
- model.layers.5.self_attn.o_proj |
|
- model.layers.14.self_attn.o_proj |
|
- model.layers.16.self_attn.o_proj |
|
- model.layers.20.self_attn.o_proj |
|
- model.layers.13.self_attn.o_proj |
|
- model.layers.11.self_attn.o_proj |
|
- model.layers.4.self_attn.o_proj |
|
- model.layers.6.self_attn.o_proj |
|
- model.layers.19.self_attn.o_proj |
|
- model.layers.7.self_attn.o_proj |
|
- model.layers.18.self_attn.o_proj |
|
- model.layers.8.self_attn.o_proj |
|
- model.layers.38.self_attn.o_proj |
|
- model.layers.15.self_attn.o_proj |
|
- model.layers.17.self_attn.o_proj |
|
- model.layers.9.self_attn.o_proj |
|
- model.layers.10.self_attn.o_proj |
|
- model.layers.21.self_attn.o_proj |
|
- model.layers.28.self_attn.o_proj |
|
- model.layers.32.self_attn.o_proj |
|
- model.layers.35.self_attn.o_proj |
|
- model.layers.39.self_attn.o_proj |
|
- model.layers.3.self_attn.o_proj |
|
# self_attn.q_proj layers |
|
- model.layers.1.self_attn.q_proj |
|
- model.layers.2.self_attn.q_proj |
|
- model.layers.3.self_attn.q_proj |
|
- model.layers.44.self_attn.q_proj |
|
- model.layers.29.self_attn.q_proj |
|
- model.layers.45.self_attn.q_proj |
|
- model.layers.43.self_attn.q_proj |
|
- model.layers.32.self_attn.q_proj |
|
- model.layers.38.self_attn.q_proj |
|
- model.layers.19.self_attn.q_proj |
|
- model.layers.42.self_attn.q_proj |
|
- model.layers.34.self_attn.q_proj |
|
- model.layers.36.self_attn.q_proj |
|
- model.layers.40.self_attn.q_proj |
|
- model.layers.26.self_attn.q_proj |
|
- model.layers.20.self_attn.q_proj |
|
- model.layers.28.self_attn.q_proj |
|
- model.layers.39.self_attn.q_proj |
|
- model.layers.41.self_attn.q_proj |
|
- model.layers.33.self_attn.q_proj |
|
- model.layers.35.self_attn.q_proj |
|
- model.layers.25.self_attn.q_proj |
|
- model.layers.30.self_attn.q_proj |
|
- model.layers.27.self_attn.q_proj |
|
# self_attn.v_proj layers |
|
- model.layers.0.self_attn.v_proj |
|
- model.layers.7.self_attn.v_proj |
|
- model.layers.39.self_attn.v_proj |
|
- model.layers.31.self_attn.v_proj |
|
- model.layers.15.self_attn.v_proj |
|
- model.layers.10.self_attn.v_proj |
|
- model.layers.41.self_attn.v_proj |
|
- model.layers.32.self_attn.v_proj |
|
- model.layers.6.self_attn.v_proj |
|
- model.layers.33.self_attn.v_proj |
|
- model.layers.42.self_attn.v_proj |
|
- model.layers.29.self_attn.v_proj |
|
- model.layers.9.self_attn.v_proj |
|
- model.layers.14.self_attn.v_proj |
|
- model.layers.35.self_attn.v_proj |
|
- model.layers.38.self_attn.v_proj |
|
- model.layers.13.self_attn.v_proj |
|
- model.layers.30.self_attn.v_proj |
|
- model.layers.34.self_attn.v_proj |
|
- model.layers.5.self_attn.v_proj |
|
- model.layers.28.self_attn.v_proj |
|
- model.layers.37.self_attn.v_proj |
|
- model.layers.27.self_attn.v_proj |
|
- model.layers.11.self_attn.v_proj |
|
|
|
wandb_project: EVA-Qwen2.5-14B-SFFT-v0.2 |
|
wandb_entity: |
|
wandb_watch: |
|
wandb_name: Unit-02 |
|
wandb_log_model: |
|
|
|
gradient_accumulation_steps: 8 |
|
micro_batch_size: 2 |
|
num_epochs: 3 |
|
optimizer: paged_ademamix_8bit |
|
lr_scheduler: cosine |
|
learning_rate: 0.00005 |
|
max_grad_norm: 3 |
|
|
|
train_on_inputs: false |
|
group_by_length: false |
|
bf16: auto |
|
fp16: |
|
tf32: false |
|
|
|
gradient_checkpointing: "unsloth" |
|
# gradient_checkpointing_kwargs: |
|
# use_reentrant: true |
|
early_stopping_patience: |
|
resume_from_checkpoint: |
|
local_rank: |
|
logging_steps: 1 |
|
xformers_attention: |
|
flash_attention: true |
|
|
|
warmup_steps: 20 |
|
evals_per_epoch: 4 |
|
saves_per_epoch: 4 |
|
save_safetensors: true |
|
hub_model_id: |
|
hub_strategy: |
|
debug: |
|
deepspeed: deepspeed_configs/zero3_bf16.json |
|
weight_decay: 0.1 |
|
# fsdp: |
|
# - full_shard |
|
# - auto_wrap |
|
# fsdp_config: |
|
# fsdp_limit_all_gathers: true |
|
# fsdp_sync_module_states: false |
|
# fsdp_offload_params: true |
|
# fsdp_cpu_ram_efficient_loading: true |
|
# fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP |
|
# fsdp_transformer_layer_cls_to_wrap: Qwen2DecoderLayer |
|
# fsdp_activation_checkpointing: true |
|
# fsdp_state_dict_type: SHARDED_STATE_DICT # Changed from FULL_STATE_DICT |
|
# fsdp_sharding_strategy: FULL_SHARD |
|
# fsdp_forward_prefetch: false # Added |
|
# fsdp_backward_prefetch: "BACKWARD_PRE" # Added |
|
# fsdp_backward_prefetch_limit: 1 # Added |
|
# fsdp_mixed_precision: BF16 # Added |
|
``` |
|
|
|
</details><br> |
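
The config targets Axolotl `0.4.1`; a run like this is typically launched with `accelerate launch -m axolotl.cli.train <config>.yaml`, assuming the dataset files under `datasets/` are available locally.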
|
|
|
# EVA-Qwen2.5-14B-SFFT-v0.2 |
|
|
|
This model is a fine-tuned version of [Qwen/Qwen2.5-14B](https://huggingface.co/Qwen/Qwen2.5-14B) on a mixture of roleplay, creative-writing, and instruction datasets (listed in the Axolotl config above).
|
It achieves the following results on the evaluation set: |
|
- Loss: 3.0986 |
|
|
|
## Model description |
|
|
|
EVA-Qwen2.5-14B-SFFT-v0.2 is a selective full-parameter fine-tune (SFFT) of Qwen2.5-14B. Instead of training a LoRA adapter, a fixed subset of attention and MLP projections, together with the token embeddings and LM head, was unfrozen (the `unfrozen_parameters` list in the config above) and trained directly. Conversations follow the ChatML template.
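
Since the config sets `chat_template: chatml`, prompts use the standard ChatML format, for example:

```
<|im_start|>system
You are a helpful assistant.<|im_end|>
<|im_start|>user
Hello!<|im_end|>
<|im_start|>assistant
```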
|
|
|
## Intended uses & limitations |
|
|
|
The training mixture leans heavily on roleplay, character-card, and creative-writing data alongside refusal-filtered instruct sets, so the model is aimed at creative writing, roleplay, and general instruction following. Usual caveats apply: outputs can be inaccurate or inappropriate, and because refusal examples were filtered from the training data, the model may comply with requests that aligned models would decline.
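
A minimal inference sketch with `transformers`; `MODEL_PATH` below is a placeholder, since `hub_model_id` is unset in the config:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_PATH = "path/to/EVA-Qwen2.5-14B-SFFT-v0.2"  # placeholder; hub_model_id is unset above

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_PATH, torch_dtype="auto", device_map="auto"
)

# apply_chat_template renders the messages in the ChatML format shown above.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Write a two-sentence opening for a space-opera story."},
]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

output_ids = model.generate(input_ids, max_new_tokens=256)
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))
```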
|
|
|
## Training and evaluation data |
|
|
|
The training mixture is the dataset list in the Axolotl config above: filtered Celeste data, a deduplicated no-refusals instruct set, SynthRP and Synthstruct generations, Gryphe's GPT-4o writing-prompt stories, a refusal-filtered Opus instruct set, Sonnet 3.5 character cards, and a filtered SystemChat subset, all in ShareGPT format. After merging and shuffling, 0.5% of the data (`val_set_size: 0.005`) was held out for evaluation.
|
|
|
## Training procedure |
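
Rather than the commented-out QLoRA adapter, this run uses selective full-parameter training: every weight is frozen except those matching the `unfrozen_parameters` patterns, which appear to come from a Spectrum-style scan (note the commented-out `SpectrumPlugin` block in the config). Below is a minimal sketch of the mechanism, assuming `transformers` is installed; the matching logic is illustrative rather than Axolotl's exact implementation:

```python
import re

from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-14B")

# A few of the patterns from `unfrozen_parameters` above: anchored regexes
# for the embeddings/LM head, prefix-style entries for layer projections.
unfrozen_patterns = [
    r"^lm_head.weight$",
    r"^model.embed_tokens.weight$",
    r"model.layers.1.mlp.down_proj",
    # ... the remaining entries from the config
]

# Freeze everything, then re-enable gradients only for matching parameters.
for name, param in model.named_parameters():
    param.requires_grad = any(re.search(p, name) for p in unfrozen_patterns)

trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
total = sum(p.numel() for p in model.parameters())
print(f"trainable: {trainable:,} / {total:,} parameters")
```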
|
|
|
### Training hyperparameters |
|
|
|
The following hyperparameters were used during training: |
|
- learning_rate: 5e-05 |
|
- train_batch_size: 2 |
|
- eval_batch_size: 2 |
|
- seed: 42 |
|
- distributed_type: multi-GPU |
|
- num_devices: 8 |
|
- gradient_accumulation_steps: 8 |
|
- total_train_batch_size: 128 |
|
- total_eval_batch_size: 16 |
|
- optimizer: paged AdEMAMix 8-bit (`paged_ademamix_8bit`)
|
- lr_scheduler_type: cosine |
|
- lr_scheduler_warmup_steps: 20 |
|
- num_epochs: 3 |
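
The effective batch size follows directly from these settings: `total_train_batch_size = micro_batch_size × gradient_accumulation_steps × num_devices = 2 × 8 × 8 = 128`.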
|
|
|
### Training results |
|
|
|
| Training Loss | Epoch  | Step | Validation Loss |
|:-------------:|:------:|:----:|:---------------:|
| 1.4236        | 0.0170 | 1    | 2.6557          |
| 1.2513        | 0.2553 | 15   | 3.4606          |
| 1.1338        | 0.5106 | 30   | 3.5536          |
| 1.0985        | 0.7660 | 45   | 3.1957          |
| 0.8794        | 1.0170 | 60   | 3.0346          |
| 0.8584        | 1.2718 | 75   | 3.0551          |
| 0.8421        | 1.5265 | 90   | 3.0168          |
| 0.8081        | 1.7813 | 105  | 3.0335          |
| 0.8227        | 2.0361 | 120  | 3.0369          |
| 0.7416        | 2.2909 | 135  | 3.0876          |
| 0.7396        | 2.5456 | 150  | 3.1023          |
| 0.7775        | 2.8004 | 165  | 3.0986          |
|
|
|
|
|
### Framework versions |
|
|
|
- Transformers 4.45.1 |
|
- Pytorch 2.4.0+cu121 |
|
- Datasets 2.21.0 |
|
- Tokenizers 0.20.2 |
|
|