llama-3-Nephilim-v2.1-8B / mergekit_config.yml
base_model: grimjim/llama-3-Nephilim-v1-8B
dtype: bfloat16
merge_method: task_arithmetic
parameters:
  normalize: false
slices:
- sources:
  - layer_range: [0, 32]
    model: grimjim/llama-3-Nephilim-v1-8B
  - layer_range: [0, 32]
    model: grimjim/Llama-3-Instruct-8B-SPPO-Iter3-SimPO-merge
    parameters:
      weight: 0.62
  - layer_range: [0, 32]
    model: openlynn/Llama-3-Soliloquy-8B-v2
    parameters:
      weight: 0.03
  - layer_range: [0, 32]
    model: tokyotech-llm/Llama-3-Swallow-8B-Instruct-v0.1
    parameters:
      weight: 0.2
  - layer_range: [0, 32]
    model: MLP-KTLim/llama-3-Korean-Bllossom-8B
    parameters:
      weight: 0.001
  - layer_range: [0, 32]
    model: grimjim/llama-3-aaditya-OpenBioLLM-8B
    parameters:
      weight: 0.03
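
With merge_method: task_arithmetic and normalize: false, mergekit combines the models roughly as base + sum(weight_i * (model_i - base)), applied layer by layer over the [0, 32] slices listed above. Below is a minimal sketch of applying this config through mergekit's Python API, assuming a recent mergekit release that exposes MergeConfiguration, MergeOptions, and run_merge; the config path and output directory are placeholders, and the mergekit-yaml command-line tool can be used instead.

# Minimal sketch: run this merge config with mergekit's Python API.
# The paths below are placeholders, not part of the published config.
import yaml
import torch

from mergekit.config import MergeConfiguration
from mergekit.merge import MergeOptions, run_merge

with open("mergekit_config.yml", "r", encoding="utf-8") as fp:
    merge_config = MergeConfiguration.model_validate(yaml.safe_load(fp))

run_merge(
    merge_config,
    out_path="./llama-3-Nephilim-v2.1-8B",  # placeholder output directory
    options=MergeOptions(
        cuda=torch.cuda.is_available(),  # merge on GPU when one is available
        copy_tokenizer=True,             # copy the base model's tokenizer into the output
    ),
)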