---
# mergekit frankenmerge config: stack layers 0-15 of the Gradient 1048k
# long-context model with layers 8-31 of MopeyMule (8-layer overlap),
# producing a passthrough (no weight blending) layer-stacked model.
slices:
  - sources:
      - model: gradientai/Llama-3-8B-Instruct-Gradient-1048k
        layer_range: [0, 16]
  - sources:
      - model: failspy/Llama-3-8B-Instruct-MopeyMule
        layer_range: [8, 32]
# passthrough copies layers verbatim; no interpolation between models
merge_method: passthrough
# union merges both models' tokenizer vocabularies
tokenizer_source: union
dtype: float16