Hjgugugjhuhjggg's picture
Upload folder using huggingface_hub
ad816e4 verified
|
raw
history blame
4.57 kB
metadata
base_model:
  - Hjgugugjhuhjggg/mergekit-ties-pghuyfi
  - Hjgugugjhuhjggg/mergekit-ties-qgcitfu
  - Hjgugugjhuhjggg/mergekit-ties-poovzrh
  - Hjgugugjhuhjggg/mergekit-ties-kmlzhzo
  - Hjgugugjhuhjggg/mergekit-ties-dkhnzcn
  - huihui-ai/Llama-3.2-3B-Instruct-abliterated
  - Hjgugugjhuhjggg/mergekit-ties-xflmond
library_name: transformers
tags:
  - mergekit
  - merge

merge

This is a merge of pre-trained language models created using mergekit.

Merge Details

Merge Method

This model was merged using the linear merge method, with huihui-ai/Llama-3.2-3B-Instruct-abliterated as the base model.

Models Merged

The following models were included in the merge:

- Hjgugugjhuhjggg/mergekit-ties-pghuyfi
- Hjgugugjhuhjggg/mergekit-ties-qgcitfu
- Hjgugugjhuhjggg/mergekit-ties-poovzrh
- Hjgugugjhuhjggg/mergekit-ties-kmlzhzo
- Hjgugugjhuhjggg/mergekit-ties-dkhnzcn
- Hjgugugjhuhjggg/mergekit-ties-xflmond

Configuration

The following YAML configuration was used to produce this model:

# mergekit linear-merge configuration.
#
# NOTE(review): every `parameters` mapping in the original declared the
# `quantization` key twice (an int8 entry followed by an int4 entry).
# Duplicate mapping keys are invalid YAML 1.2, and most parsers silently
# keep only the LAST value — so the int8 entries were never seen by the
# loader. They have been removed here to make the parsed result explicit.
# If the consumer genuinely supports multiple quantization entries, they
# must be expressed as a list, not as repeated keys.
#
# NOTE(review): keys such as temperature / top_p / max_tokens / stream look
# like inference-time sampling options rather than merge parameters —
# confirm the consuming tool actually reads them; unknown keys may be
# silently ignored.
models:
  - layer_range: [0, 28]
    model: Hjgugugjhuhjggg/mergekit-ties-qgcitfu
    parameters:
      weight: 1
      density: 0.9
      gamma: 0.01
      normalize: true
      int8_mask: true
      random_seed: 0
      temperature: 0.5
      top_p: 0.65
      inference: true
      max_tokens: 999999999
      stream: true
      quantization:
        method: int4
        value: 100

  - layer_range: [0, 28]
    model: Hjgugugjhuhjggg/mergekit-ties-dkhnzcn
    parameters:
      weight: 1
      density: 0.9
      gamma: 0.01
      normalize: true
      int8_mask: true
      random_seed: 0
      temperature: 0.5
      top_p: 0.65
      inference: true
      max_tokens: 999999999
      stream: true
      quantization:
        method: int4
        value: 100

  - layer_range: [0, 28]
    model: Hjgugugjhuhjggg/mergekit-ties-poovzrh
    parameters:
      weight: 1
      density: 0.9
      gamma: 0.01
      normalize: true
      int8_mask: true
      random_seed: 0
      temperature: 0.5
      top_p: 0.65
      inference: true
      max_tokens: 999999999
      stream: true
      quantization:
        method: int4
        value: 100

  - layer_range: [0, 28]
    model: Hjgugugjhuhjggg/mergekit-ties-pghuyfi
    parameters:
      weight: 1
      density: 0.9
      gamma: 0.01
      normalize: true
      int8_mask: true
      random_seed: 0
      temperature: 0.5
      top_p: 0.65
      inference: true
      max_tokens: 999999999
      stream: true
      quantization:
        method: int4
        value: 100

  - layer_range: [0, 28]
    model: Hjgugugjhuhjggg/mergekit-ties-kmlzhzo
    parameters:
      weight: 1
      density: 0.9
      gamma: 0.01
      normalize: true
      int8_mask: true
      random_seed: 0
      temperature: 0.5
      top_p: 0.65
      inference: true
      max_tokens: 999999999
      stream: true
      quantization:
        method: int4
        value: 100

  - layer_range: [0, 28]
    model: Hjgugugjhuhjggg/mergekit-ties-xflmond
    parameters:
      weight: 1
      density: 0.9
      gamma: 0.01
      normalize: true
      int8_mask: true
      random_seed: 0
      temperature: 0.5
      top_p: 0.65
      inference: true
      max_tokens: 999999999
      stream: true
      quantization:
        method: int4
        value: 100

merge_method: linear
base_model: huihui-ai/Llama-3.2-3B-Instruct-abliterated
# Top-level parameters (originally indented 4 spaces, inconsistent with the
# 2-space indent used everywhere else in this file — normalized to 2).
parameters:
  weight: 1
  density: 0.9
  gamma: 0.01
  normalize: true
  int8_mask: true
  random_seed: 0
  temperature: 0.5
  top_p: 0.65
  inference: true
  max_tokens: 999999999
  stream: true
  quantization:
    method: int4
    value: 100