automerger committed on
Commit
d3a3d90
1 Parent(s): afad297

Upload folder using huggingface_hub

Browse files
README.md CHANGED
@@ -6,37 +6,31 @@ tags:
6
  - lazymergekit
7
  - automerger
8
  base_model:
9
- - nlpguy/AlloyIngotNeoY
10
  - Kukedlc/NeuralSirKrishna-7b
11
  ---
12
 
13
  # AlloyingotneoyNeuralsirkrishna-7B
14
 
15
  AlloyingotneoyNeuralsirkrishna-7B is an automated merge created by [Maxime Labonne](https://huggingface.co/mlabonne) using the following configuration.
16
- * [nlpguy/AlloyIngotNeoY](https://huggingface.co/nlpguy/AlloyIngotNeoY)
17
  * [Kukedlc/NeuralSirKrishna-7b](https://huggingface.co/Kukedlc/NeuralSirKrishna-7b)
18
 
19
  ## 🧩 Configuration
20
 
21
  ```yaml
22
- slices:
23
- - sources:
24
- - model: nlpguy/AlloyIngotNeoY
25
- layer_range: [0, 32]
26
- - model: Kukedlc/NeuralSirKrishna-7b
27
- layer_range: [0, 32]
28
- merge_method: slerp
 
29
  base_model: nlpguy/AlloyIngotNeoY
30
  parameters:
31
- t:
32
- - filter: self_attn
33
- value: [0, 0.5, 0.3, 0.7, 1]
34
- - filter: mlp
35
- value: [1, 0.5, 0.7, 0.3, 0]
36
- - value: 0.5
37
  dtype: bfloat16
38
  random_seed: 0
39
- ```
40
 
41
  ## 💻 Usage
42
 
 
6
  - lazymergekit
7
  - automerger
8
  base_model:
 
9
  - Kukedlc/NeuralSirKrishna-7b
10
  ---
11
 
12
  # AlloyingotneoyNeuralsirkrishna-7B
13
 
14
  AlloyingotneoyNeuralsirkrishna-7B is an automated merge created by [Maxime Labonne](https://huggingface.co/mlabonne) using the following configuration.
 
15
  * [Kukedlc/NeuralSirKrishna-7b](https://huggingface.co/Kukedlc/NeuralSirKrishna-7b)
16
 
17
  ## 🧩 Configuration
18
 
19
  ```yaml
20
+ models:
21
+ - model: nlpguy/AlloyIngotNeoY
22
+ # No parameters necessary for base model
23
+ - model: Kukedlc/NeuralSirKrishna-7b
24
+ parameters:
25
+ density: 0.53
26
+ weight: 0.6
27
+ merge_method: dare_ties
28
  base_model: nlpguy/AlloyIngotNeoY
29
  parameters:
30
+ int8_mask: true
 
 
 
 
 
31
  dtype: bfloat16
32
  random_seed: 0
33
+ ```
34
 
35
  ## 💻 Usage
36
 
config.json CHANGED
@@ -21,7 +21,7 @@
21
  "sliding_window": 4096,
22
  "tie_word_embeddings": false,
23
  "torch_dtype": "bfloat16",
24
- "transformers_version": "4.38.2",
25
  "use_cache": true,
26
  "vocab_size": 32000
27
  }
 
21
  "sliding_window": 4096,
22
  "tie_word_embeddings": false,
23
  "torch_dtype": "bfloat16",
24
+ "transformers_version": "4.39.0",
25
  "use_cache": true,
26
  "vocab_size": 32000
27
  }
mergekit_config.yml CHANGED
@@ -1,19 +1,14 @@
1
 
2
- slices:
3
- - sources:
4
- - model: nlpguy/AlloyIngotNeoY
5
- layer_range: [0, 32]
6
- - model: Kukedlc/NeuralSirKrishna-7b
7
- layer_range: [0, 32]
8
- merge_method: slerp
 
9
  base_model: nlpguy/AlloyIngotNeoY
10
  parameters:
11
- t:
12
- - filter: self_attn
13
- value: [0, 0.5, 0.3, 0.7, 1]
14
- - filter: mlp
15
- value: [1, 0.5, 0.7, 0.3, 0]
16
- - value: 0.5
17
  dtype: bfloat16
18
  random_seed: 0
19
-
 
1
 
2
+ models:
3
+ - model: nlpguy/AlloyIngotNeoY
4
+ # No parameters necessary for base model
5
+ - model: Kukedlc/NeuralSirKrishna-7b
6
+ parameters:
7
+ density: 0.53
8
+ weight: 0.6
9
+ merge_method: dare_ties
10
  base_model: nlpguy/AlloyIngotNeoY
11
  parameters:
12
+ int8_mask: true
 
 
 
 
 
13
  dtype: bfloat16
14
  random_seed: 0
 
model-00001-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b868bc025487a8dff6b9399f16fe8a10c5292d041e8dfadc9e55e89810ff1d1a
3
  size 9825524456
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a04579ef4cdfc587466d43f0bc612812490fdef88e80fb49fee3b2a97b549871
3
  size 9825524456
model-00002-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:e3364d3470d76141b7be686d9ff659985f74ed0db4a5b67d7ab66ef5138e299a
3
  size 4657973592
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:84db58379e13e5ce65486c57b37d136c5d88ed144cbbb42381864a78032c3565
3
  size 4657973592