sometimesanotion committed: Update README.md
README.md (changed)
@@ -31,117 +31,3 @@ KytheraMix-7B is crafted using semi-automated merges YAML templates. As with [A
- **jeffmeloy/Qwen2.5-7B-nerd-uncensored-ties** - A model_stock and TIES merge of [jeffmeloy/Qwen2.5-7B-nerd-uncensored-v0.9](http://huggingface.co/jeffmeloy/Qwen2.5-7B-nerd-uncensored-v0.9), [jeffmeloy/Qwen2.5-7B-nerd-uncensored-v1.0](http://huggingface.co/jeffmeloy/Qwen2.5-7B-nerd-uncensored-v1.0), and [jeffmeloy/Qwen2.5-7B-nerd-uncensored-v1.8](http://huggingface.co/jeffmeloy/Qwen2.5-7B-nerd-uncensored-v1.8). These models are themselves the product of ner_merge, which selects layers from many other merges.

- **[fblgit/cybertron-v4-qw7B-UNAMGS](http://huggingface.co/fblgit/cybertron-v4-qw7B-UNAMGS)** - Strong coding and knowledge representation.

### Models Merged

The following YAML configuration was used to produce this model:

```yaml
name: kytheramix-7b-if-della            # This contributes instruction following
merge_method: della
base_model: Qwen/Qwen2.5-7B
tokenizer_source: base
parameters:
  int8_mask: false
  normalize: true
  rescale: false
  density: 0.30
  weight: 0.50
  epsilon: 0.09
  lambda: 0.95
models:
  - model: newsbang/Homer-v0.5-Qwen2.5-7B    # Exceptional instruction following, coding, math
    parameters:
      density: 0.80
      weight: 1.00
  - model: sethuiyer/Qwen2.5-7B-Anvita       # Good instruction following, combined with exceptional recall and reasoning
    parameters:
      density: 0.30
      weight: [ 0.00, 0.40, 0.40, 0.40, 0.40, 0.40, 0.40, 0.40, 0.40, 0.40, 0.40, 0.40, 0.30, 0.30 ]
dtype: bfloat16
out_dtype: bfloat16
---
name: kytheramix-7b-reason-della        # This contributes the knowledge and reasoning pool, later to be merged
merge_method: della                     # with the dominant instruction-following model
base_model: Qwen/Qwen2.5-7B
tokenizer_source: base
parameters:
  int8_mask: false
  normalize: true
  rescale: false
  density: 0.30
  weight: 0.50
  epsilon: 0.08
  lambda: 0.95
models:
  - model: jeffmeloy/Qwen2.5-7B-nerd-uncensored-ties/
    parameters:
      density: 0.80
      weight: 1.00
  - model: jeffmeloy/jeffmeloy_Qwen2.5-7B-minperplexity-1
    parameters:
      density: 0.60
      weight: 0.50
  - model: fblgit/cybertron-v4-qw7B-UNAMGS
    parameters:
      density: 0.40
      weight: 0.50
dtype: bfloat16
out_dtype: bfloat16
---
name: kytheramix-7b-finalize-slerp
merge_method: slerp
base_model: merges/kytheramix-7b-if-della    # Excellent instruction-following
tokenizer_source: base
parameters:
  t: [ 0.00, 0.50, 0.60, 0.70, 0.70, 0.70, 0.70, 0.55, 0.40 ]
slices:
  - sources:
      - layer_range: [ 0, 2 ]
        model: merges/kytheramix-7b-if-della
      - layer_range: [ 0, 2 ]
        model: merges/kytheramix-7b-reason-della
        t: [ 0.00, 0.00 ]
  - sources:
      - layer_range: [ 2, 7 ]
        model: merges/kytheramix-7b-if-della
      - layer_range: [ 2, 7 ]
        model: merges/kytheramix-7b-reason-della
        t: [ 0.00, 0.50 ]
  - sources:
      - layer_range: [ 7, 14 ]
        model: merges/kytheramix-7b-if-della
      - layer_range: [ 7, 14 ]
        model: merges/kytheramix-7b-reason-della
        t: [ 0.50, 0.70 ]
  - sources:
      - layer_range: [ 14, 21 ]
        model: merges/kytheramix-7b-if-della
      - layer_range: [ 14, 21 ]
        model: merges/kytheramix-7b-reason-della
        t: [ 0.70, 0.70 ]
  - sources:
      - layer_range: [ 21, 28 ]
        model: merges/kytheramix-7b-if-della
      - layer_range: [ 21, 28 ]
        model: merges/kytheramix-7b-reason-della
        t: [ 0.70, 0.40 ]
dtype: bfloat16
---
name: kytheramix-7b-finalize
merge_method: ties
base_model: Qwen/Qwen2.5-7B
tokenizer_source: huihui-ai/Qwen2.5-7B-Instruct-abliterated-v2
parameters:
  int8_mask: false
  normalize: true
  rescale: false
  density: 1.00
  weight: 1.00
models:
  - model: merges/kytheramix-7b-finalize-slerp
dtype: bfloat16
out_dtype: bfloat16
```
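The configuration above is a single multi-document YAML: each `---`-separated stage has a `name`, and later stages refer to earlier outputs under `merges/` (the two DELLA merges feed the SLERP blend, and a final TIES pass settles the result back onto the Qwen2.5-7B base with the abliterated tokenizer). As a minimal sketch of how such a pipeline could be driven, assuming mergekit's documented Python API (`MergeConfiguration`, `run_merge`, `MergeOptions`) and assuming each stage's `name` maps directly to its output directory, the stages could be run in order like this; the script is illustrative, not the exact tooling used for this model:

```python
# Hypothetical driver for the multi-stage config above (not the exact script
# used for this release). Assumes mergekit's Python API as documented upstream.
import os

import torch
import yaml
from mergekit.config import MergeConfiguration
from mergekit.merge import MergeOptions, run_merge

CONFIG_PATH = "kytheramix-7b.yaml"   # the YAML shown above, saved to disk
MERGE_DIR = "merges"                 # later stages reference merges/<name>

with open(CONFIG_PATH, "r", encoding="utf-8") as fp:
    documents = list(yaml.safe_load_all(fp))

for doc in documents:
    # Each document's `name` decides where its output lands, so later stages
    # can point at merges/kytheramix-7b-if-della, merges/kytheramix-7b-reason-della, etc.
    name = doc.pop("name")
    out_path = os.path.join(MERGE_DIR, name)

    merge_config = MergeConfiguration.model_validate(doc)
    run_merge(
        merge_config,
        out_path,
        options=MergeOptions(
            cuda=torch.cuda.is_available(),
            copy_tokenizer=True,
            lazy_unpickle=False,
            low_cpu_memory=False,
        ),
    )
    print(f"Finished stage: {name} -> {out_path}")
```

Running the documents sequentially is what lets `merges/kytheramix-7b-if-della` and `merges/kytheramix-7b-reason-della` already exist by the time the SLERP and TIES stages ask for them.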