pipihand01
committed on
Upload 3 files
- README.md +32 -0
- adapter_config.json +38 -0
- adapter_model.safetensors +3 -0
README.md
ADDED
@@ -0,0 +1,32 @@
+license: apache-2.0
+license_link: https://huggingface.co/pipihand01/QwQ-32B-Preview-abliterated-lora-rank32/blob/main/LICENSE
+language:
+- en
+base_model:
+- Qwen/QwQ-32B-Preview
+- huihui-ai/QwQ-32B-Preview-abliterated
+tags:
+- chat
+- abliterated
+- uncensored
+- lora
+library_name: transformers
+---
+This is a rank-32 LoRA extracted from [huihui-ai/QwQ-32B-Preview-abliterated](https://huggingface.co/huihui-ai/QwQ-32B-Preview-abliterated) with the base model [Qwen/QwQ-32B-Preview](https://huggingface.co/Qwen/QwQ-32B-Preview), using [mergekit](https://github.com/arcee-ai/mergekit).
+
+**NOTE: I bear no responsibility for any output produced when using this LoRA. Depending on how it is prompted, it may generate content that is not suitable in some situations. Use it at your own risk.**
+---
+base_model: []
+library_name: transformers
+tags:
+- mergekit
+- peft
+
+---
+# Untitled LoRA Model (1)
+
+This is a LoRA extracted from a language model. It was extracted using [mergekit](https://github.com/arcee-ai/mergekit).
+
+## LoRA Details
+
+This LoRA adapter was extracted from [huihui-ai/QwQ-32B-Preview-abliterated](https://huggingface.co/huihui-ai/QwQ-32B-Preview-abliterated) and uses [Qwen/QwQ-32B-Preview](https://huggingface.co/Qwen/QwQ-32B-Preview) as a base.
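The model card above names the adapter and its base but does not show how to apply it. Below is a minimal loading sketch using transformers and PEFT; the adapter repo id is taken from the `license_link` above, while the dtype and `device_map` choices are assumptions, not settings prescribed by the author.

```python
# A minimal loading sketch, assuming the adapter repo id below and enough
# memory for a 32B model; dtype/device settings are illustrative only.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "Qwen/QwQ-32B-Preview"
adapter_id = "pipihand01/QwQ-32B-Preview-abliterated-lora-rank32"

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(
    base_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
# Attach the rank-32 adapter on top of the frozen base weights.
model = PeftModel.from_pretrained(base, adapter_id)

messages = [{"role": "user", "content": "Hello!"}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
output_ids = model.generate(input_ids, max_new_tokens=128)
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))
```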
adapter_config.json
ADDED
@@ -0,0 +1,38 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "Qwen/QwQ-32B-Preview",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_dropout": 0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": [
+    "input_layernorm",
+    "post_attention_layernorm",
+    "norm"
+  ],
+  "peft_type": "LORA",
+  "r": 32,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "o_proj",
+    "gate_proj",
+    "down_proj",
+    "v_proj",
+    "k_proj",
+    "up_proj",
+    "lm_head",
+    "q_proj",
+    "embed_tokens"
+  ],
+  "task_type": "CAUSAL_LM",
+  "use_rslora": false
+}
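For reference, a sketch of the same settings expressed as a PEFT `LoraConfig` in Python, mirroring the JSON above: a rank-32 adapter (alpha 32, no dropout) over the attention and MLP projections plus the embeddings and LM head, with the layernorms stored in full via `modules_to_save` rather than as low-rank deltas.

```python
from peft import LoraConfig

# Mirrors adapter_config.json above.
lora_config = LoraConfig(
    r=32,
    lora_alpha=32,
    lora_dropout=0.0,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",
        "gate_proj", "up_proj", "down_proj",
        "lm_head", "embed_tokens",
    ],
    # Kept as fully saved modules, not low-rank deltas.
    modules_to_save=["input_layernorm", "post_attention_layernorm", "norm"],
)
```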
adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f2aeda5884e92e0c1302ed40a3cb5f62dfdfd7f84cfa5997c36f4b91430b3c08
+size 558450816
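The adapter weights themselves live in Git LFS; the pointer above records only their SHA-256 and size. A small sketch for checking a downloaded copy against that pointer (the repo id is assumed from the `license_link` in the model card):

```python
import hashlib
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="pipihand01/QwQ-32B-Preview-abliterated-lora-rank32",
    filename="adapter_model.safetensors",
)

# Hash the file in 1 MiB chunks to avoid loading ~560 MB at once.
sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

# The expected digest comes from the LFS pointer above.
assert sha256.hexdigest() == "f2aeda5884e92e0c1302ed40a3cb5f62dfdfd7f84cfa5997c36f4b91430b3c08"
```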