kromeurus committed
Commit 07055c4 · verified · 1 Parent(s): 7a415c9

Upload folder using huggingface_hub

.ipynb_checkpoints/adapter_config-checkpoint.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "base_model_name_or_path": "arcee-ai/Llama-3.1-SuperNova-Lite",
+ "peft_type": "LORA",
+ "use_rslora": false,
+ "target_modules": [
+ "lm_head",
+ "o_proj",
+ "k_proj",
+ "gate_proj",
+ "down_proj",
+ "embed_tokens",
+ "v_proj",
+ "q_proj",
+ "up_proj"
+ ],
+ "modules_to_save": [],
+ "task_type": "CAUSAL_LM",
+ "r": 128,
+ "lora_alpha": 128,
+ "rank_pattern": {},
+ "alpha_pattern": {},
+ "lora_dropout": 0.0,
+ "fan_in_fan_out": false,
+ "inference_mode": true
+ }
README.md ADDED
@@ -0,0 +1,25 @@
+ ---
+ base_model:
+ - arcee-ai/Llama-3.1-SuperNova-Lite
+ - Delta-Vector/Control-Nanuq-8B
+ library_name: peft
+ tags:
+ - mergekit
+ - peft
+
+ ---
+ # PRtest-r128-LoRA
+
+ This is a LoRA extracted from a language model. It was extracted using [mergekit](https://github.com/arcee-ai/mergekit).
+
+ ## LoRA Details
+
+ This LoRA adapter was extracted from [Delta-Vector/Control-Nanuq-8B](https://huggingface.co/Delta-Vector/Control-Nanuq-8B) and uses [arcee-ai/Llama-3.1-SuperNova-Lite](https://huggingface.co/arcee-ai/Llama-3.1-SuperNova-Lite) as a base.
+
+ ### Parameters
+
+ The following command was used to extract this LoRA adapter:
+
+ ```sh
+ /usr/local/bin/mergekit-extract-lora --out-path=loras/PRtest-r128-LoRA --model=Delta-Vector/Control-Nanuq-8B --base-model=arcee-ai/Llama-3.1-SuperNova-Lite --no-lazy-unpickle --max-rank=128 --sv-epsilon=0 --gpu-rich -v --embed-lora
+ ```
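For anyone who wants to try the result of that extraction, below is a minimal loading sketch, not code from the commit itself. It assumes a recent `transformers`/`peft` install, and the adapter repo id `kromeurus/PRtest-r128-LoRA` is a guess from the committer name and README title; substitute the actual path.

```python
# A minimal sketch: load the base model, then apply the extracted LoRA on top
# with peft. The adapter repo id "kromeurus/PRtest-r128-LoRA" is an assumption.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained(
    "arcee-ai/Llama-3.1-SuperNova-Lite",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("arcee-ai/Llama-3.1-SuperNova-Lite")

# Wrap the base model with the extracted adapter weights.
model = PeftModel.from_pretrained(base, "kromeurus/PRtest-r128-LoRA")

# Optional: fold the LoRA deltas into the base weights for adapter-free serving.
# model = model.merge_and_unload()
```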
adapter_config.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "base_model_name_or_path": "arcee-ai/Llama-3.1-SuperNova-Lite",
+ "peft_type": "LORA",
+ "use_rslora": false,
+ "target_modules": [
+ "lm_head",
+ "o_proj",
+ "k_proj",
+ "gate_proj",
+ "down_proj",
+ "embed_tokens",
+ "v_proj",
+ "q_proj",
+ "up_proj"
+ ],
+ "modules_to_save": [],
+ "task_type": "CAUSAL_LM",
+ "r": 128,
+ "lora_alpha": 128,
+ "rank_pattern": {},
+ "alpha_pattern": {},
+ "lora_dropout": 0.0,
+ "fan_in_fan_out": false,
+ "inference_mode": true
+ }
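For reference, the same settings expressed programmatically would look roughly like the sketch below. This is a reconstruction of the committed `adapter_config.json` as a `peft` `LoraConfig`, not code shipped in the commit.

```python
# A rough reconstruction of adapter_config.json as a peft LoraConfig;
# rank 128 with alpha 128, all attention/MLP projections plus the
# embedding and output head targeted, no dropout.
from peft import LoraConfig

config = LoraConfig(
    base_model_name_or_path="arcee-ai/Llama-3.1-SuperNova-Lite",
    task_type="CAUSAL_LM",
    r=128,
    lora_alpha=128,
    lora_dropout=0.0,
    use_rslora=False,
    fan_in_fan_out=False,
    inference_mode=True,
    target_modules=[
        "lm_head", "o_proj", "k_proj", "gate_proj", "down_proj",
        "embed_tokens", "v_proj", "q_proj", "up_proj",
    ],
    modules_to_save=[],
)
```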
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dbbb759b0af337001d88b153be5102dce0c39323ae4d4e887f0e067195f7dfa6
+ size 739455840