sahlebrahim committed
Commit 6993666
1 Parent(s): 211b0d7

End of training

Files changed (3)
  1. README.md +1 -5
  2. adapter_config.json +3 -3
  3. adapter_model.safetensors +1 -1
README.md CHANGED
@@ -11,7 +11,7 @@ model-index:
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 should probably proofread and complete it, then remove this comment. -->
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/shl31006/huggingface/runs/pr5shl38)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/shl31006/huggingface/runs/s80g9iqx)
 # your_checkpoint_directory
 
 This model is a fine-tuned version of [NousResearch/Llama-2-7b-hf](https://huggingface.co/NousResearch/Llama-2-7b-hf) on an unknown dataset.
@@ -45,10 +45,6 @@ The following hyperparameters were used during training:
 - num_epochs: 1
 - mixed_precision_training: Native AMP
 
-### Training results
-
-
-
 ### Framework versions
 
 - PEFT 0.11.1
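The card describes a PEFT LoRA adapter trained on top of NousResearch/Llama-2-7b-hf. A minimal sketch of how such an adapter is typically attached to the base model (the adapter path below is a placeholder taken from the card's title; the checkpoint's actual repo id is not stated in this diff):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "NousResearch/Llama-2-7b-hf"     # base model named in the card
adapter_path = "your_checkpoint_directory"  # placeholder, as in the card heading

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.float16)

# Attach the LoRA weights (adapter_model.safetensors + adapter_config.json).
model = PeftModel.from_pretrained(base, adapter_path)
```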
adapter_config.json CHANGED
@@ -20,12 +20,12 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "up_proj",
-    "k_proj",
-    "q_proj",
     "down_proj",
+    "up_proj",
     "o_proj",
     "gate_proj",
+    "k_proj",
+    "q_proj",
     "v_proj"
   ],
   "task_type": "CAUSAL_LM",
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2bad21a285ca05847b86d5d2fb9c8c74ed860c94669cdc50f3bf887d4e5f3442
+oid sha256:cdc35e23ca6ee3b30395e38a32f195536af50526541001951bdaedac1b49ba4f
 size 159967880
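This file is a Git LFS pointer: per the LFS v1 spec, `oid` is the SHA-256 of the actual object and `size` is its byte count, so only the hash changed here while the adapter stayed the same size. A small sketch for verifying a downloaded copy against the pointer (the local filename is an assumption):

```python
import hashlib
import os

def verify_lfs_pointer(path: str, expected_oid: str, expected_size: int) -> bool:
    """Check a resolved LFS object against the oid/size from its pointer file."""
    if os.path.getsize(path) != expected_size:
        return False
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
            h.update(chunk)
    return h.hexdigest() == expected_oid

# Values taken from the new pointer in this commit:
print(verify_lfs_pointer(
    "adapter_model.safetensors",  # assumes the file sits in the working directory
    "cdc35e23ca6ee3b30395e38a32f195536af50526541001951bdaedac1b49ba4f",
    159967880,
))
```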