haniu committed (verified)
Commit: 125e1d8 · Parent(s): 9c6d9d0

Model save

README.md CHANGED
@@ -1,7 +1,7 @@
 ---
 library_name: peft
 license: llama3.2
-base_model: meta-llama/Llama-3.2-11B-Vision-Instruct
+base_model: meta-llama/Llama-3.2-90B-Vision-Instruct
 tags:
 - generated_from_trainer
 model-index:
@@ -14,7 +14,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # text
 
-This model is a fine-tuned version of [meta-llama/Llama-3.2-11B-Vision-Instruct](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct) on the None dataset.
+This model is a fine-tuned version of [meta-llama/Llama-3.2-90B-Vision-Instruct](https://huggingface.co/meta-llama/Llama-3.2-90B-Vision-Instruct) on the None dataset.
 
 ## Model description
 
@@ -50,8 +50,8 @@ The following hyperparameters were used during training:
 
 ### Framework versions
 
-- PEFT 0.13.2
-- Transformers 4.46.3
+- PEFT 0.14.0
+- Transformers 4.48.1
 - Pytorch 2.5.1+cu124
-- Datasets 3.1.0
-- Tokenizers 0.20.3
+- Datasets 3.2.0
+- Tokenizers 0.21.0
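The Framework versions bump (PEFT 0.13.2 → 0.14.0, Transformers 4.46.3 → 4.48.1, Datasets 3.1.0 → 3.2.0, Tokenizers 0.20.3 → 0.21.0) indicates the adapter was re-saved under a newer stack. A minimal sketch for checking that a local environment matches the updated card — the version strings are copied from the diff above, and the package names are the standard PyPI ones:

```python
# Sketch: compare installed versions against the card's "Framework versions".
from importlib.metadata import version

expected = {
    "peft": "0.14.0",
    "transformers": "4.48.1",
    "torch": "2.5.1+cu124",
    "datasets": "3.2.0",
    "tokenizers": "0.21.0",
}

for pkg, want in expected.items():
    have = version(pkg)
    flag = "OK" if have == want else f"differs (card lists {want})"
    print(f"{pkg} {have}: {flag}")
```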
adapter_config.json CHANGED
@@ -4,8 +4,10 @@
     "base_model_class": "MllamaForConditionalGeneration",
     "parent_library": "transformers.models.mllama.modeling_mllama"
   },
-  "base_model_name_or_path": "meta-llama/Llama-3.2-11B-Vision-Instruct",
+  "base_model_name_or_path": "meta-llama/Llama-3.2-90B-Vision-Instruct",
   "bias": "none",
+  "eva_config": null,
+  "exclude_modules": null,
   "fan_in_fan_out": false,
   "inference_mode": true,
   "init_lora_weights": "gaussian",
@@ -14,6 +16,7 @@
   "layers_to_transform": null,
   "loftq_config": {},
   "lora_alpha": 8,
+  "lora_bias": false,
   "lora_dropout": 0.1,
   "megatron_config": null,
   "megatron_core": "megatron.core",
@@ -23,13 +26,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "v_proj",
-    "gate_proj",
-    "down_proj",
-    "q_proj",
+    "up_proj",
     "k_proj",
+    "q_proj",
     "o_proj",
-    "up_proj"
+    "v_proj",
+    "down_proj",
+    "gate_proj"
   ],
   "task_type": null,
   "use_dora": true,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b72d5ec5c5d9ce352b7522e7ec6867c0fe6f74427695c7bc2efb9891c7a5d3ed
-size 125866776
+oid sha256:9895f41d607f56831abd6ef097d101a35e9a73e13090b20c456abce4180d9195
+size 565641552
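The weight blob is stored as a Git LFS pointer file — three `key value` lines giving the spec version, object hash, and byte size — rather than the tensors themselves. The size jump from 125,866,776 to 565,641,552 bytes (about 4.5×) is consistent with the same adapter recipe applied to the much larger 90B base. A generic sketch for reading such a pointer:

```python
# Sketch: parse a Git LFS pointer file ("version", "oid", "size" lines).
# Only meaningful when the checkout keeps the pointer on disk,
# e.g. a clone made with GIT_LFS_SKIP_SMUDGE=1.
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = parse_lfs_pointer("adapter_model.safetensors")
print(ptr["oid"])                       # sha256:9895f41d... after this commit
print(int(ptr["size"]) / 2**20, "MiB")  # ~539 MiB after this commit
```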
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0cf4e3e23fabe472d5990932d00b0ebdb8bfaa4a5a69825173a97d64f8211531
-size 5240
+oid sha256:7e920ae8d3f2a036f0f511582177ae84b3e39f82af7a5199c516e3ad1c31151e
+size 5304
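To use the artifacts saved in this commit, the adapter is attached on top of the 90B base (`training_args.bin` is just the serialized `TrainingArguments`, hence its few-KiB size). A minimal loading sketch — the adapter repo id below is a placeholder, and the base id comes from `base_model_name_or_path` in `adapter_config.json`:

```python
# Sketch: load the 90B base and attach the saved DoRA adapter.
import torch
from transformers import AutoProcessor, MllamaForConditionalGeneration
from peft import PeftModel

BASE_ID = "meta-llama/Llama-3.2-90B-Vision-Instruct"  # from adapter_config.json
ADAPTER_ID = "your-username/your-adapter-repo"        # placeholder repo id

base = MllamaForConditionalGeneration.from_pretrained(
    BASE_ID, torch_dtype=torch.bfloat16, device_map="auto"
)
model = PeftModel.from_pretrained(base, ADAPTER_ID)
processor = AutoProcessor.from_pretrained(BASE_ID)
```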