Louisnguyen committed on
Commit
ec1a5e6
1 Parent(s): 8a8311c

Training in progress, step 500

Browse files
README.md CHANGED
@@ -1,15 +1,15 @@
1
- ---
2
- base_model: llava-hf/llava-1.5-7b-hf
3
- library_name: peft
4
- license: llama2
5
- tags:
6
- - trl
7
- - sft
8
- - generated_from_trainer
9
- model-index:
10
- - name: llava-1.5-7b-hf-train-bio2
11
- results: []
12
- ---
13
 
14
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
15
  should probably proofread and complete it, then remove this comment. -->
@@ -52,6 +52,6 @@ The following hyperparameters were used during training:
52
 
53
  - PEFT 0.12.0
54
  - Transformers 4.43.2
55
- - Pytorch 2.3.1+cu118
56
  - Datasets 2.20.0
57
  - Tokenizers 0.19.1
 
1
+ ---
2
+ base_model: llava-hf/llava-1.5-7b-hf
3
+ library_name: peft
4
+ license: llama2
5
+ tags:
6
+ - trl
7
+ - sft
8
+ - generated_from_trainer
9
+ model-index:
10
+ - name: llava-1.5-7b-hf-train-bio2
11
+ results: []
12
+ ---
13
 
14
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
15
  should probably proofread and complete it, then remove this comment. -->
 
52
 
53
  - PEFT 0.12.0
54
  - Transformers 4.43.2
55
+ - Pytorch 2.2.1+cu121
56
  - Datasets 2.20.0
57
  - Tokenizers 0.19.1
adapter_config.json CHANGED
@@ -1,36 +1,36 @@
1
- {
2
- "alpha_pattern": {},
3
- "auto_mapping": {
4
- "base_model_class": "LlavaForConditionalGeneration",
5
- "parent_library": "transformers.models.llava.modeling_llava"
6
- },
7
- "base_model_name_or_path": "llava-hf/llava-1.5-7b-hf",
8
- "bias": "none",
9
- "fan_in_fan_out": false,
10
- "inference_mode": true,
11
- "init_lora_weights": true,
12
- "layer_replication": null,
13
- "layers_pattern": null,
14
- "layers_to_transform": null,
15
- "loftq_config": {},
16
- "lora_alpha": 16,
17
- "lora_dropout": 0.0,
18
- "megatron_config": null,
19
- "megatron_core": "megatron.core",
20
- "modules_to_save": null,
21
- "peft_type": "LORA",
22
- "r": 64,
23
- "rank_pattern": {},
24
- "revision": null,
25
- "target_modules": [
26
- "k_proj",
27
- "fc2up_proj",
28
- "v_projfc1",
29
- "q_proj",
30
- "gate_proj",
31
- "down_proj"
32
- ],
33
- "task_type": null,
34
- "use_dora": false,
35
- "use_rslora": false
36
  }
 
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": {
4
+ "base_model_class": "LlavaForConditionalGeneration",
5
+ "parent_library": "transformers.models.llava.modeling_llava"
6
+ },
7
+ "base_model_name_or_path": "llava-hf/llava-1.5-7b-hf",
8
+ "bias": "none",
9
+ "fan_in_fan_out": false,
10
+ "inference_mode": true,
11
+ "init_lora_weights": true,
12
+ "layer_replication": null,
13
+ "layers_pattern": null,
14
+ "layers_to_transform": null,
15
+ "loftq_config": {},
16
+ "lora_alpha": 16,
17
+ "lora_dropout": 0.0,
18
+ "megatron_config": null,
19
+ "megatron_core": "megatron.core",
20
+ "modules_to_save": null,
21
+ "peft_type": "LORA",
22
+ "r": 64,
23
+ "rank_pattern": {},
24
+ "revision": null,
25
+ "target_modules": [
26
+ "q_proj",
27
+ "down_proj",
28
+ "gate_proj",
29
+ "v_projfc1",
30
+ "k_proj",
31
+ "fc2up_proj"
32
+ ],
33
+ "task_type": null,
34
+ "use_dora": false,
35
+ "use_rslora": false
36
  }
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:a15e780b65a1ca6f611a43c77406c35f0f7d23923f12e1906c86e6fae7173849
3
  size 406901688
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:22574490638dcb4a3a335722f7a8378733ec6197069567c5eb1fc80ccfdfdf2f
3
  size 406901688
runs/Jul26_04-32-02_ip-10-192-12-151/events.out.tfevents.1721968337.ip-10-192-12-151.8017.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1371ba020ad967f7d1adacb99c70d938d06aa81d995f8d8630dba1b0d1ed0b7c
3
+ size 7959
runs/Jul26_05-28-52_ip-10-192-12-151/events.out.tfevents.1721971749.ip-10-192-12-151.8017.2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a2a6d5bf6dbb4ed40032a7958e768a6c74d106a6c380c98116fbd0426f2f0463
3
+ size 6080
runs/Jul26_05-32-51_ip-10-192-12-151/events.out.tfevents.1721971982.ip-10-192-12-151.8017.3 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:044df83ef2b89bd892a7888928db961645e1a80ceeb02ad34c24daaf7c0be339
3
+ size 10280
special_tokens_map.json CHANGED
@@ -1,30 +1,30 @@
1
- {
2
- "bos_token": {
3
- "content": "<s>",
4
- "lstrip": false,
5
- "normalized": false,
6
- "rstrip": false,
7
- "single_word": false
8
- },
9
- "eos_token": {
10
- "content": "</s>",
11
- "lstrip": false,
12
- "normalized": false,
13
- "rstrip": false,
14
- "single_word": false
15
- },
16
- "pad_token": {
17
- "content": "<pad>",
18
- "lstrip": false,
19
- "normalized": false,
20
- "rstrip": false,
21
- "single_word": false
22
- },
23
- "unk_token": {
24
- "content": "<unk>",
25
- "lstrip": false,
26
- "normalized": false,
27
- "rstrip": false,
28
- "single_word": false
29
- }
30
- }
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "</s>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "<pad>",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "unk_token": {
24
+ "content": "<unk>",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ }
30
+ }
tokenizer_config.json CHANGED
@@ -1,61 +1,61 @@
1
- {
2
- "add_bos_token": true,
3
- "add_eos_token": false,
4
- "add_prefix_space": null,
5
- "added_tokens_decoder": {
6
- "0": {
7
- "content": "<unk>",
8
- "lstrip": false,
9
- "normalized": false,
10
- "rstrip": false,
11
- "single_word": false,
12
- "special": true
13
- },
14
- "1": {
15
- "content": "<s>",
16
- "lstrip": false,
17
- "normalized": false,
18
- "rstrip": false,
19
- "single_word": false,
20
- "special": true
21
- },
22
- "2": {
23
- "content": "</s>",
24
- "lstrip": false,
25
- "normalized": false,
26
- "rstrip": false,
27
- "single_word": false,
28
- "special": true
29
- },
30
- "32000": {
31
- "content": "<image>",
32
- "lstrip": false,
33
- "normalized": false,
34
- "rstrip": false,
35
- "single_word": false,
36
- "special": true
37
- },
38
- "32001": {
39
- "content": "<pad>",
40
- "lstrip": false,
41
- "normalized": false,
42
- "rstrip": false,
43
- "single_word": false,
44
- "special": true
45
- }
46
- },
47
- "bos_token": "<s>",
48
- "chat_template": "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.{% for message in messages %}{% if message['role'] == 'user' %} \nUSER: {% else %} \nASSISTANT: {% endif %}{% for item in message['content'] %}{% if item['type'] == 'text' %}{{ item['text'] }}{% elif item['type'] == 'image' %}<image> {% endif %}{% endfor %}{% if message['role'] == 'user' %}{% else %}{{eos_token}}{% endif %}{% endfor %}",
49
- "clean_up_tokenization_spaces": false,
50
- "eos_token": "</s>",
51
- "legacy": false,
52
- "model_max_length": 1000000000000000019884624838656,
53
- "pad_token": "<pad>",
54
- "padding_side": "left",
55
- "processor_class": "LlavaProcessor",
56
- "sp_model_kwargs": {},
57
- "tokenizer_class": "LlamaTokenizer",
58
- "trust_remote_code": false,
59
- "unk_token": "<unk>",
60
- "use_default_system_prompt": false
61
- }
 
1
+ {
2
+ "add_bos_token": true,
3
+ "add_eos_token": false,
4
+ "add_prefix_space": null,
5
+ "added_tokens_decoder": {
6
+ "0": {
7
+ "content": "<unk>",
8
+ "lstrip": false,
9
+ "normalized": false,
10
+ "rstrip": false,
11
+ "single_word": false,
12
+ "special": true
13
+ },
14
+ "1": {
15
+ "content": "<s>",
16
+ "lstrip": false,
17
+ "normalized": false,
18
+ "rstrip": false,
19
+ "single_word": false,
20
+ "special": true
21
+ },
22
+ "2": {
23
+ "content": "</s>",
24
+ "lstrip": false,
25
+ "normalized": false,
26
+ "rstrip": false,
27
+ "single_word": false,
28
+ "special": true
29
+ },
30
+ "32000": {
31
+ "content": "<image>",
32
+ "lstrip": false,
33
+ "normalized": false,
34
+ "rstrip": false,
35
+ "single_word": false,
36
+ "special": true
37
+ },
38
+ "32001": {
39
+ "content": "<pad>",
40
+ "lstrip": false,
41
+ "normalized": false,
42
+ "rstrip": false,
43
+ "single_word": false,
44
+ "special": true
45
+ }
46
+ },
47
+ "bos_token": "<s>",
48
+ "chat_template": "A chat between an user and an artificial intelligence assistant about Science Question Answering. The assistant gives helpful, detailed, and polite answers to the user's questions.\nBased on the image and question, please choose one of the given choices that answer the question.\nGive yourself room to think by extracting the image and question before choosing the choice.\nDon't return the thinking, only return the highest accuracy choice.\nMake sure your answers are as correct as possible.\n{% for tag, content in messages.items() %}\n{% if tag == 'sample_question' %}\nUse the following examples as reference for the ideal answer style.\n{% for message in content %}\n{% if message['role'] == 'user' %} \nExample\nUSER: {% else %}ASSISTANT: {% endif %}\n{% for item in message['content'] %}\n{% if item['type'] == 'text_question' %}\nQuestion: {{ item['question'] }}\n{% elif item['type'] == 'text_choice' %}\nChoices: {{ item['choice'] }}\n{% elif item['type'] == 'text_solution' %}\nSolution: {{ item['solution'] }}\n{% elif item['type'] == 'text_answer' %}\nAnswer: {{ item['answer'] }}{% elif item['type'] == 'image' %}<image>\n{% endif %}\n{% endfor %}\n{% if message['role'] == 'user' %}\n{% else %}\n{{eos_token}}\n{% endif %}{% endfor %}{% endif %}\n\n{% if tag == 'real_question' %}\nNow use the following image and question to choose the choice:\n{% for message in content %}\n{% if message['role'] == 'user' %}USER: {% else %}ASSISTANT: {% endif %}\n{% for item in message['content'] %}\n{% if item['type'] == 'text_question' %}\nQuestion: {{ item['question'] }}\n{% elif item['type'] == 'text_choice' %}\nChoices: {{ item['choice'] }}\n{% elif item['type'] == 'text_solution' %}\nSolution: {{ item['solution'] }}\n{% elif item['type'] == 'text_answer' %}\nAnswer: {{ item['answer'] }}{% elif item['type'] == 'image' %}<image>\n{% endif %}\n{% endfor %}\n{% if message['role'] == 'user' %}\n{% else %}\n{{eos_token}}\n{% endif %}{% endfor %}{% endif %}\n{% endfor %}",
49
+ "clean_up_tokenization_spaces": false,
50
+ "eos_token": "</s>",
51
+ "legacy": false,
52
+ "model_max_length": 1000000000000000019884624838656,
53
+ "pad_token": "<pad>",
54
+ "padding_side": "left",
55
+ "processor_class": "LlavaProcessor",
56
+ "sp_model_kwargs": {},
57
+ "tokenizer_class": "LlamaTokenizer",
58
+ "trust_remote_code": false,
59
+ "unk_token": "<unk>",
60
+ "use_default_system_prompt": false
61
+ }
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:1944c9220793c15eef6b8918f0cbd0939f6bd967ed59c3cb34666c87d1e22938
3
  size 5496
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6e9705325b640d251bcc226e086bcb6e10a8bfdd500e23b4a747cdeb0dfa9850
3
  size 5496