SystemAdmin123 committed
Commit e6f353a · verified · 1 Parent(s): 7542681

Training in progress, step 200

axolotl_config.yaml CHANGED
@@ -1,5 +1,5 @@
-base_model: trl-internal-testing/tiny-random-LlamaForCausalLM
-batch_size: 128
+base_model: unsloth/OpenHermes-2.5-Mistral-7B
+batch_size: 32
 bf16: true
 chat_template: tokenizer_default_fallback_alpaca
 datasets:
@@ -25,7 +25,7 @@ learning_rate: 0.0002
 logging_steps: 10
 lr_scheduler: cosine
 max_steps: 10000
-micro_batch_size: 32
+micro_batch_size: 2
 model_type: AutoModelForCausalLM
 num_epochs: 100
 optimizer: adamw_bnb_8bit
@@ -44,7 +44,7 @@ trust_remote_code: true
 val_set_size: 0.1
 wandb_entity: ''
 wandb_mode: online
-wandb_name: trl-internal-testing/tiny-random-LlamaForCausalLM-argilla/databricks-dolly-15k-curated-en
+wandb_name: unsloth/OpenHermes-2.5-Mistral-7B-argilla/databricks-dolly-15k-curated-en
 wandb_project: Gradients-On-Demand
 wandb_run: your_name
 wandb_runid: default
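
Note: this commit swaps the tiny CI test model for the full 7B base and shrinks the batch settings to fit it. In axolotl, batch_size is the effective (global) batch per optimizer step and micro_batch_size is the per-device batch per forward/backward pass; the gap between them is bridged by gradient accumulation. A minimal sketch of that relationship, assuming a single GPU (world_size is a hypothetical stand-in, not a key in this config):

    # Relationship between the batch settings above (assumption: 1 GPU).
    batch_size = 32        # effective batch per optimizer step (new value)
    micro_batch_size = 2   # examples per forward/backward pass (new value)
    world_size = 1         # hypothetical device count, not from this config

    # Gradient accumulation covers the gap between micro and effective batch.
    grad_accum_steps = batch_size // (micro_batch_size * world_size)
    assert grad_accum_steps == 16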
config.json CHANGED
@@ -1,31 +1,28 @@
 {
-  "_name_or_path": "trl-internal-testing/tiny-random-LlamaForCausalLM",
+  "_name_or_path": "unsloth/OpenHermes-2.5-Mistral-7B",
   "architectures": [
-    "LlamaForCausalLM"
+    "MistralForCausalLM"
   ],
-  "attention_bias": false,
   "attention_dropout": 0.0,
-  "bos_token_id": 0,
-  "eos_token_id": 2,
-  "head_dim": 4,
+  "bos_token_id": 1,
+  "eos_token_id": 32000,
+  "head_dim": 128,
   "hidden_act": "silu",
-  "hidden_size": 16,
+  "hidden_size": 4096,
   "initializer_range": 0.02,
-  "intermediate_size": 64,
-  "max_position_embeddings": 2048,
-  "mlp_bias": false,
-  "model_type": "llama",
-  "num_attention_heads": 4,
-  "num_hidden_layers": 2,
-  "num_key_value_heads": 4,
-  "pad_token_id": -1,
-  "pretraining_tp": 1,
-  "rms_norm_eps": 1e-06,
-  "rope_scaling": null,
+  "intermediate_size": 14336,
+  "max_position_embeddings": 32768,
+  "model_type": "mistral",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 8,
+  "pad_token_id": 0,
+  "rms_norm_eps": 1e-05,
   "rope_theta": 10000.0,
+  "sliding_window": 4096,
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
   "transformers_version": "4.48.1",
   "use_cache": false,
-  "vocab_size": 32000
+  "vocab_size": 32002
 }
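
Note: the new config describes a standard Mistral-7B stack (32 layers, hidden size 4096, grouped-query attention with 8 KV heads) in place of the 2-layer test Llama. vocab_size grows from 32000 to 32002 because the two ChatML markers <|im_start|> and <|im_end|> are registered as new tokens (see tokenizer_config.json below), and eos_token_id 32000 points at <|im_end|>. A quick consistency check, as a sketch (repo id taken from _name_or_path above):

    from transformers import AutoConfig, AutoTokenizer

    repo = "unsloth/OpenHermes-2.5-Mistral-7B"
    cfg = AutoConfig.from_pretrained(repo)
    tok = AutoTokenizer.from_pretrained(repo)

    # The embedding table must cover every added special token.
    assert cfg.vocab_size >= len(tok)
    # eos_token_id should decode to the ChatML end-of-turn marker.
    print(tok.convert_ids_to_tokens([cfg.eos_token_id]))  # expect ['<|im_end|>']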
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a7868cab33fad23d3511b8ea224cf1dbea7722902a8867937737b71ccacc2059
+oid sha256:410afacf2f348a926671e6ccc3caa4629e74e907f868e42ecb93d6f5d05ee853
 size 4943178720
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:378ce005606bc9c097868eef32fada737076ab88cbbb362c2ba481fb98b05de1
+oid sha256:f52bef3c6ffa1c00376a6dfa73ddef315bc86f0dd6c2057a47be4e61e1fb7c3d
 size 4999819336
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:15ff3e698d027f621311484ef4aedcc8082231840d43b6ce7616843621290ca6
+oid sha256:0278078bf87ec279f8a814ac678eb563b3b6410abe702bdf7049de5062e7cd9e
 size 4540532728
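
Note: only the LFS pointers (weight contents) change here; shard sizes are identical, as expected when training updates values but not shapes. The three shards total 14,483,530,784 bytes, which at 2 bytes per bf16 parameter is roughly 7.24B parameters, consistent with a Mistral-7B checkpoint:

    # Shard sizes copied from the LFS pointers above (bytes).
    shards = [4_943_178_720, 4_999_819_336, 4_540_532_728]
    total_bytes = sum(shards)                 # 14_483_530_784
    params = total_bytes / 2                  # bf16 stores 2 bytes per parameter
    print(f"{params / 1e9:.2f}B parameters")  # ~7.24B, Mistral-7B scale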
special_tokens_map.json CHANGED
@@ -7,7 +7,7 @@
     "single_word": false
   },
   "eos_token": {
-    "content": "</s>",
+    "content": "<|im_end|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d606f46a8aa6f29f0a0abdec7c3ffddefc9f9bfe26919532d209a0a850e25029
-size 3619013
+oid sha256:04222cd76979c181cd3f72c3bf6982fe2a09d9f4b8f23d82902efde18f1d0668
+size 3506125
tokenizer.model CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
-size 499723
+oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+size 493443
tokenizer_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "add_bos_token": true,
   "add_eos_token": false,
-  "add_prefix_space": null,
+  "add_prefix_space": true,
   "added_tokens_decoder": {
     "0": {
       "content": "<unk>",
@@ -26,17 +26,38 @@
       "rstrip": false,
       "single_word": false,
       "special": true
+    },
+    "32000": {
+      "content": "<|im_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "32001": {
+      "content": "<|im_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
     }
   },
+  "additional_special_tokens": [],
   "bos_token": "<s>",
-  "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}{{ '### Instruction: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ '### Response: ' + message['content'] + eos_token}}{% endif %}{% endfor %}",
+  "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
   "clean_up_tokenization_spaces": false,
-  "eos_token": "</s>",
+  "eos_token": "<|im_end|>",
   "extra_special_tokens": {},
   "legacy": true,
-  "model_max_length": 2048,
+  "model_max_length": 32768,
   "pad_token": "<unk>",
+  "padding_side": "right",
+  "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
   "tokenizer_class": "LlamaTokenizer",
+  "trust_remote_code": false,
   "unk_token": "<unk>",
   "use_default_system_prompt": true,
   "use_fast": true
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:11cf48786efac37806223cc8882d4253d84ff8c3599dc92c9fda0e12bc8a651f
-size 6840
+oid sha256:ef3eb8b3b2a8acd873a0906e2cee7771ee216bb2230a5edc2d984d58604ec971
+size 6776