gokul00060 committed
Commit e5ee032
1 Parent(s): 86b3532

Delete GGUF-arm

GGUF-arm/README.md DELETED
@@ -1,54 +0,0 @@
- ---
- license: mit
- base_model: HuggingFaceH4/zephyr-7b-alpha
- tags:
- - generated_from_trainer
- model-index:
- - name: GGUF-arm
- results: []
- ---
-
- <!-- This model card has been generated automatically according to the information the Trainer had access to. You
- should probably proofread and complete it, then remove this comment. -->
-
- # GGUF-arm
-
- This model is a fine-tuned version of [HuggingFaceH4/zephyr-7b-alpha](https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha) on the None dataset.
-
- ## Model description
-
- More information needed
-
- ## Intended uses & limitations
-
- More information needed
-
- ## Training and evaluation data
-
- More information needed
-
- ## Training procedure
-
- ### Training hyperparameters
-
- The following hyperparameters were used during training:
- - learning_rate: 0.0002
- - train_batch_size: 8
- - eval_batch_size: 8
- - seed: 42
- - gradient_accumulation_steps: 4
- - total_train_batch_size: 32
- - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- - lr_scheduler_type: cosine
- - training_steps: 100
-
- ### Training results
-
-
-
- ### Framework versions
-
- - Transformers 4.34.1
- - Pytorch 2.1.0+cu118
- - Datasets 2.14.6
- - Tokenizers 0.14.1
 
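For orientation, here is a minimal sketch of how the hyperparameters listed in the deleted model card would map onto `transformers.TrainingArguments`. The output directory and the optimizer name are assumptions added for illustration; the original training script is not part of this commit.

```python
from transformers import TrainingArguments

# Sketch only: mirrors the hyperparameters recorded in the deleted README.md.
# output_dir is a placeholder; "adamw_torch" approximates "Adam with
# betas=(0.9,0.999) and epsilon=1e-08", which matches the Trainer defaults.
training_args = TrainingArguments(
    output_dir="GGUF-arm",            # placeholder
    learning_rate=2e-4,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=4,    # 8 * 4 = total_train_batch_size of 32
    seed=42,
    optim="adamw_torch",
    lr_scheduler_type="cosine",
    max_steps=100,
)
```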
GGUF-arm/adapter_config.json DELETED
@@ -1,21 +0,0 @@
- {
- "auto_mapping": null,
- "base_model_name_or_path": "HuggingFaceH4/zephyr-7b-alpha",
- "bias": "none",
- "fan_in_fan_out": false,
- "inference_mode": true,
- "init_lora_weights": true,
- "layers_pattern": null,
- "layers_to_transform": null,
- "lora_alpha": 16,
- "lora_dropout": 0.05,
- "modules_to_save": null,
- "peft_type": "LORA",
- "r": 8,
- "revision": null,
- "target_modules": [
- "q_proj",
- "v_proj"
- ],
- "task_type": "CAUSAL_LM"
- }
 
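The deleted adapter_config.json describes a standard PEFT LoRA setup. A minimal sketch of the equivalent `peft.LoraConfig`, assuming PEFT 0.5.x as listed in the checkpoint README:

```python
from peft import LoraConfig

# Sketch only: the LoRA settings recorded in the deleted adapter_config.json.
lora_config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.05,
    bias="none",
    target_modules=["q_proj", "v_proj"],
    task_type="CAUSAL_LM",
)
```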
GGUF-arm/adapter_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:9b97bad9aaa9d44a8a49c3a887a66e5c0fd8a75d24078f0eff402d69b0842a17
- size 13677706
 
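adapter_model.bin is a Git LFS pointer to the LoRA weights themselves (about 13.7 MB). As a rough sketch of how such an adapter would normally be attached to the base model — the path below is purely illustrative, and after this commit the weights no longer exist in the repository:

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM

# Sketch only: attach a saved LoRA adapter (adapter_config.json + adapter_model.bin)
# to the base model. "path/to/GGUF-arm" is a placeholder for a local copy.
base = AutoModelForCausalLM.from_pretrained("HuggingFaceH4/zephyr-7b-alpha")
model = PeftModel.from_pretrained(base, "path/to/GGUF-arm")
```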
GGUF-arm/checkpoint-1/README.md DELETED
@@ -1,34 +0,0 @@
- ---
- library_name: peft
- ---
- ## Training procedure
-
-
- The following `bitsandbytes` quantization config was used during training:
- - quant_method: bitsandbytes
- - load_in_8bit: False
- - load_in_4bit: True
- - llm_int8_threshold: 6.0
- - llm_int8_skip_modules: None
- - llm_int8_enable_fp32_cpu_offload: False
- - llm_int8_has_fp16_weight: False
- - bnb_4bit_quant_type: nf4
- - bnb_4bit_use_double_quant: True
- - bnb_4bit_compute_dtype: float16
-
- The following `bitsandbytes` quantization config was used during training:
- - quant_method: bitsandbytes
- - load_in_8bit: False
- - load_in_4bit: True
- - llm_int8_threshold: 6.0
- - llm_int8_skip_modules: None
- - llm_int8_enable_fp32_cpu_offload: False
- - llm_int8_has_fp16_weight: False
- - bnb_4bit_quant_type: nf4
- - bnb_4bit_use_double_quant: True
- - bnb_4bit_compute_dtype: float16
- ### Framework versions
-
- - PEFT 0.5.0
-
- - PEFT 0.5.0
 
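The quantization settings in this deleted checkpoint README describe a QLoRA-style 4-bit NF4 setup. A minimal sketch of the equivalent `BitsAndBytesConfig`; the llm_int8_* values listed above are the library defaults and are left implicit here:

```python
import torch
from transformers import BitsAndBytesConfig

# Sketch only: the bitsandbytes settings listed in the deleted checkpoint README.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.float16,
)
```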
GGUF-arm/checkpoint-1/adapter_config.json DELETED
@@ -1,21 +0,0 @@
- {
- "auto_mapping": null,
- "base_model_name_or_path": "HuggingFaceH4/zephyr-7b-alpha",
- "bias": "none",
- "fan_in_fan_out": false,
- "inference_mode": true,
- "init_lora_weights": true,
- "layers_pattern": null,
- "layers_to_transform": null,
- "lora_alpha": 16,
- "lora_dropout": 0.05,
- "modules_to_save": null,
- "peft_type": "LORA",
- "r": 8,
- "revision": null,
- "target_modules": [
- "q_proj",
- "v_proj"
- ],
- "task_type": "CAUSAL_LM"
- }
 
GGUF-arm/checkpoint-1/adapter_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:01fd3748efc4e025482fdbdbc1769794bd2163ab4be86fd24b40cbbfcff0c7aa
- size 13677706
 
GGUF-arm/checkpoint-1/optimizer.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:b3abbdb3072483266132c98f6fef64c124256db4849fb7ec3f9a6d359d4e45c8
- size 27338682
 
GGUF-arm/checkpoint-1/rng_state.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:65d7f6b0d79af9c4bcb72f29b3e6ed81eabde6a813d75ce2988058c511288fc7
- size 14244
 
GGUF-arm/checkpoint-1/scheduler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:a997a23ebce56d9f2b5b1529d3767c29e8b98b6bc6f1a49bdcc700f019e9c857
- size 1064
 
GGUF-arm/checkpoint-1/special_tokens_map.json DELETED
@@ -1,29 +0,0 @@
- {
- "additional_special_tokens": [
- "<unk>",
- "<s>",
- "</s>"
- ],
- "bos_token": {
- "content": "<s>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false
- },
- "eos_token": {
- "content": "</s>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false
- },
- "pad_token": "</s>",
- "unk_token": {
- "content": "<unk>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false
- }
- }
 
GGUF-arm/checkpoint-1/tokenizer.json DELETED
The diff for this file is too large to render. See raw diff
 
GGUF-arm/checkpoint-1/tokenizer_config.json DELETED
@@ -1,46 +0,0 @@
- {
- "added_tokens_decoder": {
- "0": {
- "content": "<unk>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "1": {
- "content": "<s>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "2": {
- "content": "</s>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- }
- },
- "additional_special_tokens": [
- "<unk>",
- "<s>",
- "</s>"
- ],
- "bos_token": "<s>",
- "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
- "clean_up_tokenization_spaces": false,
- "eos_token": "</s>",
- "legacy": true,
- "model_max_length": 1000000000000000019884624838656,
- "pad_token": "</s>",
- "sp_model_kwargs": {},
- "spaces_between_special_tokens": false,
- "tokenizer_class": "LlamaTokenizer",
- "truncation_side": "left",
- "unk_token": "<unk>",
- "use_default_system_prompt": true
- }
 
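The chat_template stored in this deleted tokenizer_config.json is the Zephyr chat format. As a hedged illustration, the base model's tokenizer carries the same template, so a prompt can be previewed like this (the messages below are invented for the example):

```python
from transformers import AutoTokenizer

# Sketch only: the deleted tokenizer_config.json carries the Zephyr chat template,
# so the base model's tokenizer is used here to show roughly what it renders.
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-alpha")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
# Roughly:
# <|system|>
# You are a helpful assistant.</s>
# <|user|>
# Hello!</s>
# <|assistant|>
```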
GGUF-arm/checkpoint-1/trainer_state.json DELETED
@@ -1,18 +0,0 @@
- {
- "best_metric": null,
- "best_model_checkpoint": null,
- "epoch": 1.0,
- "eval_steps": 500,
- "global_step": 1,
- "is_hyper_param_search": false,
- "is_local_process_zero": true,
- "is_world_process_zero": true,
- "log_history": [],
- "logging_steps": 10,
- "max_steps": 100,
- "num_train_epochs": 100,
- "save_steps": 500,
- "total_flos": 28513184415744.0,
- "trial_name": null,
- "trial_params": null
- }
 
GGUF-arm/checkpoint-1/training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:7139291f338e9a107d2f95f645ca28419cde427d8fd967b31812376aa1de3818
- size 4536
 
GGUF-arm/runs/Oct29_14-04-03_36ac896f0d4e/events.out.tfevents.1698588250.36ac896f0d4e.485.0 DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:b4b9d95abe832b732f43048e1be30488f4932d5358a22c58b299d3b1e3603928
- size 6405
 
GGUF-arm/special_tokens_map.json DELETED
@@ -1,29 +0,0 @@
- {
- "additional_special_tokens": [
- "<unk>",
- "<s>",
- "</s>"
- ],
- "bos_token": {
- "content": "<s>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false
- },
- "eos_token": {
- "content": "</s>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false
- },
- "pad_token": "</s>",
- "unk_token": {
- "content": "<unk>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false
- }
- }
 
GGUF-arm/tokenizer.json DELETED
The diff for this file is too large to render. See raw diff
 
GGUF-arm/tokenizer_config.json DELETED
@@ -1,46 +0,0 @@
- {
- "added_tokens_decoder": {
- "0": {
- "content": "<unk>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "1": {
- "content": "<s>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "2": {
- "content": "</s>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- }
- },
- "additional_special_tokens": [
- "<unk>",
- "<s>",
- "</s>"
- ],
- "bos_token": "<s>",
- "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
- "clean_up_tokenization_spaces": false,
- "eos_token": "</s>",
- "legacy": true,
- "model_max_length": 1000000000000000019884624838656,
- "pad_token": "</s>",
- "sp_model_kwargs": {},
- "spaces_between_special_tokens": false,
- "tokenizer_class": "LlamaTokenizer",
- "truncation_side": "left",
- "unk_token": "<unk>",
- "use_default_system_prompt": true
- }
 
GGUF-arm/training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:7139291f338e9a107d2f95f645ca28419cde427d8fd967b31812376aa1de3818
- size 4536