hendrydong committed
Commit 9ffdc49 · 1 Parent(s): 52b3bdf

Delete robin-7b

robin-7b/README.md DELETED
@@ -1,58 +0,0 @@
- ---
- license: apache-2.0
- tags:
- - generated_from_trainer
- datasets:
- - customized
- model-index:
- - name: h10
-   results: []
- ---
-
- <!-- This model card has been generated automatically according to the information the Trainer had access to. You
- should probably proofread and complete it, then remove this comment. -->
-
- # h10
-
- This model is a fine-tuned version of [pinkmanlove/llama-7b-hf](https://huggingface.co/pinkmanlove/llama-7b-hf) on the customized dataset.
-
- ## Model description
-
- More information needed
-
- ## Intended uses & limitations
-
- More information needed
-
- ## Training and evaluation data
-
- More information needed
-
- ## Training procedure
-
- ### Training hyperparameters
-
- The following hyperparameters were used during training:
- - learning_rate: 0.0001
- - train_batch_size: 4
- - eval_batch_size: 8
- - seed: 42
- - distributed_type: multi-GPU
- - num_devices: 4
- - total_train_batch_size: 16
- - total_eval_batch_size: 32
- - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- - lr_scheduler_type: cosine
- - lr_scheduler_warmup_ratio: 0.03
- - num_epochs: 5.0
-
- ### Training results
-
-
-
- ### Framework versions
-
- - Transformers 4.28.0.dev0
- - Pytorch 2.0.0+cu117
- - Datasets 2.10.1
- - Tokenizers 0.13.3
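The hyperparameter list in the deleted card maps roughly onto a `transformers.TrainingArguments` call; a minimal sketch follows (the output directory and launch command are assumptions, not stated in the card):

```python
from transformers import TrainingArguments

# Sketch of the deleted card's training setup. Launched on 4 GPUs
# (e.g. `torchrun --nproc_per_node=4 train.py`), the per-device
# batches of 4/8 give the reported totals of 16/32.
args = TrainingArguments(
    output_dir="robin-7b",          # hypothetical path
    learning_rate=1e-4,
    per_device_train_batch_size=4,  # x4 devices = 16 total
    per_device_eval_batch_size=8,   # x4 devices = 32 total
    seed=42,
    num_train_epochs=5.0,
    lr_scheduler_type="cosine",
    warmup_ratio=0.03,
    adam_beta1=0.9,                 # Adam betas=(0.9, 0.999)
    adam_beta2=0.999,
    adam_epsilon=1e-8,
)
```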
 
robin-7b/adapter_config.json DELETED
@@ -1,21 +0,0 @@
- {
-   "base_model_name_or_path": "pinkmanlove/llama-7b-hf",
-   "bias": "none",
-   "enable_lora": null,
-   "fan_in_fan_out": false,
-   "inference_mode": true,
-   "init_lora_weights": true,
-   "lora_alpha": 32,
-   "lora_dropout": 0.1,
-   "merge_weights": false,
-   "modules_to_save": null,
-   "peft_type": "LORA",
-   "r": 128,
-   "target_modules": [
-     "q_proj",
-     "k_proj",
-     "v_proj",
-     "o_proj"
-   ],
-   "task_type": "CAUSAL_LM"
- }
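The config above describes a rank-128 PEFT LoRA adapter over the four attention projections of `pinkmanlove/llama-7b-hf`. Before the deletion it could have been loaded along these lines (a minimal sketch; the local directory name is an assumption):

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM

# Load the frozen base model named in "base_model_name_or_path",
# then attach the LoRA weights from the adapter directory
# ("robin-7b" is a hypothetical local path holding
# adapter_config.json and adapter_model.bin).
base = AutoModelForCausalLM.from_pretrained("pinkmanlove/llama-7b-hf")
model = PeftModel.from_pretrained(base, "robin-7b")
model.eval()
```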
 
robin-7b/adapter_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:250ed2611b7e1071a390509be29f05e25d5ecb2c703955b97b2a640ddd6ce337
- size 268476157
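The pointer's byte count is consistent with the adapter config: assuming the usual LLaMA-7B shape (32 decoder layers, hidden size 4096, neither stated in this diff), rank-128 LoRA on four projections stored in fp16 accounts for almost all of the 268,476,157 bytes:

```python
# Back-of-the-envelope size check (architecture values are assumptions).
layers, targets, r, hidden = 32, 4, 128, 4096
params = layers * targets * r * (hidden + hidden)  # LoRA A and B matrices
print(params)      # 134217728 parameters
print(params * 2)  # 268435456 bytes in fp16; remainder is file overhead
```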
 
robin-7b/all_results.json DELETED
@@ -1,8 +0,0 @@
- {
-     "epoch": 5.0,
-     "train_loss": 0.9734652058462079,
-     "train_runtime": 41271.4588,
-     "train_samples": 142397,
-     "train_samples_per_second": 17.251,
-     "train_steps_per_second": 1.078
- }
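The deleted metrics are internally consistent: throughput follows from the sample count, epochs, and runtime (a quick check, not part of the original files):

```python
# 142,397 samples x 5 epochs over the reported wall-clock seconds.
samples, epochs, runtime, total_batch = 142397, 5.0, 41271.4588, 16
print(samples * epochs / runtime)                # ~17.251 samples/s
print(samples * epochs / runtime / total_batch)  # ~1.078 steps/s
```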
 
robin-7b/special_tokens_map.json DELETED
@@ -1 +0,0 @@
- {}
 
robin-7b/tokenizer.model DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
- size 499723
 
robin-7b/tokenizer_config.json DELETED
@@ -1,9 +0,0 @@
- {
-   "bos_token": "",
-   "clean_up_tokenization_spaces": false,
-   "eos_token": "",
-   "model_max_length": 1000000000000000019884624838656,
-   "special_tokens_map_file": "/root/data/.cache/hub/models--pinkmanlove--llama-7b-hf/snapshots/b3cde76468bad3c085ead29707ee7481121a4ca0/special_tokens_map.json",
-   "tokenizer_class": "LlamaTokenizer",
-   "unk_token": ""
- }
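The oversized `model_max_length` is the `transformers` sentinel `int(1e30)` (`VERY_LARGE_INTEGER`), written when no real context limit was recorded; downstream users would typically pin it at load time (a minimal sketch; 2048 is LLaMA-1's usual context window, assumed rather than stated here):

```python
from transformers import LlamaTokenizer

# Override the "unlimited" sentinel with a concrete context length
# ("robin-7b" is a hypothetical local path).
tokenizer = LlamaTokenizer.from_pretrained("robin-7b", model_max_length=2048)
```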
 
robin-7b/train_results.json DELETED
@@ -1,8 +0,0 @@
- {
-     "epoch": 5.0,
-     "train_loss": 0.9734652058462079,
-     "train_runtime": 41271.4588,
-     "train_samples": 142397,
-     "train_samples_per_second": 17.251,
-     "train_steps_per_second": 1.078
- }
 
robin-7b/trainer_state.json DELETED
The diff for this file is too large to render. See raw diff