SaiedAlshahrani committed
Commit a86b3c2 · 1 parent: 6e12c9c

Upload files manually

Changed files:
- README.md +2 -6
- adapter_model.bin +1 -1
- checkpoint-300/adapter_model.bin +1 -1
- checkpoint-300/optimizer.pt +1 -1
- checkpoint-300/tokenizer.json +2 -2
- checkpoint-300/trainer_state.json +3 -3
- checkpoint-300/training_args.bin +1 -1
- checkpoint-400/adapter_model.bin +1 -1
- checkpoint-400/optimizer.pt +1 -1
- checkpoint-400/tokenizer.json +2 -2
- checkpoint-400/trainer_state.json +4 -4
- checkpoint-400/training_args.bin +1 -1
- checkpoint-500/adapter_model.bin +1 -1
- checkpoint-500/optimizer.pt +1 -1
- checkpoint-500/tokenizer.json +2 -2
- checkpoint-500/trainer_state.json +5 -5
- checkpoint-500/training_args.bin +1 -1
- runs/Aug29_06-19-45_n7u3rzpi4b/events.out.tfevents.1693290027.n7u3rzpi4b.193.0 +3 -0
- training_args.bin +1 -1
README.md CHANGED
@@ -5,10 +5,6 @@ tags:
 model-index:
 - name: bloom_7B_8bit_qlora
   results: []
-datasets:
-- mlqa
-library_name: adapter-transformers
-pipeline_tag: text-generation
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -16,7 +12,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # bloom_7B_8bit_qlora
 
-This model is a fine-tuned version of [
+This model is a fine-tuned version of [asas-ai/bloom_7B_8bit](https://huggingface.co/asas-ai/bloom_7B_8bit) on an unknown dataset.
 
 ## Model description
 
@@ -55,4 +51,4 @@ The following hyperparameters were used during training:
 - Transformers 4.32.1
 - Pytorch 2.0.1+cu117
 - Datasets 2.4.0
-- Tokenizers 0.12.1
+- Tokenizers 0.12.1
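The card above describes a QLoRA adapter (the adapter_model.bin updated in this commit) trained on top of asas-ai/bloom_7B_8bit. As a rough, unverified sketch of how such an adapter is typically attached for text generation with PEFT; the adapter repo ID, device settings, and prompt below are assumptions, not values taken from the card:

```python
# Hedged sketch only: the adapter repo ID, device settings, and prompt are
# illustrative assumptions, not values stated in this repository's card.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "asas-ai/bloom_7B_8bit"                    # base model named in the card
adapter_id = "path/or/repo-of/bloom_7B_8bit_qlora"   # placeholder for this adapter repo

tokenizer = AutoTokenizer.from_pretrained(base_id)
# Depending on how the base checkpoint was exported, you may also need
# load_in_8bit=True or a BitsAndBytesConfig here.
base = AutoModelForCausalLM.from_pretrained(base_id, device_map="auto")
model = PeftModel.from_pretrained(base, adapter_id)  # applies adapter_model.bin

inputs = tokenizer("Example prompt", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=50)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```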
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:0dcae4f42d6bbc43e90909ae599e305d836cd5019ce03484978f53fc6f7abfe0
 size 503404621
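The binary files in this commit are tracked with Git LFS, so each diff above and below only swaps the three-line pointer file (version, oid sha256, size); the payload itself lives in LFS storage. A minimal sketch, assuming the file has already been downloaded locally, of checking a download against the oid recorded in its pointer:

```python
# Minimal sketch: compare a downloaded file's SHA-256 with the oid from its
# LFS pointer. The local path is an assumption; the hash is the new oid of
# adapter_model.bin in this commit.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "0dcae4f42d6bbc43e90909ae599e305d836cd5019ce03484978f53fc6f7abfe0"
print(sha256_of("adapter_model.bin") == expected)  # True if the download is intact
```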
checkpoint-300/adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:d25cdbe167442b666295bf5a1951cc61d7ce36dfcb6d0356db518164e6cd9b61
 size 503404621
checkpoint-300/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:ada9e116d03032ea292bb2aba0d7b3b3001074d8def0531b0b70ac094c38b039
 size 1006781317
checkpoint-300/tokenizer.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:6c1abd73425d69d27b6933af4fa2a004568434169689b37d1314c6ca3a1d2a7f
+size 14500541
checkpoint-300/trainer_state.json CHANGED
@@ -11,19 +11,19 @@
     {
       "epoch": 3.08,
       "learning_rate": 0.0002,
-      "loss": 2.
+      "loss": 2.7271,
       "step": 100
     },
     {
       "epoch": 6.15,
       "learning_rate": 0.0002,
-      "loss": 1.
+      "loss": 1.5738,
       "step": 200
     },
     {
       "epoch": 9.23,
       "learning_rate": 0.0002,
-      "loss": 0.
+      "loss": 0.8103,
       "step": 300
     }
   ],
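The trainer_state.json updates in this commit fill in the logged losses (2.7271 at step 100, 1.5738 at step 200, 0.8103 at step 300, with further values in the later checkpoints). A small sketch, assuming a local copy of the checkpoint directory, of reading that log history back out of the file:

```python
# Sketch: print the loss curve recorded in a checkpoint's trainer_state.json.
# The local path is an assumption; point it at whichever checkpoint you pulled.
import json

with open("checkpoint-300/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:  # skip eval/summary entries that have no "loss" key
        print(f"step {entry['step']:>4}  epoch {entry['epoch']:>5}  "
              f"loss {entry['loss']:.4f}  lr {entry['learning_rate']}")
```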
checkpoint-300/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:73225d6f31fc3bfbd0a084fa030ff0a2a05d553ce3bb715cd5d3712013941b69
 size 4091
checkpoint-400/adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:37469a33c3275e6de2e908cf87e4bbdb2bb8f599c3d4a893fbe8171436a63c4d
 size 503404621
checkpoint-400/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:12d79aba242e2b481617857f9f6fe106ce729bd9891c4a778fbab27e718b8003
 size 1006781317
checkpoint-400/tokenizer.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:6c1abd73425d69d27b6933af4fa2a004568434169689b37d1314c6ca3a1d2a7f
+size 14500541
checkpoint-400/trainer_state.json CHANGED
@@ -11,25 +11,25 @@
     {
       "epoch": 3.08,
       "learning_rate": 0.0002,
-      "loss": 2.
+      "loss": 2.7271,
       "step": 100
     },
    {
       "epoch": 6.15,
       "learning_rate": 0.0002,
-      "loss": 1.
+      "loss": 1.5738,
       "step": 200
     },
     {
       "epoch": 9.23,
       "learning_rate": 0.0002,
-      "loss": 0.
+      "loss": 0.8103,
       "step": 300
     },
     {
       "epoch": 12.31,
       "learning_rate": 0.0002,
-      "loss": 0.
+      "loss": 0.4226,
       "step": 400
     }
   ],
checkpoint-400/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:73225d6f31fc3bfbd0a084fa030ff0a2a05d553ce3bb715cd5d3712013941b69
 size 4091
checkpoint-500/adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:0dcae4f42d6bbc43e90909ae599e305d836cd5019ce03484978f53fc6f7abfe0
 size 503404621
checkpoint-500/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:6c2151615d0060a8e98a6f0199051f366772d83c308c4f3a681da1f5b5c696af
 size 1006781317
checkpoint-500/tokenizer.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:6c1abd73425d69d27b6933af4fa2a004568434169689b37d1314c6ca3a1d2a7f
+size 14500541
checkpoint-500/trainer_state.json CHANGED
@@ -11,31 +11,31 @@
     {
       "epoch": 3.08,
       "learning_rate": 0.0002,
-      "loss": 2.
+      "loss": 2.7271,
       "step": 100
     },
     {
       "epoch": 6.15,
       "learning_rate": 0.0002,
-      "loss": 1.
+      "loss": 1.5738,
       "step": 200
     },
     {
       "epoch": 9.23,
       "learning_rate": 0.0002,
-      "loss": 0.
+      "loss": 0.8103,
       "step": 300
     },
     {
       "epoch": 12.31,
       "learning_rate": 0.0002,
-      "loss": 0.
+      "loss": 0.4226,
       "step": 400
     },
     {
       "epoch": 15.38,
       "learning_rate": 0.0002,
-      "loss": 0.
+      "loss": 0.2093,
       "step": 500
     }
   ],
checkpoint-500/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:73225d6f31fc3bfbd0a084fa030ff0a2a05d553ce3bb715cd5d3712013941b69
 size 4091
runs/Aug29_06-19-45_n7u3rzpi4b/events.out.tfevents.1693290027.n7u3rzpi4b.193.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0ca3548b6402733c27788c9c7b1846810efd4b8af36fde78fc8005d279ba0152
+size 5773
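The newly added events.out.tfevents file is a TensorBoard event log for this training run, stored through Git LFS like the other binaries. A hedged sketch of inspecting it with TensorBoard's event accumulator; the scalar tag name used below is an assumption, so list the available tags first:

```python
# Sketch: read scalar summaries from the uploaded TensorBoard event file.
# The run directory mirrors this commit; "train/loss" is an assumed tag name.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("runs/Aug29_06-19-45_n7u3rzpi4b")
ea.Reload()
print(ea.Tags()["scalars"])             # see which scalar tags were actually logged

for event in ea.Scalars("train/loss"):  # replace with a tag from the list above
    print(event.step, event.value)
```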
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:73225d6f31fc3bfbd0a084fa030ff0a2a05d553ce3bb715cd5d3712013941b69
 size 4091