tomasmcm committed
Commit 3cc98ab · verified · 1 Parent(s): 8b9fd7b

Upload folder using huggingface_hub

Files changed (41)
  1. added_tokens.json +3 -0
  2. config.json +75 -0
  3. generation_config.json +13 -0
  4. merges.txt +0 -0
  5. onnx/decoder_model.onnx +3 -0
  6. onnx/decoder_model_bnb4.onnx +3 -0
  7. onnx/decoder_model_fp16.onnx +3 -0
  8. onnx/decoder_model_int8.onnx +3 -0
  9. onnx/decoder_model_merged.onnx +3 -0
  10. onnx/decoder_model_merged_bnb4.onnx +3 -0
  11. onnx/decoder_model_merged_fp16.onnx +3 -0
  12. onnx/decoder_model_merged_int8.onnx +3 -0
  13. onnx/decoder_model_merged_q4.onnx +3 -0
  14. onnx/decoder_model_merged_q4f16.onnx +3 -0
  15. onnx/decoder_model_merged_quantized.onnx +3 -0
  16. onnx/decoder_model_merged_uint8.onnx +3 -0
  17. onnx/decoder_model_q4.onnx +3 -0
  18. onnx/decoder_model_q4f16.onnx +3 -0
  19. onnx/decoder_model_quantized.onnx +3 -0
  20. onnx/decoder_model_uint8.onnx +3 -0
  21. onnx/decoder_with_past_model.onnx +3 -0
  22. onnx/decoder_with_past_model_bnb4.onnx +3 -0
  23. onnx/decoder_with_past_model_fp16.onnx +3 -0
  24. onnx/decoder_with_past_model_int8.onnx +3 -0
  25. onnx/decoder_with_past_model_q4.onnx +3 -0
  26. onnx/decoder_with_past_model_q4f16.onnx +3 -0
  27. onnx/decoder_with_past_model_quantized.onnx +3 -0
  28. onnx/decoder_with_past_model_uint8.onnx +3 -0
  29. onnx/encoder_model.onnx +3 -0
  30. onnx/encoder_model_bnb4.onnx +3 -0
  31. onnx/encoder_model_fp16.onnx +3 -0
  32. onnx/encoder_model_int8.onnx +3 -0
  33. onnx/encoder_model_q4.onnx +3 -0
  34. onnx/encoder_model_q4f16.onnx +3 -0
  35. onnx/encoder_model_quantized.onnx +3 -0
  36. onnx/encoder_model_uint8.onnx +3 -0
  37. quantize_config.json +18 -0
  38. special_tokens_map.json +54 -0
  39. tokenizer.json +0 -0
  40. tokenizer_config.json +69 -0
  41. vocab.json +0 -0
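
The onnx/ folder above follows the usual Optimum/Transformers.js export layout: separate encoder, decoder, and merged decoder-with-past graphs, each in fp16, int8/uint8/quantized, q4, q4f16, and bnb4 variants. A minimal sketch of loading the export through Optimum's ONNX Runtime classes is shown below; the repository id is a hypothetical placeholder, and passing `subfolder="onnx"` assumes the installed Optimum version resolves model files from that subfolder.

```python
# Minimal sketch: load the exported encoder/decoder pair with Optimum's
# ONNX Runtime integration and run generation.
# REPO_ID is a hypothetical placeholder; substitute this repository's id.
from optimum.onnxruntime import ORTModelForSeq2SeqLM
from transformers import AutoTokenizer

REPO_ID = "user/repo"  # placeholder

tokenizer = AutoTokenizer.from_pretrained(REPO_ID)
# The ONNX graphs live under onnx/, so point Optimum at that subfolder
# (assumption: this Optimum version supports the `subfolder` argument).
model = ORTModelForSeq2SeqLM.from_pretrained(REPO_ID, subfolder="onnx")

# Placeholder input; see the base model card (lmqg/bart-large-tweetqa-qa)
# for the exact prompt format.
text = "question: ..., context: ..."
inputs = tokenizer(text, return_tensors="pt")
outputs = model.generate(**inputs, max_length=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```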
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "<hl>": 50265
+ }
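
As a quick check (a sketch, with a hypothetical placeholder repository id), the single added `<hl>` highlight token resolves to id 50265 once the tokenizer is loaded:

```python
# Sketch: confirm the added "<hl>" token maps to id 50265.
# REPO_ID is a hypothetical placeholder for this repository.
from transformers import AutoTokenizer

REPO_ID = "user/repo"
tokenizer = AutoTokenizer.from_pretrained(REPO_ID)
print(tokenizer.convert_tokens_to_ids("<hl>"))  # expected: 50265
```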
config.json ADDED
@@ -0,0 +1,75 @@
+ {
+   "_attn_implementation_autoset": true,
+   "_name_or_path": "lmqg/bart-large-tweetqa-qa",
+   "activation_dropout": 0.1,
+   "activation_function": "gelu",
+   "add_bias_logits": false,
+   "add_final_layer_norm": false,
+   "add_prefix": false,
+   "architectures": [
+     "BartForConditionalGeneration"
+   ],
+   "attention_dropout": 0.1,
+   "bos_token_id": 0,
+   "classif_dropout": 0.1,
+   "classifier_dropout": 0.0,
+   "d_model": 1024,
+   "decoder_attention_heads": 16,
+   "decoder_ffn_dim": 4096,
+   "decoder_layerdrop": 0.0,
+   "decoder_layers": 12,
+   "decoder_start_token_id": 2,
+   "dropout": 0.1,
+   "early_stopping": null,
+   "encoder_attention_heads": 16,
+   "encoder_ffn_dim": 4096,
+   "encoder_layerdrop": 0.0,
+   "encoder_layers": 12,
+   "eos_token_id": 2,
+   "forced_eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2"
+   },
+   "init_std": 0.02,
+   "is_encoder_decoder": true,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2
+   },
+   "max_position_embeddings": 1024,
+   "model_type": "bart",
+   "no_repeat_ngram_size": null,
+   "normalize_before": false,
+   "num_beams": null,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "scale_embedding": false,
+   "task_specific_params": {
+     "summarization": {
+       "length_penalty": 1.0,
+       "max_length": 128,
+       "min_length": 12,
+       "num_beams": 4
+     },
+     "summarization_cnn": {
+       "length_penalty": 2.0,
+       "max_length": 142,
+       "min_length": 56,
+       "num_beams": 4
+     },
+     "summarization_xsum": {
+       "length_penalty": 1.0,
+       "max_length": 62,
+       "min_length": 11,
+       "num_beams": 6
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.49.0",
+   "use_cache": true,
+   "vocab_size": 50266
+ }
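
The config records a BART-large architecture (d_model 1024, 12 encoder + 12 decoder layers, 16 heads) fine-tuned from lmqg/bart-large-tweetqa-qa, with vocab_size 50266, i.e. BART's 50265 tokens plus the added `<hl>` token. A small sanity check, assuming a hypothetical placeholder repository id:

```python
# Sketch: cross-check config.json against added_tokens.json.
# REPO_ID is a hypothetical placeholder for this repository.
import json
from huggingface_hub import hf_hub_download

REPO_ID = "user/repo"
with open(hf_hub_download(REPO_ID, "config.json")) as f:
    config = json.load(f)
with open(hf_hub_download(REPO_ID, "added_tokens.json")) as f:
    added = json.load(f)

assert config["vocab_size"] == 50266   # 50265 base tokens + 1 added token
assert added["<hl>"] == 50265          # the new token takes the last id
```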
generation_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "decoder_start_token_id": 2,
+   "early_stopping": true,
+   "eos_token_id": 2,
+   "forced_bos_token_id": 0,
+   "forced_eos_token_id": 2,
+   "no_repeat_ngram_size": 3,
+   "num_beams": 4,
+   "pad_token_id": 1,
+   "transformers_version": "4.49.0"
+ }
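
These generation defaults (beam search with 4 beams, trigram blocking via no_repeat_ngram_size, early stopping, forced BOS/EOS ids) are picked up automatically by generate(). A sketch of inspecting them, with a hypothetical placeholder repository id:

```python
# Sketch: generation_config.json is loaded into a GenerationConfig object and
# applied as the default for generate(); fields can be overridden per call.
# REPO_ID is a hypothetical placeholder for this repository.
from transformers import GenerationConfig

REPO_ID = "user/repo"
gen_cfg = GenerationConfig.from_pretrained(REPO_ID)
print(gen_cfg.num_beams, gen_cfg.no_repeat_ngram_size, gen_cfg.early_stopping)
# expected: 4 3 True

# Per-call arguments win over these defaults, e.g.:
# model.generate(**inputs, num_beams=1, do_sample=True)
```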
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
onnx/decoder_model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a910a89d5380bd980dc41176a4297dc9968bb535aeae8bb53dd6bad0387932a
+ size 1222764074
onnx/decoder_model_bnb4.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e81f397456a91d3f89d0e0e355da422d5b573d782211057c8dd0b4ca4c0bc0ff
+ size 353786567
onnx/decoder_model_fp16.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5b9adaad77c8c038ee2afb84e4012fa714fc041ce64bd1989aa6422fe280f512
+ size 611651757
onnx/decoder_model_int8.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72849773fa27c3da8973b094031d6a7bea9b2fcb37b33d10f1153246ea999354
+ size 307996056
onnx/decoder_model_merged.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08204e3e18959663902cd2687151acfff100e0c26337d71eee562316c1ed46e0
+ size 1223617547
onnx/decoder_model_merged_bnb4.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4e3a5b185b68248158b81e7b33da6e59abd18adf7156098c70388f7dc94be5dd
+ size 354654246
onnx/decoder_model_merged_fp16.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39969bc99c6abb874707ec88ae69e0da63458e4135a5df8e24c2d3c55292d89a
+ size 612420281
onnx/decoder_model_merged_int8.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:58011d129039d617d9ce63c8a83673e3b689cf6f6a89d71a7ceb8cc7a0960926
+ size 308971183
onnx/decoder_model_merged_q4.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8c0958c88e84d9c6046dfe3508fc63bc59e94924fa75a155196bff19986a6449
+ size 370452329
onnx/decoder_model_merged_q4f16.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b898b37441535f32647170263e47fc6e483e36a4e48be8a10e25b30a624d972
+ size 249053078
onnx/decoder_model_merged_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:58011d129039d617d9ce63c8a83673e3b689cf6f6a89d71a7ceb8cc7a0960926
+ size 308971183
onnx/decoder_model_merged_uint8.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e1d977dda1f0cad32eed1cacd6ca63f08de59ae7750d865e37d6006029a55ef
+ size 308971183
onnx/decoder_model_q4.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:90032be226a7fc38d7ad3f8684aeb1f795ae783c74c66bd6f741a508ee53bf22
+ size 369585523
onnx/decoder_model_q4f16.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d581b2916b0f0542419784eb0511437ffdf6714be698d5103c59ee6fe0fea71
+ size 248271221
onnx/decoder_model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72849773fa27c3da8973b094031d6a7bea9b2fcb37b33d10f1153246ea999354
+ size 307996056
onnx/decoder_model_uint8.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3bada8bb81b17fddcc2045305a1617bc8def0b4c96e31e2ff864734b71528432
+ size 307996056
onnx/decoder_with_past_model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b8ae08cb5c3b9b98ce693af2a67e096c1d18cc11f624009cdca1ce9acc902746
+ size 1121871082
onnx/decoder_with_past_model_bnb4.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d574229f8202da4213b3d99f1e66d593b2d0f1a4a7299cb702e191331834a4fa
+ size 339397351
onnx/decoder_with_past_model_fp16.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d96dfed3a7764c2ae90b4146b164fd6355abe1bc00c4d12430785633946b643a
+ size 561149061
onnx/decoder_with_past_model_int8.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f26a6134b6e9be9123449cf3c943653782567b5fa2b05eaee0882523057966d7
+ size 282453941
onnx/decoder_with_past_model_q4.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb629fb8fba8fb11c0bbfcec9a054ea9325b992f7ed8ecf24445c5d75174dd35
+ size 353623635
onnx/decoder_with_past_model_q4f16.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5584b42f402174448bac3c7e8ad4e177372c06b4b00015a42f812975f4ec328b
+ size 233940845
onnx/decoder_with_past_model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f26a6134b6e9be9123449cf3c943653782567b5fa2b05eaee0882523057966d7
+ size 282453941
onnx/decoder_with_past_model_uint8.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:de769a7084c15aa11de36970de35df16d8b230b0756c7fe98ec887d0fdb44f68
+ size 282453941
onnx/encoder_model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c82bb984e5a681b7e590860369ab2cb5dd54f5aafc1f93d5fd4f57d9e102afcc
+ size 814958119
onnx/encoder_model_bnb4.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5b4e363c017e85d7bc1189b466bd8ba4872baafe30ee99403259640c991a886b
+ size 295924344
onnx/encoder_model_fp16.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:63fc68da642d6148f795be126e2b570f0a36c2d0e4c7b6cd79742522451d5df2
+ size 407598999
onnx/encoder_model_int8.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3b85ee01d8ed38954504b1ab1df2a2a0478b5aeaecbe83d49cb2a8592b8c45ec
+ size 205031294
onnx/encoder_model_q4.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3d575fc1c729c437016b87a59acb5c5008af121b572fe830bc7b04981d190338
+ size 305360940
onnx/encoder_model_q4f16.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af2c05ffdeb8de10c329e46d6d989ed986671da1f3b7dcba0579231ba20725d4
+ size 190554523
onnx/encoder_model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3b85ee01d8ed38954504b1ab1df2a2a0478b5aeaecbe83d49cb2a8592b8c45ec
+ size 205031294
onnx/encoder_model_uint8.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9b8b3b4ad590e11eccab55e16229e87b0586168553d42b562c1006612dc0167b
+ size 205031294
quantize_config.json ADDED
@@ -0,0 +1,18 @@
+ {
+   "modes": [
+     "fp16",
+     "q8",
+     "int8",
+     "uint8",
+     "q4",
+     "q4f16",
+     "bnb4"
+   ],
+   "per_channel": true,
+   "reduce_range": true,
+   "block_size": null,
+   "is_symmetric": true,
+   "accuracy_level": null,
+   "quant_type": 1,
+   "op_block_list": null
+ }
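
quantize_config.json records which quantization modes were produced (fp16, q8/int8/uint8, q4, q4f16, bnb4) and the settings used (per-channel, reduced range, symmetric). Any single variant can also be run directly with ONNX Runtime; below is a sketch for the int8 encoder, assuming a hypothetical placeholder repository id and the standard Optimum export input names ("input_ids", "attention_mask").

```python
# Sketch: run the int8 encoder variant directly with ONNX Runtime.
# REPO_ID is a hypothetical placeholder; input/output names are assumed
# from the standard Optimum seq2seq export.
import numpy as np
import onnxruntime as ort
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer

REPO_ID = "user/repo"
path = hf_hub_download(REPO_ID, "onnx/encoder_model_int8.onnx")
session = ort.InferenceSession(path, providers=["CPUExecutionProvider"])

tokenizer = AutoTokenizer.from_pretrained(REPO_ID)
enc = tokenizer("question: ..., context: ...", return_tensors="np")
outputs = session.run(None, {
    "input_ids": enc["input_ids"].astype(np.int64),
    "attention_mask": enc["attention_mask"].astype(np.int64),
})
print(outputs[0].shape)  # encoder hidden states: (batch, sequence_length, 1024)
```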
special_tokens_map.json ADDED
@@ -0,0 +1,54 @@
+ {
+   "additional_special_tokens": [
+     "<hl>"
+   ],
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "cls_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
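
The special tokens declared above line up with the ids in config.json (bos_token_id 0, pad_token_id 1, eos_token_id 2) and with tokenizer_config.json below. A quick check, with a hypothetical placeholder repository id:

```python
# Sketch: the special tokens resolve to the same ids that config.json declares.
# REPO_ID is a hypothetical placeholder for this repository.
from transformers import AutoTokenizer

REPO_ID = "user/repo"
tokenizer = AutoTokenizer.from_pretrained(REPO_ID)
print(tokenizer.bos_token_id, tokenizer.pad_token_id, tokenizer.eos_token_id)
# expected: 0 1 2
```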
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,69 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50264": {
+       "content": "<mask>",
+       "lstrip": true,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50265": {
+       "content": "<hl>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<hl>"
+   ],
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "errors": "replace",
+   "extra_special_tokens": {},
+   "mask_token": "<mask>",
+   "model_max_length": 1024,
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "tokenizer_class": "BartTokenizer",
+   "trim_offsets": true,
+   "unk_token": "<unk>"
+ }
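
Because `<hl>` is registered here as a special token ("special": true, and listed in additional_special_tokens), the tokenizer keeps it as a single id when encoding and can strip it again on decode. A short sketch, with a hypothetical placeholder repository id:

```python
# Sketch: "<hl>" is encoded as a single special id (50265) and removed by
# skip_special_tokens on decode. REPO_ID is a hypothetical placeholder.
from transformers import AutoTokenizer

REPO_ID = "user/repo"
tokenizer = AutoTokenizer.from_pretrained(REPO_ID)

ids = tokenizer.encode("The <hl> answer span <hl> is highlighted.")
print(ids.count(50265))                                 # expected: 2
print(tokenizer.decode(ids, skip_special_tokens=True))  # "<hl>" markers removed
```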
vocab.json ADDED
The diff for this file is too large to render. See raw diff