chgrdj committed
Commit c924629 · verified · Parent: a9b456a

Upload 5 files

Files changed (5):
  1. .gitattributes         +1 -0
  2. config.json            +10 -9
  3. model.safetensors      +2 -2
  4. tokenizer.json         +0 -0
  5. tokenizer_config.json  +6 -9
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
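With this rule in place, Git stores only a small LFS pointer for tokenizer.json instead of the full file; the pointer format is the same version/oid/size stanza visible in the model.safetensors hunk below. A minimal detection sketch in Python, assuming a local checkout where the smudge filter has not run:

# Heuristic: a Git LFS pointer begins with the LFS spec line,
# followed by "oid sha256:<hash>" and "size <bytes>" lines.
def is_lfs_pointer(path: str) -> bool:
    try:
        with open(path, "rb") as f:
            head = f.read(100)
    except OSError:
        return False
    return head.startswith(b"version https://git-lfs.github.com/spec/v1")

print(is_lfs_pointer("tokenizer.json"))  # True only for an unsmudged pointer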
config.json CHANGED
@@ -1,7 +1,7 @@
 {
-  "_name_or_path": "distilbert/distilroberta-base",
+  "_name_or_path": "FacebookAI/xlm-roberta-large",
   "architectures": [
-    "RobertaForSequenceClassification"
+    "XLMRobertaForSequenceClassification"
   ],
   "attention_probs_dropout_prob": 0.1,
   "bos_token_id": 0,
@@ -9,25 +9,26 @@
   "eos_token_id": 2,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
-  "hidden_size": 768,
+  "hidden_size": 1024,
   "id2label": {
     "0": "LABEL_0"
   },
   "initializer_range": 0.02,
-  "intermediate_size": 3072,
+  "intermediate_size": 4096,
   "label2id": {
     "LABEL_0": 0
   },
   "layer_norm_eps": 1e-05,
   "max_position_embeddings": 514,
-  "model_type": "roberta",
-  "num_attention_heads": 12,
-  "num_hidden_layers": 6,
+  "model_type": "xlm-roberta",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "output_past": true,
   "pad_token_id": 1,
   "position_embedding_type": "absolute",
   "torch_dtype": "float32",
-  "transformers_version": "4.46.2",
+  "transformers_version": "4.46.0",
   "type_vocab_size": 1,
   "use_cache": true,
-  "vocab_size": 50265
+  "vocab_size": 250002
 }
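In sum, the config moves the checkpoint from the 6-layer distilroberta-base geometry (hidden size 768, 12 heads, vocab 50265) to the 24-layer xlm-roberta-large geometry (hidden size 1024, 16 heads, vocab 250002), keeping a single-label classification head. A minimal sketch cross-checking the new values against the published base config; since this commit does not name the fine-tuned repo, the _name_or_path id is used:

from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("FacebookAI/xlm-roberta-large")
# These are exactly the values introduced by this diff.
assert cfg.model_type == "xlm-roberta"
assert cfg.hidden_size == 1024
assert cfg.num_hidden_layers == 24
assert cfg.num_attention_heads == 16
assert cfg.intermediate_size == 4096
assert cfg.max_position_embeddings == 514
assert cfg.vocab_size == 250002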
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e5f96bb69c02c71b5bcc45d66cd52025c3de632380279cb8904ea5beb9deed97
-size 328489204
+oid sha256:00dbd848edda76ffd5000637ecfbea667633673e39b744cd150b3c79989138ad
+size 2239614572
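The new size lines up with the config above: roughly 560 M float32 parameters at 4 bytes each is about 2.24 GB. A back-of-the-envelope sketch, assuming untied embeddings and the standard XLMRobertaForSequenceClassification head (the ~49 KB remainder is the safetensors header):

V, P, H, I, L = 250_002, 514, 1_024, 4_096, 24   # vocab, positions, hidden, FFN, layers

embeddings = V * H + P * H + 1 * H + 2 * H       # word + position + token-type + LayerNorm
per_layer = (
    4 * (H * H + H)     # Q, K, V and attention output projections
    + (H * I + I)       # FFN up-projection
    + (I * H + H)       # FFN down-projection
    + 2 * 2 * H         # attention-output and FFN-output LayerNorms
)
head = (H * H + H) + (H * 1 + 1)                 # classifier dense + 1-label out_proj

params = embeddings + L * per_layer + head
print(params, params * 4)  # ~559.9M params, 2_239_565_828 bytes vs. size 2239614572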
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,10 +1,9 @@
 {
-  "add_prefix_space": false,
   "added_tokens_decoder": {
     "0": {
       "content": "<s>",
       "lstrip": false,
-      "normalized": true,
+      "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
@@ -12,7 +11,7 @@
     "1": {
       "content": "<pad>",
       "lstrip": false,
-      "normalized": true,
+      "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
@@ -20,7 +19,7 @@
     "2": {
       "content": "</s>",
       "lstrip": false,
-      "normalized": true,
+      "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
@@ -28,12 +27,12 @@
     "3": {
       "content": "<unk>",
       "lstrip": false,
-      "normalized": true,
+      "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "50264": {
+    "250001": {
       "content": "<mask>",
       "lstrip": true,
       "normalized": false,
@@ -46,12 +45,10 @@
   "clean_up_tokenization_spaces": false,
   "cls_token": "<s>",
   "eos_token": "</s>",
-  "errors": "replace",
   "mask_token": "<mask>",
   "model_max_length": 512,
   "pad_token": "<pad>",
   "sep_token": "</s>",
-  "tokenizer_class": "RobertaTokenizer",
-  "trim_offsets": true,
+  "tokenizer_class": "XLMRobertaTokenizer",
   "unk_token": "<unk>"
 }
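The tokenizer change mirrors the model swap: the byte-level BPE RobertaTokenizer (with its add_prefix_space, errors, and trim_offsets options) gives way to the SentencePiece-based XLMRobertaTokenizer, and <mask> moves from id 50264 to 250001. A quick sanity check, again against the base tokenizer since the fine-tuned repo is not named in this commit:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("FacebookAI/xlm-roberta-large")
assert tok.mask_token == "<mask>"
assert tok.mask_token_id == 250_001   # matches the "250001" added_tokens_decoder entry
assert tok.model_max_length == 512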