Nehc committed
Commit 4738fc9 · 1 Parent(s): 511650d
Files changed (5)
  1. config.json +8 -7
  2. pytorch_model.bin +2 -2
  3. tokenizer.json +0 -0
  4. tokenizer_config.json +1 -1
  5. vocab.txt +0 -0
config.json CHANGED
@@ -1,14 +1,15 @@
 {
-  "_name_or_path": "bert-base-multilingual-uncased",
+  "_name_or_path": "sberbank-ai/sbert_large_nlu_ru",
   "architectures": [
     "BertForSequenceClassification"
   ],
   "attention_probs_dropout_prob": 0.1,
   "classifier_dropout": null,
   "directionality": "bidi",
+  "gradient_checkpointing": false,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
-  "hidden_size": 768,
+  "hidden_size": 1024,
   "id2label": {
     "0": "LABEL_0",
     "1": "LABEL_1",
@@ -39,7 +40,7 @@
     "26": "LABEL_26"
   },
   "initializer_range": 0.02,
-  "intermediate_size": 3072,
+  "intermediate_size": 4096,
   "label2id": {
     "LABEL_0": 0,
     "LABEL_1": 1,
@@ -72,8 +73,8 @@
   "layer_norm_eps": 1e-12,
   "max_position_embeddings": 512,
   "model_type": "bert",
-  "num_attention_heads": 12,
-  "num_hidden_layers": 12,
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
   "pad_token_id": 0,
   "pooler_fc_size": 768,
   "pooler_num_attention_heads": 12,
@@ -83,8 +84,8 @@
   "position_embedding_type": "absolute",
   "problem_type": "single_label_classification",
   "torch_dtype": "float32",
-  "transformers_version": "4.16.2",
+  "transformers_version": "4.19.2",
   "type_vocab_size": 2,
   "use_cache": true,
-  "vocab_size": 105879
+  "vocab_size": 120138
 }
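The new values are the BERT-large geometry of sberbank-ai/sbert_large_nlu_ru (hidden size 1024, 24 layers, 16 heads, intermediate size 4096) with its 120138-token vocabulary, replacing the BERT-base geometry of bert-base-multilingual-uncased; the 27-label classification head (LABEL_0 .. LABEL_26) is unchanged. A minimal Python sketch of loading the updated checkpoint and sanity-checking these fields — REPO_ID is a hypothetical placeholder for this repository's Hub id, not a name taken from the commit:

    from transformers import AutoConfig, AutoModelForSequenceClassification

    REPO_ID = "Nehc/MODEL"  # hypothetical placeholder, not from the commit

    # Config now reflects the sbert_large_nlu_ru backbone.
    config = AutoConfig.from_pretrained(REPO_ID)
    assert config.hidden_size == 1024         # was 768
    assert config.num_hidden_layers == 24     # was 12
    assert config.num_attention_heads == 16   # was 12
    assert config.vocab_size == 120138        # was 105879
    assert len(config.id2label) == 27         # classification head unchanged

    # Weights come from the updated pytorch_model.bin below.
    model = AutoModelForSequenceClassification.from_pretrained(REPO_ID)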
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b8303f111984af1847fbfdbe5e392de62a7bc9c1892600c4339e79f5d8a9b279
-size 669592813
+oid sha256:44e214d9a8cf71a9aa70908de8bc8f17ff73c96279785dd5db042c9c34e645a1
+size 1707881325
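The pointer swap replaces the ~670 MB base-model checkpoint with a ~1.7 GB one, consistent with float32 weights for the large architecture above. A sketch, assuming a locally downloaded pytorch_model.bin, of verifying the file against the oid and size recorded in the LFS pointer:

    import hashlib
    import os

    def verify_lfs_pointer(path: str, expected_oid: str, expected_size: int) -> bool:
        """Check a downloaded file against the oid/size of a git-lfs pointer."""
        if os.path.getsize(path) != expected_size:
            return False
        h = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
                h.update(chunk)
        return h.hexdigest() == expected_oid

    # Values from the new pointer in this commit:
    ok = verify_lfs_pointer(
        "pytorch_model.bin",
        "44e214d9a8cf71a9aa70908de8bc8f17ff73c96279785dd5db042c9c34e645a1",
        1707881325,
    )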
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1 +1 @@
-{"do_lower_case": true, "do_basic_tokenize": true, "never_split": null, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "tokenizer_file": "E:\\HF_HOME\\transformers\\857db185d48b92f3e6141ef5092d8d5dbebab7eef1bacc6c9eaf85cf23807641.73ad1f9fd9f94089672128003fb4a687b64b73b2bfb8d08766bbc71feec8cd96", "name_or_path": "bert-base-multilingual-uncased", "tokenizer_class": "BertTokenizer"}
+{"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "special_tokens_map_file": null, "name_or_path": "sberbank-ai/sbert_large_nlu_ru", "do_basic_tokenize": true, "never_split": null, "tokenizer_class": "BertTokenizer"}
vocab.txt CHANGED
The diff for this file is too large to render. See raw diff