LilaBoualili committed
Commit cf3594d
1 Parent(s): e2832ca

initial commit
added_tokens.json ADDED
@@ -0,0 +1 @@
+ {"[e0]": 30522, "[\\e0]": 30523, "[e1]": 30524, "[\\e1]": 30525, "[e2]": 30526, "[\\e2]": 30527, "[e3]": 30528, "[\\e3]": 30529, "[e4]": 30530, "[\\e4]": 30531, "[e5]": 30532, "[\\e5]": 30533, "[e6]": 30534, "[\\e6]": 30535, "[e7]": 30536, "[\\e7]": 30537, "[e8]": 30538, "[\\e8]": 30539, "[e9]": 30540, "[\\e9]": 30541, "[e10]": 30542, "[\\e10]": 30543, "[e11]": 30544, "[\\e11]": 30545, "[e12]": 30546, "[\\e12]": 30547, "[e13]": 30548, "[\\e13]": 30549, "[e14]": 30550, "[\\e14]": 30551, "[e15]": 30552, "[\\e15]": 30553, "[e16]": 30554, "[\\e16]": 30555, "[e17]": 30556, "[\\e17]": 30557, "[e18]": 30558, "[\\e18]": 30559, "[e19]": 30560, "[\\e19]": 30561}
config.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "_num_labels": 2,
+ "architectures": [
+ "BertForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "bos_token_id": 0,
+ "eos_token_ids": 0,
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "output_past": true,
+ "pad_token_id": 0,
+ "type_vocab_size": 2,
+ "vocab_size": 30562
+ }
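This is a standard bert-base encoder (12 layers, 12 heads, hidden size 768) under a two-label BertForSequenceClassification head. vocab_size is 30562 because the 40 markers above sit on top of the 30,522 base entries, which means the checkpoint's embedding matrix must have been resized to match. A hedged sketch of that consistency check, with the local path hypothetical:

```python
from transformers import BertConfig, BertForSequenceClassification

config = BertConfig.from_pretrained("./entity-marker-bert")  # hypothetical path

# Standard bert-base geometry, as declared in config.json.
assert (config.hidden_size, config.num_hidden_layers,
        config.num_attention_heads) == (768, 12, 12)

# 30,522 base WordPiece entries + 40 entity markers = 30,562.
assert config.vocab_size == 30522 + 40

# After add_tokens(), the embedding matrix has to be resized before a
# checkpoint with this config can be trained or saved.
model = BertForSequenceClassification.from_pretrained(
    "bert-base-uncased", num_labels=2)  # matches "_num_labels": 2
model.resize_token_embeddings(config.vocab_size)
```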
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8c0619b67501e652fdb802cfd85834c6ab0d78d8c6543d0bd0a7329e7172f498
+ size 438109740
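What is checked into git here is not the 438 MB weight file itself but a Git LFS pointer: three lines giving the spec version, the SHA-256 of the real blob, and its byte size. A small sketch that verifies a downloaded pytorch_model.bin against the pointer above:

```python
import hashlib
import os

def sha256_of(path, chunk_size=1 << 20):
    """Hash the file in 1 MiB chunks so the 438 MB blob never sits fully in memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# oid and size copied from the LFS pointer above.
EXPECTED_OID = "8c0619b67501e652fdb802cfd85834c6ab0d78d8c6543d0bd0a7329e7172f498"
EXPECTED_SIZE = 438109740

path = "pytorch_model.bin"  # the resolved weight file, not the pointer
assert os.path.getsize(path) == EXPECTED_SIZE
assert sha256_of(path) == EXPECTED_OID
```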
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tf_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d8ba0d0c1c4cb7a531654aded8cdec465b531014fcd3919619ac11cf9513bfbc
+ size 438328680
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "max_len": 512}
vocab.txt ADDED
The diff for this file is too large to render.
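Given the vocab_size arithmetic above, vocab.txt is presumably the unmodified 30,522-line bert-base-uncased WordPiece list, with the 40 markers living only in added_tokens.json. An end-to-end usage sketch under the same assumptions (the meaning of the two labels is not documented in this commit, so this shows only the mechanics):

```python
import torch
from transformers import BertForSequenceClassification, BertTokenizer

repo = "./entity-marker-bert"  # hypothetical local checkout of these files
tok = BertTokenizer.from_pretrained(repo)
model = BertForSequenceClassification.from_pretrained(repo).eval()

# The added markers survive tokenization as single, unsplit tokens.
text = "[e0] paris [\\e0] is the capital of [e1] france [\\e1]"
inputs = tok(text, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 2): two labels per config.json

print(logits.softmax(dim=-1))
```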