Commit 4089be7 by ali2066 (1 parent: 02701e6)

End of training
config.json CHANGED
```diff
@@ -1,35 +1,29 @@
 {
-  "_name_or_path": "distilbert-base-uncased-finetuned-sst-2-english",
-  "activation": "gelu",
+  "_name_or_path": "cardiffnlp/twitter-roberta-base",
   "architectures": [
-    "DistilBertForSequenceClassification"
+    "RobertaForSequenceClassification"
   ],
-  "attention_dropout": 0.1,
-  "dim": 768,
-  "dropout": 0.1,
-  "finetuning_task": "sst-2",
-  "hidden_dim": 3072,
-  "id2label": {
-    "0": "NEGATIVE",
-    "1": "POSITIVE"
-  },
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
   "initializer_range": 0.02,
-  "label2id": {
-    "NEGATIVE": 0,
-    "POSITIVE": 1
-  },
-  "max_position_embeddings": 512,
-  "model_type": "distilbert",
-  "n_heads": 12,
-  "n_layers": 6,
-  "output_past": true,
-  "pad_token_id": 0,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "roberta",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
   "problem_type": "single_label_classification",
-  "qa_dropout": 0.1,
-  "seq_classif_dropout": 0.2,
-  "sinusoidal_pos_embds": false,
-  "tie_weights_": true,
   "torch_dtype": "float32",
   "transformers_version": "4.15.0",
-  "vocab_size": 30522
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 50265
 }
```
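The base checkpoint changes from distilbert-base-uncased-finetuned-sst-2-english to cardiffnlp/twitter-roberta-base: 6 layers become 12, the vocabulary grows from 30522 (WordPiece) to 50265 (byte-level BPE), and the SST-2 id2label/label2id maps drop out. A config like the new one is what transformers writes when the fine-tuning script starts from the RoBERTa checkpoint; a minimal sketch (num_labels=2 is an assumption, since the committed config no longer records the label maps):

```python
from transformers import AutoConfig, AutoModelForSequenceClassification

# Base checkpoint named in the new config; num_labels=2 is an assumption,
# the committed config no longer carries id2label/label2id.
config = AutoConfig.from_pretrained(
    "cardiffnlp/twitter-roberta-base",
    num_labels=2,
    problem_type="single_label_classification",
)
model = AutoModelForSequenceClassification.from_pretrained(
    "cardiffnlp/twitter-roberta-base", config=config
)

# save_pretrained() writes a config.json with the fields shown in the new
# version above (model_type "roberta", max_position_embeddings 514,
# vocab_size 50265).
model.save_pretrained("finetuned-checkpoint")
```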
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5a21a497ec4095b2d17702cc59b474ac9937b91cfd42dc1eaf7f520f95682281
-size 267860081
+oid sha256:02d4e429e1b2c0d3e8530560724e9442d30d2b87848e8ab5f4ed859204d57ce2
+size 498674093
```
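Both versions are Git LFS pointers, so the repository tracks only a SHA-256 and a byte size; the jump from ~268 MB to ~499 MB matches the switch from 6-layer DistilBERT to 12-layer RoBERTa-base weights. A downloaded checkpoint can be checked against the pointer with a short hash (a sketch; the local filename is assumed):

```python
import hashlib

# Assumed local path to the downloaded weights file.
path = "pytorch_model.bin"

h = hashlib.sha256()
with open(path, "rb") as f:
    # Hash in 1 MiB chunks so the ~500 MB file never sits in memory at once.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

# Should match the oid in the new pointer:
# 02d4e429e1b2c0d3e8530560724e9442d30d2b87848e8ab5f4ed859204d57ce2
print(h.hexdigest())
```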
runs/May02_15-19-35_bb8-lix.polytechnique.fr/events.out.tfevents.1651498074.bb8-lix.polytechnique.fr CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:295080be48c5f38a71bb19967c1570adcae3ff4afc2ca955a96fb53a948bbeb1
-size 1456
+oid sha256:28decbbb764443855d9eda893090e6d1a47536a64b5d76dd2d9b129521ca5579
+size 1928
```
runs/May02_15-43-55_bb8-lix.polytechnique.fr/1651499040.2684743/events.out.tfevents.1651499040.bb8-lix.polytechnique.fr ADDED
```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a2b749d5512d0fa0fc3a37a8f568619872d95c9939c3bce460161e3c153e9c5b
+size 4717
```
runs/May02_15-43-55_bb8-lix.polytechnique.fr/events.out.tfevents.1651499040.bb8-lix.polytechnique.fr ADDED
```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e2e52103e14907f55fce8d091bbcfaa3e2e824a54294b8f9df6635385e19ea32
+size 6858
```
runs/May02_15-43-55_bb8-lix.polytechnique.fr/events.out.tfevents.1651499975.bb8-lix.polytechnique.fr ADDED
```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6350fa6087018454c8ffd8515ef3ea02f5d52510f23473417b99cfa51332381
+size 1456
```
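The events.out.tfevents.* files are TensorBoard event logs that the Trainer writes under runs/; the two new May02_15-43-55 directories and the updated May02_15-19-35 log are consistent with training being re-run on the new base model. They can be inspected with the tensorboard package (a sketch; the scalar tag name is an assumption about what this run logged, not taken from the commit):

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# One of the run directories added in this commit.
acc = EventAccumulator("runs/May02_15-43-55_bb8-lix.polytechnique.fr")
acc.Reload()

# List what was logged, then read one scalar series.
print(acc.Tags()["scalars"])
for event in acc.Scalars("train/loss"):  # "train/loss" is an assumed tag
    print(event.step, event.value)
```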
special_tokens_map.json CHANGED
```diff
@@ -1 +1 @@
-{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
+{"bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}
```
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
```diff
@@ -1 +1 @@
-{"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "distilbert-base-uncased-finetuned-sst-2-english", "do_basic_tokenize": true, "never_split": null, "tokenizer_class": "DistilBertTokenizer"}
+{"unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "errors": "replace", "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "trim_offsets": true, "special_tokens_map_file": null, "name_or_path": "cardiffnlp/twitter-roberta-base", "tokenizer_class": "RobertaTokenizer"}
```
training_args.bin CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6a2c649b77dbecbf3d2e1ee52d3c9cc87a41f30dfb66d65deb69c38799db6aee
+oid sha256:ece8b61e5df53551a8baba59e78c418ae7e222a2a5093bccad49db5ddaa5533a
 size 3055
```
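training_args.bin is the pickled TrainingArguments object the Trainer saves next to the model; the size stays 3055 bytes while the oid changes because a fresh object was serialized for this run. It can be inspected by unpickling (a sketch; loading generally needs a transformers version compatible with the 4.15.0 that wrote it, and the attributes shown are standard TrainingArguments fields):

```python
import torch

# training_args.bin is a full pickled Python object, not a tensor state
# dict, so it needs plain torch.load (weights_only=False on newer PyTorch).
args = torch.load("training_args.bin")
print(type(args).__name__)  # TrainingArguments
print(args.learning_rate, args.num_train_epochs, args.output_dir)
```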
vocab.json ADDED
The diff for this file is too large to render. See raw diff