model update
- README.md +12 -0
- analogy.bidirection.json +1 -1
- analogy.forward.json +1 -1
- analogy.reverse.json +1 -1
- config.json +1 -1
- tokenizer_config.json +1 -1
README.md
CHANGED
@@ -103,6 +103,17 @@ model-index:
     - name: Accuracy
       type: accuracy
       value: 0.644808743169399
+  - task:
+      name: Analogy Questions (NELL-ONE Analogy)
+      type: multiple-choice-qa
+    dataset:
+      name: NELL-ONE Analogy
+      args: relbert/analogy_questions
+      type: analogy-questions
+    metrics:
+    - name: Accuracy
+      type: accuracy
+      value: 0.6583333333333333
   - task:
       name: Lexical Relation Classification (BLESS)
       type: classification
@@ -188,6 +199,7 @@ This model achieves the following results on the relation understanding tasks:
     - Accuracy on Google: 0.952
     - Accuracy on ConceptNet Analogy: 0.4748322147651007
     - Accuracy on T-Rex Analogy: 0.644808743169399
+    - Accuracy on NELL-ONE Analogy: 0.6583333333333333
 - Lexical Relation Classification ([dataset](https://huggingface.co/datasets/relbert/lexical_relation_classification), [full result](https://huggingface.co/relbert/relbert-roberta-large-nce-d-semeval2012/raw/main/classification.json)):
     - Micro F1 score on BLESS: 0.9199939731806539
     - Micro F1 score on CogALexV: 0.8497652582159625
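For context on the number added above: RelBERT analogy accuracies come from ranking each question's candidate word pairs by the similarity of their relation embeddings to the query pair's embedding. Below is a minimal re-scoring sketch, assuming the `relbert` package and the `relbert/analogy_questions` dataset; the config name `nell_relational_similarity` and the `stem`/`choice`/`answer` fields are inferred from the JSON keys in this commit, not confirmed by it, and this is not the exact relbert evaluation script.

```python
# Hypothetical sketch of the analogy scoring loop (assumptions noted above).
import numpy as np
from datasets import load_dataset
from relbert import RelBERT

model = RelBERT("relbert/relbert-roberta-large-nce-d-semeval2012")
data = load_dataset("relbert/analogy_questions",
                    "nell_relational_similarity", split="test")

def cosine(a, b):
    a, b = np.asarray(a), np.asarray(b)
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))

correct = 0
for q in data:
    query = model.get_embedding(q["stem"])  # one relation vector per word pair
    scores = [cosine(query, model.get_embedding(pair)) for pair in q["choice"]]
    correct += int(int(np.argmax(scores)) == q["answer"])

print(f"accuracy: {correct / len(data):.4f}")
```

The three JSON files below store this metric for the forward, reverse, and averaged (bidirectional) pair embeddings; the card's 0.6583333333333333 matches the `nell_relational_similarity/test` value in `analogy.forward.json`.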
analogy.bidirection.json
CHANGED
@@ -1 +1 @@
-{"sat_full/test": 0.7272727272727273, "sat/test": 0.7299703264094956, "u2/test": 0.7149122807017544, "u4/test": 0.6875, "google/test": 0.962, "bats/test": 0.8354641467481935, "t_rex_relational_similarity/test": 0.644808743169399, "conceptnet_relational_similarity/test": 0.4672818791946309, "sat/validation": 0.7027027027027027, "u2/validation": 0.5833333333333334, "u4/validation": 0.5833333333333334, "google/validation": 1.0, "bats/validation": 0.8793969849246231, "semeval2012_relational_similarity/validation": 0.7341772151898734, "t_rex_relational_similarity/validation": 0.27419354838709675, "conceptnet_relational_similarity/validation": 0.38219424460431656}
+{"sat_full/test": 0.7272727272727273, "sat/test": 0.7299703264094956, "u2/test": 0.7149122807017544, "u4/test": 0.6875, "google/test": 0.962, "bats/test": 0.8354641467481935, "t_rex_relational_similarity/test": 0.644808743169399, "conceptnet_relational_similarity/test": 0.4672818791946309, "sat/validation": 0.7027027027027027, "u2/validation": 0.5833333333333334, "u4/validation": 0.5833333333333334, "google/validation": 1.0, "bats/validation": 0.8793969849246231, "semeval2012_relational_similarity/validation": 0.7341772151898734, "t_rex_relational_similarity/validation": 0.27419354838709675, "conceptnet_relational_similarity/validation": 0.38219424460431656, "nell_relational_similarity/test": 0.6916666666666667, "nell_relational_similarity/validation": 0.6075}
analogy.forward.json
CHANGED
@@ -1 +1 @@
-{"semeval2012_relational_similarity/validation": 0.7468354430379747, "sat_full/test": 0.732620320855615, "sat/test": 0.7359050445103857, "u2/test": 0.6754385964912281, "u4/test": 0.6296296296296297, "google/test": 0.952, "bats/test": 0.8093385214007782, "t_rex_relational_similarity/test": 0.644808743169399, "conceptnet_relational_similarity/test": 0.4748322147651007, "sat/validation": 0.7027027027027027, "u2/validation": 0.625, "u4/validation": 0.5625, "google/validation": 1.0, "bats/validation": 0.8542713567839196, "t_rex_relational_similarity/validation": 0.29435483870967744, "conceptnet_relational_similarity/validation": 0.37859712230215825}
+{"semeval2012_relational_similarity/validation": 0.7468354430379747, "sat_full/test": 0.732620320855615, "sat/test": 0.7359050445103857, "u2/test": 0.6754385964912281, "u4/test": 0.6296296296296297, "google/test": 0.952, "bats/test": 0.8093385214007782, "t_rex_relational_similarity/test": 0.644808743169399, "conceptnet_relational_similarity/test": 0.4748322147651007, "sat/validation": 0.7027027027027027, "u2/validation": 0.625, "u4/validation": 0.5625, "google/validation": 1.0, "bats/validation": 0.8542713567839196, "t_rex_relational_similarity/validation": 0.29435483870967744, "conceptnet_relational_similarity/validation": 0.37859712230215825, "nell_relational_similarity/test": 0.6583333333333333, "nell_relational_similarity/validation": 0.61}
analogy.reverse.json
CHANGED
@@ -1 +1 @@
-{"sat_full/test": 0.6524064171122995, "sat/test": 0.6468842729970327, "u2/test": 0.6885964912280702, "u4/test": 0.6597222222222222, "google/test": 0.944, "bats/test": 0.7976653696498055, "t_rex_relational_similarity/test": 0.5956284153005464, "conceptnet_relational_similarity/test": 0.40604026845637586, "sat/validation": 0.7027027027027027, "u2/validation": 0.7083333333333334, "u4/validation": 0.625, "google/validation": 0.98, "bats/validation": 0.8592964824120602, "semeval2012_relational_similarity/validation": 0.6708860759493671, "t_rex_relational_similarity/validation": 0.24596774193548387, "conceptnet_relational_similarity/validation": 0.3237410071942446}
+{"sat_full/test": 0.6524064171122995, "sat/test": 0.6468842729970327, "u2/test": 0.6885964912280702, "u4/test": 0.6597222222222222, "google/test": 0.944, "bats/test": 0.7976653696498055, "t_rex_relational_similarity/test": 0.5956284153005464, "conceptnet_relational_similarity/test": 0.40604026845637586, "sat/validation": 0.7027027027027027, "u2/validation": 0.7083333333333334, "u4/validation": 0.625, "google/validation": 0.98, "bats/validation": 0.8592964824120602, "semeval2012_relational_similarity/validation": 0.6708860759493671, "t_rex_relational_similarity/validation": 0.24596774193548387, "conceptnet_relational_similarity/validation": 0.3237410071942446, "nell_relational_similarity/test": 0.7016666666666667, "nell_relational_similarity/validation": 0.5425}
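Each scoring direction gains the same two keys, `nell_relational_similarity/test` and `nell_relational_similarity/validation`. A quick stdlib-only sketch for pulling the new numbers out of a local clone (file paths assume the repo root):

```python
import json

# Compare the NELL accuracy across the three scoring directions in this commit.
for name in ("analogy.forward.json", "analogy.reverse.json", "analogy.bidirection.json"):
    with open(name) as f:
        scores = json.load(f)
    print(f"{name}: test={scores['nell_relational_similarity/test']:.4f} "
          f"validation={scores['nell_relational_similarity/validation']:.4f}")
```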
config.json
CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "
+  "_name_or_path": "roberta-large",
   "architectures": [
     "RobertaModel"
   ],
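The config change only restores the `_name_or_path` bookkeeping field to the public base checkpoint; it does not affect loading the published weights. Standard transformers usage (not taken from this repo's docs):

```python
from transformers import AutoModel

# Loads the fine-tuned weights from the hub; config.json's "architectures"
# entry ("RobertaModel") selects the model class.
model = AutoModel.from_pretrained("relbert/relbert-roberta-large-nce-d-semeval2012")
print(type(model).__name__)      # RobertaModel
print(model.config.hidden_size)  # 1024 for a roberta-large-sized encoder
```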
tokenizer_config.json
CHANGED
@@ -6,7 +6,7 @@
   "errors": "replace",
   "mask_token": "<mask>",
   "model_max_length": 512,
-  "name_or_path": "
+  "name_or_path": "roberta-large",
   "pad_token": "<pad>",
   "sep_token": "</s>",
   "special_tokens_map_file": null,
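Likewise, the tokenizer change only fixes the recorded `name_or_path`; the vocabulary and special tokens are untouched, which can be spot-checked after loading (standard transformers usage):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("relbert/relbert-roberta-large-nce-d-semeval2012")
print(tok.mask_token)        # <mask>, as in tokenizer_config.json
print(tok.model_max_length)  # 512
```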