id (string, length 6) | query_type (string, 14 classes) | question (dict) | paraphrased_question (sequence) | query (dict) | template_id (string, 8 classes) | query_shape (string, 7 classes) | query_class (string, 5 classes) | auto_generated (bool) | number_of_patterns (int32, 1-14) |
---|---|---|---|---|---|---|---|---|---|
AQ1346 | non-factoid | {
"string": "What is the top benchmark result (metric and value) over the dataset Kuzushiji-MNIST?"
} | [] | {
"sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Kuzushiji-MNIST\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl"
} | T04 | Tree | WHICH-WHAT | true | 13 |
AQ1899 | Factoid | {
"string": "Provide a list of benchmarked datasets related to the Atari Games research area?"
} | [] | {
"sparql": "SELECT DISTINCT ?dataset ?dataset_lbl\nWHERE {\n ?problem a orkgc:Problem;\n rdfs:label ?problem_lbl. \n FILTER (str(?problem_lbl) = \"Atari Games\")\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:P32 ?problem.\n}"
} | T06 | Tree | WHICH-WHAT | true | 5 |
AQ1890 | Factoid | {
"string": "Provide a list of benchmarked datasets related to the Music Modeling research area?"
} | [] | {
"sparql": "SELECT DISTINCT ?dataset ?dataset_lbl\nWHERE {\n ?problem a orkgc:Problem;\n rdfs:label ?problem_lbl. \n FILTER (str(?problem_lbl) = \"Music Modeling\")\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:P32 ?problem.\n}"
} | T06 | Tree | WHICH-WHAT | true | 5 |
AQ1646 | Factoid | {
"string": "Which model has achieved the highest DISTANCE_TO_GOAL score on the Habitat 2020 Object Nav test-std benchmark dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"DISTANCE_TO_GOAL\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Habitat 2020 Object Nav test-std\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}"
} | T05 | Tree | WHICH-WHAT | true | 12 |
AQ0886 | Factoid | {
"string": "Can you list the metrics used to evaluate models on the Cart Pole (OpenAI Gym) dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Cart Pole (OpenAI Gym)\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}"
} | T03 | Tree | WHICH-WHAT | true | 6 |
AQ1339 | non-factoid | {
"string": "What is the top benchmark score and its metric on the SciCite dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"SciCite\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl"
} | T04 | Tree | WHICH-WHAT | true | 13 |
AQ2195 | Factoid | {
"string": "Where can I find code references in papers that have used the SRU++ model for benchmarking purposes?"
} | [] | {
"sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"SRU++\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}"
} | T07 | Tree | WHICH-WHAT | true | 4 |
AQ0207 | Factoid | {
"string": "Could you provide a list of models that have been tested on the Gibson PointGoal Navigation benchmark dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Gibson PointGoal Navigation\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}"
} | T01 | Tree | WHICH-WHAT | true | 6 |
AQ2259 | Factoid | {
"string": "Where can I find code references in papers that have used the Bi+ model for benchmarking purposes?"
} | [] | {
"sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"Bi+\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}"
} | T07 | Tree | WHICH-WHAT | true | 4 |
AQ0038 | Factoid | {
"string": "What models are being evaluated on the Car speed in Liuliqiao District, Beijing dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Car speed in Liuliqiao District, Beijing\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}"
} | T01 | Tree | WHICH-WHAT | true | 6 |
HQ0035 | Factoid | {
"string": "Which scenario factsheets from the Open Energy Platform are used in studies with public funding?"
} | [
" Which Open Energy Platform factsheets are used in the publicly funded studies?",
" What is the list of those Open Energy Platform scenario factsheets which are exploited in the studies with public funding ?"
] | {
"sparql": "SELECT ?paper\nWHERE {\n orkgr:R113171 orkgp:compareContribution ?cont.\n ?paper orkgp:P31 ?cont.\n ?cont orkgp:P37586 ?hasFacts.\n ?hasFacts orkgp:P37675 ?study.\n ?study orkgp:P37663 ?sourceOfFunding.\n FILTER(REGEX(?sourceOfFunding, \"public\"))\n}"
} | null | tree | WHICH-WHAT | false | 5 |
AQ1612 | Factoid | {
"string": "What is the name of the top performing model in terms of Accuracy (%) score when benchmarked on the Oxford 102 Flowers dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Accuracy (%)\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Oxford 102 Flowers\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}"
} | T05 | Tree | WHICH-WHAT | true | 12 |
AQ0139 | Factoid | {
"string": "Can you list the models that have been evaluated on the Supervised: dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Supervised:\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}"
} | T01 | Tree | WHICH-WHAT | true | 6 |
AQ1122 | non-factoid | {
"string": "What is the top benchmark result (metric and value) over the dataset Fashion-MNIST?"
} | [] | {
"sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Fashion-MNIST\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl"
} | T04 | Tree | WHICH-WHAT | true | 13 |
AQ1511 | Factoid | {
"string": "Indicate the model that performed best in terms of F1 metric on the CommitmentBank benchmark dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"F1\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"CommitmentBank\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}"
} | T05 | Tree | WHICH-WHAT | true | 12 |
AQ2370 | Factoid | {
"string": "List the code links in papers that use the LeViT-192 model in any benchmark?"
} | [] | {
"sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"LeViT-192\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}"
} | T07 | Tree | WHICH-WHAT | true | 4 |
AQ0180 | Factoid | {
"string": "Could you provide a list of models that have been tested on the Yelp Binary classification benchmark dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Yelp Binary classification\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}"
} | T01 | Tree | WHICH-WHAT | true | 6 |
AQ1056 | non-factoid | {
"string": "What is the highest benchmark result achieved on the SciGEN dataset, including the metric and its value?"
} | [] | {
"sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"SciGEN\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl"
} | T04 | Tree | WHICH-WHAT | true | 13 |
AQ1055 | non-factoid | {
"string": "What is the top benchmark score and its metric on the SciTLDR dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"SciTLDR\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl"
} | T04 | Tree | WHICH-WHAT | true | 13 |
AQ2134 | Factoid | {
"string": "List the code links in papers that use the All-attention network (36 layers) model in any benchmark?"
} | [] | {
"sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"All-attention network (36 layers)\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}"
} | T07 | Tree | WHICH-WHAT | true | 4 |
AQ0046 | Factoid | {
"string": "Can you list the models that have been evaluated on the FB15k dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"FB15k\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}"
} | T01 | Tree | WHICH-WHAT | true | 6 |
AQ0137 | Factoid | {
"string": "Can you list the models that have been evaluated on the ARC (Easy) dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"ARC (Easy)\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}"
} | T01 | Tree | WHICH-WHAT | true | 6 |
AQ1393 | Factoid | {
"string": "Which model has achieved the highest NER Micro F1 score on the ACE 2005 benchmark dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"NER Micro F1\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"ACE 2005\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}"
} | T05 | Tree | WHICH-WHAT | true | 12 |
AQ0596 | Factoid | {
"string": "Provide a list of research paper titles and IDs that have benchmarked models on the Atari 2600 Kung-Fu Master dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari 2600 Kung-Fu Master\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}"
} | T02 | Tree | WHICH-WHAT | true | 5 |
AQ1943 | Factoid | {
"string": "Can you provide links to code used in papers that benchmark the S-NLI model?"
} | [] | {
"sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"S-NLI\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}"
} | T07 | Tree | WHICH-WHAT | true | 4 |
AQ0650 | Factoid | {
"string": "Provide a list of research paper titles and IDs that have benchmarked models on the Reuters En-De dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Reuters En-De\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}"
} | T02 | Tree | WHICH-WHAT | true | 5 |
AQ0354 | Factoid | {
"string": "Provide a list of research paper titles and IDs that have benchmarked models on the ORKG-TDM dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"ORKG-TDM\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}"
} | T02 | Tree | WHICH-WHAT | true | 5 |
AQ1828 | Factoid | {
"string": "What is the name of the top performing model in terms of Micro Precision score when benchmarked on the NLP-TDMS (Exp, arXiv only) dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Micro Precision\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"NLP-TDMS (Exp, arXiv only)\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}"
} | T05 | Tree | WHICH-WHAT | true | 12 |
AQ0366 | Factoid | {
"string": "List the title and ID of research papers that contain a benchmark over the SciCite dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"SciCite\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}"
} | T02 | Tree | WHICH-WHAT | true | 5 |
AQ2458 | Factoid | {
"string": "Can you provide links to code used in papers that benchmark the Relationship Types model?"
} | [] | {
"sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"Relationship Types\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}"
} | T07 | Tree | WHICH-WHAT | true | 4 |
AQ1183 | non-factoid | {
"string": "What is the top benchmark result (metric and value) over the dataset The Pile?"
} | [] | {
"sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"The Pile\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl"
} | T04 | Tree | WHICH-WHAT | true | 13 |
AQ1462 | Factoid | {
"string": "What is the name of the top performing model in terms of F1a score when benchmarked on the MultiRC dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"F1a\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"MultiRC\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}"
} | T05 | Tree | WHICH-WHAT | true | 12 |
AQ1432 | Factoid | {
"string": "Indicate the model that performed best in terms of BLEU metric on the WMT2016 English-German benchmark dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"BLEU\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"WMT2016 English-German\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}"
} | T05 | Tree | WHICH-WHAT | true | 12 |
AQ0763 | Factoid | {
"string": "List the metrics that are used to evaluate models on the WMT2016 English-Czech benchmark dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"WMT2016 English-Czech\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}"
} | T03 | Tree | WHICH-WHAT | true | 6 |
AQ0482 | Factoid | {
"string": "Provide a list of research paper titles and IDs that have benchmarked models on the Supervised: dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Supervised:\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}"
} | T02 | Tree | WHICH-WHAT | true | 5 |
AQ0400 | Factoid | {
"string": "Provide a list of research paper titles and IDs that have benchmarked models on the CoNLL04 dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"CoNLL04\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}"
} | T02 | Tree | WHICH-WHAT | true | 5 |
AQ1801 | Factoid | {
"string": "What is the best performing model benchmarking the Yelp-14 dataset in terms of Accuracy metric?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Accuracy\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Yelp-14\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}"
} | T05 | Tree | WHICH-WHAT | true | 12 |
AQ0728 | Factoid | {
"string": "What are the metrics of evaluation over the MNIST dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"MNIST\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}"
} | T03 | Tree | WHICH-WHAT | true | 6 |
AQ1126 | non-factoid | {
"string": "What is the top benchmark result (metric and value) over the dataset Multimodal PISA?"
} | [] | {
"sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Multimodal PISA\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl"
} | T04 | Tree | WHICH-WHAT | true | 13 |
AQ2027 | Factoid | {
"string": "Can you provide links to code used in papers that benchmark the CMLM+LAT+4 iterations model?"
} | [] | {
"sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"CMLM+LAT+4 iterations\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}"
} | T07 | Tree | WHICH-WHAT | true | 4 |
AQ1061 | non-factoid | {
"string": "Can you provide the highest benchmark result, including the metric and score, for the Annotated development corpus dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Annotated development corpus\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl"
} | T04 | Tree | WHICH-WHAT | true | 13 |
AQ0539 | Factoid | {
"string": "List the title and ID of research papers that contain a benchmark over the Reacher, easy (DMControl500k) dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Reacher, easy (DMControl500k)\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}"
} | T02 | Tree | WHICH-WHAT | true | 5 |
AQ0920 | Factoid | {
"string": "List the metrics that are used to evaluate models on the Atari 2600 Seaquest benchmark dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari 2600 Seaquest\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}"
} | T03 | Tree | WHICH-WHAT | true | 6 |
AQ1089 | non-factoid | {
"string": "What is the top benchmark result (metric and value) over the dataset GAD?"
} | [] | {
"sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"GAD\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl"
} | T04 | Tree | WHICH-WHAT | true | 13 |
AQ1243 | non-factoid | {
"string": "What is the top benchmark score and its metric on the CIFAR-10 Image Classification dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"CIFAR-10 Image Classification\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl"
} | T04 | Tree | WHICH-WHAT | true | 13 |
AQ0088 | Factoid | {
"string": "Could you provide a list of models that have been tested on the WMT2016 Romanian-English benchmark dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"WMT2016 Romanian-English\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}"
} | T01 | Tree | WHICH-WHAT | true | 6 |
AQ0192 | Factoid | {
"string": "Can you list the models that have been evaluated on the Ball in cup, catch (DMControl500k) dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Ball in cup, catch (DMControl500k)\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}"
} | T01 | Tree | WHICH-WHAT | true | 6 |
AQ0427 | Factoid | {
"string": "Provide a list of research paper titles and IDs that have benchmarked models on the IWSLT2015 German-English dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"IWSLT2015 German-English\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}"
} | T02 | Tree | WHICH-WHAT | true | 5 |
AQ0959 | Factoid | {
"string": "What evaluation metrics are commonly used when benchmarking models on the Atari 2600 Tennis dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari 2600 Tennis\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}"
} | T03 | Tree | WHICH-WHAT | true | 6 |
AQ0247 | Factoid | {
"string": "What are the models that have been benchmarked on the Atari 2600 Zaxxon dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari 2600 Zaxxon\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}"
} | T01 | Tree | WHICH-WHAT | true | 6 |
AQ0362 | Factoid | {
"string": "List the title and ID of research papers that contain a benchmark over the SemEval-2021 Task 11 dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"SemEval-2021 Task 11\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}"
} | T02 | Tree | WHICH-WHAT | true | 5 |
AQ0895 | Factoid | {
"string": "Can you list the metrics used to evaluate models on the ClueWeb09-B dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"ClueWeb09-B\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}"
} | T03 | Tree | WHICH-WHAT | true | 6 |
AQ2384 | Factoid | {
"string": "Where can I find code references in papers that have used the DeiT-B model for benchmarking purposes?"
} | [] | {
"sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"DeiT-B\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}"
} | T07 | Tree | WHICH-WHAT | true | 4 |
AQ1010 | Factoid | {
"string": "Can you list the metrics used to evaluate models on the Birdsnap dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Birdsnap\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}"
} | T03 | Tree | WHICH-WHAT | true | 6 |
AQ1383 | Factoid | {
"string": "What is the name of the top performing model in terms of F1 entity level score when benchmarked on the JNLPBA dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"F1 entity level\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"JNLPBA\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}"
} | T05 | Tree | WHICH-WHAT | true | 12 |
AQ2417 | Factoid | {
"string": "Provide a list of papers that have utilized the CvT-13-NAS model and include the links to their code?"
} | [] | {
"sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"CvT-13-NAS\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}"
} | T07 | Tree | WHICH-WHAT | true | 4 |
AQ1757 | Factoid | {
"string": "Which model has achieved the highest Score score on the Atari 2600 Gravitar benchmark dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Score\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari 2600 Gravitar\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}"
} | T05 | Tree | WHICH-WHAT | true | 12 |
AQ1401 | Factoid | {
"string": "What is the best performing model benchmarking the SemEval-2010 Task 8 dataset in terms of F1 metric?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"F1\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"SemEval-2010 Task 8\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}"
} | T05 | Tree | WHICH-WHAT | true | 12 |
AQ0528 | Factoid | {
"string": "Give me a list of research papers along with their titles and IDs, that have performed benchmarks on the CoNLL++ dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"CoNLL++\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}"
} | T02 | Tree | WHICH-WHAT | true | 5 |
AQ2431 | Factoid | {
"string": "Where can I find code references in papers that have used the DY-MobileNetV2 ×0.35 model for benchmarking purposes?"
} | [] | {
"sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"DY-MobileNetV2 ×0.35\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}"
} | T07 | Tree | WHICH-WHAT | true | 4 |
AQ1940 | Factoid | {
"string": "List the code links in papers that use the CitClus model in any benchmark?"
} | [] | {
"sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"CitClus\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}"
} | T07 | Tree | WHICH-WHAT | true | 4 |
AQ0549 | Factoid | {
"string": "List the title and ID of research papers that contain a benchmark over the Habitat 2020 Object Nav test-std dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Habitat 2020 Object Nav test-std\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}"
} | T02 | Tree | WHICH-WHAT | true | 5 |
AQ0937 | Factoid | {
"string": "What evaluation metrics are commonly used when benchmarking models on the Atari 2600 Amidar dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari 2600 Amidar\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}"
} | T03 | Tree | WHICH-WHAT | true | 6 |
HQ0054 | Non-factoid/Ranking | {
"string": "What is the minimum and maximum installed capacity for each energy source considered?"
} | [
"What are extreme values of installed capacity grouped by energy source?"
] | {
"sparql": "SELECT ?energy_sources_labels (MIN(?installed_cap_value) AS ?min_installed_cap_value) (MAX(?installed_cap_value) AS ?max_installed_cap_value)\nWHERE {\n orkgr:R153801 orkgp:compareContribution ?contrib.\n ?contrib orkgp:P43135 ?energy_sources.\n ?energy_sources rdfs:label ?energy_sources_labels;\n orkgp:P43133 ?installed_capacity.\n ?installed_capacity orkgp:HAS_VALUE ?value.\n BIND(xsd:float(?value) AS ?installed_cap_value)\n}"
} | null | tree | WHICH-WHAT | false | 5 |
AQ2463 | Factoid | {
"string": "Can you list benchmarked problems in the area of Computer Sciences?"
} | [] | {
"sparql": "SELECT DISTINCT ?problem ?problem_lbl\nWHERE {\n ?rf a orkgc:ResearchField;\n rdfs:label ?rf_label.\n FILTER (str(?rf_label) = \"Computer Sciences\")\n ?paper orkgp:P30 ?rf;\n orkgp:P31 ?cont.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:P32 ?problem.\n ?problem rdfs:label ?problem_lbl.\n}"
} | T08 | Tree | WHICH-WHAT | true | 5 |
AQ0065 | Factoid | {
"string": "Could you provide a list of models that have been tested on the ADE Corpus benchmark dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"ADE Corpus\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}"
} | T01 | Tree | WHICH-WHAT | true | 6 |
AQ1586 | Factoid | {
"string": "Indicate the model that performed best in terms of F1 metric on the Paper Field benchmark dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"F1\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Paper Field\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}"
} | T05 | Tree | WHICH-WHAT | true | 12 |
AQ1906 | Factoid | {
"string": "Provide a list of papers that have utilized the RNN model and include the links to their code?"
} | [] | {
"sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"RNN\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}"
} | T07 | Tree | WHICH-WHAT | true | 4 |
AQ0655 | Factoid | {
"string": "List the title and ID of research papers that contain a benchmark over the ScienceCite dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"ScienceCite\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}"
} | T02 | Tree | WHICH-WHAT | true | 5 |
AQ0430 | Factoid | {
"string": "Provide a list of research paper titles and IDs that have benchmarked models on the WMT2016 Czech-English dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"WMT2016 Czech-English\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}"
} | T02 | Tree | WHICH-WHAT | true | 5 |
AQ0751 | Factoid | {
"string": "What are the metrics of evaluation over the ADE Corpus dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"ADE Corpus\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}"
} | T03 | Tree | WHICH-WHAT | true | 6 |
AQ0049 | Factoid | {
"string": "Can you list the models that have been evaluated on the Penn Treebank dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Penn Treebank\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}"
} | T01 | Tree | WHICH-WHAT | true | 6 |
AQ1667 | Factoid | {
"string": "What is the name of the top performing model in terms of Accuracy (%) score when benchmarked on the DTD dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Accuracy (%)\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"DTD\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}"
} | T05 | Tree | WHICH-WHAT | true | 12 |
AQ2212 | Factoid | {
"string": "List the code links in papers that use the All-attention network - 36 layers model in any benchmark?"
} | [] | {
"sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"All-attention network - 36 layers\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}"
} | T07 | Tree | WHICH-WHAT | true | 4 |
AQ2159 | Factoid | {
"string": "Can you provide links to code used in papers that benchmark the Trellis Network model?"
} | [] | {
"sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"Trellis Network\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}"
} | T07 | Tree | WHICH-WHAT | true | 4 |
AQ1427 | Factoid | {
"string": "Which model has achieved the highest BLEU score on the WMT2016 German-English benchmark dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"BLEU\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"WMT2016 German-English\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}"
} | T05 | Tree | WHICH-WHAT | true | 12 |
AQ2161 | Factoid | {
"string": "Provide a list of papers that have utilized the Transformer-XL model and include the links to their code?"
} | [] | {
"sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"Transformer-XL\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}"
} | T07 | Tree | WHICH-WHAT | true | 4 |
AQ0800 | Factoid | {
"string": "What evaluation metrics are commonly used when benchmarking models on the BoolQ dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"BoolQ\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}"
} | T03 | Tree | WHICH-WHAT | true | 6 |
AQ0765 | Factoid | {
"string": "What are the metrics of evaluation over the WMT2016 English-Romanian dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"WMT2016 English-Romanian\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}"
} | T03 | Tree | WHICH-WHAT | true | 6 |
AQ1254 | non-factoid | {
"string": "What is the top benchmark result (metric and value) over the dataset MLDoc Zero-Shot English-to-French?"
} | [] | {
"sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"MLDoc Zero-Shot English-to-French\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl"
} | T04 | Tree | WHICH-WHAT | true | 13 |
AQ1724 | Factoid | {
"string": "Which model has achieved the highest Score score on the Atari 2600 Frostbite benchmark dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Score\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari 2600 Frostbite\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}"
} | T05 | Tree | WHICH-WHAT | true | 12 |
AQ0884 | Factoid | {
"string": "List the metrics that are used to evaluate models on the Cartpole, swingup (DMControl500k) benchmark dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Cartpole, swingup (DMControl500k)\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}"
} | T03 | Tree | WHICH-WHAT | true | 6 |
AQ0221 | Factoid | {
"string": "Can you list the models that have been evaluated on the MLDoc Zero-Shot German-to-French dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"MLDoc Zero-Shot German-to-French\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}"
} | T01 | Tree | WHICH-WHAT | true | 6 |
AQ1261 | non-factoid | {
"string": "Can you provide the highest benchmark result, including the metric and score, for the CL-SciSumm dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"CL-SciSumm\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl"
} | T04 | Tree | WHICH-WHAT | true | 13 |
AQ2186 | Factoid | {
"string": "Where can I find code references in papers that have used the AWD-LSTM-MoS + ATOI model for benchmarking purposes?"
} | [] | {
"sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"AWD-LSTM-MoS + ATOI\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}"
} | T07 | Tree | WHICH-WHAT | true | 4 |
AQ1594 | Factoid | {
"string": "What is the best performing model benchmarking the Open Entity dataset in terms of F1 metric?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"F1\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Open Entity\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}"
} | T05 | Tree | WHICH-WHAT | true | 12 |
AQ1670 | Factoid | {
"string": "What is the best performing model benchmarking the Reuters RCV1/RCV2 English-to-German dataset in terms of Accuracy metric?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Accuracy\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Reuters RCV1/RCV2 English-to-German\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}"
} | T05 | Tree | WHICH-WHAT | true | 12 |
AQ0631 | Factoid | {
"string": "Provide a list of research paper titles and IDs that have benchmarked models on the Atari 2600 Star Gunner dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari 2600 Star Gunner\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}"
} | T02 | Tree | WHICH-WHAT | true | 5 |
AQ0754 | Factoid | {
"string": "What evaluation metrics are commonly used when benchmarking models on the DDI dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"DDI\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}"
} | T03 | Tree | WHICH-WHAT | true | 6 |
AQ0749 | Factoid | {
"string": "What are the metrics of evaluation over the NYT-single dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"NYT-single\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}"
} | T03 | Tree | WHICH-WHAT | true | 6 |
AQ1080 | non-factoid | {
"string": "What is the top benchmark score and its metric on the WebNLG dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"WebNLG\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl"
} | T04 | Tree | WHICH-WHAT | true | 13 |
HQ0056 | Non-factoid/Count | {
"string": "What is the average installed capacity for each energy source considered in 5 year intervals?"
} | [
"What are the mean values of installed capacity for 5 years intervals grouped by energy source ?"
] | {
"sparql": "SELECT ?rangeId ?energy_sources_labels (AVG(?installed_cap_value AS ?avg_installed_cap_value))\nWHERE {\n orkgr:R153801 orkgp:compareContribution ?contrib.\n ?paper orkgp:P31 ?contrib;\n orkgp:P29 ?year.\n BIND(xsd:int(?year) AS ?y).\n VALUES(?rangeId ?min ?max) {\n (\"2001-2005\" 2001 2005)\n (\"2006-2010\" 2006 2010)\n (\"2011-2015\" 2011 2015)\n (\"2016-2020\" 2016 2020)\n }\n FILTER(?min <= ?y && ?y <= ?max).\n ?contrib orkgp:P43135 ?energy_sources.\n ?energy_sources rdfs:label ?energy_sources_labels;\n orkgp:P43133 ?installed_capacity.\n ?installed_capacity orkgp:HAS_VALUE ?value.\n BIND(xsd:float(?value) AS ?installed_cap_value).\n}\nORDER BY ASC(?rangeId)"
} | null | tree | WHICH-WHAT | false | 7 |
AQ0901 | Factoid | {
"string": "What evaluation metrics are commonly used when benchmarking models on the DTD dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"DTD\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}"
} | T03 | Tree | WHICH-WHAT | true | 6 |
AQ1946 | Factoid | {
"string": "List the code links in papers that use the end-to-end relation extraction model model in any benchmark?"
} | [] | {
"sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"end-to-end relation extraction model\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}"
} | T07 | Tree | WHICH-WHAT | true | 4 |
AQ0787 | Factoid | {
"string": "What evaluation metrics are commonly used when benchmarking models on the HMDB51 dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"HMDB51\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}"
} | T03 | Tree | WHICH-WHAT | true | 6 |
AQ1825 | Factoid | {
"string": "What is the best performing model benchmarking the NLP-TDMS (Exp, arXiv only) dataset in terms of Micro F1 metric?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Micro F1\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"NLP-TDMS (Exp, arXiv only)\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}"
} | T05 | Tree | WHICH-WHAT | true | 12 |
AQ1915 | Factoid | {
"string": "Where can I find code references in papers that have used the HNEABP (BWEL) model for benchmarking purposes?"
} | [] | {
"sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"HNEABP (BWEL)\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}"
} | T07 | Tree | WHICH-WHAT | true | 4 |
AQ1266 | non-factoid | {
"string": "What is the top benchmark result (metric and value) over the dataset Atari 2600 Crazy Climber?"
} | [] | {
"sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari 2600 Crazy Climber\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl"
} | T04 | Tree | WHICH-WHAT | true | 13 |
AQ1572 | Factoid | {
"string": "What is the name of the top performing model in terms of F1 score when benchmarked on the BC5CDR dataset?"
} | [] | {
"sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"F1\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"BC5CDR\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}"
} | T05 | Tree | WHICH-WHAT | true | 12 |
HQ0042 | Non-factoid | {
"string": "Who is the author of the most recent paper about insects?"
} | [
"By whom the lattest study about insects was undertaken?"
] | {
"sparql": "SELECT ?author_name\nWHERE {\n {\n SELECT ?publication_date_ AS ?date_of_the_latest_paper {\n ?paper_ a orkgc:Paper;\n rdfs:label ?title_;\n orkgp:P28 ?publication_month_;\n orkgp:P29 ?publication_year_.\n OPTIONAL {\n ?publication_month_ rdfs:label ?publication_month_label_\n }\n OPTIONAL {\n ?publication_year_ rdfs:label ?publication_year_label_\n }\n BIND(\n xsd:integer(\n IF(\n BOUND(?publication_month_label_),\n ?publication_month_label_,\n ?publication_month_\n )\n ) AS ?publication_month_as_number_\n )\n BIND(\n xsd:integer(\n IF(\n BOUND(?publication_year_label_),\n ?publication_year_label_,\n ?publication_year_\n )\n ) AS ?publication_year_as_number_\n )\n BIND(\n xsd:dateTime(\n CONCAT(\n ?publication_year_as_number_,\n \"-\",\n ?publication_month_as_number_,\n \"-01T00:00:00.000-00:00\"\n )\n )\n AS ?publication_date_\n )\n FILTER(\n ?publication_month_as_number_ > 0 && ?publication_month_as_number_ < 13 && ?publication_year_as_number_ > 0 && ?publication_year_as_number_ < 2023\n )\n FILTER(REGEX(STR(?title_), \"insect\"))\n }\n ORDER BY DESC(?publication_date_)\n LIMIT 1\n }\n ?paper a orkgc:Paper;\n rdfs:label ?title;\n orkgp:P27 ?author;\n orkgp:P28 ?publication_month;\n orkgp:P29 ?publication_year.\n OPTIONAL {\n ?publication_month rdfs:label ?publication_month_label\n }\n OPTIONAL {\n ?publication_year rdfs:label ?publication_year_label\n }\n OPTIONAL {\n ?author rdfs:label ?author_label\n }\n BIND(\n IF(\n BOUND(?author_label),\n ?author_label,\n ?author\n ) AS ?author_name\n )\n BIND(\n xsd:integer(\n IF(\n BOUND(?publication_month_label),\n ?publication_month_label,\n ?publication_month\n )\n ) AS ?publication_month_as_number\n )\n BIND(\n xsd:integer(\n IF(\n BOUND(?publication_year_label),\n ?publication_year_label,\n ?publication_year\n )\n ) AS ?publication_year_as_number\n )\n BIND(\n xsd:dateTime(\n CONCAT(\n ?publication_year_as_number,\n \"-\",\n ?publication_month_as_number,\n \"-01T00:00:00.000-00:00\"\n )\n )\n AS ?publication_date\n )\n FILTER(\n ?publication_month_as_number > 0 && ?publication_month_as_number < 13 && ?publication_year_as_number > 0 && ?publication_year_as_number < 2023\n )\n FILTER(\n REGEX(\n STR(?title),\n \"insect\"\n ) && ?publication_date = ?date_of_the_latest_paper\n )\n}"
} | null | forest | WHO-WHAT | false | 14 |
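Each `query` value above is an executable SPARQL string. The sketch below shows, under stated assumptions, how one of these queries (row AQ0728, the evaluation metrics used on the MNIST dataset) might be run against an ORKG SPARQL endpoint. The endpoint URL and the orkgc/orkgp/orkgr prefix IRIs are assumptions not contained in this table and should be verified against the ORKG documentation.

```python
# Minimal sketch: execute one SPARQL query from the table against an ORKG endpoint.
# Assumptions (not part of this table): the endpoint URL and the prefix IRIs below.
from SPARQLWrapper import SPARQLWrapper, JSON

ENDPOINT = "https://orkg.org/triplestore"  # assumed ORKG SPARQL endpoint

# Assumed ORKG namespace declarations; the queries in the table expect these prefixes.
PREFIXES = """
PREFIX orkgc: <http://orkg.org/orkg/class/>
PREFIX orkgp: <http://orkg.org/orkg/predicate/>
PREFIX orkgr: <http://orkg.org/orkg/resource/>
PREFIX rdfs:  <http://www.w3.org/2000/01/rdf-schema#>
PREFIX xsd:   <http://www.w3.org/2001/XMLSchema#>
"""

# Query copied verbatim from row AQ0728 (metrics of evaluation over the MNIST dataset).
QUERY = """
SELECT DISTINCT ?metric ?metric_lbl
WHERE {
  ?dataset a orkgc:Dataset;
           rdfs:label ?dataset_lbl.
  FILTER (str(?dataset_lbl) = "MNIST")
  ?benchmark orkgp:HAS_DATASET ?dataset;
             orkgp:HAS_EVALUATION ?eval.
  OPTIONAL {?eval orkgp:HAS_METRIC ?metric.
            ?metric rdfs:label ?metric_lbl.}
}
"""

client = SPARQLWrapper(ENDPOINT)
client.setQuery(PREFIXES + QUERY)
client.setReturnFormat(JSON)
results = client.query().convert()

# ?metric and ?metric_lbl sit inside an OPTIONAL block, so guard against missing bindings.
for binding in results["results"]["bindings"]:
    print(binding.get("metric", {}).get("value"),
          binding.get("metric_lbl", {}).get("value"))
```

The same pattern applies to any row in the table; only the `sparql` string changes.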