Dataset: wiki_hop
Tasks: Question Answering
Sub-tasks: extractive-qa
Languages: English
Size: 10K<n<100K
ArXiv: 1710.06481
Tags: multi-hop
License: unspecified
Commit d54a5ff • 1 Parent(s): 08050e6
Delete legacy JSON metadata

Delete legacy `dataset_infos.json`.
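Since the split and feature metadata previously lived only in this file, a quick way to verify the deletion is safe is to resolve the builder info from the Hub afterwards. A minimal sketch, assuming the dataset is published under the `wiki_hop` Hub id (the `builder_name` recorded in the deleted file):

from datasets import load_dataset_builder

# Assumption: the dataset lives on the Hub as "wiki_hop" (the builder_name
# recorded in the deleted dataset_infos.json); after this commit the metadata
# should resolve from the dataset card rather than the legacy JSON file.
builder = load_dataset_builder("wiki_hop", "original")
print(builder.info.splits)    # expect train (43738 rows) and validation (5129 rows)
print(builder.info.features)  # id, query, answer, candidates, supports, annotations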
dataset_infos.json DELETED (+0 -1)
@@ -1 +0,0 @@
-{"original": {"description": "WikiHop is open-domain and based on Wikipedia articles; the goal is to recover Wikidata information by hopping through documents. The goal is to answer text understanding queries by combining multiple facts that are spread across different documents.\n", "citation": "@misc{welbl2018constructing,\n title={Constructing Datasets for Multi-hop Reading Comprehension Across Documents}, \n author={Johannes Welbl and Pontus Stenetorp and Sebastian Riedel},\n year={2018},\n eprint={1710.06481},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "http://qangaroo.cs.ucl.ac.uk/", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "query": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}, "candidates": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "supports": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "annotations": {"feature": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "wiki_hop", "config_name": "original", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 325952974, "num_examples": 43738, "dataset_name": "wiki_hop"}, "validation": {"name": "validation", "num_bytes": 41246536, "num_examples": 5129, "dataset_name": "wiki_hop"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1ytVZ4AhubFDOEL7o7XrIRIyhU8g9wvKA": {"num_bytes": 339843061, "checksum": "2f512869760cdad76a022a1465f025b486ae79dc5b8f0bf3ad901a4caf2d3050"}}, "download_size": 339843061, "post_processing_size": null, "dataset_size": 367199510, "size_in_bytes": 707042571}, "masked": {"description": "WikiHop is open-domain and based on Wikipedia articles; the goal is to recover Wikidata information by hopping through documents. The goal is to answer text understanding queries by combining multiple facts that are spread across different documents.\n", "citation": "@misc{welbl2018constructing,\n title={Constructing Datasets for Multi-hop Reading Comprehension Across Documents}, \n author={Johannes Welbl and Pontus Stenetorp and Sebastian Riedel},\n year={2018},\n eprint={1710.06481},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "http://qangaroo.cs.ucl.ac.uk/", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}, "candidates": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "supports": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "annotations": {"feature": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "wiki_hop", "config_name": "masked", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 348249138, "num_examples": 43738, "dataset_name": "wiki_hop"}, "validation": {"name": "validation", "num_bytes": 44066862, "num_examples": 5129, "dataset_name": "wiki_hop"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1ytVZ4AhubFDOEL7o7XrIRIyhU8g9wvKA": {"num_bytes": 339843061, "checksum": "2f512869760cdad76a022a1465f025b486ae79dc5b8f0bf3ad901a4caf2d3050"}}, "download_size": 339843061, "post_processing_size": null, "dataset_size": 392316000, "size_in_bytes": 732159061}}
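For reference, consuming the dataset is unchanged by this commit. A minimal sketch of loading the schema described in the deleted file, again assuming the `wiki_hop` Hub id and the "original" config:

from datasets import load_dataset

# Assumption: Hub id "wiki_hop" with the "original" and "masked" configs
# listed in the deleted metadata above.
ds = load_dataset("wiki_hop", "original")

example = ds["train"][0]
# Per the deleted metadata: id, query, and answer are strings; candidates and
# supports are lists of strings; annotations is a list of string lists.
print(example["query"], "->", example["answer"])
print(len(example["candidates"]), "candidates,", len(example["supports"]), "support documents")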