id (string, length 2–115) | private (bool, 1 class) | tags (list) | description (string, length 0–5.93k, ⌀ = null) | downloads (int64, 0–1.14M) | likes (int64, 0–1.79k) |
---|---|---|---|---|---|
usc-isi/WikiConvert | false | [
"task_categories:fill-mask",
"task_categories:other",
"task_categories:text-generation",
"task_ids:language-modeling",
"task_ids:masked-language-modeling",
"language_creators:found",
"multilinguality:monolingual",
"size_categories:100K<n<1M",
"source_datasets:extended|wikipedia",
"language:en",
"license:mit",
"numeracy",
"natural-language-understanding",
"tokenization"
] | Language Modelling with Cardinal Number Annotations. | 0 | 1 |
uva-irlab/canard_quretec | false | [
"arxiv:2005.11723"
] | CANARD has been preprocessed by Voskarides et al. to train and evaluate their Query Resolution Term Classification
model (QuReTeC).
CANARD is a dataset for question-in-context rewriting that consists of questions each given in a dialog context
together with a context-independent rewriting of the question. The context of each question is the dialog utterances
that precede the question. CANARD can be used to evaluate question rewriting models that handle important linguistic
phenomena such as coreference and ellipsis resolution. | 4 | 0 |
uva-irlab/trec-cast-2019-multi-turn | false | [
"task_categories:text-retrieval",
"task_ids:document-retrieval",
"multilinguality:monolingual",
"size_categories:10M<n<100M",
"language:en"
] | The Conversational Assistance Track (CAsT) is a new track for TREC 2019 to facilitate Conversational Information
Seeking (CIS) research and to create a large-scale reusable test collection for conversational search systems.
The document corpus is 38,426,252 passages from the TREC Complex Answer Retrieval (CAR) and Microsoft MAchine
Reading COmprehension (MARCO) datasets. | 0 | 0 |
uyeongjae/load_klue_re_agmented | false | [] | null | 0 | 0 |
valurank/12-factor | false | [
"multilinguality:monolingual",
"language:en",
"license:other"
] | null | 0 | 0 |
valurank/PoliticalBias | false | [
"multilinguality:monolingual",
"language:en",
"license:other"
] | null | 0 | 0 |
valurank/PoliticalBias_AllSides_Txt | false | [
"multilinguality:monolingual",
"language:en",
"license:other"
] | null | 1 | 1 |
valurank/PoliticalBias_Sources | false | [
"multilinguality:monolingual",
"language:en",
"license:other"
] | null | 2 | 0 |
valurank/hate-multi | false | [
"task_categories:text-classification",
"multilinguality:monolingual",
"size_categories:10K<n<100K",
"source_datasets:derived",
"language:en",
"license:other"
] | null | 0 | 0 |
valurank/news-12factor | false | [
"task_categories:text-classification",
"task_ids:multi-class-classification",
"multilinguality:monolingual",
"language:en",
"license:other"
] | null | 0 | 0 |
valurank/offensive-multi | false | [
"task_categories:text-classification",
"multilinguality:monolingual",
"size_categories:10K<n<100K",
"source_datasets:derived",
"language:en",
"license:other"
] | null | 0 | 0 |
vanadhi/finlitqa | false | [] | null | 0 | 0 |
vannacute/AmazonReviewHelpfulness | false | [] | null | 0 | 0 |
vannynakamura/leish | false | [] | null | 0 | 0 |
vasilis/et_corpora_parliament_processed | false | [] | null | 0 | 0 |
vasudevgupta/amazon-ml-hack | false | [] | null | 0 | 0 |
vasudevgupta/bigbird-tokenized-natural-questions | false | [] | null | 0 | 0 |
vasudevgupta/data | false | [] | null | 2 | 0 |
vasudevgupta/fairseq-ljspeech | false | [] | null | 0 | 0 |
vasudevgupta/gsoc-librispeech | false | [] | null | 0 | 0 |
vasudevgupta/natural-questions-validation | false | [] | null | 0 | 0 |
vasudevgupta/prml_data_contest | false | [] | null | 0 | 0 |
vasudevgupta/temperature-distribution-2d-plate | false | [] | null | 0 | 0 |
vasudevgupta/temperature-distribution-3d-cylinder | false | [] | null | 0 | 0 |
iitm-ddp/iiith-indic-speech | false | [] | null | 0 | 0 |
vblagoje/lfqa | false | [] | null | 35 | 9 |
vblagoje/lfqa_support_docs | false | [] | null | 84 | 4 |
vblagoje/wikipedia_snippets_streamed | false | [] | The dataset was built from the Wikipedia dump (https://dumps.wikimedia.org/).
Each example contains the content of one full Wikipedia article, cleaned to strip
markdown and unwanted sections (references, etc.). | 365 | 0 |
vctc92/sdsd | false | [] | null | 0 | 0 |
vctc92/test | false | [] | null | 0 | 0 |
vera-pro/ShadowLink | false | [] | null | 0 | 3 |
versae/bibles | false | [
"language:sq",
"language:ar",
"language:az",
"language:be",
"language:bg",
"language:ceb",
"language:zh",
"language:cs",
"language:da",
"language:en",
"language:es",
"language:fi",
"language:fr",
"language:de",
"language:el",
"language:ht",
"language:he",
"language:hi",
"language:hu",
"language:it",
"language:ko",
"language:la",
"language:nl",
"language:no",
"language:pt",
"language:rm",
"language:ru",
"language:sw",
"language:ta",
"language:th",
"language:tr",
"language:vi"
] | Multilingual Bibles | 1 | 0 |
versae/modernisa | false | [] | Modernisa | 0 | 0 |
versae/norwegian-t5-dataset-debug | false | [] | null | 0 | 0 |
versae/norwegian-t5-dataset-debug2 | false | [] | null | 0 | 0 |
versae/norwegian-t5-dataset-debug3 | false | [] | null | 0 | 0 |
vershasaxena91/datasets | false | [] | null | 0 | 0 |
vershasaxena91/squad_multitask | false | [] | Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable. | 0 | 0 |
vesteinn/icelandic-ner-MIM-GOLD-NER | false | [] | This Icelandic named entity (NE) corpus, MIM-GOLD-NER, is a version of the MIM-GOLD corpus tagged for NEs. Over 48 thousand NEs are tagged in this corpus of one million tokens, which can be used for training named entity recognizers for Icelandic.
The MIM-GOLD-NER corpus was developed at Reykjavik University in 2018–2020, funded by the Strategic Research and Development Programme for Language Technology (LT). Two LT students were in charge of the corpus annotation and of training named entity recognizers using machine learning methods.
A semi-automatic approach was used for annotating the corpus. Lists of Icelandic person names, location names, and company names were compiled and used for extracting and classifying as many named entities as possible. Regular expressions were then used to find certain numerical entities in the corpus. After this automatic pre-processing step, the whole corpus was reviewed manually to correct any errors. The corpus is tagged for eight named entity types:
PERSON – names of humans, animals and other beings, real or fictional.
LOCATION – names of locations, real or fictional, e.g. buildings, street and place names. All geographical and geopolitical entities such as cities, countries, counties and regions, as well as planet names and other outer space entities.
ORGANIZATION – companies and other organizations, public or private, real or fictional. Schools, churches, swimming pools, community centers, musical groups, other affiliations.
MISCELLANEOUS – proper nouns that don’t belong to the previous three categories, such as products, book and movie titles, and events (e.g. wars, sports tournaments, festivals, concerts).
DATE – absolute temporal units of a full day or longer, such as days, months, years, centuries, both written numerically and alphabetically.
TIME – absolute temporal units shorter than a full day, such as seconds, minutes, or hours, both written numerically and alphabetically.
MONEY – exact monetary amounts in any currency, both written numerically and alphabetically.
PERCENT – percentages, both written numerically and alphabetically.
MIM-GOLD-NER is intended for training named entity recognizers for Icelandic. It is in the CoNLL format, and the position of each token within the NE is marked using the BIO tagging format. The corpus can be used in its entirety or by training on subsets of the text types that best fit the intended domain.
The MIM-GOLD-NER corpus is distributed with the same special user license as MIM-GOLD, which is based on the MIM license, since the texts in MIM-GOLD were sampled from the MIM corpus. | 0 | 0 |
vesteinn/icelandic-qa-NQiI | false | [
"task_categories:question-answering",
"task_ids:open-domain-qa",
"task_ids:extractive-qa",
"annotations_creators:curated",
"language_creators:curated",
"multilinguality:monolingual",
"source_datasets:original",
"language:is",
"license:cc-by-sa-4.0"
] | null | 0 | 1 |
victor/autonlp-data-imdb-reviews-sentiment | false | [] | null | 2 | 0 |
vidhur2k/multilingual-hate-speech | false | [] | null | 0 | 0 |
vincentclaes/mit_indoor_scenes | false | [] | null | 0 | 0 |
vishnun/huggingpics-data | false | [] | null | 0 | 0 |
vivekverma239/question-generation | false | [] | null | 0 | 0 |
vkhangpham/github-issues | false | [] | null | 0 | 0 |
vocab-transformers/wiki-en-passages-20210101 | false | [] | null | 0 | 0 |
vs4vijay/VizDS | false | [] | null | 0 | 0 |
vumichien/common_voice_large | false | [] | null | 0 | 0 |
vumichien/common_voice_large_jsut_jsss_css10 | false | [
"task_categories:automatic-speech-recognition",
"language_creators:expert-generated",
"multilinguality:monolingual",
"language:ja",
"license:cc-by-nc-nd-4.0"
] | null | 0 | 0 |
vumichien/ja_opus100_processed | false | [] | null | 0 | 0 |
w-nicole/childes_data | false | [] | null | 0 | 0 |
w-nicole/childes_data_no_tags | false | [] | null | 0 | 0 |
w-nicole/childes_data_no_tags_ | false | [] | null | 0 | 0 |
w-nicole/childes_data_with_tags | false | [] | null | 0 | 0 |
w-nicole/childes_data_with_tags_ | false | [] | null | 0 | 0 |
w11wo/imdb-javanese | false | [
"task_categories:text-classification",
"task_ids:sentiment-classification",
"annotations_creators:found",
"language_creators:machine-generated",
"multilinguality:monolingual",
"size_categories:10K<n<100K",
"source_datasets:original",
"language:jv",
"license:odbl"
] | Large Movie Review Dataset translated to Javanese.
This is a dataset for binary sentiment classification containing substantially
more data than previous benchmark datasets. We provide a set of 25,000 highly
polar movie reviews for training, and 25,000 for testing. There is additional
unlabeled data for use as well. We translated the original IMDB Dataset to
Javanese using the multi-lingual MarianMT Transformer model from
`Helsinki-NLP/opus-mt-en-mul`. | 0 | 0 |
wanagenst/maslow-six-choices | false | [] | null | 0 | 0 |
wanagenst/maslow-stories | false | [] | null | 0 | 0 |
wanagenst/plutchik-nine-choices | false | [] | null | 1 | 1 |
wanagenst/plutchik-stories | false | [] | null | 12 | 1 |
wanagenst/reiss-stories | false | [] | null | 0 | 0 |
wanagenst/reiss-twenty-choices | false | [] | null | 0 | 0 |
wardenga/lsoie | false | [
"task_categories:text-retrieval",
"annotations_creators:machine-generated",
"language_creators:found",
"multilinguality:monolingual",
"size_categories:unknown",
"source_datasets:extended|qa_srl",
"language:en",
"license:mit",
"Open Information Extraction",
"arxiv:2101.11177"
] | The Large Scale Open Information Extraction Dataset (LSOIE), is a dataset 20
times larger than the next largest human-annotated Open Information Extraction
(OIE) dataset. LSOIE is built upon the QA-SRL 2.0 dataset. | 0 | 0 |
warwickai/financial_phrasebank_mirror | false | [] | null | 0 | 0 |
webek18735/ddvoacantonesed | false | [] | null | 0 | 0 |
webek18735/dhikhscook | false | [] | null | 0 | 0 |
webimmunization/COVID-19-vaccine-attitude-tweets | false | [
"task_categories:text-classification",
"task_ids:sentiment-classification",
"task_ids:intent-classification",
"annotations_creators:crowdsourced",
"language_creators:other",
"multilinguality:monolingual",
"size_categories:54KB",
"source_datasets:original",
"language:en",
"license:cc-by-4.0"
] | null | 1 | 1 |
webis/args_me | false | [
"task_categories:text-retrieval",
"task_ids:document-retrieval",
"annotations_creators:machine-generated",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"size_categories:100K<n<1M",
"source_datasets:original",
"language:en",
"license:cc-by-4.0"
] | The args.me corpus (version 1.0, cleaned) comprises 382 545 arguments crawled from four debate portals in the middle of 2019. The debate portals are Debatewise, IDebate.org, Debatepedia, and Debate.org. The arguments are extracted using heuristics that are designed for each debate portal. | 17 | 1 |
webis/conclugen | false | [] | The ConcluGen corpus is constructed for the task of argument summarization. It consists of 136,996 pairs of argumentative texts and their conclusions collected from the ChangeMyView subreddit, a web portal for argumentative discussions on controversial topics.
The corpus has three variants: aspects, topics, and targets. Each variant encodes the corresponding information via control codes. These provide additional argumentative knowledge for generating more informative conclusions. | 0 | 1 |
webis/ms-marco-anchor-text | false | [] | null | 0 | 2 |
weijieliu/senteval_cn | false | [] | null | 0 | 0 |
wesamhaddad14/testdata | false | [] | null | 0 | 0 |
wicho/stylekqc-style | false | [
"license:cc-by-sa-4.0"
] | null | 2 | 2 |
wietsedv/stsbenchmark | false | [
"license:cc-by-sa-4.0"
] | STS Benchmark comprises a selection of the English datasets used in the STS tasks organized in the context of SemEval between 2012 and 2017. The selection of datasets include text from image captions, news headlines and user forums. | 97 | 0 |
wikilee/ADFA_Mapping | false | [] | null | 0 | 0 |
wikimedia/wikipedia | false | [] | null | 2 | 0 |
wikimedia/wikisource | false | [] | null | 0 | 0 |
winvoker/turkish-sentiment-analysis-dataset | false | [
"task_categories:text-classification",
"task_ids:sentiment-classification",
"annotations_creators:crowdsourced",
"annotations_creators:expert-generated",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"size_categories:unknown",
"language:tr",
"license:cc-by-sa-4.0"
] | null | 106 | 13 |
wisdomify/story | false | [] | This dataset is designed to provide a forward and reverse dictionary of Korean proverbs. | 0 | 0 |
wmt/europarl | false | [] | null | 0 | 0 |
wmt/news-commentary | false | [] | null | 1 | 0 |
wmt/uncorpus | false | [] | null | 0 | 0 |
wmt/wikititles | false | [] | null | 0 | 0 |
wmt/wmt10 | false | [] | null | 0 | 0 |
wmt/wmt13 | false | [] | null | 0 | 0 |
wmt/wmt14 | false | [] | null | 0 | 0 |
wmt/wmt15 | false | [] | null | 0 | 0 |
wmt/wmt16 | false | [] | null | 0 | 0 |
wmt/wmt17 | false | [] | null | 1 | 0 |
wmt/wmt18 | false | [] | null | 0 | 0 |
wmt/wmt19 | false | [] | null | 0 | 0 |
wpicard/nostradamus-propheties | false | [
"task_ids:language-modeling",
"annotations_creators:no-annotation",
"multilinguality:monolingual",
"size_categories:unknown",
"language:en",
"license:unknown"
] | null | 0 | 0 |
wzkariampuzha/EpiClassifySet | false | [] | null | 0 | 0 |
wzkariampuzha/EpiExtract4GARD | false | [] | null | 0 | 0 |
wzywzy/telegram_summary | false | [] | null | 0 | 0 |
botisan-ai/cantonese-mandarin-translations | false | [
"task_categories:text2text-generation",
"task_categories:translation",
"annotations_creators:machine-generated",
"language_creators:found",
"multilinguality:translation",
"size_categories:unknown",
"source_datasets:original",
"language:zh",
"license:mit",
"conditional-text-generation"
] | null | 13 | 4 |
xiaj/ds_test | false | [] | null | 0 | 0 |
xiaj/test0919 | false | [] | null | 0 | 0 |
xiaobendanyn/demo | false | [] | null | 0 | 0 |