| id | private | tags | description | downloads | likes |
|---|---|---|---|---|---|
strombergnlp/bornholmsk_parallel | false | [
"task_categories:translation",
"annotations_creators:expert-generated",
"language_creators:found",
"multilinguality:translation",
"size_categories:1K<n<10K",
"source_datasets:original",
"language:da",
"language:da-bornholm",
"license:cc-by-4.0"
]
| This dataset is parallel text for Bornholmsk and Danish.
For more details, see the paper [Bornholmsk Natural Language Processing: Resources and Tools](https://aclanthology.org/W19-6138/). | 0 | 2 |
mteb/reddit-clustering-p2p | false | [
"language:en"
]
| null | 98 | 0 |
lmqg/qg_subjqa | false | [
"task_categories:text-generation",
"task_ids:language-modeling",
"multilinguality:monolingual",
"size_categories:10K<n<100K",
"source_datasets:subjqa",
"language:en",
"license:cc-by-4.0",
"question-generation",
"arxiv:2210.03992"
]
| [SubjQA](https://github.com/megagonlabs/SubjQA) dataset for question generation (QG) task. | 421 | 0 |
Maddy132/customwb | false | []
| null | 0 | 0 |
mox/german_politicians_twitter_sentiment | false | []
| null | 1 | 3 |
HFFErica/triadiclabelled_dataset | false | []
| null | 0 | 0 |
LIUM/tedlium | false | [
"task_categories:automatic-speech-recognition",
"annotations_creators:expert-generated",
"language_creators:expert-generated",
"multilinguality:monolingual",
"size_categories:10K<n<100K",
"source_datasets:original",
"language:en"
]
| null | 241 | 9 |
MLRS/korpus_malti | false | [
"task_categories:text-generation",
"task_categories:fill-mask",
"task_ids:language-modeling",
"task_ids:masked-language-modeling",
"annotations_creators:no-annotation",
"language_creators:found",
"multilinguality:monolingual",
"size_categories:10M<n<100M",
"source_datasets:original",
"language:mt",
"license:cc-by-nc-sa-4.0"
]
| General Corpora for the Maltese language. | 24 | 0 |
elmurod1202/uzbek-sentiment-analysis | false | []
| null | 3 | 1 |
zArabi/ArmanNERPublic | false | []
| null | 0 | 0 |
DDSC/dagw_reddit_filtered_v1.0.0 | false | [
"task_categories:text-generation",
"task_ids:language-modeling",
"annotations_creators:no-annotation",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"size_categories:unknown",
"source_datasets:DDSC/partial-danish-gigaword-no-twitter",
"source_datasets:DDSC/reddit-da",
"language:da",
"license:cc-by-4.0",
"arxiv:2005.03521",
"arxiv:2112.11446"
]
| null | 0 | 1 |
selfishark/hf-issues-dataset-with-comments | false | []
| null | 0 | 0 |
Team-PIXEL/rendered-bookcorpus | false | [
"annotations_creators:no-annotation",
"language_creators:found",
"multilinguality:monolingual",
"size_categories:1M<n<10M",
"source_datasets:rendered|BookCorpusOpen",
"language:en",
"license:unknown",
"arxiv:1506.06724",
"arxiv:2207.06991",
"arxiv:2105.05241"
]
| null | 1 | 3 |
Team-PIXEL/rendered-wikipedia-english | false | [
"annotations_creators:no-annotation",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"size_categories:10M<n<100M",
"source_datasets:original",
"language:en",
"license:cc-by-sa-3.0",
"license:gfdl",
"arxiv:2207.06991"
]
| null | 2 | 2 |
Malaina/en-wiki-sentences-index | false | []
| null | 0 | 0 |
yjernite/DataMeasurementsClusterCache | false | [
"license:apache-2.0"
]
| null | 0 | 0 |
strombergnlp/named_timexes | false | [
"task_categories:token-classification",
"annotations_creators:expert-generated",
"language_creators:found",
"multilinguality:monolingual",
"size_categories:100K<n<1M",
"source_datasets:original",
"language:en",
"license:cc-by-4.0"
]
| This is a dataset annotated for _named temporal expression_ chunks.
The
commonest temporal expressions typically
contain date and time words, like April or
hours. Research into recognising and interpreting these typical expressions is mature in many languages. However, there is
a class of expressions that are less typical,
very varied, and difficult to automatically
interpret. These indicate dates and times,
but are harder to detect because they often do not contain time words and are not
used frequently enough to appear in conventional temporally-annotated corpora –
for example *Michaelmas* or *Vasant Panchami*.
For more details see [https://aclanthology.org/R13-1015.pdf](https://aclanthology.org/R13-1015.pdf) | 2 | 1 |
strombergnlp/itu_faroese_danish | false | [
"task_categories:translation",
"annotations_creators:expert-generated",
"language_creators:found",
"multilinguality:multilingual",
"size_categories:1K<n<10K",
"source_datasets:original",
"language:da",
"language:fo",
"license:cc-by-4.0",
"arxiv:2206.08727",
"doi:10.57967/hf/0515"
]
| \ | 0 | 3 |
jontooy/Flickr8k-Image-Features | false | [
"language:ar"
]
| null | 2 | 0 |
najoungkim/edge_probing_dep_ewt_line_by_line | false | []
| null | 0 | 0 |
rajeshvarma/sla | false | []
| null | 0 | 0 |
ontophagic/experiment | false | []
| null | 0 | 0 |
myradeng/cs230-news | false | []
| null | 0 | 0 |
domenicrosati/TruthfulQA | false | [
"task_categories:question-answering",
"task_ids:extractive-qa",
"task_ids:open-domain-qa",
"task_ids:closed-domain-qa",
"annotations_creators:expert-generated",
"language_creators:expert-generated",
"multilinguality:monolingual",
"size_categories:n<1K",
"source_datasets:original",
"language:en",
"license:apache-2.0",
"arxiv:2109.07958"
]
| null | 2 | 2 |
nateraw/hf-hub-walkthrough-assets | false | [
"license:mit"
]
| null | 2 | 0 |
vijaygoriya/SentimentDetection | false | []
| null | 0 | 0 |
cradle-bio/TAPE_stability_clusters | false | []
| null | 0 | 0 |
danieleV9H/en_corpora_parliament_processed | false | []
| null | 0 | 0 |
ncats/EpiSet4NER-v2 | false | [
"task_categories:token-classification",
"task_ids:named-entity-recognition",
"annotations_creators:machine-generated",
"annotations_creators:expert-generated",
"language_creators:found",
"language_creators:expert-generated",
"multilinguality:monolingual",
"size_categories:100K<n<1M",
"source_datasets:original",
"language:en",
"license:other",
"epidemiology",
"rare disease",
"named entity recognition",
"NER",
"NIH"
]
| **REWRITE**
EpiSet4NER-2 is a dataset generated from 620 rare disease abstracts labeled using statistical and rule-base methods.
For more details see *INSERT PAPER* and https://github.com/ncats/epi4GARD/tree/master/EpiExtract4GARD#epiextract4gard | 0 | 0 |
reallycarlaost/emobank-single-binary | false | []
| null | 0 | 0 |
strombergnlp/rumoureval_2019 | false | [
"task_categories:text-classification",
"task_ids:fact-checking",
"annotations_creators:crowdsourced",
"language_creators:found",
"multilinguality:monolingual",
"size_categories:10K<n<100K",
"language:en",
"license:cc-by-4.0",
"stance-detection",
"arxiv:1809.06683"
]
|
Stance prediction task in English. The goal is to predict whether a given reply to a claim either supports, denies, questions, or simply comments on the claim. Ran as a SemEval task in 2019. | 41 | 2 |
HugoLaurencon/libri_light_bytes | false | []
| Libri-light is a large dataset of 60K hours of unlabelled speech from audiobooks in English.
It is a benchmark for the training of automatic speech recognition (ASR) systems with limited or no supervision. | 0 | 0 |
met/customAmhTig | false | []
| null | 0 | 0 |
nickmuchi/rugd-dataset-all | false | []
| null | 7 | 0 |
beery/Dutch-SQuAD | false | []
| null | 0 | 1 |
piotr-rybak/poleval-passage-retrieval | false | []
| null | 0 | 1 |
cradle-bio/tape-fluorescence | false | []
| null | 0 | 1 |
cradle-bio/tape-fluorescence-processed | false | []
| null | 0 | 1 |
Roh/ryanspeech | false | [
"task_categories:automatic-speech-recognition",
"task_categories:audio-classification",
"annotations_creators:expert-generated",
"language_creators:expert-generated",
"multilinguality:monolingual",
"source_datasets:original",
"language:en",
"license:cc-by-nc-4.0",
"arxiv:2106.08468"
]
| RyanSpeech is a new speech corpus for research on automated text-to-speech (TTS) systems.
Publicly available TTS corpora are often noisy, recorded with multiple speakers, or do not have quality male speech data.
In order to meet the need for a high-quality, publicly available male speech corpus within the field of speech recognition, we designed and created RyanSpeech.
We have derived RyanSpeech’s textual materials from real-world conversational settings, and these materials contain over 10 hours of a professional male voice actor’s speech recorded at 44.1 kHz.
Both the design and pipeline of this corpus creation make RyanSpeech ideal for developing TTS systems in real-world applications.
To provide a baseline for future research, protocols, and benchmarks, we trained 4 state-of-the-art speech models and a vocoder on RyanSpeech.
The results show 3.36 in mean opinion scores (MOS) in our best model. We have made the trained models publicly available for download. | 1 | 3 |
thomagram/StyleNeRF_Datasets | false | [
"license:cc-by-4.0"
]
| null | 2 | 0 |
HuggingFaceM4/webvid | false | []
| WebVid is a large-scale dataset of video clips with textual descriptions sourced from the web. The videos are diverse and rich in their content. | 0 | 3 |
colabtrial17/common_voice_ar_v9 | false | []
| null | 0 | 0 |
HuggingFaceM4/something_something_v2 | false | [
"task_categories:other",
"annotations_creators:crowdsourced",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"size_categories:100K<n<1M",
"source_datasets:original",
"language:en",
"license:other",
"arxiv:1706.04261"
]
| The Something-Something dataset (version 2) is a collection of 220,847 labeled video clips of humans performing pre-defined, basic actions with everyday objects. It is designed to train machine learning models in fine-grained understanding of human hand gestures like putting something into something, turning something upside down and covering something with something. | 12 | 0 |
SetFit/toxic_conversations_50k | false | []
| null | 28 | 0 |
pensieves/newsgroups | false | [
"license:mit"
]
| null | 3 | 0 |
Leyo/ActivityNet_Captions | false | [
"task_ids:closed-domain-qa",
"annotations_creators:expert-generated",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"size_categories:10k<n<100K",
"source_datasets:original",
"language:en",
"license:other",
"arxiv:1705.00754"
]
| The ActivityNet Captions dataset connects videos to a series of temporally annotated sentence descriptions.
Each sentence covers an unique segment of the video, describing multiple events that occur. These events
may occur over very long or short periods of time and are not limited in any capacity, allowing them to
co-occur. On average, each of the 20k videos contains 3.65 temporally localized sentences, resulting in
a total of 100k sentences. We find that the number of sentences per video follows a relatively normal
distribution. Furthermore, as the video duration increases, the number of sentences also increases.
Each sentence has an average length of 13.48 words, which is also normally distributed. You can find more
details of the dataset under the ActivityNet Captions Dataset section, and under supplementary materials
in the paper. | 3 | 0 |
gaganpathre/amgerindaf | false | [
"license:mit"
]
| null | 2 | 0 |
forcorpus/WikiCybersecurity | false | [
"license:cc-by-4.0"
]
| null | 0 | 0 |
Jyotika02/lala | false | []
| null | 0 | 0 |
tomekkorbak/pile-pii | false | []
| null | 0 | 0 |
Evelyn18/becas | false | []
| automatic translation of the Stanford Question Answering Dataset (SQuAD) v2 into Spanish | 0 | 0 |
HuggingFaceM4/vatex | false | []
| VATEX is a large-scale multilingual video description dataset, which contains over 41,250 videos and 825,000 captions
in both English and Chinese. VATEX is characterized by the following major unique properties.
First, it contains both English and Chinese descriptions at scale, which can support many multilingual studies
that are constrained by monolingual datasets. Secondly, VATEX has a high number of clip-sentence pairs
with each video clip annotated with multiple unique sentences, and every caption is unique in
the whole corpus. Third, VATEX contains more comprehensive yet representative video content,
covering 600 human activities in total. Furthermore, both the English and Chinese corpora in
VATEX are lexically richer and thus allow more natural and diverse caption generation. | 5 | 0 |
gagan3012/test | false | []
| null | 0 | 0 |
hxue3/autotrain-data-code_summarization | false | [
"language:en"
]
| null | 6 | 0 |
gagan3012/test1 | false | []
| null | 0 | 0 |
Barik/testt | false | []
| null | 0 | 0 |
itsroadtrip/test-dataset | false | [
"license:zlib"
]
| null | 0 | 0 |
mrojas/wl-disease | false | []
| null | 0 | 0 |
myradeng/cs230-news-unfiltered | false | []
| null | 0 | 0 |
morteza/cogtext | false | [
"task_categories:text-classification",
"task_ids:topic-classification",
"task_ids:semantic-similarity-classification",
"language_creators:found",
"language_creators:expert-generated",
"multilinguality:monolingual",
"size_categories:100K<n<1M",
"source_datasets:original",
"language:en",
"license:cc-by-4.0",
"Cognitive Control",
"PubMed",
"arxiv:2203.11016",
"doi:10.57967/hf/0548"
]
| CogText dataset contains a collection of PubMed abstracts, along with their GPT-3 embeddings and topic embeddings. | 0 | 0 |
PoolC/news-corpus-divided | false | []
| null | 0 | 0 |
reallycarlaost/emo-valence-5 | false | []
| null | 0 | 0 |
hidude562/XS-word-rating | false | []
| null | 0 | 0 |
reallycarlaost/emobank | false | []
| null | 0 | 0 |
pinecone/reddit-qa | false | []
| null | 2 | 1 |
nestoralvaro/data_prep_2021_12_26___t1_7.csv | false | []
| null | 0 | 0 |
ahmedlone123/sv_corpora_parliament_processed | false | []
| null | 0 | 0 |
iohadrubin/vqvae | false | []
| null | 0 | 0 |
Chr0my/freesound.org | false | [
"size_categories:100K<n<1M",
"language:en",
"music"
]
| null | 1 | 8 |
nouamanetazi/test111 | false | []
| MASSIVE is a parallel dataset of > 1M utterances across 51 languages with annotations
for the Natural Language Understanding tasks of intent prediction and slot annotation.
Utterances span 60 intents and include 55 slot types. MASSIVE was created by localizing
the SLURP dataset, composed of general Intelligent Voice Assistant single-shot interactions. | 0 | 0 |
mteb/amazon_massive_scenario | false | []
| MASSIVE is a parallel dataset of > 1M utterances across 51 languages with annotations
for the Natural Language Understanding tasks of intent prediction and slot annotation.
Utterances span 60 intents and include 55 slot types. MASSIVE was created by localizing
the SLURP dataset, composed of general Intelligent Voice Assistant single-shot interactions. | 971 | 0 |
mteb/amazon_massive_intent | false | [
"language:af",
"language:am",
"language:ar",
"language:az",
"language:bn",
"language:cy",
"language:da",
"language:de",
"language:el",
"language:en",
"language:es",
"language:fa",
"language:fr",
"language:he",
"language:hi",
"language:hu",
"language:hy",
"language:id",
"language:is",
"language:it",
"language:ja",
"language:jv",
"language:ka",
"language:km",
"language:kn",
"language:ko",
"language:lv",
"language:ml",
"language:mn",
"language:ms",
"language:my",
"language:nb",
"language:nl",
"language:pl",
"language:pt",
"language:ro",
"language:ru",
"language:sl",
"language:sq",
"language:sv",
"language:sw",
"language:ta",
"language:te",
"language:th",
"language:tl",
"language:tr",
"language:ur",
"language:vi",
"language:zh"
]
| MASSIVE is a parallel dataset of > 1M utterances across 51 languages with annotations
for the Natural Language Understanding tasks of intent prediction and slot annotation.
Utterances span 60 intents and include 55 slot types. MASSIVE was created by localizing
the SLURP dataset, composed of general Intelligent Voice Assistant single-shot interactions. | 920 | 5 |
fuliucansheng/kdd2022 | false | []
| KDD2022
Task1: Query Product Ranking
Task2: Multiclass Product Classification
Task3: Product Substitute Identification | 0 | 0 |
Moo/korean-parallel-corpora | false | [
"task_categories:translation",
"annotations_creators:other",
"language_creators:other",
"multilinguality:multilingual",
"multilinguality:translation",
"size_categories:10K<n<100K",
"source_datasets:original",
"language:ko",
"language:en",
"license:cc-by-sa-3.0"
]
| null | 109 | 1 |
jk-gjom/covid19weibo | false | [
"license:afl-3.0"
]
| null | 0 | 0 |
filwsyl/test | false | []
| null | 0 | 0 |
Sultannn/id_recipe | false | [
"task_categories:text2text-generation",
"task_categories:text-generation",
"task_ids:language-modeling",
"annotations_creators:no-annotation",
"language_creators:found",
"multilinguality:monolingual",
"size_categories:10K<n<100K",
"source_datasets:original",
"language:id",
"license:mit"
]
| null | 0 | 0 |
Kastuks/embeddings_bh | false | []
| null | 0 | 0 |
ntt123/vi-text | false | [
"license:cc-by-nc-4.0"
]
| null | 0 | 0 |
mwritescode/slither-audited-smart-contracts | false | [
"task_categories:text-classification",
"task_categories:text-generation",
"task_ids:multi-label-classification",
"task_ids:multi-input-text-classification",
"task_ids:language-modeling",
"annotations_creators:other",
"language_creators:found",
"multilinguality:monolingual",
"size_categories:100K<n<1M",
"source_datasets:original",
"language:en",
"license:mit"
]
| This dataset contains source code and deployed bytecode for Solidity Smart Contracts that have been verified on Etherscan.io, along with a classification of their vulnerabilities according to the Slither static analysis framework. | 935 | 2 |
wdc/products-2017 | false | [
"task_categories:text-classification",
"annotations_creators:weak supervision",
"annotations_creators:expert-generated",
"multilinguality:monolingual",
"size_categories:1K<n<10K",
"size_categories:10K<n<100K",
"source_datasets:original",
"language:en",
"license:unknown"
]
| Many e-shops have started to mark-up product data within their HTML pages using the schema.org vocabulary. The Web Data Commons project regularly extracts such data from the Common Crawl, a large public web crawl. The Web Data Commons Training and Test Sets for Large-Scale Product Matching contain product offers from different e-shops in the form of binary product pairs (with corresponding label "match" or "no match")
In order to support the evaluation of machine learning-based matching methods, the data is split into training, validation and test set. We provide training and validation sets in four different sizes for four product categories. The labels of the test sets were manually checked while those of the training sets were derived using shared product identifiers from the Web via weak supervision.
The data stems from the WDC Product Data Corpus for Large-Scale Product Matching - Version 2.0 which consists of 26 million product offers originating from 79 thousand websites. | 50 | 0 |
Abdelrahman-Rezk/Arabic_Dialect_Identification | false | [
"arxiv:2005.06557"
]
| null | 0 | 0 |
sharfo2/Wearable_Activity | false | []
| null | 0 | 0 |
augustoortiz/Test | false | [
"license:afl-3.0"
]
| null | 0 | 0 |
M-CLIP/ImageCaptions-7M-Translations | false | []
| null | 1 | 0 |
zchengc/wsb | false | []
| null | 0 | 0 |
araanbranco/cryptoweebs | false | []
| null | 0 | 0 |
mdroth/transformers_issues_labels | false | []
| null | 0 | 0 |
J3romee/CLEAR | false | [
"arxiv:2106.06147"
]
| null | 0 | 0 |
allenai/wmt22_african | false | []
| null | 409 | 3 |
erickdp/csvData | false | []
| null | 0 | 0 |
zchengc/wsb_full | false | []
| null | 0 | 0 |
STAM/agricore | false | [
"license:mit"
]
| null | 2 | 0 |
HuggingFaceM4/ActivitiyNet_Captions | false | [
"task_ids:closed-domain-qa",
"annotations_creators:expert-generated",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"size_categories:10k<n<100K",
"source_datasets:original",
"language:en",
"license:other",
"arxiv:1705.00754"
]
| null | 5 | 0 |
HuggingFaceM4/TGIF | false | [
"task_categories:question-answering",
"task_categories:visual-question-answering",
"task_ids:closed-domain-qa",
"annotations_creators:expert-generated",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"size_categories:100K<n<1M",
"source_datasets:original",
"language:en",
"license:other",
"arxiv:1604.02748"
]
| The Tumblr GIF (TGIF) dataset contains 100K animated GIFs and 120K sentences describing visual content of the animated GIFs.
The animated GIFs have been collected from Tumblr, from randomly selected posts published between May and June of 2015.
We provide the URLs of animated GIFs in this release. The sentences are collected via crowdsourcing, with a carefully designed
annotation interface that ensures high quality dataset. We provide one sentence per animated GIF for the training and validation splits,
and three sentences per GIF for the test split. The dataset shall be used to evaluate animated GIF/video description techniques. | 1 | 2 |
mteb/banking77 | false | [
"language:en"
]
| null | 3,133 | 0 |
EMBO/sd-nlp-non-tokenized | false | [
"task_categories:token-classification",
"task_categories:text-classification",
"task_ids:multi-class-classification",
"task_ids:named-entity-recognition",
"task_ids:parsing",
"annotations_creators:expert-generated",
"language_creators:expert-generated",
"multilinguality:monolingual",
"size_categories:10K<n<100K",
"language:en",
"license:cc-by-4.0"
]
| This dataset is based on the SourceData database and is intented to facilitate training of NLP tasks in the cell and molecualr biology domain. | 3 | 0 |
Iyanuoluwa/YOSM | false | [
"task_categories:text-classification",
"task_ids:sentiment-analysis",
"annotations_creators:expert-generated",
"language_creators:found",
"multilinguality:monolingual",
"size_categories:1K<n<10K",
"source_datasets:original",
"language:yo",
"license:unknown",
"movie reviews",
"nollywood",
"arxiv:2204.09711"
]
| YOSM: A NEW YORUBA SENTIMENT CORPUS FOR MOVIE REVIEWS
- Yoruba | 0 | 0 |
godwinh/fongbe-asr | false | [
"license:apache-2.0"
]
| null | 0 | 0 |
dcfidalgo/test_datasetdict | false | []
| null | 0 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.