---
license: cc-by-nc-sa-4.0
dataset_info:
- config_name: anl-news
features:
- name: text
dtype: string
splits:
- name: train
num_bytes: 1500707584
num_examples: 236443
download_size: 773593491
dataset_size: 1500707584
- config_name: azwiki
features:
- name: id
dtype: int64
- name: text
dtype: string
- name: title
dtype: string
splits:
- name: train
num_bytes: 360206818
num_examples: 129433
download_size: 204669909
dataset_size: 360206818
- config_name: bhos
features:
- name: title
dtype: string
- name: text
dtype: string
- name: id
dtype: int64
splits:
- name: train
num_bytes: 736156688
num_examples: 488390
download_size: 417517945
dataset_size: 736156688
- config_name: elite-blogs
features:
- name: id
dtype: int64
- name: source
dtype: string
- name: text
dtype: string
splits:
- name: train
num_bytes: 7625261
num_examples: 755
download_size: 4031201
dataset_size: 7625261
- config_name: elite-books
features:
- name: text
dtype: string
- name: id
dtype: int64
splits:
- name: train
num_bytes: 38894982
num_examples: 104
download_size: 22016093
dataset_size: 38894982
- config_name: eqanun
features:
- name: text
dtype: string
- name: title
dtype: string
- name: id
dtype: int64
splits:
- name: train
num_bytes: 404638424
num_examples: 53656
download_size: 149151917
dataset_size: 404638424
- config_name: mediocore-books
features:
- name: ID
dtype: string
- name: ' Metadata'
dtype: string
- name: text
dtype: string
splits:
- name: train
num_bytes: 2908183660
num_examples: 7807263
download_size: 695603782
dataset_size: 2908183660
- config_name: translated-enwiki
features:
- name: text
dtype: string
splits:
- name: train
num_bytes: 1629190007
num_examples: 280465
download_size: 919526548
dataset_size: 1629190007
configs:
- config_name: anl-news
data_files:
- split: train
path: anl-news/train-*
- config_name: azwiki
data_files:
- split: train
path: azwiki/train-*
- config_name: bhos
data_files:
- split: train
path: bhos/train-*
- config_name: elite-blogs
data_files:
- split: train
path: elite-blogs/train-*
- config_name: elite-books
data_files:
- split: train
path: elite-books/train-*
- config_name: eqanun
data_files:
- split: train
path: eqanun/train-*
- config_name: mediocore-books
data_files:
- split: train
path: mediocore-books/train-*
- config_name: translated-enwiki
data_files:
- split: train
path: translated-enwiki/train-*
task_categories:
- fill-mask
language:
- az
size_categories:
- 1M<n<10M
---
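
Each config listed in the metadata above is stored as Parquet shards under `<config>/train-*` and exposes a single `train` split. As a minimal sketch of how one config can be loaded with the `datasets` library (the repository id below is a placeholder assumption, since it is not stated in this card):

```python
from datasets import load_dataset

# NOTE: "your-org/your-dataset" is a placeholder for the actual Hub
# repository id of this dataset; replace it before running.
# Every config in the metadata above has a single "train" split.
azwiki = load_dataset("your-org/your-dataset", "azwiki", split="train")

# The azwiki config has "id", "text", and "title" columns.
print(azwiki[0]["title"])
print(azwiki[0]["text"][:200])
```

The other configs (`anl-news`, `bhos`, `eqanun`, `elite-books`, etc.) can be loaded the same way by swapping the config name; note that their column sets differ slightly, as listed in the metadata above.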
If you use this dataset, please cite us:
@inproceedings{isbarov-etal-2024-open,
title = "Open foundation models for {A}zerbaijani language",
author = "Isbarov, Jafar and
Huseynova, Kavsar and
Mammadov, Elvin and
Hajili, Mammad and
Ataman, Duygu",
editor = {Ataman, Duygu and
Derin, Mehmet Oguz and
Ivanova, Sardana and
K{\"o}ksal, Abdullatif and
S{\"a}lev{\"a}, Jonne and
Zeyrek, Deniz},
booktitle = "Proceedings of the First Workshop on Natural Language Processing for Turkic Languages (SIGTURK 2024)",
month = aug,
year = "2024",
address = "Bangkok, Thailand and Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.sigturk-1.2",
pages = "18--28",
abstract = "The emergence of multilingual large language models has enabled the development of language understanding and generation systems in Azerbaijani. However, most of the production-grade systems rely on cloud solutions, such as GPT-4. While there have been several attempts to develop open foundation models for Azerbaijani, these works have not found their way into common use due to a lack of systemic benchmarking. This paper encompasses several lines of work that promote open-source foundation models for Azerbaijani. We introduce (1) a large text corpus for Azerbaijani, (2) a family of encoder-only language models trained on this dataset, (3) labeled datasets for evaluating these models, and (4) extensive evaluation that covers all major open-source models with Azerbaijani support.",
}