---
dataset_info:
- config_name: arxiv
features:
- name: content
dtype: string
splits:
- name: train
num_bytes: 89223183645
num_examples: 1558306
download_size: 40911186876
dataset_size: 89223183645
- config_name: documentation
features:
- name: project
dtype: string
- name: source
dtype: string
- name: language
dtype: string
- name: content
dtype: string
splits:
- name: train
num_bytes: 5421472234
num_examples: 59733
download_size: 1853451922
dataset_size: 5421472234
- config_name: ir_cpp
features:
- name: __index_level_0__
dtype: string
- name: id
dtype: string
- name: content
dtype: string
splits:
- name: train
num_bytes: 102081135272
num_examples: 2916655
download_size: 26047978422
dataset_size: 102081135272
- config_name: ir_low_resource
features:
- name: __index_level_0__
dtype: string
- name: id
dtype: string
- name: content
dtype: string
- name: size
dtype: int64
splits:
- name: train
num_bytes: 10383382043
num_examples: 393988
download_size: 2464513603
dataset_size: 10383382043
- config_name: ir_python
features:
- name: id
dtype: string
- name: content
dtype: string
splits:
- name: train
num_bytes: 12446664464
num_examples: 154507
download_size: 3039297625
dataset_size: 12446664464
- config_name: ir_rust
features:
- name: __index_level_0__
dtype: string
- name: id
dtype: string
- name: content
dtype: string
splits:
- name: train
num_bytes: 4764927851
num_examples: 32720
download_size: 1254786199
dataset_size: 4764927851
- config_name: issues
features:
- name: repo_name
dtype: string
- name: content
dtype: string
- name: issue_id
dtype: string
splits:
- name: train
num_bytes: 31219575534.38484
num_examples: 15549682
download_size: 16483899047
dataset_size: 31219575534.38484
- config_name: jupyter_scripts
features:
- name: repo_name
dtype: string
- name: content
dtype: string
- name: path
dtype: string
splits:
- name: train
num_bytes: 40474200129.21856
num_examples: 3529416
download_size: 17513608795
dataset_size: 40474200129.21856
- config_name: jupyter_structured
features:
- name: repo_name
dtype: string
- name: content
dtype: string
- name: path
dtype: string
splits:
- name: train
num_bytes: 39594556099.87017
num_examples: 4252889
download_size: 16889239760
dataset_size: 39594556099.87017
- config_name: kaggle
features:
- name: content
dtype: string
- name: file_id
dtype: string
splits:
- name: train
num_bytes: 5228745262
num_examples: 580195
download_size: 2234440007
dataset_size: 5228745262
- config_name: lhq
features:
- name: content
dtype: string
- name: metadata
struct:
- name: difficulty
dtype: string
- name: field
dtype: string
- name: topic
dtype: string
splits:
- name: train
num_bytes: 751273849
num_examples: 7037500
download_size: 272913202
dataset_size: 751273849
- config_name: owm
features:
- name: url
dtype: string
- name: date
dtype: timestamp[s]
- name: metadata
dtype: string
- name: content
dtype: string
splits:
- name: train
num_bytes: 56294728333
num_examples: 6315233
download_size: 27160071916
dataset_size: 56294728333
- config_name: pull_requests
features:
- name: content
dtype: string
- name: guid
dtype: string
- name: ctx-size
dtype: int64
splits:
- name: train
num_bytes: 52871142902.97541
num_examples: 2555843
download_size: 16638673686
dataset_size: 52871142902.97541
- config_name: stackoverflow
features:
- name: date
dtype: string
- name: nb_tokens
dtype: int64
- name: text_size
dtype: int64
- name: content
dtype: string
splits:
- name: train
num_bytes: 35548199612
num_examples: 10404628
download_size: 17008831030
dataset_size: 35548199612
- config_name: wikipedia
features:
- name: content
dtype: string
- name: meta
dtype: string
- name: red_pajama_subset
dtype: string
splits:
- name: train
num_bytes: 21572720540
num_examples: 6630651
download_size: 12153445493
dataset_size: 21572720540
configs:
- config_name: arxiv
data_files:
- split: train
path: arxiv/train-*
- config_name: documentation
data_files:
- split: train
path: documentation/train-*
- config_name: ir_cpp
data_files:
- split: train
path: ir_cpp/train-*
- config_name: ir_low_resource
data_files:
- split: train
path: ir_low_resource/train-*
- config_name: ir_python
data_files:
- split: train
path: ir_python/train-*
- config_name: ir_rust
data_files:
- split: train
path: ir_rust/train-*
- config_name: issues
data_files:
- split: train
path: issues/train-*
- config_name: jupyter_scripts
data_files:
- split: train
path: jupyter_scripts/train-*
- config_name: jupyter_structured
data_files:
- split: train
path: jupyter_structured/train-*
- config_name: kaggle
data_files:
- split: train
path: kaggle/train-*
- config_name: lhq
data_files:
- split: train
path: lhq/train-*
- config_name: owm
data_files:
- split: train
path: owm/train-*
- config_name: pull_requests
data_files:
- split: train
path: pull_requests/train-*
- config_name: stackoverflow
data_files:
- split: train
path: stackoverflow/train-*
- config_name: wikipedia
data_files:
- split: train
path: wikipedia/train-*
---
# StarCoder2data extras
This dataset contains the extra sources (beyond pure The Stack v2 code) used to train the StarCoder2 family of models. It contains the following subsets:
- Jupyter Scripts: Jupyter notebooks from The Stack v2 converted to scripts.
- Jupyter Structured: Jupyter notebooks from The Stack v2 converted to a structured format of code, markdown & output cells.
- Kaggle: Kaggle notebooks from the Meta-Kaggle-Code dataset, converted to scripts and prefixed with information on the Kaggle datasets used in the notebook. The file headers have a similar format to Jupyter Structured, but the code content is a single script.
- Pull Requests: processed GitHub Pull Requests.
- StackOverflow: StackOverflow conversations from this [StackExchange dump](https://archive.org/details/stackexchange).
- Issues: processed GitHub issues.
- OWM: the OpenWebMath dataset.
- LHQ: Leandro's High-Quality dataset, a compilation of high-quality code files from APPS-train, CodeContests, GSM8K-train, GSM8K-SciRel, DeepMind-Mathematics, Rosetta-Code, MultiPL-T, ProofSteps, and ProofSteps-lean.
- Wiki: the English subset of the Wikipedia dump in RedPajama.
- Arxiv: the ArXiv subset of the RedPajama dataset, further processed to retain only LaTeX source files and to strip preambles, comments, macros, and bibliographies from them.
- IR_language: intermediate representations of code in Python, Rust, C++, and other low-resource languages (the ir_python, ir_rust, ir_cpp, and ir_low_resource configs above).
- Documentation: documentation of popular libraries.
For more details on the processing of each subset, check the StarCoder2 paper or The Stack v2 GitHub repository.
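Each subset listed above corresponds to one of the configs declared in the metadata, each with a single train split. Below is a minimal loading sketch using the 🤗 `datasets` library; the repository id is an assumption, since this card does not spell it out:

```python
from datasets import load_dataset

# Assumed repository id -- not stated explicitly on this card.
REPO = "bigcode/starcoder2data-extras"

# Load one subset by its config name; every config has a single "train" split.
ds = load_dataset(REPO, "jupyter_scripts", split="train")
print(ds[0]["content"][:200])

# Several subsets weigh tens of GB to download (e.g. arxiv, ir_cpp),
# so streaming avoids materializing the whole config on disk.
stream = load_dataset(REPO, "arxiv", split="train", streaming=True)
first = next(iter(stream))
print(first["content"][:200])
```

Note that the available columns differ per config (compare the feature lists in the metadata): most subsets expose `content` plus identifiers such as `repo_name` or `id`, while lhq nests `difficulty`, `field`, and `topic` under a `metadata` struct.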