---
dataset_info:
- config_name: arxiv
features:
- name: content
dtype: string
splits:
- name: train
num_bytes: 89223183645.0
num_examples: 1558306
download_size: 40911186876
dataset_size: 89223183645.0
- config_name: documentation
features:
- name: project
dtype: string
- name: source
dtype: string
- name: language
dtype: string
- name: content
dtype: string
splits:
- name: train
num_bytes: 5421472234.0
num_examples: 59733
download_size: 1853451922
dataset_size: 5421472234.0
- config_name: ir_cpp
features:
- name: __index_level_0__
dtype: string
- name: id
dtype: string
- name: content
dtype: string
splits:
- name: train
num_bytes: 102081135272.0
num_examples: 2916655
download_size: 26047978422
dataset_size: 102081135272.0
- config_name: ir_low_resource
features:
- name: __index_level_0__
dtype: string
- name: id
dtype: string
- name: content
dtype: string
- name: size
dtype: int64
splits:
- name: train
num_bytes: 10383382043.0
num_examples: 393988
download_size: 2464513603
dataset_size: 10383382043.0
- config_name: ir_python
features:
- name: id
dtype: string
- name: content
dtype: string
splits:
- name: train
num_bytes: 12446664464.0
num_examples: 154507
download_size: 3039297625
dataset_size: 12446664464.0
- config_name: ir_rust
features:
- name: __index_level_0__
dtype: string
- name: id
dtype: string
- name: content
dtype: string
splits:
- name: train
num_bytes: 4764927851.0
num_examples: 32720
download_size: 1254786199
dataset_size: 4764927851.0
- config_name: issues
features:
- name: repo_name
dtype: string
- name: content
dtype: string
- name: issue_id
dtype: string
splits:
- name: train
num_bytes: 31219575534.38484
num_examples: 15549682
download_size: 16483899047
dataset_size: 31219575534.38484
- config_name: kaggle
features:
- name: content
dtype: string
- name: file_id
dtype: string
splits:
- name: train
num_bytes: 5228745262.0
num_examples: 580195
download_size: 2234440007
dataset_size: 5228745262.0
- config_name: lhq
features:
- name: content
dtype: string
- name: metadata
struct:
- name: difficulty
dtype: string
- name: field
dtype: string
- name: topic
dtype: string
splits:
- name: train
num_bytes: 751273849.0
num_examples: 7037500
download_size: 272913202
dataset_size: 751273849.0
- config_name: owm
features:
- name: url
dtype: string
- name: date
dtype: timestamp[s]
- name: metadata
dtype: string
- name: content
dtype: string
splits:
- name: train
num_bytes: 56294728333.0
num_examples: 6315233
download_size: 27160071916
dataset_size: 56294728333.0
- config_name: stackoverflow
features:
- name: date
dtype: string
- name: nb_tokens
dtype: int64
- name: text_size
dtype: int64
- name: content
dtype: string
splits:
- name: train
num_bytes: 35548199612.0
num_examples: 10404628
download_size: 17008831030
dataset_size: 35548199612.0
- config_name: wikipedia
features:
- name: content
dtype: string
- name: meta
dtype: string
- name: red_pajama_subset
dtype: string
splits:
- name: train
num_bytes: 21572720540.0
num_examples: 6630651
download_size: 12153445493
dataset_size: 21572720540.0
configs:
- config_name: arxiv
data_files:
- split: train
path: arxiv/train-*
- config_name: documentation
data_files:
- split: train
path: documentation/train-*
- config_name: ir_cpp
data_files:
- split: train
path: ir_cpp/train-*
- config_name: ir_low_resource
data_files:
- split: train
path: ir_low_resource/train-*
- config_name: ir_python
data_files:
- split: train
path: ir_python/train-*
- config_name: ir_rust
data_files:
- split: train
path: ir_rust/train-*
- config_name: issues
data_files:
- split: train
path: issues/train-*
- config_name: kaggle
data_files:
- split: train
path: kaggle/train-*
- config_name: lhq
data_files:
- split: train
path: lhq/train-*
- config_name: owm
data_files:
- split: train
path: owm/train-*
- config_name: stackoverflow
data_files:
- split: train
path: stackoverflow/train-*
- config_name: wikipedia
data_files:
- split: train
path: wikipedia/train-*
---
# StarCoder2 Extras
This is the dataset of extra sources (besides Stack v2 code data) used to train the [StarCoder2](https://arxiv.org/abs/2402.19173) family of models. It contains the following subsets:
- Kaggle (`kaggle`): Kaggle notebooks from [Meta-Kaggle-Code](https://www.kaggle.com/datasets/kaggle/meta-kaggle-code) dataset, converted to scripts and prefixed with information on the Kaggle datasets used in the notebook. The file headers have a similar format to Jupyter Structured but the code content is only one single script.
- StackOverflow (`stackoverflow`): stackoverflow conversations from this [StackExchange dump](https://archive.org/details/stackexchange).
- Issues (`issues`): processed GitHub issues, same as the Stack v1 issues.
- OWM (`owm`): the [Open-Web-Math](https://huggingface.co/datasets/open-web-math/open-web-math) dataset.
- LHQ (`lhq`): Leandro's High-Quality dataset, a compilation of high-quality code files from: APPS-train, CodeContests, GSM8K-train, GSM8K-SciRel, DeepMind-Mathematics, Rosetta-Code, MultiPL-T, ProofSteps, ProofSteps-lean.
- Wiki (`wikipedia`): the English subset of the Wikipedia dump in [RedPajama](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-1T).
- ArXiv (`arxiv`): the ArXiv subset of the [RedPajama](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-1T) dataset, further processed to retain only LaTeX source files and to remove preambles, comments, macros, and bibliographies from these files.
- IR_language (`ir_cpp`, `ir_low_resource`, `ir_python`, `ir_rust`): intermediate representations of Python, Rust, C++, and other low-resource languages.
- Documentation (`documentation`): documentation of popular libraries.
For more details on the processing of each subset, check the [StarCoder2 paper](https://arxiv.org/abs/2402.19173) or The Stack v2 [GitHub repository](https://github.com/bigcode-project/the-stack-v2/).
## Usage
```python
from datasets import load_dataset
# replace `kaggle` with one of the config names listed above
ds = load_dataset("bigcode/starcoder2data-extras", "kaggle", split="train")
```
## Citation
```
@article{lozhkov2024starcoder,
title={Starcoder 2 and the stack v2: The next generation},
author={Lozhkov, Anton and Li, Raymond and Allal, Loubna Ben and Cassano, Federico and Lamy-Poirier, Joel and Tazi, Nouamane and Tang, Ao and Pykhtar, Dmytro and Liu, Jiawei and Wei, Yuxiang and others},
journal={arXiv preprint arXiv:2402.19173},
year={2024}
}
``` |