Update README.md
README.md
CHANGED
@@ -80,6 +80,42 @@ for config in ("random", "stepwise", "gaussian"):

```python
        print(config, sample)
        break
```
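For reference, the context lines above are the tail of the streaming example earlier in the README. A minimal sketch of what that loop presumably looks like, assuming the standard `datasets` streaming API and the config names shown in the hunk header (the exact call in the README may differ):

```python
from datasets import load_dataset

# Hypothetical reconstruction of the streaming example this hunk sits in:
# iterate over the three sampling configs and print one sample from each.
for config in ("random", "stepwise", "gaussian"):
    mc4es = load_dataset(
        "bertin-project/mc4-es-sampled",
        config,
        split="train",
        streaming=True,
    )
    for sample in mc4es:
        print(config, sample)
        break
```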

Alternatively, you can bypass the `datasets` library and download a specific config directly (roughly 1.5 hours, depending on your connection), in the same order used to pre-train the BERTIN models, into a single massive (~200 GB) JSON-lines file:

```python
import gzip
import io
import json
import sys

import requests
from tqdm import tqdm

_DATA_URL_TRAIN = "https://huggingface.co/datasets/bertin-project/mc4-es-sampled/resolve/main/mc4-es-train-50M-{config}-shard-{index:04d}-of-{n_shards:04d}.json.gz"


def main(config="stepwise"):
    # Build the URLs of the 1024 gzipped shards for the requested config.
    data_urls = [
        _DATA_URL_TRAIN.format(
            config=config,
            index=index + 1,
            n_shards=1024,
        )
        for index in range(1024)
    ]
    # Download each shard, decompress it in memory and append its lines
    # to a single JSON-lines output file.
    with open(f"mc4-es-train-50M-{config}.jsonl", "w") as f:
        for data_url in tqdm(data_urls):
            response = requests.get(data_url)
            bio = io.BytesIO(response.content)
            with gzip.open(bio, "rt", encoding="utf8") as g:
                for line in g:
                    json_line = json.loads(line.strip())
                    f.write(json.dumps(json_line) + "\n")


if __name__ == "__main__":
    # The config name can be passed as the only CLI argument; defaults to "stepwise".
    main(*sys.argv[1:2])
```
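Note that the script holds each compressed shard fully in memory before decompressing it; if memory is tight, one could instead pass `stream=True` to `requests.get` and wrap `response.raw` directly in `gzip.open`.

Once the download finishes, the resulting JSON-lines file can be read back without this repository's loading script, for instance with the generic `json` builder in `datasets`. A minimal sketch, assuming the default output name produced by the script above for the `stepwise` config:

```python
from datasets import load_dataset

# Stream the file written by the download script above; streaming avoids
# loading the ~200 GB file into memory at once.
mc4es_stepwise = load_dataset(
    "json",
    data_files="mc4-es-train-50M-stepwise.jsonl",
    split="train",
    streaming=True,
)

for sample in mc4es_stepwise:
    print(sample)
    break
```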

### Supported Tasks and Leaderboards