sumeczech_downsampled/downloader.py

#!/usr/bin/env python3
#
# This file is part of SumeCzech corpus <http://hdl.handle.net/11234/1-2615>.
#
# Copyright 2018 Institute of Formal and Applied Linguistics, Faculty of
# Mathematics and Physics, Charles University in Prague, Czech Republic.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Changelog:
# - 13 Feb 2018: Original release of version 1.0.
# - 25 Feb 2023: An update with the following changes:
#   - use the new domain https://data.commoncrawl.org of the CC download;
#   - support Python 3.10 and 3.11, where `collections.Callable` was removed.
import argparse
import collections
import collections.abc
import gzip
import json
import lzma
import multiprocessing.pool
import sys
import urllib.request
# In Python 3.10+, `collections.Callable` was removed, but it is still needed
# by both beautifulsoup4==4.6.0 and python-dateutil==2.6.1.
collections.Callable = collections.abc.Callable
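# The companion downloader_extractor module implements the actual HTML
# extraction: Extractor.extract_document turns a raw downloaded record
# into the final document and computes its MD5.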
from downloader_extractor import Extractor
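# Output splits of the corpus; each index entry names the split its
# document belongs to ("oodtest" is the out-of-domain test set).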
DATASETS = ["train", "dev", "test", "oodtest"]
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("--download_start", default=None, type=int,
help="Index of the first element to download")
parser.add_argument("--download_end", default=None, type=int,
help="Index of the last element to download")
parser.add_argument("--index_file", default="sumeczech-1.0-index.jsonl.xz", type=str,
help="Name of the index file to use")
parser.add_argument("--no_verify_md5", default=False, action="store_true",
help="Dangerous; do not verify MD5 of the downloaded documents")
parser.add_argument("--parallel", default=16, type=int,
help="Number of parallel processes to use")
parser.add_argument("--output_file", default="sumeczech-1.0-{}.jsonl", type=str,
help="Output file name template to use")
args = parser.parse_args()
# Load the index
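# Each index line is a JSON object describing one document: its target
# split ("dataset"), the Common Crawl file holding it ("filename"), the
# byte range of its record ("offset" and "length"), and the MD5 of the
# extracted document ("md5").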
print("Loading the index file.", file=sys.stderr)
index = []
with lzma.LZMAFile(args.index_file, "r") as index_file:
    for line in index_file:
        index.append(json.loads(line.decode("utf-8")))
# Open the output files and load the MD5s of previously downloaded documents
print("Loading previously downloaded data.", file=sys.stderr)
datasets = {}
for dataset in DATASETS:
    datasets[dataset] = {
        "file": open(args.output_file.format(dataset), "a+", encoding="utf-8"),
        "md5s": set()
    }
    # Rescan the output file so that a restarted run skips documents
    # that were already downloaded.
    datasets[dataset]["file"].seek(0)
    for i, line in enumerate(datasets[dataset]["file"]):
        assert line.endswith("\n"), "The last line of {} is not properly ended".format(
            args.output_file.format(dataset))
        try:
            entry = json.loads(line)
            datasets[dataset]["md5s"].add(entry["md5"])
        except Exception:
            raise ValueError("Cannot decode the line {} from {}".format(
                i + 1, args.output_file.format(dataset)))
# Download and extract the given entry
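# Returns the extracted document, or None if it was downloaded previously.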
def download_extract(entry):
    dataset = entry["dataset"]
    # Skip documents already present in the output file from a previous run.
    if entry["md5"] in datasets[dataset]["md5s"]:
        return None

    tries = 0
    while True:
        try:
            # An HTTP Range request fetches just the gzipped record of this
            # document from the given Common Crawl file.
            with urllib.request.urlopen(urllib.request.Request(
                    "https://data.commoncrawl.org/{}".format(entry["filename"]),
                    headers={"Range": "bytes={}-{}".format(entry["offset"], entry["offset"] + entry["length"] - 1)})) as response:
                with gzip.GzipFile(fileobj=response) as decompressed_response:
                    # Decoding as latin-1 maps every byte to a code point,
                    # so the raw record survives unchanged.
                    entry["content"] = decompressed_response.read().decode("latin-1")
            break
        except Exception:
            tries += 1
            if tries < 10:
                print("Error during download of entry {}, retrying".format(entry), file=sys.stderr)
            else:
                print("Too many errors during download of entry {}, aborting".format(entry), file=sys.stderr)
                raise

    extracted = Extractor.extract_document(entry)
    del entry["content"]

    # Verify that the extracted document matches the MD5 recorded in the index.
    correct_hash = extracted["md5"] == entry["md5"]
    if not correct_hash:
        if not args.no_verify_md5:
            assert correct_hash, "MD5 verification failed for entry {}, aborting".format(entry)
        else:
            print("MD5 verification failed for entry {}, but continuing as requested".format(entry), file=sys.stderr)
    return extracted
print("Downloading the data.", file=sys.stderr)
entries = index[args.download_start:args.download_end]
if args.parallel > 1:
    # Each worker raises its recursion limit slightly above the default
    # 1000; the recursive HTML parsing can otherwise overflow on deeply
    # nested documents.
    pool = multiprocessing.pool.Pool(args.parallel, initializer=lambda: sys.setrecursionlimit(1100))
    processed_entries = pool.imap(download_extract, entries)
else:
    processed_entries = map(download_extract, entries)
for i, processed_entry in enumerate(processed_entries):
    if processed_entry is not None:
        datasets[processed_entry["dataset"]]["file"].write(json.dumps(
            processed_entry, ensure_ascii=False, sort_keys=True, indent=None, separators=(", ", ": ")) + "\n")
    if (i + 1) % 1000 == 0:
        print("Downloaded {}/{} documents.".format(i + 1, len(entries)), end="\r", file=sys.stderr, flush=True)
print("All data downloaded successfully.", file=sys.stderr)