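"""Download the SumeCzech 1.0 dataset.

Rebuilds the train/dev/test/oodtest JSONL files by fetching every indexed
document from Common Crawl with an HTTP Range request, extracting it, and
verifying its MD5 against the released index. Documents already present in
the output files (matched by MD5) are skipped, so an interrupted run can be
restarted safely.

Example invocation (the script filename is illustrative):

    python download_sumeczech.py --download_start 0 --download_end 1000
"""
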
import argparse
import collections
import gzip
import json
import lzma
import multiprocessing.pool
import sys
import urllib.request

# Compatibility shim: `collections.Callable` was removed in Python 3.10;
# alias it back for any dependency that still looks it up under the old name.
collections.Callable = collections.abc.Callable

from downloader_extractor import Extractor

DATASETS = ["train", "dev", "test", "oodtest"]

parser = argparse.ArgumentParser()
parser.add_argument("--download_start", default=None, type=int,
                    help="Index of the first element to download")
parser.add_argument("--download_end", default=None, type=int,
                    help="Index one past the last element to download (exclusive)")
parser.add_argument("--index_file", default="sumeczech-1.0-index.jsonl.xz", type=str,
                    help="Name of the index file to use")
parser.add_argument("--no_verify_md5", default=False, action="store_true",
                    help="Dangerous; do not verify MD5 of the downloaded documents")
parser.add_argument("--parallel", default=16, type=int,
                    help="Number of parallel processes to use")
parser.add_argument("--output_file", default="sumeczech-1.0-{}.jsonl", type=str,
                    help="Output file name template to use")
args = parser.parse_args()
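
# Load the index: an xz-compressed file with one JSON object per line,
# each describing a document's location in Common Crawl and its MD5.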
print("Loading the index file.", file=sys.stderr)
index = []
with lzma.LZMAFile(args.index_file, "r") as index_file:
    for line in index_file:
        index.append(json.loads(line.decode("utf-8")))
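
# Open the output files in append mode and collect the MD5s of documents
# downloaded by previous runs, so they are not downloaded again.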
print("Loading previously downloaded data.", file=sys.stderr)
datasets = {}
for dataset in DATASETS:
    datasets[dataset] = {
        "file": open(args.output_file.format(dataset), "a+", encoding="utf-8"),
        "md5s": set(),
    }
    datasets[dataset]["file"].seek(0)
    for i, line in enumerate(datasets[dataset]["file"]):
        assert line.endswith("\n"), "The last line of {} is not properly ended".format(
            args.output_file.format(dataset))
        try:
            entry = json.loads(line)
            datasets[dataset]["md5s"].add(entry["md5"])
        except Exception:
            raise ValueError("Cannot decode the line {} from {}".format(
                i + 1, args.output_file.format(dataset)))
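

# Download one index entry from Common Crawl and run the extractor on the
# raw document, returning the extracted record (or None if already present).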
def download_extract(entry):
    # Skip documents already present in the output files.
    dataset = entry["dataset"]
    if entry["md5"] in datasets[dataset]["md5s"]:
        return None

    # Each index entry points to a gzipped record inside a Common Crawl
    # archive, so a single Range request for the bytes
    # offset .. offset + length - 1 retrieves exactly that document.
    # Retry up to 10 times on transient failures.
    tries = 0
    while True:
        try:
            with urllib.request.urlopen(urllib.request.Request(
                    "https://data.commoncrawl.org/{}".format(entry["filename"]),
                    headers={"Range": "bytes={}-{}".format(
                        entry["offset"], entry["offset"] + entry["length"] - 1)})) as response:
                with gzip.GzipFile(fileobj=response) as decompressed_response:
                    entry["content"] = decompressed_response.read().decode("latin-1")
            break
        except Exception:
            tries += 1
            if tries < 10:
                print("Error during download of entry {}, retrying".format(entry), file=sys.stderr)
            else:
                print("Too many errors during download of entry {}, aborting".format(entry), file=sys.stderr)
                raise

    extracted = Extractor.extract_document(entry)
    del entry["content"]

    # Unless --no_verify_md5 was given, require the extracted document to
    # match the MD5 recorded in the index.
    correct_hash = extracted["md5"] == entry["md5"]
    if not correct_hash:
        if not args.no_verify_md5:
            assert correct_hash, "MD5 verification failed for entry {}, aborting".format(entry)
        else:
            print("MD5 verification failed for entry {}, but continuing as requested".format(entry), file=sys.stderr)

    return extracted
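

# Download the selected slice of the index, in parallel by default.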
print("Downloading the data.", file=sys.stderr)
entries = index[args.download_start:args.download_end]
if args.parallel > 1:
    # Each worker raises its recursion limit slightly above the default of
    # 1000, presumably to give the extractor headroom on deeply nested documents.
    pool = multiprocessing.pool.Pool(args.parallel, initializer=lambda: sys.setrecursionlimit(1100))
    processed_entries = pool.imap(download_extract, entries)
else:
    processed_entries = map(download_extract, entries)
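
# Write extracted documents as they arrive, reporting progress every 1000.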
for i, processed_entry in enumerate(processed_entries):
    if processed_entry is not None:
        datasets[processed_entry["dataset"]]["file"].write(json.dumps(
            processed_entry, ensure_ascii=False, sort_keys=True, indent=None,
            separators=(", ", ": ")) + "\n")
    if (i + 1) % 1000 == 0:
        print("Downloaded {}/{} documents.".format(i + 1, len(entries)), end="\r", file=sys.stderr, flush=True)

print("All data downloaded successfully.", file=sys.stderr)