#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
from collections import defaultdict
import json
import os
import sys
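# make the directory two levels up importable, assumed to be the repository
# root where `language_identification` and `project_settings` live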
pwd = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(pwd, "../../"))
import datasets
from datasets import load_dataset, DownloadMode
from tqdm import tqdm
from language_identification import LANGUAGE_MAP
from project_settings import project_path

def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset_path", default="setimes", type=str)
    parser.add_argument(
        "--dataset_cache_dir",
        default=(project_path / "hub_datasets").as_posix(),
        type=str
    )
    parser.add_argument(
        "--output_file",
        default=(project_path / "data/setimes.jsonl").as_posix(),
        type=str
    )
    args = parser.parse_args()
    return args

def main():
    args = get_args()

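    # the 45 pairwise subsets of the SETimes corpus
    # (10 languages: bg, bs, el, en, hr, mk, ro, sq, sr, tr)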
    name_list = [
        "bg-bs",
        "bg-el",
        "bg-en",
        "bg-hr",
        "bg-mk",
        "bg-ro",
        "bg-sq",
        "bg-sr",
        "bg-tr",
        "bs-el",
        "bs-en",
        "bs-hr",
        "bs-mk",
        "bs-ro",
        "bs-sq",
        "bs-sr",
        "bs-tr",
        "el-en",
        "el-hr",
        "el-mk",
        "el-ro",
        "el-sq",
        "el-sr",
        "el-tr",
        "en-hr",
        "en-mk",
        "en-ro",
        "en-sq",
        "en-sr",
        "en-tr",
        "hr-mk",
        "hr-ro",
        "hr-sq",
        "hr-sr",
        "hr-tr",
        "mk-ro",
        "mk-sq",
        "mk-sr",
        "mk-tr",
        "ro-sq",
        "ro-sr",
        "ro-tr",
        "sq-sr",
        "sq-tr",
        "sr-tr",
    ]
    # TODO: http://nlp.ffzg.hr is unreachable.
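    # `text_set` dedupes sentences globally, since the same sentence can occur
    # in several language-pair subsets; `counter` tallies rows per split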
    text_set = set()
    counter = defaultdict(int)
    with open(args.output_file, "w", encoding="utf-8") as f:
        for name in name_list:
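            # a subset can fail to generate when the upstream host is down;
            # skip it instead of aborting the whole export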
            try:
                dataset_dict = load_dataset(
                    path=args.dataset_path,
                    name=name,
                    cache_dir=args.dataset_cache_dir,
                    # download_mode=DownloadMode.FORCE_REDOWNLOAD
                )
            except datasets.builder.DatasetGenerationError:
                print("skip subset: {}".format(name))
                continue
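            # `load_dataset` returns a DatasetDict keyed by split name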
            for k, v in dataset_dict.items():
                split = k
                if split not in ("train", "validation", "test"):
                    print("skip split: {}".format(split))
                    continue
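                # each sample carries one parallel sentence pair, e.g.
                # {"translation": {"bg": "...", "en": "..."}}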
                for sample in tqdm(v):
                    translation = sample["translation"]
                    for language, text in translation.items():
                        text = text.strip()
                        # the replaced characters render invisibly in the source;
                        # assumed: no-break space (U+00A0) and soft hyphen (U+00AD)
                        text = text.replace("\u00a0", " ")
                        text = text.replace("\u00ad", "-")
                        if text in text_set:
                            continue
                        text_set.add(text)

                        if language not in LANGUAGE_MAP:
                            raise AssertionError("language: {}, text: {}".format(language, text))
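                        # one JSONL row per (sentence, language) pair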
                        row = {
                            "text": text,
                            "language": language,
                            "data_source": "setimes",
                            "split": split
                        }
                        row = json.dumps(row, ensure_ascii=False)
                        f.write("{}\n".format(row))
                        counter[split] += 1

    print("counter: {}".format(counter))
    return

if __name__ == "__main__":
    main()