Datasets:
File size: 4,290 Bytes
6a5e475 322a677 eb1d4aa 44dd507 eb1d4aa 6a5e475 f1f0a39 6a5e475 f1f0a39 6a5e475 cc9ae4a 44dd507 6a5e475 89f5571 6a5e475 a9a26d9 b9b1542 6a5e475 484f2ea 9e6d05a 484f2ea 9e6d05a 6a5e475 7b9250e a9a26d9 f1f0a39 7b9250e 6a5e475 a9a26d9 f1f0a39 a9a26d9 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 |
# coding=utf-8
# Source: https://github.com/huggingface/datasets/blob/master/templates/new_dataset_script.py
"""ELRC-Medical-V2 : European parallel corpus for healthcare machine translation"""
import os
import csv
import datasets
from tqdm import tqdm
logger = datasets.logging.get_logger(__name__)
# BibTeX entry for the ELRC paper (Lösch et al., LREC 2018); surfaced via DatasetInfo.citation.
_CITATION = """
@inproceedings{losch-etal-2018-european,
title = "European Language Resource Coordination: Collecting Language Resources for Public Sector Multilingual Information Management",
author = {L{\"o}sch, Andrea and
Mapelli, Val{\'e}rie and
Piperidis, Stelios and
Vasi{\c{l}}jevs, Andrejs and
Smal, Lilli and
Declerck, Thierry and
Schnur, Eileen and
Choukri, Khalid and
van Genabith, Josef},
booktitle = "Proceedings of the Eleventh International Conference on Language Resources and Evaluation ({LREC} 2018)",
month = may,
year = "2018",
address = "Miyazaki, Japan",
publisher = "European Language Resources Association (ELRA)",
url = "https://aclanthology.org/L18-1213",
}
"""
# The 23 supported language pairs, all English-source: "en-bg" ... "en-sv".
# Each pair name doubles as a BuilderConfig name and as the CSV filename stem.
_LANGUAGE_PAIRS = ["en-" + lang for lang in ["bg", "cs", "da", "de", "el", "es", "et", "fi", "fr", "ga", "hr", "hu", "it", "lt", "lv", "mt", "nl", "pl", "pt", "ro", "sk", "sl", "sv"]]
# License notice (HTML snippet) surfaced via DatasetInfo.license.
_LICENSE = """
This work is licensed under a <a rel="license" href="https://elrc-share.eu/static/metashare/licences/CC-BY-4.0.pdf">Attribution 4.0 International (CC BY 4.0) License</a>.
"""
# _URLS = {
#     lang : "https://huggingface.co/datasets/qanastek/ELRC-Medical-V2/raw/main/csv/" + lang + ".csv" for lang in _LANGUAGE_PAIRS
# }
# Zip archive containing one "<pair>.csv" file per language pair.
_URL = "https://huggingface.co/datasets/qanastek/ELRC-Medical-V2/resolve/main/ELRC-Medical-V2.zip"
# _URL = "https://raw.githubusercontent.com/qanastek/ELRC-Medical-V2/main/csv_corpus/"
_DESCRIPTION = "No description"
class ELRC_Medical_V2(datasets.GeneratorBasedBuilder):
    """ELRC-Medical-V2 dataset builder.

    Exposes one BuilderConfig per English-to-X language pair (see
    ``_LANGUAGE_PAIRS``) and yields ``{"translation": {src: ..., tgt: ...}}``
    examples from a per-pair CSV file inside the downloaded zip archive.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=name,
            version=datasets.Version("2.0.0"),
            description="The ELRC-Medical-V2 corpora",
        )
        for name in _LANGUAGE_PAIRS
    ]

    DEFAULT_CONFIG_NAME = "en-fr"

    def _info(self):
        """Return dataset metadata for the selected language-pair config."""
        src, target = self.config.name.split("-")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {"translation": datasets.features.Translation(languages=(src, target))}
            ),
            supervised_keys=(src, target),
            homepage="https://github.com/qanastek/ELRC-Medical-V2/",
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the corpus zip; expose a single train split.

        The archive contains one ``<pair>.csv`` per language pair; only the
        file matching the active config is read.
        """
        data_dir = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    # os.path.join is portable; avoids manual "/" concatenation.
                    "filepath": os.path.join(data_dir, self.config.name + ".csv"),
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield ``(key, example)`` pairs from one language-pair CSV.

        Expected CSV columns: doc_id, lang-pair (e.g. "en-fr"), source_text,
        target_text. The first row is a header and is skipped, so data rows
        receive keys 0..n-1 (same numbering as the original implementation).
        ``split`` is unused but required by the gen_kwargs contract.
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            reader = csv.reader(f, delimiter=",")
            next(reader, None)  # skip header row instead of testing id_ == 0 per row
            for key, row in enumerate(reader):
                # Column 1 carries the pair name, e.g. "en-fr" -> ("en", "fr").
                src, target = str(row[1]).split("-")
                yield key, {
                    "translation": {
                        src: str(row[2]).strip(),
                        target: str(row[3]).strip(),
                    },
                }
|