Modalities: Text
Formats: parquet
Libraries: Datasets, Dask
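
Since the repo's data files are parquet, they can also be read outside of `datasets`, e.g. lazily with Dask over the Hub's hf:// filesystem (provided by huggingface_hub). A sketch under assumptions: the repo ID `scielo` and the per-config parquet layout are guesses, not taken from this page.

import dask.dataframe as dd

# Lazily read the repo's parquet shards over the hf:// protocol.
# Repo ID "scielo" and the en-es/*.parquet layout are assumptions.
df = dd.read_parquet("hf://datasets/scielo/en-es/*.parquet")
print(df.head())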
albertvillanova (HF staff) committed
Commit b1f0cde · verified · 1 parent: 8ed6ec7

Delete loading script
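
With the loading script removed, `datasets` resolves this repository from its parquet files instead of executing remote code. A minimal loading sketch, assuming the Hub dataset ID `scielo` (config names come from the deleted script below):

from datasets import load_dataset

# Loads directly from the repo's parquet files; with no loading script,
# trust_remote_code is not needed. The "scielo" repo ID is an assumption.
ds = load_dataset("scielo", "en-es", split="train")
print(ds[0]["translation"])  # e.g. {"en": "...", "es": "..."}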

Files changed (1)
  1. scielo.py +0 -121
scielo.py DELETED
@@ -1,121 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Parallel corpus of full-text articles in Portuguese, English and Spanish from SciELO"""
-
-
-import datasets
-
-
-_CITATION = """\
-@inproceedings{soares2018large,
-  title={A Large Parallel Corpus of Full-Text Scientific Articles},
-  author={Soares, Felipe and Moreira, Viviane and Becker, Karin},
-  booktitle={Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC-2018)},
-  year={2018}
-}
-"""
-
-
-_DESCRIPTION = """\
-A parallel corpus of full-text scientific articles collected from Scielo database in the following languages: \
-English, Portuguese and Spanish. The corpus is sentence aligned for all language pairs, \
-as well as trilingual aligned for a small subset of sentences. Alignment was carried out using the Hunalign algorithm.
-"""
-
-
-_HOMEPAGE = "https://sites.google.com/view/felipe-soares/datasets#h.p_92uSCyAjWSRB"
-
-_LANGUAGES = ["en-es", "en-pt", "en-pt-es"]
-
-_URLS = {
-    "en-es": "https://ndownloader.figstatic.com/files/14019287",
-    "en-pt": "https://ndownloader.figstatic.com/files/14019308",
-    "en-pt-es": "https://ndownloader.figstatic.com/files/14019293",
-}
-
-
-class Scielo(datasets.GeneratorBasedBuilder):
-    """Parallel corpus of full-text articles in Portuguese, English and Spanish from SciELO"""
-
-    VERSION = datasets.Version("1.0.0")
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="en-es", version=datasets.Version("1.0.0"), description="English-Spanish"),
-        datasets.BuilderConfig(name="en-pt", version=datasets.Version("1.0.0"), description="English-Portuguese"),
-        datasets.BuilderConfig(
-            name="en-pt-es", version=datasets.Version("1.0.0"), description="English-Portuguese-Spanish"
-        ),
-    ]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {"translation": datasets.features.Translation(languages=tuple(self.config.name.split("-")))}
-            ),
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        archive = dl_manager.download(_URLS[self.config.name])
-        lang_pair = self.config.name.split("-")
-        fname = self.config.name.replace("-", "_")
-
-        if self.config.name == "en-pt-es":
-            return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.TRAIN,
-                    gen_kwargs={
-                        "source_file": f"{fname}.en",
-                        "target_file": f"{fname}.pt",
-                        "target_file_2": f"{fname}.es",
-                        "files": dl_manager.iter_archive(archive),
-                    },
-                ),
-            ]
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "source_file": f"{fname}.{lang_pair[0]}",
-                    "target_file": f"{fname}.{lang_pair[1]}",
-                    "files": dl_manager.iter_archive(archive),
-                },
-            ),
-        ]
-
-    def _generate_examples(self, source_file, target_file, files, target_file_2=None):
-        for path, f in files:
-            if path == source_file:
-                source_sentences = f.read().decode("utf-8").split("\n")
-            elif path == target_file:
-                target_sentences = f.read().decode("utf-8").split("\n")
-            elif self.config.name == "en-pt-es" and path == target_file_2:
-                target_sentences_2 = f.read().decode("utf-8").split("\n")
-
-        if self.config.name == "en-pt-es":
-            source, target, target_2 = tuple(self.config.name.split("-"))
-            for idx, (l1, l2, l3) in enumerate(zip(source_sentences, target_sentences, target_sentences_2)):
-                result = {"translation": {source: l1, target: l2, target_2: l3}}
-                yield idx, result
-        else:
-            source, target = tuple(self.config.name.split("-"))
-            for idx, (l1, l2) in enumerate(zip(source_sentences, target_sentences)):
-                result = {"translation": {source: l1, target: l2}}
-                yield idx, result
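
For reference, the core of the deleted script pairs sentence-per-line files by zipping them line by line. A standalone sketch of that logic for a bilingual config (the local paths are hypothetical; the en_es.en / en_es.es names follow the script's own convention):

# Pair two line-aligned sentence files into translation records,
# mirroring the deleted _generate_examples for a bilingual config.
def iter_pairs(source_path, target_path, source="en", target="es"):
    with open(source_path, encoding="utf-8") as src, open(target_path, encoding="utf-8") as tgt:
        for idx, (s, t) in enumerate(zip(src, tgt)):
            yield idx, {"translation": {source: s.rstrip("\n"), target: t.rstrip("\n")}}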