Datasets:

Modalities:
Text
Formats:
parquet
Languages:
Catalan
ArXiv:
Libraries:
Datasets
Dask
License:
albertvillanova HF staff committed on
Commit
7552d67
1 Parent(s): 6d2aa1d

Add dataset loading script

Browse files
Files changed (1) hide show
  1. catalan_textual_corpus.py +125 -0
catalan_textual_corpus.py ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Catalan Textual Corpus."""
16
+
17
+
18
+ import csv
19
+ import json
20
+ import os
21
+
22
+ import datasets
23
+
24
+
25
+ # TODO: Add BibTeX citation
26
+ # Find for instance the citation on arxiv or on the dataset repo/website
27
+ _CITATION = """\
28
+ @InProceedings{huggingface:dataset,
29
+ title = {A great new dataset},
30
+ author={huggingface, Inc.
31
+ },
32
+ year={2020}
33
+ }
34
+ """
35
+
36
+ # TODO: Add description of the dataset here
37
+ # You can copy an official description
38
+ _DESCRIPTION = """\
39
+ The Catalan Textual Corpus is a 1760-million-token web corpus of Catalan built from several sources: existing corpus such as DOGC, CaWac (non-dedup version), Oscar (unshuffled version), Open Subtitles, Catalan Wikipedia; and three brand new crawlings: the Catalan General Crawling, obtained by crawling the 500 most popular .cat and .ad domains; the Catalan Government Crawling, obtained by crawling the .gencat domain and subdomains, belonging to the Catalan Government; and the ACN corpus with 220k news items from March 2015 until October 2020, crawled from the Catalan News Agency.
40
+
41
+ It consists of 1.758.388.896 tokens, 73.172.152 sentences and 12.556.365 documents. Documents are separated by single new lines. These boundaries have been preserved as long as the license allowed it.
42
+ """
43
+
44
+ _HOMEPAGE = "https://zenodo.org/record/4519349#.YapK5roo9PY"
45
+
46
+ _LICENSE = "Creative Commons Attribution-ShareAlike 4.0 International License"
47
+
48
+ _URL = "https://zenodo.org/record/4519349/files/catalan_textual_corpus.zip?download=1"
49
+
50
+
51
class CatalanTextualCorpus(datasets.GeneratorBasedBuilder):
    """Catalan Textual Corpus.

    Single TRAIN split built from one large UTF-8 text file in which
    documents are separated by blank lines.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return the dataset metadata.

        The example schema is declared explicitly as ``{"text": string}`` so
        it matches what ``_generate_examples`` yields — the original template
        left ``features=None``, forcing schema inference at generation time.
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({"text": datasets.Value("string")}),
            # No (input, target) pairs: this is a plain text corpus.
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the Zenodo zip and define the single TRAIN split.

        Args:
            dl_manager: `datasets.DownloadManager` used to fetch `_URL`.

        Returns:
            A one-element list with the TRAIN `SplitGenerator`; its
            ``gen_kwargs`` are forwarded to `_generate_examples`.
        """
        data_dir = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    # Path layout inside the extracted archive:
                    # corpus/catalan_textual_corpus.txt
                    "filepath": os.path.join(data_dir, "corpus", "catalan_textual_corpus.txt"),
                },
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, {"text": ...})`` pairs, one per blank-line-separated document.

        Fixes over the original template code:
        - the final document is no longer silently dropped when the file
          does not end with a trailing blank line (flushed after the loop);
        - runs of consecutive blank lines no longer produce empty examples;
        - keys are a dense 0-based counter instead of raw line offsets.

        Args:
            filepath: Path to the UTF-8 corpus file.
        """
        key = 0
        text = ""
        with open(filepath, encoding="utf-8") as f:
            for line in f:
                if line == "\n":
                    # A blank line terminates the current document.
                    if text:
                        yield key, {"text": text}
                        key += 1
                    text = ""
                else:
                    text += line
        # Flush the last document if the file lacks a trailing blank line.
        if text:
            yield key, {"text": text}