Modalities: Text
Formats: parquet
Languages: Catalan
Libraries: Datasets, Dask
Commit 7a7d482 by albertvillanova (HF staff); parent: 7552d67

Clean script

Files changed (1):
  1. catalan_textual_corpus.py +8 -53
catalan_textual_corpus.py CHANGED
@@ -14,27 +14,22 @@
 # limitations under the License.
 """Catalan Textual Corpus."""

-
-import csv
-import json
 import os

 import datasets


-# TODO: Add BibTeX citation
-# Find for instance the citation on arxiv or on the dataset repo/website
 _CITATION = """\
-@InProceedings{huggingface:dataset,
-title = {A great new dataset},
-author={huggingface, Inc.
-},
-year={2020}
+@misc{armengolestape2021multilingual,
+      title={Are Multilingual Models the Best Choice for Moderately Under-resourced Languages? A Comprehensive Assessment for Catalan},
+      author={Jordi Armengol{-}Estap{\'{e}} and Casimiro Pio Carrino and Carlos Rodriguez-Penagos and Ona de Gibert Bonet and Carme Armentano{-}Oller and Aitor Gonzalez{-}Agirre and Maite Melero and Marta Villegas},
+      year={2021},
+      eprint={2107.07903},
+      archivePrefix={arXiv},
+      primaryClass={cs.CL}
 }
 """

-# TODO: Add description of the dataset here
-# You can copy an official description
 _DESCRIPTION = """\
 The Catalan Textual Corpus is a 1760-million-token web corpus of Catalan built from several sources: existing corpus such as DOGC, CaWac (non-dedup version), Oscar (unshuffled version), Open Subtitles, Catalan Wikipedia; and three brand new crawlings: the Catalan General Crawling, obtained by crawling the 500 most popular .cat and .ad domains; the Catalan Government Crawling, obtained by crawling the .gencat domain and subdomains, belonging to the Catalan Government; and the ACN corpus with 220k news items from March 2015 until October 2020, crawled from the Catalan News Agency.

@@ -54,31 +49,9 @@ class CatalanTextualCorpus(datasets.GeneratorBasedBuilder):
     VERSION = datasets.Version("1.0.0")

     def _info(self):
-        # # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
-        # if self.config.name == "first_domain":  # This is the name of the configuration selected in BUILDER_CONFIGS above
-        #     features = datasets.Features(
-        #         {
-        #             "sentence": datasets.Value("string"),
-        #             "option1": datasets.Value("string"),
-        #             "answer": datasets.Value("string")
-        #             # These are the features of your dataset like images, labels ...
-        #         }
-        #     )
-        # else:  # This is an example to show how to have different features for "first_domain" and "second_domain"
-        #     features = datasets.Features(
-        #         {
-        #             "sentence": datasets.Value("string"),
-        #             "option2": datasets.Value("string"),
-        #             "second_domain_answer": datasets.Value("string")
-        #             # These are the features of your dataset like images, labels ...
-        #         }
-        #     )
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
-            features=None,
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
+            features=datasets.Features({"text": datasets.Value("string")}),
             supervised_keys=None,
             homepage=_HOMEPAGE,
             license=_LICENSE,
@@ -90,28 +63,10 @@ class CatalanTextualCorpus(datasets.GeneratorBasedBuilder):
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
                 gen_kwargs={
                     "filepath": os.path.join(data_dir, "corpus", "catalan_textual_corpus.txt"),
-                    # "split": "train",
                 },
             ),
-            # datasets.SplitGenerator(
-            #     name=datasets.Split.TEST,
-            #     # These kwargs will be passed to _generate_examples
-            #     gen_kwargs={
-            #         "filepath": os.path.join(data_dir, "test.jsonl"),
-            #         "split": "test"
-            #     },
-            # ),
-            # datasets.SplitGenerator(
-            #     name=datasets.Split.VALIDATION,
-            #     # These kwargs will be passed to _generate_examples
-            #     gen_kwargs={
-            #         "filepath": os.path.join(data_dir, "dev.jsonl"),
-            #         "split": "dev",
-            #     },
-            # ),
         ]

     def _generate_examples(self, filepath):
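The diff cuts off at the _generate_examples signature, so the generator body and the download step are not shown on this page. As orientation for the new {"text": string} schema, here is a minimal sketch of how such a builder could produce examples from the plain-text corpus file; the class name, the _URL constant, and the line-per-example logic are assumptions for illustration, not the code from this commit.

import os

import datasets

# Hypothetical download location; the real script defines its own URL outside the hunks shown above.
_URL = "https://example.org/catalan_textual_corpus.zip"


class CatalanTextualCorpusSketch(datasets.GeneratorBasedBuilder):
    """Illustrative skeleton only; mirrors the structure visible in the diff."""

    def _info(self):
        return datasets.DatasetInfo(
            description="Sketch of a plain-text corpus builder.",
            features=datasets.Features({"text": datasets.Value("string")}),
        )

    def _split_generators(self, dl_manager):
        # download_and_extract returns the local directory of the extracted archive,
        # which is what the gen_kwargs in the diff join against.
        data_dir = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "corpus", "catalan_textual_corpus.txt"),
                },
            ),
        ]

    def _generate_examples(self, filepath):
        # Yield one {"text": ...} example per non-empty line of the corpus file
        # (the line-per-example layout is an assumption about the .txt format).
        with open(filepath, encoding="utf-8") as f:
            for id_, line in enumerate(f):
                text = line.strip()
                if text:
                    yield id_, {"text": text}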
 
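For reference, loading scripts like this one are consumed through datasets.load_dataset. The snippet below is a usage sketch, not taken from this repository: it assumes a datasets release that still executes loading scripts (pre-3.0) and points at the local script file, since the Hub repository id is not shown on this page.

from datasets import load_dataset

# Execute the local loading script and materialize the single "train" split it defines.
ds = load_dataset("catalan_textual_corpus.py", split="train")

print(ds.features)       # expected: {'text': Value(dtype='string', id=None)}
print(ds[0]["text"])     # first example of the corpus

With a published Hub repository, the script path would be replaced by the dataset's repository id, and newer 2.x releases of the datasets library also expect trust_remote_code=True for script-based datasets.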