Datasets:
MLRS
/

Modalities:
Text
Languages:
Maltese
Libraries:
Datasets
License:
KurtMica committed on
Commit
f9314b1
1 Parent(s): 4d47bb9

Domain-split data script & metadata.

Browse files
data/belles_lettres/.gitkeep ADDED
File without changes
dataset_infos.json CHANGED
The diff for this file is too large to render. See raw diff
 
dummy/belles_lettres/4.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d1f57fdc48df365d12a6b08de4b60e947db6ae65db977a6fe74099c26d7c75fe
3
+ size 120
dummy/blogs/4.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:27bf7e3c69497abb5a4b7801c99ddab72741d3abdeba18dda91bc4116077b894
3
+ size 1168926
dummy/comics/4.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4cee1ba088c9da7636df48592c535e510c3dca99fa5519361607543d0ee0dc95
3
+ size 15806
dummy/court/4.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fcf32b4354df5a231586c1a92938e6b21883f66ebaf9aceb2f4e6d8b29616b2d
3
+ size 645965
dummy/eu_docs/4.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:09d8367010895e2d6b1d1e443dc95a45c361dd822c789b01fa12c9d506d62f8d
3
+ size 3277738
dummy/gov_docs/4.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cef6540ca101d3f98e569eeb1d453802377f3bd970a5b08ec8d862912a414e74
3
+ size 450869
dummy/government_gazzette/4.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:805b0c0059e2905631448344fa225c95174bbc458cad056ef507e7a0905e8a50
3
+ size 1174942
dummy/law_eu/4.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:266d106c1fef4fd9c2de918bd2a782eead7296663f240355ec5f40fbc0190056
3
+ size 86164
dummy/law_mt/4.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d48a89a65c51e315999ccd03fd83c573427738c484b0e31ea989c1373b40bc3f
3
+ size 483614
dummy/legal/4.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0a6ffdfe69630fe35aaeb0ebf4f128923d581677d1c8ad472d08c3784abade0f
3
+ size 161797
dummy/nonfiction/4.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5b1f39ec43671806bdad22daeb2f2ff86d8bff26c24584573fdc4f009aabbe12
3
+ size 585141
dummy/parliament/4.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5c76b1c5c2cb548bd1de3845e4313d95de656245b5258d0e68c9897682f65d98
3
+ size 4296478
dummy/press_eu/4.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:81359f863bd8a95f4d6dc678bf8e5ec4e564d3e43843e907aa1ae26b512159cb
3
+ size 11521884
dummy/press_mt/4.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f6ae80bb89ca4cf2a29a130e0c0fd17272dcea8237e2b6b7a078cd6adebdb8d8
3
+ size 6690048
dummy/speeches/4.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cffa9648aa98c76fd8e62a8d2cbcded6cc2fbd011d9921e5f73c341fdb4b81fa
3
+ size 10719
dummy/theses/4.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5ca71067b6470dc5e675603c1afcdf7e401ceb2f4d8d0f942f68e8c64186c2b2
3
+ size 360337
dummy/umlib_oar/4.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8b3dd47db54dc6f14c406cfa429709b86cd48bd7f95626ab0aa5396b1cfbbf09
3
+ size 1153343
dummy/web_general/4.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0c52508ccdb2acaf93392f9523702187be375ba0ba04cc0b3c12e401990b07e5
3
+ size 31404337
dummy/wiki/4.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0bde64c00fb26566b2344c100eacb649d0a17019aeb11cef70e4694d21a7d003
3
+ size 125641
korpus_malti.py CHANGED
@@ -1,4 +1,6 @@
 
1
  import os
 
2
 
3
  import datasets
4
 
@@ -14,6 +16,7 @@ _SHUFFLED_URL = {
14
  "validation": os.path.join(_URL, "shuffled/validation.txt"),
15
  "test": os.path.join(_URL, "shuffled/test.txt"),
16
  }
 
17
 
18
 
19
  class KorpusMalti(datasets.GeneratorBasedBuilder):
@@ -28,6 +31,82 @@ class KorpusMalti(datasets.GeneratorBasedBuilder):
28
  version=VERSION,
29
  description="The shuffled data from all subsets.",
30
  ),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
  ]
32
 
33
  def _info(self):
@@ -35,6 +114,10 @@ class KorpusMalti(datasets.GeneratorBasedBuilder):
35
  features = {
36
  "text": datasets.Value("string"),
37
  }
 
 
 
 
38
 
39
  return datasets.DatasetInfo(
40
  description=_DESCRIPTION,
@@ -65,6 +148,21 @@ class KorpusMalti(datasets.GeneratorBasedBuilder):
65
  },
66
  ),
67
  ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68
 
69
  return data_split
70
 
@@ -76,3 +174,13 @@ class KorpusMalti(datasets.GeneratorBasedBuilder):
76
  yield key, {
77
  "text": line,
78
  }
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
  import os
3
+ from pathlib import Path
4
 
5
  import datasets
6
 
 
16
  "validation": os.path.join(_URL, "shuffled/validation.txt"),
17
  "test": os.path.join(_URL, "shuffled/test.txt"),
18
  }
19
+ _SUBSET_URL_PATTERN = "{}/**/*.jsonl"
20
 
21
 
22
  class KorpusMalti(datasets.GeneratorBasedBuilder):
 
31
  version=VERSION,
32
  description="The shuffled data from all subsets.",
33
  ),
34
+ datasets.BuilderConfig(name="belles_lettres",
35
+ version=VERSION,
36
+ description="Literary texts, usually published and included in the corpus by permission of the copyright holder. Unfortunately these cannot be disseminated in their integral form.",
37
+ ),
38
+ datasets.BuilderConfig(name="blogs",
39
+ version=VERSION,
40
+ description="Online blog articles from specific blogs, identified in advance and known to contain text written (or human-translated into) Maltese.",
41
+ ),
42
+ datasets.BuilderConfig(name="comics",
43
+ version=VERSION,
44
+ description="A small set of online information about comic books in Maltese.",
45
+ ),
46
+ datasets.BuilderConfig(name="court",
47
+ version=VERSION,
48
+ description="Publicly available proceedings from the courts of Malta.",
49
+ ),
50
+ datasets.BuilderConfig(name="eu_docs",
51
+ version=VERSION,
52
+ description="Miscellaneous policy documents from the European Union institutions.",
53
+ ),
54
+ datasets.BuilderConfig(name="gov_docs",
55
+ version=VERSION,
56
+ description="Miscellaneous policy documents from the Government of Malta.",
57
+ ),
58
+ datasets.BuilderConfig(name="government_gazzette",
59
+ version=VERSION,
60
+ description="The official, publicly available gazette of the Government of Malta. The gazette is bilingual; only the Maltese text is included.",
61
+ ),
62
+ datasets.BuilderConfig(name="law_eu",
63
+ version=VERSION,
64
+ description="Miscellaneous EU laws in their official Maltese translation, obtained via the Eur-Lex repository and including the segments of the Acquis Communautaire available in the DGT translation memory.",
65
+ ),
66
+ datasets.BuilderConfig(name="law_mt",
67
+ version=VERSION,
68
+ description="Maltese laws.",
69
+ ),
70
+ datasets.BuilderConfig(name="legal",
71
+ version=VERSION,
72
+ description="Miscellaneous legal text.",
73
+ ),
74
+ datasets.BuilderConfig(name="nonfiction",
75
+ version=VERSION,
76
+ description="Miscellaneous nonfiction, published or unpublished. Published texts are included with the permission of the copyright holder, where relevant.",
77
+ ),
78
+ datasets.BuilderConfig(name="parliament",
79
+ version=VERSION,
80
+ description="The officially released transcripts of parliamentary debates of the Maltese parliament.",
81
+ ),
82
+ datasets.BuilderConfig(name="press_eu",
83
+ version=VERSION,
84
+ description="Press releases in Maltese by the European Council of Ministers, European Parliament and European Commission.",
85
+ ),
86
+ datasets.BuilderConfig(name="press_mt",
87
+ version=VERSION,
88
+ description="Articles in the Maltese press, sourced primarily from the online portals of Maltese newspapers.",
89
+ ),
90
+ datasets.BuilderConfig(name="speeches",
91
+ version=VERSION,
92
+ description="Miscellaneous speeches in Maltese (pre-written).",
93
+ ),
94
+ datasets.BuilderConfig(name="theses",
95
+ version=VERSION,
96
+ description="Academic dissertations written in Maltese.",
97
+ ),
98
+ datasets.BuilderConfig(name="umlib_oar",
99
+ version=VERSION,
100
+ description="Very broad variety of nonfiction texts which are publicly available in the University of Malta Open Access Repository. Included with help and permission from the University of Malta library.",
101
+ ),
102
+ datasets.BuilderConfig(name="web_general",
103
+ version=VERSION,
104
+ description="Miscellaneous text scraped from pre-identified web pages in Maltese.",
105
+ ),
106
+ datasets.BuilderConfig(name="wiki",
107
+ version=VERSION,
108
+ description="The Maltese Wikipedia dump (downloaded 26th May, 2020).",
109
+ ),
110
  ]
111
 
112
  def _info(self):
 
114
  features = {
115
  "text": datasets.Value("string"),
116
  }
117
+ else:
118
+ features = {
119
+ "text": datasets.Sequence(datasets.Value("string")),
120
+ }
121
 
122
  return datasets.DatasetInfo(
123
  description=_DESCRIPTION,
 
148
  },
149
  ),
150
  ]
151
+ else:
152
+ file_pattern = _SUBSET_URL_PATTERN.format(self.config.name)
153
+ base_path = self.base_path or ""
154
+ file_paths = [path.relative_to(base_path)
155
+ for path in Path(os.path.join(base_path, _URL)).glob(file_pattern)]
156
+
157
+ data_files = dl_manager.download_and_extract(file_paths)
158
+ data_split = [
159
+ datasets.SplitGenerator(
160
+ name=datasets.Split.TRAIN,
161
+ gen_kwargs={
162
+ "filepath": data_files,
163
+ },
164
+ ),
165
+ ]
166
 
167
  return data_split
168
 
 
174
  yield key, {
175
  "text": line,
176
  }
177
+ else:
178
+ key = 0
179
+ for path in filepath:
180
+ with open(path, encoding="utf-8") as file:
181
+ for line in file:
182
+ data = json.loads(line)
183
+ yield key, {
184
+ "text": data["text"],
185
+ }
186
+ key += 1