JP-SystemsX committed
Commit e7849ae · 1 Parent(s): e26c06b
Update super_eurlex.py

super_eurlex.py  CHANGED  (+24 -16)
@@ -12,18 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # TODO: Address all TODOs and remove all explanatory comments
-"""
-
-
-import csv
-import json
-import os
+"""Super-EURLEX dataset containing legal documents from multiple languages"""
 
 import numpy as np
 import pandas as pd
 
 import datasets
-from tqdm.auto import tqdm
 
 
 # TODO: Add BibTeX citation
@@ -32,7 +26,28 @@ _CITATION = """ """
 
 # TODO: Add description of the dataset here
 # You can copy an official description
-_DESCRIPTION = """
+_DESCRIPTION = """
+Super-EURLEX dataset containing legal documents from multiple languages.
+The dataset is built/scraped from the EURLEX website [https://eur-lex.europa.eu/homepage.html],
+with one split per language and sector, because the available features (metadata) differ for each
+sector. Each sample therefore contains the content of a full legal document in up to 3 different
+formats: raw HTML, cleaned HTML (if the HTML format was available on the EURLEX website
+during scraping), and cleaned text.
+The cleaned text should be available for every sample and was extracted from HTML or PDF.
+'Cleaned' HTML here means minor cleaning that preserves, to a large extent, the necessary
+HTML information such as table structures while removing unnecessary complexity that was
+introduced into the original documents by actions like writing each sentence into a new object.
+Additionally, each sample contains metadata that was scraped on the fly, which implies two
+things: first, not every sector contains the same metadata; second, most metadata might be
+irrelevant for most use cases.
+In our view the most interesting metadata are the celex-id, which is used to identify the legal
+document at hand but also encodes a lot of information about it
+(see [https://eur-lex.europa.eu/content/tools/eur-lex-celex-infographic-A3.pdf]), and the eurovoc
+concepts, which are labels that describe the content of the documents.
+Eurovoc concepts are, for example, only available for the sectors 1, 2, 3, 4, 5, 6, 9, C, and E.
+The naming of most metadata is kept as it was on the eurlex website, except for converting
+it to lower case and replacing whitespaces with '_'.
+"""
 
 # TODO: Add a link to an official homepage for the dataset here
 _HOMEPAGE = ""
@@ -40,13 +55,6 @@ _HOMEPAGE = ""
 # TODO: Add the licence for the dataset here if you can find it
 _LICENSE = ""
 
-# TODO: Add link to the official dataset URLs here
-# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
-# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_URLS = {
-    "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
-    "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
-}
 AVAILABLE_LANGUAGES=['DE']#, 'EN'
 SECTORS=['0', '1', '2', '3', '4', '5', '6', '8', '9', 'C', 'E']#'7',
 
@@ -386,7 +394,7 @@ class SuperEurlex(datasets.GeneratorBasedBuilder):
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         urls = {'text': self.config.text_data_url,
-                'meta': self.config.meta_data_url}
+                'meta': self.config.meta_data_url}
         try:
             data_dir = dl_manager.download_and_extract(urls)
         except FileNotFoundError:
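Note on the last hunk: the comments describe how `dl_manager.download_and_extract` accepts an arbitrarily nested dict/list of URLs and returns the same structure with each URL replaced by a local path. Below is a minimal sketch of that pattern, not the actual builder; the URLs, split name, and generator body are illustrative placeholders rather than anything taken from super_eurlex.py.

```python
import datasets


class SketchBuilder(datasets.GeneratorBasedBuilder):
    """Illustrative builder only; shows the nested-URL download pattern."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(description="sketch of the download pattern")

    def _split_generators(self, dl_manager):
        # Hypothetical URLs; the real builder takes them from self.config.
        urls = {"text": "https://example.org/text.zip",
                "meta": "https://example.org/meta.zip"}
        # The returned dict mirrors `urls`, with each URL replaced by the local
        # path of the downloaded (and, for archives, extracted) file.
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"text_dir": data_dir["text"],
                            "meta_dir": data_dir["meta"]},
            )
        ]

    def _generate_examples(self, text_dir, meta_dir):
        # Placeholder generator; the real script merges text and metadata here.
        yield 0, {"text_dir": text_dir, "meta_dir": meta_dir}
```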
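The AVAILABLE_LANGUAGES and SECTORS constants kept by the third hunk span the per-language, per-sector splits the new docstring describes. A quick illustration of the combinations they yield; the "LANG.SECTOR" label is a hypothetical naming for this sketch, not necessarily the builder's real config names, and 'EN' and sector '7' are commented out in the file, so they are omitted here.

```python
# Illustration only: enumerate the language/sector pairs implied by the module
# constants. 'EN' and sector '7' are commented out upstream, so they are omitted.
AVAILABLE_LANGUAGES = ['DE']  # , 'EN'
SECTORS = ['0', '1', '2', '3', '4', '5', '6', '8', '9', 'C', 'E']  # '7',

pairs = [f"{lang}.{sector}" for lang in AVAILABLE_LANGUAGES for sector in SECTORS]
print(len(pairs))   # 11 pairs while only 'DE' is enabled
print(pairs[:3])    # ['DE.0', 'DE.1', 'DE.2']
```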
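For completeness, a hedged sketch of how a consumer might load one of the per-language, per-sector configurations the new docstring describes. The repository id, config name, and column names are assumptions for illustration and are not confirmed by this commit; a script-based dataset like this one typically needs trust_remote_code=True with recent versions of the datasets library.

```python
from datasets import load_dataset

# Assumed repo id and config name; adjust to the actual dataset card.
ds = load_dataset(
    "JP-SystemsX/super_eurlex",  # assumption: the repo this script lives in
    "DE.3",                      # assumption: one config per language/sector pair
    split="train",
    trust_remote_code=True,
)

sample = ds[0]
# Per the docstring, each sample should expose the cleaned text plus scraped
# metadata such as the celex id; exact column names vary by sector.
print(sorted(sample.keys()))
```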