Dataset: rcds/swiss_legislation
Modalities: Text
Formats: json
Libraries: Datasets, pandas
joelniklaus committed
Commit 96283e3
1 Parent(s): 1f8922b

Delete swiss_legislation.py

Files changed (1)
  1. swiss_legislation.py +0 -114
swiss_legislation.py DELETED
@@ -1,114 +0,0 @@
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import json
- import lzma
- import os
-
- import datasets
- try:
-     import lzma as xz
- except ImportError:
-     import pylzma as xz
-
-
- # TODO: Add BibTeX citation
- # Find for instance the citation on arxiv or on the dataset repo/website
- _CITATION = """\
- @InProceedings{huggingface:dataset,
- title = {A great new dataset},
- author={huggingface, Inc.
- },
- year={2020}
- }
- """
-
- # You can copy an official description
- _DESCRIPTION = """\
- This dataset contains Swiss law articles
- """
-
- _URLS = {
-     "full": "https://huggingface.co/datasets/rcds/swiss_legislation/resolve/main",
- }
-
-
- class SwissLegilation(datasets.GeneratorBasedBuilder):
-     """This dataset contains court decision for doc2doc information retrieval task."""
-
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name="full", description="This part covers the whole dataset"),
-     ]
-
-     DEFAULT_CONFIG_NAME = "full" # It's not mandatory to have a default configuration. Just use one if it make sense.
-
-     def _info(self):
-         if self.config.name == "full":
-             features = datasets.Features(
-                 {
-                     "canton": datasets.Value("string"),
-                     "language": datasets.Value("string"),
-                     "uuid": datasets.Value("string"),
-                     "title": datasets.Value("string"),
-                     "short": datasets.Value("string"),
-                     "abbreviation": datasets.Value("string"),
-                     "sr_number": datasets.Value("string"),
-                     "pdf_content": datasets.Value("string")
-                 }
-             )
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features, # Here we define them above because they are different between the two configurations
-         )
-
-     def _split_generators(self, dl_manager):
-         urls = _URLS[self.config.name]
-         filepath = dl_manager.download(os.path.join(urls, "lexfind_v2.jsonl.xz"))
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": filepath,
-                     "split": "train",
-                 },
-             )
-         ]
-
-     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-     def _generate_examples(self, filepath, split):
-         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-         line_counter = 0
-         try:
-             with xz.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
-                 for id, line in enumerate(f):
-                     line_counter += 1
-                     if line:
-                         data = json.loads(line)
-                         if self.config.name == "full":
-                             yield id, {
-                                 "canton": data["canton"],
-                                 "language": data["language"],
-                                 "uuid": data["uuid"],
-                                 "short": data["short"],
-                                 "title": data["title"],
-                                 "abbreviation": data["abbreviation"],
-                                 "sr_number": data["sr_number"],
-                                 "pdf_content": data["pdf_content"],
-                             }
-         except lzma.LZMAError as e:
-             print(split, e)
-             if line_counter == 0:
-                 raise e
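
With the loading script deleted, the dataset is expected to be loaded from the repository's packaged data files instead (the page lists json as the format and Datasets/pandas as supported libraries). A minimal sketch of loading it after this change, assuming a default configuration and a train split remain available; the repo id comes from the URL in the deleted script, and the field names are the ones that script declared:

    # Minimal sketch, not part of this commit: load the dataset without the deleted script.
    # Assumes the Hub serves the repository's data files directly and keeps a "train" split.
    from datasets import load_dataset

    ds = load_dataset("rcds/swiss_legislation", split="train")
    print(ds.column_names)   # expected to include "canton", "title", "pdf_content", ...
    print(ds[0]["title"])    # title of the first law article

If the repository still exposes named configurations (e.g. "full" in the deleted script), the configuration name would be passed as the second positional argument to load_dataset.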