Datasets:
rcds
/

Nina Baumgartner committed on
Commit
f64fd5e
·
1 Parent(s): 235eb4f

Changes to loading script, adapted from sjp

Browse files
occlusion_swiss_judgment_prediction.py CHANGED
@@ -14,11 +14,11 @@
14
  """Dataset for the Occlusion task."""
15
 
16
  import json
17
- import lzma
18
- import os
19
 
20
  import datasets
21
 
 
 
22
  try:
23
  import lzma as xz
24
  except ImportError:
@@ -43,122 +43,121 @@ This dataset contains an implementation of occlusion for the SwissJudgmentPredic
43
  """
44
  _LICENSE = "cc-by-sa-4.0"
45
 
 
 
 
 
 
 
 
 
46
  _URLS = {
47
- "full": "https://huggingface.co/datasets/rcds/occlusion_swiss_judgment_prediction"
 
 
 
48
  }
49
 
50
 
 
 
 
 
 
 
 
 
 
 
 
 
 
51
class OcclusionSwissJudgmentPrediction(datasets.GeneratorBasedBuilder):
    """This dataset contains court decisions for the occlusion task."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="full", description="This part covers the whole dataset"),
    ]

    # "full" is the only configuration, so it is also the default.
    DEFAULT_CONFIG_NAME = "full"

    # The four occlusion test files published under data/ in the repository.
    _SPLIT_NAMES = ("test_1", "test_2", "test_3", "test_4")

    def _info(self):
        """Declare the dataset metadata and feature schema."""
        # The schema is the same for the single "full" configuration, so build
        # it unconditionally. BUG FIX: the original only assigned `features`
        # inside an `if self.config.name == "full"` branch, which raised
        # NameError for any other configuration name.
        features = datasets.Features(
            {
                "id": datasets.Value("int32"),
                "year": datasets.Value("int32"),
                "label": datasets.Value("string"),
                "language": datasets.Value("string"),
                "region": datasets.Value("string"),
                "canton": datasets.Value("string"),
                "legal_area": datasets.Value("string"),
                "explainability_label": datasets.Value("string"),
                "occluded_text": datasets.Value("string"),
                "text": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the four occlusion test files and declare one split per file."""
        base = _URLS[self.config.name]
        splits = []
        for index, split_name in enumerate(self._SPLIT_NAMES, start=1):
            # Join with "/" rather than os.path.join: the latter is meant for
            # filesystem paths and emits backslashes on Windows.
            filepath = dl_manager.download(f"{base}/data/test_occ_{index}.jsonl.xz")
            splits.append(
                datasets.SplitGenerator(
                    name=split_name,
                    # These kwargs will be passed to _generate_examples.
                    # BUG FIX: `split` was a required parameter of
                    # _generate_examples but was never passed here, causing a
                    # TypeError as soon as example generation started.
                    gen_kwargs={"filepath": filepath, "split": split_name},
                )
            )
        return splits

    def _generate_examples(self, filepath, split=None):
        """Yield (key, example) pairs from one xz-compressed JSON-lines file.

        The key is the 0-based line index (unique per split, kept for legacy
        tfds reasons). `split` defaults to None for backward compatibility
        with callers that only pass `filepath`; it is used for logging only.
        """
        line_counter = 0
        try:
            with xz.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
                for id, line in enumerate(f):
                    line_counter += 1
                    if line:
                        data = json.loads(line)
                        if self.config.name == "full":
                            yield id, {
                                "id": data["id"],
                                "year": data["year"],
                                "label": data["label"],
                                "language": data["language"],
                                "region": data["region"],
                                "canton": data["canton"],
                                "legal_area": data["legal_area"],
                                "explainability_label": data["explainability_label"],
                                "occluded_text": data["occluded_text"],
                                "text": data["text"],
                            }
        except lzma.LZMAError as e:
            # Best effort: report a corrupt/truncated archive but only fail
            # hard when nothing at all could be read.
            print(split, e)
            if line_counter == 0:
                raise e
 
14
  """Dataset for the Occlusion task."""
15
 
16
  import json
 
 
17
 
18
  import datasets
19
 
20
+ logger = datasets.logging.get_logger(__name__)
21
+
22
  try:
23
  import lzma as xz
24
  except ImportError:
 
43
  """
44
  _LICENSE = "cc-by-sa-4.0"
45
 
46
+ _LANGUAGES = [
47
+ "de",
48
+ "fr",
49
+ "it",
50
+ ]
51
+
52
+ _URL = "https://huggingface.co/datasets/rcds/occlusion_swiss_judgment_prediction/tree/main/data"
53
+
54
  _URLS = {
55
+ "test_1": _URL + "test_occ_1.jsonl",
56
+ "test_2": _URL + "test_occ_2.jsonl",
57
+ "test_3": _URL + "test_occ_3.jsonl",
58
+ "test_4": _URL + "test_occ_4.jsonl",
59
  }
60
 
61
 
62
class OcclusionSwissJudgmentPredictionConfig(datasets.BuilderConfig):
    """BuilderConfig for OcclusionSwissJudgmentPrediction.

    Extends the standard BuilderConfig with the target language of the
    configuration.
    """

    def __init__(self, language: str, **kwargs):
        """Create a configuration for one language.

        Args:
            language: One of de, fr, it, or all
            **kwargs: keyword arguments forwarded to super.
        """
        # Zero-argument super() is equivalent to the explicit two-argument
        # form in Python 3.
        super().__init__(**kwargs)
        self.language = language
74
+
75
class OcclusionSwissJudgmentPrediction(datasets.GeneratorBasedBuilder):
    """This dataset contains court decisions for the occlusion task in swiss judgment prediction"""

    VERSION = datasets.Version("1.1.0")
    BUILDER_CONFIG_CLASS = OcclusionSwissJudgmentPredictionConfig
    # One configuration per language plus "all" covering every language.
    BUILDER_CONFIGS = [
        OcclusionSwissJudgmentPredictionConfig(
            name=lang,
            language=lang,
            version=datasets.Version("1.1.0", ""),
            description=f"Plain text import of OcclusionSwissJudgmentPrediction for the {lang} language",
        )
        for lang in _LANGUAGES
    ] + [
        OcclusionSwissJudgmentPredictionConfig(
            name="all",
            language="all",
            version=datasets.Version("1.1.0", ""),
            description="Plain text import of OcclusionSwissJudgmentPrediction for all languages",
        )
    ]

    # Columns exposed by every configuration. Used both to declare the schema
    # in _info and to restrict what _generate_examples yields.
    _FEATURE_NAMES = (
        "id",
        "year",
        "label",
        "language",
        "region",
        "canton",
        "legal_area",
        "explainability_label",
        "occluded_text",
        "text",
    )

    def _info(self):
        """Declare the dataset metadata and the language-independent schema."""
        features = datasets.Features(
            {
                "id": datasets.Value("int32"),
                "year": datasets.Value("int32"),
                "label": datasets.Value("string"),
                "language": datasets.Value("string"),
                "region": datasets.Value("string"),
                "canton": datasets.Value("string"),
                "legal_area": datasets.Value("string"),
                "explainability_label": datasets.Value("string"),
                "occluded_text": datasets.Value("string"),
                "text": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage="https://github.com/ninabaumgartner/SwissCourtRulingCorpus",
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download all four test files and declare one split per file."""
        # dl_manager downloads every URL in _URLS and returns the local cache
        # paths in the same dict structure.
        try:
            dl_dir = dl_manager.download(_URLS)
        except Exception:
            logger.warning(
                "If this download failed try a few times before reporting an issue"
            )
            raise
        return [
            datasets.SplitGenerator(
                name=split_name,
                # These kwargs will be passed to _generate_examples.
                gen_kwargs={"filepath": dl_dir[split_name]},
            )
            for split_name in ("test_1", "test_2", "test_3", "test_4")
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form,
        filtered to the configured language."""
        if self.config.language in ["all"] + _LANGUAGES:
            with open(filepath, encoding="utf-8") as f:
                for id_, row in enumerate(f):
                    data = json.loads(row)
                    if self.config.language in ["all"] or data["language"] == self.config.language:
                        # BUG FIX: the original injected a "source_language"
                        # key via setdefault and yielded the raw dict; that
                        # key is not declared in the Features above, so schema
                        # casting fails on the unexpected column. Yield only
                        # the declared columns instead.
                        yield id_, {name: data[name] for name in self._FEATURE_NAMES}