Datasets:

Languages:
English
License:
Lucia Zheng committed on
Commit
4246981
·
1 Parent(s): c44a7cf

Public MBE subset of Bar Exam QA

Browse files
.gitattributes CHANGED
@@ -56,3 +56,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
56
  # Video files - compressed
57
  *.mp4 filter=lfs diff=lfs merge=lfs -text
58
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
56
  # Video files - compressed
57
  *.mp4 filter=lfs diff=lfs merge=lfs -text
58
  *.webm filter=lfs diff=lfs merge=lfs -text
59
+ data/qa/qa.csv filter=lfs diff=lfs merge=lfs -text
60
+ data/qa/test.csv filter=lfs diff=lfs merge=lfs -text
61
+ data/qa/train.csv filter=lfs diff=lfs merge=lfs -text
62
+ data/qa/validation.csv filter=lfs diff=lfs merge=lfs -text
63
+ data/passages/passages.tsv filter=lfs diff=lfs merge=lfs -text
64
+ data/passages/test.tsv filter=lfs diff=lfs merge=lfs -text
65
+ data/passages/train.tsv filter=lfs diff=lfs merge=lfs -text
66
+ data/passages/validation.tsv filter=lfs diff=lfs merge=lfs -text
67
+ data/passages/validation.tsv.zip filter=lfs diff=lfs merge=lfs -text
68
+ data/passages/passages.tsv.zip filter=lfs diff=lfs merge=lfs -text
69
+ data/passages/test.tsv.zip filter=lfs diff=lfs merge=lfs -text
70
+ data/passages/train.tsv.zip filter=lfs diff=lfs merge=lfs -text
data/passages/passages.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d0f2af137d64dfaed29952a05f8ac1370d2f0fd0ca0c7bf5a773135d4141d3cd
3
+ size 558636463
data/passages/passages.tsv.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ba53a01c34f9cfdadf13bd1c6079b49f63b8baeda366cd0e4232ba2a81c6778f
3
+ size 177521801
data/passages/test.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0a3ce28edc0bd16f23b98dc244e7e9493df1908759b3a4ba3cb544dea49e6ba0
3
+ size 55782786
data/passages/test.tsv.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9fe7f02e8270f96b0af94d7b77def636f8b00fdbf6954db0d0c33f185105f029
3
+ size 21104815
data/passages/train.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8a0eff74a31466da9953ff9bdfc4e5dde7f5d7c7e69bbbc3d861ecd39bcf8834
3
+ size 447902075
data/passages/train.tsv.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d4a6dd2498d2a2bdc7cebfe3986a60fa950e268a564e32018335c98394433e19
3
+ size 169452208
data/passages/validation.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bc6362d4c415532660604cb070f4411ca5c89995c3499a8e96c4f9dcae759b89
3
+ size 54947161
data/passages/validation.tsv.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2e3af207a25fe7db5ab8d60fa252fc6667ab00b8b21547c5d3361cfb3228e21a
3
+ size 20804593
data/qa/qa.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:01589a7c02740cd525da73a54a599dff66a4999864e66d1c803405e338e193be
3
+ size 2401088
data/qa/test.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dc0cfd5253ae837e447e70d2ee22645b83bee43c383384f5e1882f72f9279c4b
3
+ size 225178
data/qa/train.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:91f8a3eb1f872f81e9c420241b6e44b1ba0c6592378708f1c947d575e28d9e8a
3
+ size 1930776
data/qa/validation.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:25faae1290920494fe25d0eccf2487ee9a32f48475fd0be697869b50c9371129
3
+ size 244900
mbe.py ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datasets
import pandas as pd


# Dataset-card metadata; intentionally left blank by the upstream script.
_CITATION = """"""
_DESCRIPTION = """"""
_HOMEPAGE = ""
_LICENSE = ""

# Repository-relative paths to each data file, keyed by configuration name
# and then by split.  The "all" entry is the full, unsplit file.
_URLS = {
    "qa": {
        "train": "data/qa/train.csv",
        "validation": "data/qa/validation.csv",
        "test": "data/qa/test.csv",
        "all": "data/qa/qa.csv",
    },
    "passages": {
        "train": "data/passages/train.tsv",
        "validation": "data/passages/validation.tsv",
        "test": "data/passages/test.tsv",
        "all": "data/passages/passages.tsv",
    },
}

# Per-configuration description, feature schema, and license.  Every column
# is exposed as a plain string feature, matching the CSV/TSV contents.
_CONFIGS = {
    "qa": {
        "description": "Answer bar exam questions",
        "features": {
            column: datasets.Value("string")
            for column in (
                "idx",
                "dataset",
                "example_id",
                "prompt_id",
                "source",
                "subject",
                "question_number",
                "prompt",
                "question",
                "choice_a",
                "choice_b",
                "choice_c",
                "choice_d",
                "answer",
                "gold_passage",
                "gold_idx",
            )
        },
        "license": None,
    },
    "passages": {
        "description": "Passage corpus of bar exam question explanations, Wex definitions and primary sources, and caselaw",
        "features": {
            column: datasets.Value("string")
            for column in (
                "idx",
                "source",
                "faiss_id",
                "case_id",
                "absolute_paragraph_id",
                "opinion_id",
                "relative_paragraph_id",
                "text",
            )
        },
        "license": None,
    },
}
class BarExamQA(datasets.GeneratorBasedBuilder):
    """Legal retrieval/QA dataset for the multistate bar exam."""

    # One builder config per entry in the module-level _CONFIGS table
    # ("qa" and "passages"); the task name doubles as the description.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=config_name,
            version=datasets.Version("1.0.0"),
            description=config_name,
        )
        for config_name in _CONFIGS
    ]

    def _info(self):
        """Build the DatasetInfo for the active configuration."""
        config = _CONFIGS[self.config.name]
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(config["features"]),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=config["license"],
        )

    def _split_generators(self, dl_manager):
        """Download the configuration's files and declare train/validation/test splits.

        NOTE(review): the whole _URLS sub-dict is passed to
        download_and_extract, so the "all" file is fetched as well even
        though no split is generated for it.
        """
        paths = dl_manager.download_and_extract(_URLS[self.config.name])
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={"fpath": paths[key], "name": self.config.name},
            )
            for split, key in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "validation"),
                (datasets.Split.TEST, "test"),
            )
        ]

    def _generate_examples(self, fpath, name):
        """Yields examples as (key, example) tuples."""
        # "qa" files are comma-separated, "passages" files tab-separated;
        # each row becomes one example keyed by its position in the file.
        if name in ["qa"]:
            records = pd.read_csv(fpath).to_dict(orient="records")
            for row_index, record in enumerate(records):
                yield row_index, record

        if name in ["passages"]:
            records = pd.read_csv(fpath, sep='\t').to_dict(orient="records")
            for row_index, record in enumerate(records):
                yield row_index, record