yuvalkirstain committed on
Commit
ac762f8
·
1 Parent(s): a369e64

make hotpotqa less weird

Browse files
Files changed (1) hide show
  1. mrqa.py +49 -44
mrqa.py CHANGED
@@ -123,38 +123,39 @@ class MRQA(datasets.GeneratorBasedBuilder):
123
  {
124
  "subset": datasets.Value("string"),
125
  "context": datasets.Value("string"),
126
- "context_tokens": datasets.Sequence(
127
- {
128
- "tokens": datasets.Value("string"),
129
- "offsets": datasets.Value("int32"),
130
- }
131
- ),
132
  "qid": datasets.Value("string"),
133
  "question": datasets.Value("string"),
134
- "question_tokens": datasets.Sequence(
135
- {
136
- "tokens": datasets.Value("string"),
137
- "offsets": datasets.Value("int32"),
138
- }
139
- ),
140
- "detected_answers": datasets.Sequence(
141
- {
142
- "text": datasets.Value("string"),
143
- "char_spans": datasets.Sequence(
144
- {
145
- "start": datasets.Value("int32"),
146
- "end": datasets.Value("int32"),
147
- }
148
- ),
149
- "token_spans": datasets.Sequence(
150
- {
151
- "start": datasets.Value("int32"),
152
- "end": datasets.Value("int32"),
153
- }
154
- ),
155
- }
156
- ),
157
  "answers": datasets.Sequence(datasets.Value("string")),
 
158
  }
159
  ),
160
  supervised_keys=None,
@@ -203,29 +204,33 @@ class MRQA(datasets.GeneratorBasedBuilder):
203
  for row in f:
204
  paragraph = json.loads(row)
205
  context = paragraph["context"].strip()
206
- context_tokens = [{"tokens": t[0], "offsets": t[1]} for t in paragraph["context_tokens"]]
 
 
 
 
207
  for qa in paragraph["qas"]:
208
  qid = qa["qid"]
209
  question = qa["question"].strip()
210
- question_tokens = [{"tokens": t[0], "offsets": t[1]} for t in qa["question_tokens"]]
211
- detected_answers = []
212
- for detect_ans in qa["detected_answers"]:
213
- detected_answers.append(
214
- {
215
- "text": detect_ans["text"].strip(),
216
- "char_spans": [{"start": t[0], "end": t[1]} for t in detect_ans["char_spans"]],
217
- "token_spans": [{"start": t[0], "end": t[1]} for t in detect_ans["token_spans"]],
218
- }
219
- )
220
  answers = qa["answers"]
221
  final_row = {
222
  "subset": subset,
223
  "context": context,
224
- "context_tokens": context_tokens,
225
  "qid": qid,
226
  "question": question,
227
- "question_tokens": question_tokens,
228
- "detected_answers": detected_answers,
229
  "answers": answers,
230
  "answer": answers[0]
231
  }
@@ -234,5 +239,5 @@ class MRQA(datasets.GeneratorBasedBuilder):
234
 
235
  if __name__ == '__main__':
236
  from datasets import load_dataset
237
- ssfd_debug = load_dataset("/Users/yuvalkirstain/repos/mrqa", name="hotpotqa")
238
  x = 5
 
123
  {
124
  "subset": datasets.Value("string"),
125
  "context": datasets.Value("string"),
126
+ # "context_tokens": datasets.Sequence(
127
+ # {
128
+ # "tokens": datasets.Value("string"),
129
+ # "offsets": datasets.Value("int32"),
130
+ # }
131
+ # ),
132
  "qid": datasets.Value("string"),
133
  "question": datasets.Value("string"),
134
+ # "question_tokens": datasets.Sequence(
135
+ # {
136
+ # "tokens": datasets.Value("string"),
137
+ # "offsets": datasets.Value("int32"),
138
+ # }
139
+ # ),
140
+ # "detected_answers": datasets.Sequence(
141
+ # {
142
+ # "text": datasets.Value("string"),
143
+ # "char_spans": datasets.Sequence(
144
+ # {
145
+ # "start": datasets.Value("int32"),
146
+ # "end": datasets.Value("int32"),
147
+ # }
148
+ # ),
149
+ # "token_spans": datasets.Sequence(
150
+ # {
151
+ # "start": datasets.Value("int32"),
152
+ # "end": datasets.Value("int32"),
153
+ # }
154
+ # ),
155
+ # }
156
+ # ),
157
  "answers": datasets.Sequence(datasets.Value("string")),
158
+ "answer": datasets.Value("string"),
159
  }
160
  ),
161
  supervised_keys=None,
 
204
  for row in f:
205
  paragraph = json.loads(row)
206
  context = paragraph["context"].strip()
207
+ if subset == "HotpotQA":
208
+ context = context.replace("[PAR] ", "\n\n")
209
+ context = context.replace("[TLE]", "Title:")
210
+ context = context.replace("[SEP]", "\nPassage:").strip()
211
+ # context_tokens = [{"tokens": t[0], "offsets": t[1]} for t in paragraph["context_tokens"]]
212
  for qa in paragraph["qas"]:
213
  qid = qa["qid"]
214
  question = qa["question"].strip()
215
+ # question_tokens = [{"tokens": t[0], "offsets": t[1]} for t in qa["question_tokens"]]
216
+ # detected_answers = []
217
+ # for detect_ans in qa["detected_answers"]:
218
+ # detected_answers.append(
219
+ # {
220
+ # "text": detect_ans["text"].strip(),
221
+ # "char_spans": [{"start": t[0], "end": t[1]} for t in detect_ans["char_spans"]],
222
+ # "token_spans": [{"start": t[0], "end": t[1]} for t in detect_ans["token_spans"]],
223
+ # }
224
+ # )
225
  answers = qa["answers"]
226
  final_row = {
227
  "subset": subset,
228
  "context": context,
229
+ # "context_tokens": context_tokens,
230
  "qid": qid,
231
  "question": question,
232
+ # "question_tokens": question_tokens,
233
+ # "detected_answers": detected_answers,
234
  "answers": answers,
235
  "answer": answers[0]
236
  }
 
239
 
240
  if __name__ == '__main__':
241
  from datasets import load_dataset
242
+ ssfd_debug = load_dataset("/Users/yuvalkirstain/repos/mrqa", name="newsqa")
243
  x = 5