Pradeep Kumar committed on
Commit 0b89b7a · verified
1 Parent(s): 7496baf

Delete squad_lib.py

Files changed (1)
  1. squad_lib.py +0 -975
squad_lib.py DELETED
@@ -1,975 +0,0 @@
1
- # Copyright 2024 The TensorFlow Authors. All Rights Reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- """Library to process data for SQuAD 1.1 and SQuAD 2.0."""
16
- # pylint: disable=g-bad-import-order
17
- import collections
18
- import copy
19
- import json
20
- import math
21
- import os
22
-
23
- import six
24
-
25
- from absl import logging
26
- import tensorflow as tf, tf_keras
27
-
28
- from official.nlp.tools import tokenization
29
-
30
-
31
- class SquadExample(object):
32
- """A single training/test example for simple sequence classification.
33
-
34
- For examples without an answer, the start and end position are -1.
35
-
36
- Attributes:
37
- qas_id: ID of the question-answer pair.
38
- question_text: Original text for the question.
39
- doc_tokens: The list of tokens in the context obtained by splitting on
40
- whitespace only.
41
- orig_answer_text: Original text for the answer.
42
- start_position: Starting index of the answer in `doc_tokens`.
43
- end_position: Ending index of the answer in `doc_tokens`.
44
- is_impossible: Whether the question is impossible to answer given the
45
- context. Only used in SQuAD 2.0.
46
- """
47
-
48
- def __init__(self,
49
- qas_id,
50
- question_text,
51
- doc_tokens,
52
- orig_answer_text=None,
53
- start_position=None,
54
- end_position=None,
55
- is_impossible=False):
56
- self.qas_id = qas_id
57
- self.question_text = question_text
58
- self.doc_tokens = doc_tokens
59
- self.orig_answer_text = orig_answer_text
60
- self.start_position = start_position
61
- self.end_position = end_position
62
- self.is_impossible = is_impossible
63
-
64
- def __str__(self):
65
- return self.__repr__()
66
-
67
- def __repr__(self):
68
- s = ""
69
- s += "qas_id: %s" % (tokenization.printable_text(self.qas_id))
70
- s += ", question_text: %s" % (
71
- tokenization.printable_text(self.question_text))
72
- s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
73
- if self.start_position:
74
- s += ", start_position: %d" % (self.start_position)
75
- if self.end_position:
76
- s += ", end_position: %d" % (self.end_position)
77
- if self.is_impossible:
78
- s += ", is_impossible: %r" % (self.is_impossible)
79
- return s
80
-
81
-
82
- class InputFeatures(object):
83
- """A single set of features of data."""
84
-
85
- def __init__(self,
86
- unique_id,
87
- example_index,
88
- doc_span_index,
89
- tokens,
90
- token_to_orig_map,
91
- token_is_max_context,
92
- input_ids,
93
- input_mask,
94
- segment_ids,
95
- paragraph_mask=None,
96
- class_index=None,
97
- start_position=None,
98
- end_position=None,
99
- is_impossible=None):
100
- self.unique_id = unique_id
101
- self.example_index = example_index
102
- self.doc_span_index = doc_span_index
103
- self.tokens = tokens
104
- self.token_to_orig_map = token_to_orig_map
105
- self.token_is_max_context = token_is_max_context
106
- self.input_ids = input_ids
107
- self.input_mask = input_mask
108
- self.segment_ids = segment_ids
109
- self.start_position = start_position
110
- self.end_position = end_position
111
- self.is_impossible = is_impossible
112
- self.paragraph_mask = paragraph_mask
113
- self.class_index = class_index
114
-
115
-
116
- class FeatureWriter(object):
117
- """Writes InputFeature to TF example file."""
118
-
119
- def __init__(self, filename, is_training):
120
- self.filename = filename
121
- self.is_training = is_training
122
- self.num_features = 0
123
- tf.io.gfile.makedirs(os.path.dirname(filename))
124
- self._writer = tf.io.TFRecordWriter(filename)
125
-
126
- def process_feature(self, feature):
127
- """Write a InputFeature to the TFRecordWriter as a tf.train.Example."""
128
- self.num_features += 1
129
-
130
- def create_int_feature(values):
131
- feature = tf.train.Feature(
132
- int64_list=tf.train.Int64List(value=list(values)))
133
- return feature
134
-
135
- features = collections.OrderedDict()
136
- features["unique_ids"] = create_int_feature([feature.unique_id])
137
- features["input_ids"] = create_int_feature(feature.input_ids)
138
- features["input_mask"] = create_int_feature(feature.input_mask)
139
- features["segment_ids"] = create_int_feature(feature.segment_ids)
140
-
141
- if feature.paragraph_mask is not None:
142
- features["paragraph_mask"] = create_int_feature(feature.paragraph_mask)
143
- if feature.class_index is not None:
144
- features["class_index"] = create_int_feature([feature.class_index])
145
-
146
- if self.is_training:
147
- features["start_positions"] = create_int_feature([feature.start_position])
148
- features["end_positions"] = create_int_feature([feature.end_position])
149
- impossible = 0
150
- if feature.is_impossible:
151
- impossible = 1
152
- features["is_impossible"] = create_int_feature([impossible])
153
-
154
- tf_example = tf.train.Example(features=tf.train.Features(feature=features))
155
- self._writer.write(tf_example.SerializeToString())
156
-
157
- def close(self):
158
- self._writer.close()
159
-
160
-
161
- def read_squad_examples(input_file, is_training,
162
- version_2_with_negative,
163
- translated_input_folder=None):
164
- """Read a SQuAD json file into a list of SquadExample."""
165
- with tf.io.gfile.GFile(input_file, "r") as reader:
166
- input_data = json.load(reader)["data"]
167
-
168
- if translated_input_folder is not None:
169
- translated_files = tf.io.gfile.glob(
170
- os.path.join(translated_input_folder, "*.json"))
171
- for file in translated_files:
172
- with tf.io.gfile.GFile(file, "r") as reader:
173
- input_data.extend(json.load(reader)["data"])
174
-
175
- def is_whitespace(c):
176
- if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
177
- return True
178
- return False
179
-
180
- examples = []
181
- for entry in input_data:
182
- for paragraph in entry["paragraphs"]:
183
- paragraph_text = paragraph["context"]
184
- doc_tokens = []
185
- char_to_word_offset = []
186
- prev_is_whitespace = True
187
- for c in paragraph_text:
188
- if is_whitespace(c):
189
- prev_is_whitespace = True
190
- else:
191
- if prev_is_whitespace:
192
- doc_tokens.append(c)
193
- else:
194
- doc_tokens[-1] += c
195
- prev_is_whitespace = False
196
- char_to_word_offset.append(len(doc_tokens) - 1)
197
-
198
- for qa in paragraph["qas"]:
199
- qas_id = qa["id"]
200
- question_text = qa["question"]
201
- start_position = None
202
- end_position = None
203
- orig_answer_text = None
204
- is_impossible = False
205
- if is_training:
206
-
207
- if version_2_with_negative:
208
- is_impossible = qa["is_impossible"]
209
- if (len(qa["answers"]) != 1) and (not is_impossible):
210
- raise ValueError(
211
- "For training, each question should have exactly 1 answer.")
212
- if not is_impossible:
213
- answer = qa["answers"][0]
214
- orig_answer_text = answer["text"]
215
- answer_offset = answer["answer_start"]
216
- answer_length = len(orig_answer_text)
217
- start_position = char_to_word_offset[answer_offset]
218
- end_position = char_to_word_offset[answer_offset + answer_length -
219
- 1]
220
- # Only add answers where the text can be exactly recovered from the
221
- # document. If this can't be done, it's likely due to weird Unicode
222
- # stuff so we will just skip the example.
223
- #
224
- # Note that this means for training mode, not every example is
225
- # guaranteed to be preserved.
226
- actual_text = " ".join(doc_tokens[start_position:(end_position +
227
- 1)])
228
- cleaned_answer_text = " ".join(
229
- tokenization.whitespace_tokenize(orig_answer_text))
230
- if actual_text.find(cleaned_answer_text) == -1:
231
- logging.warning("Could not find answer: '%s' vs. '%s'",
232
- actual_text, cleaned_answer_text)
233
- continue
234
- else:
235
- start_position = -1
236
- end_position = -1
237
- orig_answer_text = ""
238
-
239
- example = SquadExample(
240
- qas_id=qas_id,
241
- question_text=question_text,
242
- doc_tokens=doc_tokens,
243
- orig_answer_text=orig_answer_text,
244
- start_position=start_position,
245
- end_position=end_position,
246
- is_impossible=is_impossible)
247
- examples.append(example)
248
-
249
- return examples
250
-
251
-
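# Illustrative sketch (not part of the deleted file): the minimal SQuAD-style
# JSON structure that read_squad_examples expects under the top-level "data"
# key. SQuAD 1.1 is shown; for SQuAD 2.0 each "qas" entry also carries an
# "is_impossible" flag. The concrete strings are made-up sample values reusing
# the John Smith example from the comments further below.
minimal_squad_json = {
    "data": [{
        "paragraphs": [{
            "context": "The leader was John Smith (1895-1943).",
            "qas": [{
                "id": "q1",
                "question": "What year was John Smith born?",
                "answers": [{"text": "1895", "answer_start": 27}],
            }],
        }],
    }]
}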
252
- def convert_examples_to_features(examples,
253
- tokenizer,
254
- max_seq_length,
255
- doc_stride,
256
- max_query_length,
257
- is_training,
258
- output_fn,
259
- xlnet_format=False,
260
- batch_size=None):
261
- """Loads a data file into a list of `InputBatch`s."""
262
-
263
- base_id = 1000000000
264
- unique_id = base_id
265
- feature = None
266
- for (example_index, example) in enumerate(examples):
267
- query_tokens = tokenizer.tokenize(example.question_text)
268
-
269
- if len(query_tokens) > max_query_length:
270
- query_tokens = query_tokens[0:max_query_length]
271
-
272
- tok_to_orig_index = []
273
- orig_to_tok_index = []
274
- all_doc_tokens = []
275
- for (i, token) in enumerate(example.doc_tokens):
276
- orig_to_tok_index.append(len(all_doc_tokens))
277
- sub_tokens = tokenizer.tokenize(token)
278
- for sub_token in sub_tokens:
279
- tok_to_orig_index.append(i)
280
- all_doc_tokens.append(sub_token)
281
-
282
- tok_start_position = None
283
- tok_end_position = None
284
- if is_training and example.is_impossible:
285
- tok_start_position = -1
286
- tok_end_position = -1
287
- if is_training and not example.is_impossible:
288
- tok_start_position = orig_to_tok_index[example.start_position]
289
- if example.end_position < len(example.doc_tokens) - 1:
290
- tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
291
- else:
292
- tok_end_position = len(all_doc_tokens) - 1
293
- (tok_start_position, tok_end_position) = _improve_answer_span(
294
- all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
295
- example.orig_answer_text)
296
-
297
- # The -3 accounts for [CLS], [SEP] and [SEP]
298
- max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
299
-
300
- # We can have documents that are longer than the maximum sequence length.
301
- # To deal with this we do a sliding window approach, where we take chunks
302
- # of up to our max length with a stride of `doc_stride`.
303
- _DocSpan = collections.namedtuple( # pylint: disable=invalid-name
304
- "DocSpan", ["start", "length"])
305
- doc_spans = []
306
- start_offset = 0
307
- while start_offset < len(all_doc_tokens):
308
- length = len(all_doc_tokens) - start_offset
309
- if length > max_tokens_for_doc:
310
- length = max_tokens_for_doc
311
- doc_spans.append(_DocSpan(start=start_offset, length=length))
312
- if start_offset + length == len(all_doc_tokens):
313
- break
314
- start_offset += min(length, doc_stride)
315
-
316
- for (doc_span_index, doc_span) in enumerate(doc_spans):
317
- tokens = []
318
- token_to_orig_map = {}
319
- token_is_max_context = {}
320
- segment_ids = []
321
-
322
- # Paragraph mask used in XLNet.
323
- # 1 represents paragraph and class tokens.
324
- # 0 represents query and other special tokens.
325
- paragraph_mask = []
326
-
327
- # pylint: disable=cell-var-from-loop
328
- def process_query(seg_q):
329
- for token in query_tokens:
330
- tokens.append(token)
331
- segment_ids.append(seg_q)
332
- paragraph_mask.append(0)
333
- tokens.append("[SEP]")
334
- segment_ids.append(seg_q)
335
- paragraph_mask.append(0)
336
-
337
- def process_paragraph(seg_p):
338
- for i in range(doc_span.length):
339
- split_token_index = doc_span.start + i
340
- token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
341
-
342
- is_max_context = _check_is_max_context(doc_spans, doc_span_index,
343
- split_token_index)
344
- token_is_max_context[len(tokens)] = is_max_context
345
- tokens.append(all_doc_tokens[split_token_index])
346
- segment_ids.append(seg_p)
347
- paragraph_mask.append(1)
348
- tokens.append("[SEP]")
349
- segment_ids.append(seg_p)
350
- paragraph_mask.append(0)
351
-
352
- def process_class(seg_class):
353
- class_index = len(segment_ids)
354
- tokens.append("[CLS]")
355
- segment_ids.append(seg_class)
356
- paragraph_mask.append(1)
357
- return class_index
358
-
359
- if xlnet_format:
360
- seg_p, seg_q, seg_class, seg_pad = 0, 1, 2, 3
361
- process_paragraph(seg_p)
362
- process_query(seg_q)
363
- class_index = process_class(seg_class)
364
- else:
365
- seg_p, seg_q, seg_class, seg_pad = 1, 0, 0, 0
366
- class_index = process_class(seg_class)
367
- process_query(seg_q)
368
- process_paragraph(seg_p)
369
-
370
- input_ids = tokenizer.convert_tokens_to_ids(tokens)
371
-
372
- # The mask has 1 for real tokens and 0 for padding tokens. Only real
373
- # tokens are attended to.
374
- input_mask = [1] * len(input_ids)
375
-
376
- # Zero-pad up to the sequence length.
377
- while len(input_ids) < max_seq_length:
378
- input_ids.append(0)
379
- input_mask.append(0)
380
- segment_ids.append(seg_pad)
381
- paragraph_mask.append(0)
382
-
383
- assert len(input_ids) == max_seq_length
384
- assert len(input_mask) == max_seq_length
385
- assert len(segment_ids) == max_seq_length
386
- assert len(paragraph_mask) == max_seq_length
387
-
388
- start_position = 0
389
- end_position = 0
390
- span_contains_answer = False
391
-
392
- if is_training and not example.is_impossible:
393
- # For training, if our document chunk does not contain an annotation
394
- # we throw it out, since there is nothing to predict.
395
- doc_start = doc_span.start
396
- doc_end = doc_span.start + doc_span.length - 1
397
- span_contains_answer = (tok_start_position >= doc_start and
398
- tok_end_position <= doc_end)
399
- if span_contains_answer:
400
- doc_offset = 0 if xlnet_format else len(query_tokens) + 2
401
- start_position = tok_start_position - doc_start + doc_offset
402
- end_position = tok_end_position - doc_start + doc_offset
403
-
404
- if example_index < 20:
405
- logging.info("*** Example ***")
406
- logging.info("unique_id: %s", (unique_id))
407
- logging.info("example_index: %s", (example_index))
408
- logging.info("doc_span_index: %s", (doc_span_index))
409
- logging.info("tokens: %s",
410
- " ".join([tokenization.printable_text(x) for x in tokens]))
411
- logging.info(
412
- "token_to_orig_map: %s", " ".join([
413
- "%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)
414
- ]))
415
- logging.info(
416
- "token_is_max_context: %s", " ".join([
417
- "%d:%s" % (x, y)
418
- for (x, y) in six.iteritems(token_is_max_context)
419
- ]))
420
- logging.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
421
- logging.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
422
- logging.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
423
- logging.info("paragraph_mask: %s", " ".join(
424
- [str(x) for x in paragraph_mask]))
425
- logging.info("class_index: %d", class_index)
426
- if is_training:
427
- if span_contains_answer:
428
- answer_text = " ".join(tokens[start_position:(end_position + 1)])
429
- logging.info("start_position: %d", (start_position))
430
- logging.info("end_position: %d", (end_position))
431
- logging.info("answer: %s", tokenization.printable_text(answer_text))
432
- else:
433
- logging.info("document span doesn't contain answer")
434
-
435
- feature = InputFeatures(
436
- unique_id=unique_id,
437
- example_index=example_index,
438
- doc_span_index=doc_span_index,
439
- tokens=tokens,
440
- paragraph_mask=paragraph_mask,
441
- class_index=class_index,
442
- token_to_orig_map=token_to_orig_map,
443
- token_is_max_context=token_is_max_context,
444
- input_ids=input_ids,
445
- input_mask=input_mask,
446
- segment_ids=segment_ids,
447
- start_position=start_position,
448
- end_position=end_position,
449
- is_impossible=not span_contains_answer)
450
-
451
- # Run callback
452
- if is_training:
453
- output_fn(feature)
454
- else:
455
- output_fn(feature, is_padding=False)
456
-
457
- unique_id += 1
458
-
459
- if not is_training and feature:
460
- assert batch_size
461
- num_padding = 0
462
- num_examples = unique_id - base_id
463
- if unique_id % batch_size != 0:
464
- num_padding = batch_size - (num_examples % batch_size)
465
- logging.info("Adding padding examples to make sure no partial batch.")
466
- logging.info("Adds %d padding examples for inference.", num_padding)
467
- dummy_feature = copy.deepcopy(feature)
468
- for _ in range(num_padding):
469
- dummy_feature.unique_id = unique_id
470
-
471
- # Run callback
472
- output_fn(dummy_feature, is_padding=True)
473
- unique_id += 1
474
- return unique_id - base_id
475
-
476
-
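# Illustrative sketch (not part of the deleted file): a self-contained rerun of
# the sliding-window loop inside convert_examples_to_features, showing the
# document spans produced for a 10-token document with max_tokens_for_doc=6 and
# doc_stride=3. The helper name make_doc_spans is hypothetical.
import collections

_DocSpan = collections.namedtuple("DocSpan", ["start", "length"])

def make_doc_spans(num_tokens, max_tokens_for_doc, doc_stride):
  doc_spans, start_offset = [], 0
  while start_offset < num_tokens:
    length = min(num_tokens - start_offset, max_tokens_for_doc)
    doc_spans.append(_DocSpan(start=start_offset, length=length))
    if start_offset + length == num_tokens:
      break
    start_offset += min(length, doc_stride)
  return doc_spans

# Overlapping chunks with a stride of 3; the final chunk is shorter.
assert make_doc_spans(10, 6, 3) == [
    _DocSpan(start=0, length=6),
    _DocSpan(start=3, length=6),
    _DocSpan(start=6, length=4),
]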
477
- def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
478
- orig_answer_text):
479
- """Returns tokenized answer spans that better match the annotated answer."""
480
-
481
- # The SQuAD annotations are character based. We first project them to
482
- # whitespace-tokenized words. But then after WordPiece tokenization, we can
483
- # often find a "better match". For example:
484
- #
485
- # Question: What year was John Smith born?
486
- # Context: The leader was John Smith (1895-1943).
487
- # Answer: 1895
488
- #
489
- # The original whitespace-tokenized answer will be "(1895-1943).". However
490
- # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
491
- # the exact answer, 1895.
492
- #
493
- # However, this is not always possible. Consider the following:
494
- #
495
- # Question: What country is the top exporter of electronics?
496
- # Context: The Japanese electronics industry is the largest in the world.
497
- # Answer: Japan
498
- #
499
- # In this case, the annotator chose "Japan" as a character sub-span of
500
- # the word "Japanese". Since our WordPiece tokenizer does not split
501
- # "Japanese", we just use "Japanese" as the annotation. This is fairly rare
502
- # in SQuAD, but does happen.
503
- tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
504
-
505
- for new_start in range(input_start, input_end + 1):
506
- for new_end in range(input_end, new_start - 1, -1):
507
- text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
508
- if text_span == tok_answer_text:
509
- return (new_start, new_end)
510
-
511
- return (input_start, input_end)
512
-
513
-
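# Illustrative sketch (not part of the deleted file): the "(1895-1943)." case
# from the comment above, run through _improve_answer_span. The toy tokenizer
# below is a stand-in for the WordPiece tokenizer and simply returns the
# sub-tokens assumed for the answer text.
class _ToyTokenizer(object):

  def tokenize(self, text):
    # Assumed sub-token output for the answer "1895".
    return ["1895"]

doc_tokens = ["(", "1895", "-", "1943", ")", "."]
# The original annotation maps to the full sub-token span 0..5 of
# "(1895-1943)."; the improved span narrows to just the "1895" sub-token.
assert _improve_answer_span(doc_tokens, 0, 5, _ToyTokenizer(), "1895") == (1, 1)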
514
- def _check_is_max_context(doc_spans, cur_span_index, position):
515
- """Check if this is the 'max context' doc span for the token."""
516
-
517
- # Because of the sliding window approach taken to scoring documents, a single
518
- # token can appear in multiple document spans. E.g.
519
- # Doc: the man went to the store and bought a gallon of milk
520
- # Span A: the man went to the
521
- # Span B: to the store and bought
522
- # Span C: and bought a gallon of
523
- # ...
524
- #
525
- # Now the word 'bought' will have two scores from spans B and C. We only
526
- # want to consider the score with "maximum context", which we define as
527
- # the *minimum* of its left and right context (the *sum* of left and
528
- # right context will always be the same, of course).
529
- #
530
- # In the example the maximum context for 'bought' would be span C since
531
- # it has 1 left context and 3 right context, while span B has 4 left context
532
- # and 0 right context.
533
- best_score = None
534
- best_span_index = None
535
- for (span_index, doc_span) in enumerate(doc_spans):
536
- end = doc_span.start + doc_span.length - 1
537
- if position < doc_span.start:
538
- continue
539
- if position > end:
540
- continue
541
- num_left_context = position - doc_span.start
542
- num_right_context = end - position
543
- score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
544
- if best_score is None or score > best_score:
545
- best_score = score
546
- best_span_index = span_index
547
-
548
- return cur_span_index == best_span_index
549
-
550
-
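# Illustrative sketch (not part of the deleted file): the "bought" example from
# the comment above, evaluated with _check_is_max_context. Positions index into
# the whitespace-tokenized document, so "bought" sits at position 7; span B has
# 4 tokens of left context and 0 of right, span C has 1 and 3, so span C is the
# max-context span for "bought".
import collections

_DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
doc_spans = [
    _DocSpan(start=0, length=5),  # Span A: "the man went to the"
    _DocSpan(start=3, length=5),  # Span B: "to the store and bought"
    _DocSpan(start=6, length=5),  # Span C: "and bought a gallon of"
]
assert not _check_is_max_context(doc_spans, cur_span_index=1, position=7)
assert _check_is_max_context(doc_spans, cur_span_index=2, position=7)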
551
- def write_predictions(all_examples,
552
- all_features,
553
- all_results,
554
- n_best_size,
555
- max_answer_length,
556
- do_lower_case,
557
- output_prediction_file,
558
- output_nbest_file,
559
- output_null_log_odds_file,
560
- version_2_with_negative=False,
561
- null_score_diff_threshold=0.0,
562
- verbose=False):
563
- """Write final predictions to the json file and log-odds of null if needed."""
564
- logging.info("Writing predictions to: %s", (output_prediction_file))
565
- logging.info("Writing nbest to: %s", (output_nbest_file))
566
-
567
- all_predictions, all_nbest_json, scores_diff_json = (
568
- postprocess_output(
569
- all_examples=all_examples,
570
- all_features=all_features,
571
- all_results=all_results,
572
- n_best_size=n_best_size,
573
- max_answer_length=max_answer_length,
574
- do_lower_case=do_lower_case,
575
- version_2_with_negative=version_2_with_negative,
576
- null_score_diff_threshold=null_score_diff_threshold,
577
- verbose=verbose))
578
-
579
- write_to_json_files(all_predictions, output_prediction_file)
580
- write_to_json_files(all_nbest_json, output_nbest_file)
581
- if version_2_with_negative:
582
- write_to_json_files(scores_diff_json, output_null_log_odds_file)
583
-
584
-
585
- def postprocess_output(all_examples,
586
- all_features,
587
- all_results,
588
- n_best_size,
589
- max_answer_length,
590
- do_lower_case,
591
- version_2_with_negative=False,
592
- null_score_diff_threshold=0.0,
593
- xlnet_format=False,
594
- verbose=False):
595
- """Postprocess model output, to form predicton results."""
596
-
597
- example_index_to_features = collections.defaultdict(list)
598
- for feature in all_features:
599
- example_index_to_features[feature.example_index].append(feature)
600
- unique_id_to_result = {}
601
- for result in all_results:
602
- unique_id_to_result[result.unique_id] = result
603
-
604
- _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
605
- "PrelimPrediction",
606
- ["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
607
-
608
- all_predictions = collections.OrderedDict()
609
- all_nbest_json = collections.OrderedDict()
610
- scores_diff_json = collections.OrderedDict()
611
-
612
- for (example_index, example) in enumerate(all_examples):
613
- features = example_index_to_features[example_index]
614
-
615
- prelim_predictions = []
616
- # keep track of the minimum score of null start+end of position 0
617
- score_null = 1000000 # large and positive
618
- min_null_feature_index = 0 # the paragraph slice with min null score
619
- null_start_logit = 0 # the start logit at the slice with min null score
620
- null_end_logit = 0 # the end logit at the slice with min null score
621
- for (feature_index, feature) in enumerate(features):
622
- if feature.unique_id not in unique_id_to_result:
623
- logging.info("Skip eval example %s, not in pred.", feature.unique_id)
624
- continue
625
- result = unique_id_to_result[feature.unique_id]
626
-
627
- # if we could have irrelevant answers, get the min score of irrelevant
628
- if version_2_with_negative:
629
- if xlnet_format:
630
- feature_null_score = result.class_logits
631
- else:
632
- feature_null_score = result.start_logits[0] + result.end_logits[0]
633
- if feature_null_score < score_null:
634
- score_null = feature_null_score
635
- min_null_feature_index = feature_index
636
- null_start_logit = result.start_logits[0]
637
- null_end_logit = result.end_logits[0]
638
- for (start_index, start_logit,
639
- end_index, end_logit) in _get_best_indexes_and_logits(
640
- result=result,
641
- n_best_size=n_best_size,
642
- xlnet_format=xlnet_format):
643
- # We could hypothetically create invalid predictions, e.g., predict
644
- # that the start of the span is in the question. We throw out all
645
- # invalid predictions.
646
- if start_index >= len(feature.tokens):
647
- continue
648
- if end_index >= len(feature.tokens):
649
- continue
650
- if start_index not in feature.token_to_orig_map:
651
- continue
652
- if end_index not in feature.token_to_orig_map:
653
- continue
654
- if not feature.token_is_max_context.get(start_index, False):
655
- continue
656
- if end_index < start_index:
657
- continue
658
- length = end_index - start_index + 1
659
- if length > max_answer_length:
660
- continue
661
- prelim_predictions.append(
662
- _PrelimPrediction(
663
- feature_index=feature_index,
664
- start_index=start_index,
665
- end_index=end_index,
666
- start_logit=start_logit,
667
- end_logit=end_logit))
668
-
669
- if version_2_with_negative and not xlnet_format:
670
- prelim_predictions.append(
671
- _PrelimPrediction(
672
- feature_index=min_null_feature_index,
673
- start_index=0,
674
- end_index=0,
675
- start_logit=null_start_logit,
676
- end_logit=null_end_logit))
677
- prelim_predictions = sorted(
678
- prelim_predictions,
679
- key=lambda x: (x.start_logit + x.end_logit),
680
- reverse=True)
681
-
682
- _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
683
- "NbestPrediction", ["text", "start_logit", "end_logit"])
684
-
685
- seen_predictions = {}
686
- nbest = []
687
- for pred in prelim_predictions:
688
- if len(nbest) >= n_best_size:
689
- break
690
- feature = features[pred.feature_index]
691
- if pred.start_index > 0 or xlnet_format: # this is a non-null prediction
692
- tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
693
- orig_doc_start = feature.token_to_orig_map[pred.start_index]
694
- orig_doc_end = feature.token_to_orig_map[pred.end_index]
695
- orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
696
- tok_text = " ".join(tok_tokens)
697
-
698
- # De-tokenize WordPieces that have been split off.
699
- tok_text = tok_text.replace(" ##", "")
700
- tok_text = tok_text.replace("##", "")
701
-
702
- # Clean whitespace
703
- tok_text = tok_text.strip()
704
- tok_text = " ".join(tok_text.split())
705
- orig_text = " ".join(orig_tokens)
706
-
707
- final_text = get_final_text(
708
- tok_text, orig_text, do_lower_case, verbose=verbose)
709
- if final_text in seen_predictions:
710
- continue
711
-
712
- seen_predictions[final_text] = True
713
- else:
714
- final_text = ""
715
- seen_predictions[final_text] = True
716
-
717
- nbest.append(
718
- _NbestPrediction(
719
- text=final_text,
720
- start_logit=pred.start_logit,
721
- end_logit=pred.end_logit))
722
-
723
- # if we didn't include the empty option in the n-best, include it
724
- if version_2_with_negative and not xlnet_format:
725
- if "" not in seen_predictions:
726
- nbest.append(
727
- _NbestPrediction(
728
- text="", start_logit=null_start_logit,
729
- end_logit=null_end_logit))
730
- # In very rare edge cases we could have no valid predictions. So we
731
- # just create a nonce prediction in this case to avoid failure.
732
- if not nbest:
733
- nbest.append(
734
- _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
735
-
736
- assert len(nbest) >= 1
737
-
738
- total_scores = []
739
- best_non_null_entry = None
740
- for entry in nbest:
741
- total_scores.append(entry.start_logit + entry.end_logit)
742
- if not best_non_null_entry:
743
- if entry.text:
744
- best_non_null_entry = entry
745
-
746
- probs = _compute_softmax(total_scores)
747
-
748
- nbest_json = []
749
- for (i, entry) in enumerate(nbest):
750
- output = collections.OrderedDict()
751
- output["text"] = entry.text
752
- output["probability"] = probs[i]
753
- output["start_logit"] = entry.start_logit
754
- output["end_logit"] = entry.end_logit
755
- nbest_json.append(output)
756
-
757
- assert len(nbest_json) >= 1
758
-
759
- if not version_2_with_negative:
760
- all_predictions[example.qas_id] = nbest_json[0]["text"]
761
- else:
762
- # pytype: disable=attribute-error
763
- # predict "" iff the null score - the score of best non-null > threshold
764
- if best_non_null_entry is not None:
765
- if xlnet_format:
766
- score_diff = score_null
767
- scores_diff_json[example.qas_id] = score_diff
768
- all_predictions[example.qas_id] = best_non_null_entry.text
769
- else:
770
- score_diff = score_null - best_non_null_entry.start_logit - (
771
- best_non_null_entry.end_logit)
772
- scores_diff_json[example.qas_id] = score_diff
773
- if score_diff > null_score_diff_threshold:
774
- all_predictions[example.qas_id] = ""
775
- else:
776
- all_predictions[example.qas_id] = best_non_null_entry.text
777
- else:
778
- logging.warning("best_non_null_entry is None")
779
- scores_diff_json[example.qas_id] = score_null
780
- all_predictions[example.qas_id] = ""
781
- # pytype: enable=attribute-error
782
-
783
- all_nbest_json[example.qas_id] = nbest_json
784
-
785
- return all_predictions, all_nbest_json, scores_diff_json
786
-
787
-
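# Illustrative sketch (not part of the deleted file): the SQuAD 2.0 null-answer
# decision made near the end of postprocess_output for the non-XLNet path, with
# made-up logits. The null span scores 1.2, the best non-null span scores
# 0.5 + 0.4 = 0.9, and the threshold is 0.0, so the question is predicted
# unanswerable because the score difference 0.3 exceeds the threshold.
score_null = 1.2
best_start_logit, best_end_logit = 0.5, 0.4
null_score_diff_threshold = 0.0
score_diff = score_null - best_start_logit - best_end_logit
prediction = "" if score_diff > null_score_diff_threshold else "best span text"
assert prediction == ""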
788
- def write_to_json_files(json_records, json_file):
789
- with tf.io.gfile.GFile(json_file, "w") as writer:
790
- writer.write(json.dumps(json_records, indent=4) + "\n")
791
-
792
-
793
- def get_final_text(pred_text, orig_text, do_lower_case, verbose=False):
794
- """Project the tokenized prediction back to the original text."""
795
-
796
- # When we created the data, we kept track of the alignment between original
797
- # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
798
- # now `orig_text` contains the span of our original text corresponding to the
799
- # span that we predicted.
800
- #
801
- # However, `orig_text` may contain extra characters that we don't want in
802
- # our prediction.
803
- #
804
- # For example, let's say:
805
- # pred_text = steve smith
806
- # orig_text = Steve Smith's
807
- #
808
- # We don't want to return `orig_text` because it contains the extra "'s".
809
- #
810
- # We don't want to return `pred_text` because it's already been normalized
811
- # (the SQuAD eval script also does punctuation stripping/lower casing but
812
- # our tokenizer does additional normalization like stripping accent
813
- # characters).
814
- #
815
- # What we really want to return is "Steve Smith".
816
- #
817
- # Therefore, we have to apply a semi-complicated alignment heuristic between
818
- # `pred_text` and `orig_text` to get a character-to-character alignment. This
819
- # can fail in certain cases in which case we just return `orig_text`.
820
-
821
- def _strip_spaces(text):
822
- ns_chars = []
823
- ns_to_s_map = collections.OrderedDict()
824
- for (i, c) in enumerate(text):
825
- if c == " ":
826
- continue
827
- ns_to_s_map[len(ns_chars)] = i
828
- ns_chars.append(c)
829
- ns_text = "".join(ns_chars)
830
- return (ns_text, ns_to_s_map)
831
-
832
- # We first tokenize `orig_text`, strip whitespace from the result
833
- # and `pred_text`, and check if they are the same length. If they are
834
- # NOT the same length, the heuristic has failed. If they are the same
835
- # length, we assume the characters are one-to-one aligned.
836
- tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)
837
-
838
- tok_text = " ".join(tokenizer.tokenize(orig_text))
839
-
840
- start_position = tok_text.find(pred_text)
841
- if start_position == -1:
842
- if verbose:
843
- logging.info("Unable to find text: '%s' in '%s'", pred_text, orig_text)
844
- return orig_text
845
- end_position = start_position + len(pred_text) - 1
846
-
847
- (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
848
- (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
849
-
850
- if len(orig_ns_text) != len(tok_ns_text):
851
- if verbose:
852
- logging.info("Length not equal after stripping spaces: '%s' vs '%s'",
853
- orig_ns_text, tok_ns_text)
854
- return orig_text
855
-
856
- # We then project the characters in `pred_text` back to `orig_text` using
857
- # the character-to-character alignment.
858
- tok_s_to_ns_map = {}
859
- for (i, tok_index) in six.iteritems(tok_ns_to_s_map):
860
- tok_s_to_ns_map[tok_index] = i
861
-
862
- orig_start_position = None
863
- if start_position in tok_s_to_ns_map:
864
- ns_start_position = tok_s_to_ns_map[start_position]
865
- if ns_start_position in orig_ns_to_s_map:
866
- orig_start_position = orig_ns_to_s_map[ns_start_position]
867
-
868
- if orig_start_position is None:
869
- if verbose:
870
- logging.info("Couldn't map start position")
871
- return orig_text
872
-
873
- orig_end_position = None
874
- if end_position in tok_s_to_ns_map:
875
- ns_end_position = tok_s_to_ns_map[end_position]
876
- if ns_end_position in orig_ns_to_s_map:
877
- orig_end_position = orig_ns_to_s_map[ns_end_position]
878
-
879
- if orig_end_position is None:
880
- if verbose:
881
- logging.info("Couldn't map end position")
882
- return orig_text
883
-
884
- output_text = orig_text[orig_start_position:(orig_end_position + 1)]
885
- return output_text
886
-
887
-
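# Illustrative sketch (not part of the deleted file): the "Steve Smith" case
# from the comment above. Running it requires the real
# official.nlp.tools.tokenization dependency, since get_final_text builds a
# BasicTokenizer internally.
final = get_final_text("steve smith", "Steve Smith's", do_lower_case=True)
assert final == "Steve Smith"  # The "'s" is dropped; original casing is kept.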
888
- def _get_best_indexes_and_logits(result,
889
- n_best_size,
890
- xlnet_format=False):
891
- """Generates the n-best indexes and logits from a list."""
892
- if xlnet_format:
893
- for i in range(n_best_size):
894
- for j in range(n_best_size):
895
- j_index = i * n_best_size + j
896
- yield (result.start_indexes[i], result.start_logits[i],
897
- result.end_indexes[j_index], result.end_logits[j_index])
898
- else:
899
- start_index_and_score = sorted(enumerate(result.start_logits),
900
- key=lambda x: x[1], reverse=True)
901
- end_index_and_score = sorted(enumerate(result.end_logits),
902
- key=lambda x: x[1], reverse=True)
903
- for i in range(len(start_index_and_score)):
904
- if i >= n_best_size:
905
- break
906
- for j in range(len(end_index_and_score)):
907
- if j >= n_best_size:
908
- break
909
- yield (start_index_and_score[i][0], start_index_and_score[i][1],
910
- end_index_and_score[j][0], end_index_and_score[j][1])
911
-
912
-
913
- def _compute_softmax(scores):
914
- """Compute softmax probability over raw logits."""
915
- if not scores:
916
- return []
917
-
918
- max_score = None
919
- for score in scores:
920
- if max_score is None or score > max_score:
921
- max_score = score
922
-
923
- exp_scores = []
924
- total_sum = 0.0
925
- for score in scores:
926
- x = math.exp(score - max_score)
927
- exp_scores.append(x)
928
- total_sum += x
929
-
930
- probs = []
931
- for score in exp_scores:
932
- probs.append(score / total_sum)
933
- return probs
934
-
935
-
936
- def generate_tf_record_from_json_file(input_file_path,
937
- vocab_file_path,
938
- output_path,
939
- translated_input_folder=None,
940
- max_seq_length=384,
941
- do_lower_case=True,
942
- max_query_length=64,
943
- doc_stride=128,
944
- version_2_with_negative=False,
945
- xlnet_format=False):
946
- """Generates and saves training data into a tf record file."""
947
- train_examples = read_squad_examples(
948
- input_file=input_file_path,
949
- is_training=True,
950
- version_2_with_negative=version_2_with_negative,
951
- translated_input_folder=translated_input_folder)
952
- tokenizer = tokenization.FullTokenizer(
953
- vocab_file=vocab_file_path, do_lower_case=do_lower_case)
954
- train_writer = FeatureWriter(filename=output_path, is_training=True)
955
- number_of_examples = convert_examples_to_features(
956
- examples=train_examples,
957
- tokenizer=tokenizer,
958
- max_seq_length=max_seq_length,
959
- doc_stride=doc_stride,
960
- max_query_length=max_query_length,
961
- is_training=True,
962
- output_fn=train_writer.process_feature,
963
- xlnet_format=xlnet_format)
964
- train_writer.close()
965
-
966
- meta_data = {
967
- "task_type": "bert_squad",
968
- "train_data_size": number_of_examples,
969
- "max_seq_length": max_seq_length,
970
- "max_query_length": max_query_length,
971
- "doc_stride": doc_stride,
972
- "version_2_with_negative": version_2_with_negative,
973
- }
974
-
975
- return meta_data
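# Illustrative sketch (not part of the deleted file): how this module was
# typically driven end to end to produce training TFRecords. The file paths
# below are hypothetical placeholders.
meta_data = generate_tf_record_from_json_file(
    input_file_path="train-v1.1.json",
    vocab_file_path="vocab.txt",
    output_path="train.tf_record",
    max_seq_length=384,
    doc_stride=128,
    max_query_length=64,
    version_2_with_negative=False)
print(meta_data["train_data_size"])  # Number of features written.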