ag2435 committed
Commit a8de101 · 1 Parent(s): 1011e08

added original qasper data & preprocessed version using dataset_reader.py script

.gitattributes CHANGED
@@ -53,3 +53,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
+ *.csv filter=lfs diff=lfs merge=lfs -text
+ *.json filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,162 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ *.DS_Store
README.md ADDED
@@ -0,0 +1,40 @@
+ ---
+ configs:
+ - config_name: default
+   data_files:
+   - split: train
+     path: "data/train_instances.json"
+   - split: dev
+     path: "data/dev_instances.json"
+   - split: test
+     path: "data/test_instances.json"
+ ---
+
+ # Preprocessed QASPER dataset
+
+ Working doc: https://docs.google.com/document/d/1gYPhPNJ5LGttgjix1dwai8pdNcqS6PbqhsM7W0rhKNQ/edit?usp=sharing
+
+ Original:
+ - Dataset: https://github.com/allenai/qasper-led-baseline
+ - Baseline repo: https://github.com/allenai/qasper-led-baseline
+ - HF: https://huggingface.co/datasets/allenai/qasper
+
+ Differences between our implementation and the original:
+ 1. We use the dataset provided at https://huggingface.co/datasets/allenai/qasper since it doesn't require manually downloading files.
+ 2. We remove the dependency on `allennlp`, since the Python package can no longer be installed.
+ 3. We add baselines to [qasper/models](qasper/models/). Currently, we have:
+    - QASPER (Longformer Encoder Decoder)
+    - GPT-3.5-Turbo
+    - TODO: RAG (with R=TF-IDF or Contriever) implemented in LangChain?
+ 4. We replace the `allennlp` special tokens with the special tokens of the HF transformers tokenizer:
+    - paragraph separator: '</s>' -> tokenizer.sep_token
+    - sequence pair start tokens: _tokenizer.sequence_pair_start_tokens -> tokenizer.bos_token
+
+ ## Usage
+
+ ```python
+ from datasets import load_dataset
+
+ dataset = load_dataset("ag2435/qasper")
+ ```
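
A minimal follow-up sketch of inspecting one preprocessed instance after loading the dataset as above. The field names follow the `QasperReader` docstring in `dataset_reader.py` below; that the nested `metadata` dict survives `datasets` serialization unchanged is an assumption.

```python
# Sketch, assuming the YAML config above resolves the three splits.
from datasets import load_dataset

dataset = load_dataset("ag2435/qasper", split="dev")
example = dataset[0]

# Field names per the QasperReader docstring (assumed to be preserved as-is):
print(example.keys())  # question_with_context, paragraph_indices, evidence, answer, metadata
print(example["metadata"]["question_id"])  # ID used by the official evaluator
```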
data/dev_instances.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:83809d9b5c0f41e5651f828851bb9c76056ecc16191b511fd6f0284c3f02768d
+ size 290717275
data/test_instances.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d5b87b8a24fe75cdedfa820b0d25fc0964153d2f204a54a0c3c66c509dd1730f
+ size 412658386
data/train_instances.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9814da951c76f774c07ea8f86e03f1bdde4e842d8122f9aae7e5c34b1594c3e1
+ size 811748147
dataset_reader.py ADDED
@@ -0,0 +1,426 @@
+ """
+ Adapted from https://github.com/allenai/qasper-led-baseline/blob/main/qasper_baselines/dataset_reader.py
+ to get rid of allennlp dependencies.
+ """
+
+ import json
+ import logging
+ import random
+ from enum import Enum
+ from collections import defaultdict
+ from typing import Any, Dict, List, Optional, Iterable, Tuple
+
+ # from overrides import overrides
+
+ # import spacy
+ import torch
+
+ # from allennlp.common.util import JsonDict
+ # from allennlp.data.fields import (
+ #     MetadataField,
+ #     TextField,
+ #     IndexField,
+ #     ListField,
+ #     TensorField,
+ # )
+ # from allennlp.common.file_utils import cached_path, open_compressed
+ # from allennlp.data.dataset_readers.dataset_reader import DatasetReader
+ # from allennlp.data.instance import Instance
+ # from allennlp.data.token_indexers import PretrainedTransformerIndexer
+ # from allennlp.data.tokenizers import Token, PretrainedTransformerTokenizer
+ from transformers import AutoTokenizer
+
+ logger = logging.getLogger(__name__)
+
+
+ class AnswerType(Enum):
+     EXTRACTIVE = 1
+     ABSTRACTIVE = 2
+     BOOLEAN = 3
+     NONE = 4
+
+
+ # @DatasetReader.register("qasper")
+ class QasperReader(object):
+     """
+     Reads a JSON-formatted Qasper data file and returns a `Dataset` where the `Instances` have
+     the following fields:
+      * `question_with_context`, a `TextField` that contains the concatenation of question and
+        context,
+      * `paragraph_indices`, a `ListField` of `IndexFields` indicating paragraph-start tokens
+        in `question_with_context`.
+      * `global_attention_mask`, a mask that can be used by a longformer to specify which tokens in
+        `question_with_context` should have global attention (only present if
+        `include_global_attention_mask` is `True`).
+      * `evidence`, a 0/1 `TensorField` indicating whether each paragraph in `paragraph_indices`
+        should be selected as evidence.
+      * `answer`, a `TextField` that contains the (wordpiece-tokenized) answer to the question
+      * A `MetadataField` that stores the instance's ID, paper ID, the original question, the
+        original passage text, both of these in tokenized form, and the context also broken into
+        paragraphs, and the gold evidence spans, accessible as `metadata['question_id']`,
+        `metadata['article_id']`, `metadata['question']`, `metadata['context']`,
+        `metadata['question_tokens']`, `metadata['context_tokens']`,
+        `metadata['context_paragraphs']`, `metadata['all_evidence']`, `metadata['all_answers']`.
+
+     Parameters
+     ----------
+     transformer_model_name : `str`, optional (default=`allenai/led-base-16384`)
+         This reader chooses tokenizer and token indexer according to this setting.
+     max_query_length : `int`, optional (default=128)
+         The maximum number of wordpieces dedicated to the question. If the question is longer than
+         this, it will be truncated.
+     max_document_length : `int`, optional (default=16384)
+         This is the maximum number of wordpieces allowed per one whole document (including the
+         question, for simplicity). If the document is longer than this many word pieces, it will be
+         truncated.
+     paragraph_separator : `Optional[str]`, optional (default="</s>")
+         If given, we will use this as a separator token in between paragraphs. Pass in `None` to
+         have this not be used.
+     include_global_attention_mask : `bool` (default = True)
+         If `True`, we will include a field in the output containing a global attention mask for use
+         with a longformer, which is `True` for all starts of paragraphs and question tokens, so
+         attention will always be placed on those tokens.
+     context : `str` (default = `full_text`)
+         To reproduce the baselines from the paper that do not have access to the full text of the
+         paper, you can change this argument. Options are `question_only`, `question_and_abstract`,
+         `question_and_introduction`, `question_and_evidence`. If this is set to
+         `question_and_evidence`, the reader will ignore answers that are `None`, and those that
+         are boolean.
+     for_training : `bool` (default = False)
+         This flag affects how questions with multiple answers are handled. When set to True, this
+         flag causes the reader to yield one instance per answer. When set to False, the instance
+         will contain only the first answer. The metadata will always contain all the answers and
+         evidence, which can be used at evaluation time to compute aggregated metrics.
+     """
+
+     def __init__(
+         self,
+         transformer_model_name: str = "allenai/led-base-16384",
+         max_query_length: int = 128,
+         max_document_length: int = 16384,
+         paragraph_separator: Optional[str] = "</s>",
+         include_global_attention_mask: bool = True,
+         context: str = "full_text",
+         for_training: bool = False,
+         **kwargs,
+     ) -> None:
+         # super().__init__(
+         #     manual_distributed_sharding=True,
+         #     manual_multiprocess_sharding=True,
+         #     **kwargs,
+         # )
+         self._transformer_model_name = transformer_model_name
+         # self._tokenizer = PretrainedTransformerTokenizer(
+         #     transformer_model_name, add_special_tokens=False
+         # )
+         self._tokenizer = AutoTokenizer.from_pretrained(transformer_model_name)
+         # Albert: hack
+         self._tokenizer.sequence_pair_start_tokens = [self._tokenizer.bos_token,]
+
+         self._include_global_attention_mask = include_global_attention_mask
+         # self._token_indexers = {
+         #     "tokens": PretrainedTransformerIndexer(transformer_model_name)
+         # }
+         self.max_query_length = max_query_length
+         self.max_document_length = max_document_length
+         self._paragraph_separator = paragraph_separator
+         if context not in [
+             "full_text",
+             "question_only",
+             "question_and_abstract",
+             "question_and_introduction",
+             "question_and_evidence",
+         ]:
+             raise RuntimeError(f"Unrecognized context type: {context}")
+         self._context = context
+         self._for_training = for_training
+         self._stats = defaultdict(int)
+
+     # @overrides
+     def _read(self, file_path: str):
+         # if `file_path` is a URL, redirect to the cache
+         # file_path = cached_path(file_path)
+
+         logger.info("Reading the dataset")
+         if file_path.endswith(".json"):
+             yield from self._read_json(file_path)
+         elif file_path.endswith(".jsonl"):
+             yield from self._read_json_lines(file_path)
+         else:
+             raise RuntimeError(
+                 f"Unsupported extension on file: {file_path}. Only json and jsonl are supported."
+             )
+
+     def _read_json(self, file_path: str):
+         logger.info("Reading json file at %s", file_path)
+         with open(file_path, 'r') as dataset_file:
+             dataset = json.load(dataset_file)
+         for article_id, article in dataset.items():
+             if not article["full_text"]:
+                 continue
+             article["article_id"] = article_id
+             yield from self._article_to_instances(article)
+         self._log_stats()
+
+     def _read_json_lines(self, file_path: str):
+         logger.info("Reading json lines file at %s", file_path)
+         with open(file_path, 'r') as dataset_file:
+             # `self.shard_iterable` came from the removed allennlp `DatasetReader`
+             # base class; iterate over the file directly instead.
+             for data_line in dataset_file:
+                 data = json.loads(data_line)
+                 yield from self._article_to_instances(data)
+         self._log_stats()
+
+     def _log_stats(self) -> None:
+         logger.info("Stats:")
+         for key, value in self._stats.items():
+             logger.info("%s: %d", key, value)
+
+     def _article_to_instances(self, article: Dict[str, Any]):
+         paragraphs = self._get_paragraphs_from_article(article)
+         tokenized_context = None
+         paragraph_start_indices = None
+         # If the context is evidence, text_to_instance will make the appropriate tokenized_context.
+         if self._context != "question_and_evidence":
+             tokenized_context, paragraph_start_indices = self._tokenize_paragraphs(
+                 paragraphs
+             )
+
+         self._stats["number of documents"] += 1
+         for question_answer in article["qas"]:
+             self._stats["number of questions"] += 1
+             self._stats["number of answers"] += len(question_answer["answers"])
+             if len(question_answer["answers"]) > 1:
+                 self._stats["questions with multiple answers"] += 1
+
+             all_answers = []
+             all_evidence = []
+             all_evidence_masks = []
+             for answer_annotation in question_answer["answers"]:
+                 answer, evidence, answer_type = self._extract_answer_and_evidence(
+                     answer_annotation["answer"]
+                 )
+                 all_answers.append({"text": answer, "type": answer_type})
+                 all_evidence.append(evidence)
+                 evidence_mask = self._get_evidence_mask(evidence, paragraphs)
+                 all_evidence_masks.append(evidence_mask)
+
+             additional_metadata = {
+                 "question_id": question_answer["question_id"],
+                 "article_id": article.get("article_id"),
+                 "all_answers": all_answers,
+                 "all_evidence": all_evidence,
+                 "all_evidence_masks": all_evidence_masks,
+             }
+             answers_to_yield = [x['text'] for x in all_answers] if self._for_training else [all_answers[0]['text']]
+             evidence_masks_to_yield = all_evidence_masks if self._for_training else [all_evidence_masks[0]]
+             evidence_to_yield = all_evidence if self._for_training else [all_evidence[0]]
+             for answer, evidence, evidence_mask in zip(answers_to_yield, evidence_to_yield, evidence_masks_to_yield):
+                 if self._context == "question_and_evidence" and answer in ['Unanswerable', 'Yes', 'No']:
+                     continue
+                 yield self.text_to_instance(
+                     question_answer["question"],
+                     paragraphs,
+                     tokenized_context,
+                     paragraph_start_indices,
+                     evidence_mask,
+                     answer,
+                     evidence,
+                     additional_metadata,
+                 )
+
+     @staticmethod
+     def _get_evidence_mask(evidence: List[str], paragraphs: List[str]) -> List[int]:
+         """
+         Takes a list of evidence snippets, and the list of all the paragraphs from the
+         paper, and returns a 0/1 mask over the paragraphs indicating which of them
+         contain the evidence.
+         """
+         evidence_mask = []
+         for paragraph in paragraphs:
+             for evidence_str in evidence:
+                 if evidence_str in paragraph:
+                     evidence_mask.append(1)
+                     break
+             else:
+                 evidence_mask.append(0)
+         return evidence_mask
+
+     # @overrides
+     def text_to_instance(
+         self,  # type: ignore  # pylint: disable=arguments-differ
+         question: str,
+         paragraphs: List[str],
+         tokenized_context: List = None,
+         paragraph_start_indices: List[int] = None,
+         evidence_mask: List[int] = None,
+         answer: str = None,
+         evidence: List[str] = None,
+         additional_metadata: Dict[str, Any] = None,
+     ):
+         fields = {}
+
+         tokenized_question = self._tokenizer.tokenize(question)
+         if len(tokenized_question) > self.max_query_length:
+             self._stats["number of truncated questions"] += 1
+             tokenized_question = tokenized_question[:self.max_query_length]
+
+         if tokenized_context is None or paragraph_start_indices is None:
+             if self._context == "question_and_evidence":
+                 tokenized_context, paragraph_start_indices = self._tokenize_paragraphs(
+                     evidence
+                 )
+             else:
+                 tokenized_context, paragraph_start_indices = self._tokenize_paragraphs(
+                     paragraphs
+                 )
+
+         allowed_context_length = (
+             self.max_document_length
+             - len(tokenized_question)
+             - len(self._tokenizer.sequence_pair_start_tokens)
+             - 1  # for paragraph separator
+         )
+         if len(tokenized_context) > allowed_context_length:
+             self._stats["number of truncated contexts"] += 1
+             tokenized_context = tokenized_context[:allowed_context_length]
+             paragraph_start_indices = [index for index in paragraph_start_indices
+                                        if index <= allowed_context_length]
+             if evidence_mask is not None:
+                 num_paragraphs = len(paragraph_start_indices)
+                 evidence_mask = evidence_mask[:num_paragraphs]
+
+         # This is what Iz's code does.
+         question_and_context = (
+             self._tokenizer.sequence_pair_start_tokens
+             + tokenized_question
+             + [self._paragraph_separator]
+             + tokenized_context
+         )
+         # make the question field
+         question_field = question_and_context
+         fields["question_with_context"] = question_field
+
+         start_of_context = (
+             len(self._tokenizer.sequence_pair_start_tokens)
+             + len(tokenized_question)
+         )
+
+         paragraph_indices_list = [x + start_of_context for x in paragraph_start_indices]
+
+         paragraph_indices_field = (
+             [x for x in paragraph_indices_list] if paragraph_indices_list else [-1]
+         )
+
+         fields["paragraph_indices"] = paragraph_indices_field
+
+         if self._include_global_attention_mask:
+             # We need to make a global attention array. We'll use all the paragraph indices and the
+             # indices of question tokens.
+             mask_indices = set(list(range(start_of_context)) + paragraph_indices_list)
+             mask = [
+                 True if i in mask_indices else False for i in range(len(question_field))
+             ]
+             fields["global_attention_mask"] = torch.tensor(mask)
+
+         if evidence_mask is not None:
+             # evidence_field = torch.tensor(evidence_mask)
+             evidence_field = evidence_mask
+             fields["evidence"] = evidence_field
+
+         if answer:
+             # fields["answer"] = (
+             #     self._tokenizer.add_special_tokens(self._tokenizer.tokenize(answer))
+             # )
+             fields["answer"] = self._tokenizer.tokenize(answer)  # , add_special_tokens=True)
+
+         # make the metadata
+         metadata = {
+             "question": question,
+             "question_tokens": tokenized_question,
+             "paragraphs": paragraphs,
+             "context_tokens": tokenized_context,
+         }
+         if additional_metadata is not None:
+             metadata.update(additional_metadata)
+         fields["metadata"] = metadata
+         return fields
+
+     # @overrides
+     # Not used after removing allennlp: `self._token_indexers` is commented out above.
+     # def apply_token_indexers(self, instance) -> None:
+     #     instance.fields["question_with_context"].token_indexers = self._token_indexers
+     #     instance.fields["answer"].token_indexers = self._token_indexers
+
+     def _tokenize_paragraphs(self, paragraphs: List[str]):
+         tokenized_context = []
+         paragraph_start_indices = []
+         for paragraph in paragraphs:
+             tokenized_paragraph = self._tokenizer.tokenize(paragraph)
+             paragraph_start_indices.append(len(tokenized_context))
+             tokenized_context.extend(tokenized_paragraph)
+             if self._paragraph_separator:
+                 tokenized_context.append(self._paragraph_separator)
+         if self._paragraph_separator:
+             # We added the separator after every paragraph, so we remove it after the last one.
+             tokenized_context = tokenized_context[:-1]
+         return tokenized_context, paragraph_start_indices
+
+     def _extract_answer_and_evidence(
+         self, answer: Dict[str, Any]
+     ) -> Tuple[str, List[str], str]:
+         evidence_spans = [x.replace("\n", " ").strip() for x in answer["evidence"]]
+         evidence_spans = [x for x in evidence_spans if x != ""]
+         if not evidence_spans:
+             self._stats["answers with no evidence"] += 1
+         # TODO (pradeep): Deal with figures and tables.
+         if any(["FLOAT SELECTED" in span for span in evidence_spans]):
+             # Ignoring question if any of the selected evidence is a table or a figure.
+             self._stats["answers with table or figure as evidence"] += 1
+         if len(evidence_spans) > 1:
+             self._stats["multiple_evidence_spans_count"] += 1
+
+         answer_string = None
+         answer_type = None
+         if answer.get("unanswerable", False):
+             self._stats["unanswerable questions"] += 1
+             answer_string = "Unanswerable"
+             answer_type = AnswerType.NONE.name
+         elif answer.get("yes_no") is not None:
+             self._stats["yes/no questions"] += 1
+             answer_string = "Yes" if answer["yes_no"] else "No"
+             answer_type = AnswerType.BOOLEAN.name
+         elif answer.get("extractive_spans", []):
+             self._stats["extractive questions"] += 1
+             if len(answer["extractive_spans"]) > 1:
+                 self._stats["extractive questions with multiple spans"] += 1
+             answer_string = ", ".join(answer["extractive_spans"])
+             answer_type = AnswerType.EXTRACTIVE.name
+         else:
+             answer_string = answer.get("free_form_answer", "")
+             if not answer_string:
+                 self._stats["questions with empty answer"] += 1
+             else:
+                 self._stats["freeform answers"] += 1
+             answer_type = AnswerType.ABSTRACTIVE.name
+
+         return answer_string, evidence_spans, answer_type
+
+     def _get_paragraphs_from_article(self, article: Dict) -> List[str]:
+         if self._context == "question_only":
+             return []
+         if self._context == "question_and_abstract":
+             return [article["abstract"]]
+         full_text = article["full_text"]
+         paragraphs = []
+         for section_info in full_text:
+             # TODO (pradeep): It is possible there are other discrepancies between plain text,
+             # LaTeX and HTML. Do a thorough investigation and add tests.
+             if section_info["section_name"] is not None:
+                 paragraphs.append(section_info["section_name"])
+             for paragraph in section_info["paragraphs"]:
+                 paragraph_text = paragraph.replace("\n", " ").strip()
+                 if paragraph_text:
+                     paragraphs.append(paragraph_text)
+             if self._context == "question_and_introduction":
+                 # Assuming the first section is the introduction and stopping here.
+                 break
+         return paragraphs
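
The preprocessed files under `data/` appear to come from running this reader over the original JSON files; the notebook at the end of this commit performs exactly the read step. A minimal sketch, assuming the instances are dumped with plain `json` (the exact serialization used to produce `data/*_instances.json` is not shown in this commit):

```python
# Sketch of the preprocessing step; input/output paths are files added in this commit.
import json
from dataset_reader import QasperReader

# Disabling the global attention mask keeps every field JSON-serializable
# (otherwise `global_attention_mask` is a torch.Tensor).
reader = QasperReader(include_global_attention_mask=False)
instances = list(reader._read("original_data/qasper-train-dev-v0.3/qasper-dev-v0.3.json"))

with open("data/dev_instances.json", "w") as f:
    json.dump(instances, f)
```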
original_data/qasper-test-and-evaluator-v0.3/README-test.md ADDED
@@ -0,0 +1,26 @@
+ Dataset of Information Seeking Questions and Answers Anchored in Research Papers: Test Set and Evaluator
+ --------------------------------------------------------------------------------------------------------
+
+ ## Version: 0.3
+
+ The tarball you found this file in should contain the test split of the Qasper dataset version 0.3 and the official evaluator script.
+
+ Please make sure you access the test file only to evaluate your finalized model.
+
+ ## Images of tables and figures
+
+ You can download them here: https://qasper-dataset.s3.us-west-2.amazonaws.com/test_figures_and_tables.tgz
+
+ ## Evaluation
+
+ You can evaluate your model using the standalone evaluator as follows:
+
+ ```
+ python qasper_evaluator.py --predictions predictions.jsonl --gold qasper-test-v0.3.json [--text_evidence_only]
+ ```
+
+ Run the following to understand the arguments:
+
+ ```
+ python qasper_evaluator.py -h
+ ```
original_data/qasper-test-and-evaluator-v0.3/qasper-test-v0.3.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6e29ad410e6e39aa1936017fb965b30a20eb2e7751997f55b97c9d281aa884e5
+ size 18078957
original_data/qasper-test-and-evaluator-v0.3/qasper_evaluator.py ADDED
@@ -0,0 +1,167 @@
+ """
+ Official script for evaluating models built for the Qasper dataset. The script
+ outputs Answer F1 and Evidence F1 reported in the paper.
+ """
+ from collections import Counter
+ import argparse
+ import string
+ import re
+ import json
+
+
+ def normalize_answer(s):
+     """
+     Taken from the official evaluation script for v1.1 of the SQuAD dataset.
+     Lower text and remove punctuation, articles and extra whitespace.
+     """
+
+     def remove_articles(text):
+         return re.sub(r"\b(a|an|the)\b", " ", text)
+
+     def white_space_fix(text):
+         return " ".join(text.split())
+
+     def remove_punc(text):
+         exclude = set(string.punctuation)
+         return "".join(ch for ch in text if ch not in exclude)
+
+     def lower(text):
+         return text.lower()
+
+     return white_space_fix(remove_articles(remove_punc(lower(s))))
+
+
+ def token_f1_score(prediction, ground_truth):
+     """
+     Taken from the official evaluation script for v1.1 of the SQuAD dataset.
+     """
+     prediction_tokens = normalize_answer(prediction).split()
+     ground_truth_tokens = normalize_answer(ground_truth).split()
+     common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
+     num_same = sum(common.values())
+     if num_same == 0:
+         return 0
+     precision = 1.0 * num_same / len(prediction_tokens)
+     recall = 1.0 * num_same / len(ground_truth_tokens)
+     f1 = (2 * precision * recall) / (precision + recall)
+     return f1
+
+
+ def paragraph_f1_score(prediction, ground_truth):
+     if not ground_truth and not prediction:
+         # The question is unanswerable and the prediction is empty.
+         return 1.0
+     num_same = len(set(ground_truth).intersection(set(prediction)))
+     if num_same == 0:
+         return 0.0
+     precision = num_same / len(prediction)
+     recall = num_same / len(ground_truth)
+     f1 = (2 * precision * recall) / (precision + recall)
+     return f1
+
+
+ def get_answers_and_evidence(data, text_evidence_only):
+     answers_and_evidence = {}
+     for paper_data in data.values():
+         for qa_info in paper_data["qas"]:
+             question_id = qa_info["question_id"]
+             references = []
+             for annotation_info in qa_info["answers"]:
+                 answer_info = annotation_info["answer"]
+                 if answer_info["unanswerable"]:
+                     references.append({"answer": "Unanswerable", "evidence": [], "type": "none"})
+                 else:
+                     if answer_info["extractive_spans"]:
+                         answer = ", ".join(answer_info["extractive_spans"])
+                         answer_type = "extractive"
+                     elif answer_info["free_form_answer"]:
+                         answer = answer_info["free_form_answer"]
+                         answer_type = "abstractive"
+                     elif answer_info["yes_no"]:
+                         answer = "Yes"
+                         answer_type = "boolean"
+                     elif answer_info["yes_no"] is not None:
+                         answer = "No"
+                         answer_type = "boolean"
+                     else:
+                         raise RuntimeError(f"Annotation {answer_info['annotation_id']} does not contain an answer")
+                     if text_evidence_only:
+                         evidence = [text for text in answer_info["evidence"] if "FLOAT SELECTED" not in text]
+                     else:
+                         evidence = answer_info["evidence"]
+                     references.append({"answer": answer, "evidence": evidence, "type": answer_type})
+             answers_and_evidence[question_id] = references
+
+     return answers_and_evidence
+
+
+ def evaluate(gold, predicted):
+     max_answer_f1s = []
+     max_evidence_f1s = []
+     max_answer_f1s_by_type = {
+         "extractive": [],
+         "abstractive": [],
+         "boolean": [],
+         "none": [],
+     }
+     num_missing_predictions = 0
+     for question_id, references in gold.items():
+         if question_id not in predicted:
+             num_missing_predictions += 1
+             max_answer_f1s.append(0.0)
+             max_evidence_f1s.append(0.0)
+             continue
+         answer_f1s_and_types = [
+             (token_f1_score(predicted[question_id]["answer"], reference["answer"]),
+              reference["type"])
+             for reference in gold[question_id]
+         ]
+         max_answer_f1, answer_type = sorted(answer_f1s_and_types, key=lambda x: x[0], reverse=True)[0]
+         max_answer_f1s.append(max_answer_f1)
+         max_answer_f1s_by_type[answer_type].append(max_answer_f1)
+         evidence_f1s = [
+             paragraph_f1_score(predicted[question_id]["evidence"], reference["evidence"])
+             for reference in gold[question_id]
+         ]
+         max_evidence_f1s.append(max(evidence_f1s))
+
+     mean = lambda x: sum(x) / len(x) if x else 0.0
+     return {
+         "Answer F1": mean(max_answer_f1s),
+         "Answer F1 by type": {key: mean(value) for key, value in max_answer_f1s_by_type.items()},
+         "Evidence F1": mean(max_evidence_f1s),
+         "Missing predictions": num_missing_predictions
+     }
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     parser.add_argument(
+         "--predictions",
+         type=str,
+         required=True,
+         help="""JSON lines file with each line in format:
+                 {'question_id': str, 'predicted_answer': str, 'predicted_evidence': List[str]}"""
+     )
+     parser.add_argument(
+         "--gold",
+         type=str,
+         required=True,
+         help="Test or dev set from the released dataset"
+     )
+     parser.add_argument(
+         "--text_evidence_only",
+         action="store_true",
+         help="If set, the evaluator will ignore evidence in figures and tables while reporting evidence f1"
+     )
+     args = parser.parse_args()
+     gold_data = json.load(open(args.gold))
+     gold_answers_and_evidence = get_answers_and_evidence(gold_data, args.text_evidence_only)
+     predicted_answers_and_evidence = {}
+     for line in open(args.predictions):
+         prediction_data = json.loads(line)
+         predicted_answers_and_evidence[prediction_data["question_id"]] = {
+             "answer": prediction_data["predicted_answer"],
+             "evidence": prediction_data["predicted_evidence"]
+         }
+     evaluation_output = evaluate(gold_answers_and_evidence, predicted_answers_and_evidence)
+     print(json.dumps(evaluation_output, indent=2))
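
For reference, a minimal sketch of producing a `predictions.jsonl` this evaluator accepts. The line format comes from the `--predictions` help string above; the ID and answers here are hypothetical placeholders, not real data:

```python
import json

# Each line: {'question_id': str, 'predicted_answer': str, 'predicted_evidence': List[str]}
predictions = [
    {
        "question_id": "question-id-from-gold-file",  # hypothetical; must match the gold file
        "predicted_answer": "Unanswerable",
        # Empty evidence scores 1.0 against an unanswerable reference (see paragraph_f1_score).
        "predicted_evidence": [],
    },
]
with open("predictions.jsonl", "w") as f:
    for prediction in predictions:
        f.write(json.dumps(prediction) + "\n")
```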
original_data/qasper-train-dev-v0.3/README.md ADDED
@@ -0,0 +1,71 @@
+ A Dataset of Information Seeking Questions and Answers Anchored in Research Papers
+ ----------------------------------------------------------------------------------
+
+ ## Version 0.3
+
+ The tarball you found this README in should contain the training and development sets of Qasper version 0.3. The images of the tables and figures in the associated papers can be found here: https://qasper-dataset.s3.us-west-2.amazonaws.com/train_dev_figures_and_tables.tgz
+
+ The full text of the papers is extracted from S2ORC (Lo et al., 2020).
+
+ Each file is in JSON format, where the keys are arxiv ids, and the values are dicts containing `title`, `abstract`, `full_text`, `figures_and_tables`, and `qas` (QA pairs).
+
+ ## Differences from v0.2
+
+ Due to an issue in the annotation interface, a small number of annotations (about 0.6%) had multiple answer types in v0.2 (e.g., unanswerable and boolean; see more information on answer types in the final section of this README). These were manually fixed to create v0.3. These fixes affected the train, development, and test sets.
+
+ ## Figures and tables
+
+ These are new starting with version 0.2. The actual images of the figures and tables can be downloaded from the link above. The JSON files contain the captions to those images in the `figure_and_table_captions` field.
+
+ This field is a dict whose keys are file names of the images of tables and figures, and the values are their captions.
+
+ For example, the paper with arxiv id `1811.00942` is in the training set, and contains the following `figures_and_tables` field:
+
+ ```
+ "figures_and_tables": [
+   {
+     "file": "3-Table1-1.png",
+     "caption": "Table 1: Comparison of neural language models on Penn Treebank and WikiText-103."
+   },
+   {
+     "file": "4-Figure1-1.png",
+     "caption": "Figure 1: Log perplexity\u2013recall error with KN-5."
+   },
+   {
+     "file": "4-Figure2-1.png",
+     "caption": "Figure 2: Log perplexity\u2013recall error with QRNN."
+   },
+   {
+     "file": "4-Table2-1.png",
+     "caption": "Table 2: Language modeling results on performance and model quality."
+   }
+ ]
+ ```
+
+ and when you download the `train_dev_figures_and_tables` tarball, you will see four files in `train/1811.00942`, with file names corresponding to the `file` fields in the list above.
+
+ ## Fields specific to questions
+
+ - `nlp_background` shows the experience the question writer had. The values can be `zero` (no experience), `two` (0-2 years of experience), `five` (2-5 years of experience), and `infinity` (> 5 years of experience). The field may be empty as well, indicating the writer has chosen not to share this information.
+
+ - `topic_background` shows how familiar the question writer was with the topic of the paper. The values are `unfamiliar`, `familiar`, `research` (meaning that the topic is the research area of the writer), or null.
+
+ - `paper_read`, when specified, shows whether the question writer has read the paper.
+
+ - `search_query`, if not empty, is the query the question writer used to find the abstract of the paper from a large pool of abstracts we made available to them.
+
+ ## Fields specific to answers
+
+ Unanswerable answers have `unanswerable` set to true. The remaining answers have exactly one of the following fields being non-empty.
+
+ - `extractive_spans` are spans in the paper which serve as the answer.
+ - `free_form_answer` is a written out answer.
+ - `yes_no` is true iff the answer is Yes, and false iff the answer is No.
+
+ `evidence` is the set of paragraphs, figures or tables used to arrive at the answer. When the evidence is a table or a figure, it starts with the string `FLOAT SELECTED`, and contains the caption of the corresponding table or figure.
+
+ `highlighted_evidence` is the set of sentences the answer providers selected as evidence if they chose textual evidence. The text in the `evidence` field is a mapping from these sentences to the paragraph level. That is, if you see textual evidence in the `evidence` field, it is guaranteed to be entire paragraphs, while that is not the case with `highlighted_evidence`.
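
A minimal sketch of walking the schema this README describes, using the train file added in this commit; the answer-resolution order mirrors the "exactly one non-empty field" rule above:

```python
import json

# Keys are arxiv ids; values hold title, abstract, full_text, figures_and_tables, qas.
with open("original_data/qasper-train-dev-v0.3/qasper-train-v0.3.json") as f:
    data = json.load(f)

for arxiv_id, paper in data.items():
    for qa in paper["qas"]:
        for annotation in qa["answers"]:
            answer = annotation["answer"]
            if answer["unanswerable"]:
                label = "Unanswerable"
            elif answer["extractive_spans"]:
                label = ", ".join(answer["extractive_spans"])
            elif answer["free_form_answer"]:
                label = answer["free_form_answer"]
            else:
                # Per the rule above, yes_no must be set (True or False) here.
                label = "Yes" if answer["yes_no"] else "No"
            print(arxiv_id, qa["question"], "->", label)
    break  # stop after one paper for a quick look
```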
original_data/qasper-train-dev-v0.3/qasper-dev-v0.3.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ae7ee62a65b1c4225791c70de80c2aad4e8998cf1fd4f09a53103db4f21af93
+ size 11398686
original_data/qasper-train-dev-v0.3/qasper-train-v0.3.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9458bfe76074a8fa8d1685af02bcc73537aa6d338ad20591dfaff1946bc88bf4
+ size 31969387
qasper.ipynb ADDED
@@ -0,0 +1,2561 @@
+ {
+  "cells": [
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "# QASPER evaluation"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 1,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "%load_ext autoreload\n",
+     "%autoreload 2"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 2,
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stderr",
+      "output_type": "stream",
+      "text": [
+       "/Users/ag2435/anaconda3/envs/arxiv-agent/lib/python3.12/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
+       "  from .autonotebook import tqdm as notebook_tqdm\n"
+      ]
+     }
+    ],
+    "source": [
+     "import json\n",
+     "import os\n",
+     "import plotly.express as px\n",
+     "import plotly.graph_objects as go\n",
+     "import dataset_reader\n",
+     "from transformers.tokenization_utils_base import BatchEncoding"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 3,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# input_path = 'original_data/qasper-train-dev-v0.3/qasper-train-v0.3.json'\n",
+     "input_path = 'original_data/qasper-test-and-evaluator-v0.3/qasper-test-v0.3.json'\n",
+     "output_path = 'data'\n",
+     "split = 'test'\n",
+     "assert split in input_path"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 4,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "reader = dataset_reader.QasperReader(include_global_attention_mask=False)\n",
+     "instances = list(reader._read(input_path))"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 5,
+    "metadata": {},
+    "outputs": [
+     {
+      "data": {
+       "text/plain": [
+        "defaultdict(int,\n",
+        "            {'number of documents': 416,\n",
+        "             'number of questions': 1451,\n",
+        "             'number of answers': 3554,\n",
+        "             'questions with multiple answers': 1427,\n",
+        "             'extractive questions': 1817,\n",
+        "             'extractive questions with multiple spans': 787,\n",
+        "             'multiple_evidence_spans_count': 1063,\n",
+        "             'freeform answers': 878,\n",
+        "             'answers with table or figure as evidence': 391,\n",
+        "             'answers with no evidence': 444,\n",
+        "             'unanswerable questions': 366,\n",
+        "             'yes/no questions': 493,\n",
+        "             'number of truncated contexts': 2})"
+       ]
+      },
+      "execution_count": 5,
+      "metadata": {},
+      "output_type": "execute_result"
+     }
+    ],
+    "source": [
+     "reader._stats"
+    ]
+   },
+ [remainder of the notebook diff elided: cell 6 renders a Plotly histogram whose serialized output (apparently per-instance token counts, ranging from under 1,000 up to the 16,384-token document limit) fills the rest of the 2,561-line file, and the diff view is truncated partway through that data array]
+ 2941,
1243
+ 2938,
1244
+ 2940,
1245
+ 992,
1246
+ 993,
1247
+ 6614,
1248
+ 6615,
1249
+ 6615,
1250
+ 3348,
1251
+ 4479,
1252
+ 4479,
1253
+ 4482,
1254
+ 4481,
1255
+ 4486,
1256
+ 4481,
1257
+ 4482,
1258
+ 2845,
1259
+ 2843,
1260
+ 1390,
1261
+ 1389,
1262
+ 1393,
1263
+ 1398,
1264
+ 8632,
1265
+ 8626,
1266
+ 6202,
1267
+ 6205,
1268
+ 6200,
1269
+ 5081,
1270
+ 5085,
1271
+ 5081,
1272
+ 5082,
1273
+ 8441,
1274
+ 8439,
1275
+ 10067,
1276
+ 10051,
1277
+ 4189,
1278
+ 4179,
1279
+ 4176,
1280
+ 4181,
1281
+ 5106,
1282
+ 5093,
1283
+ 5101,
1284
+ 5093,
1285
+ 2917,
1286
+ 2920,
1287
+ 4627,
1288
+ 4629,
1289
+ 4631,
1290
+ 3333,
1291
+ 3332,
1292
+ 3335,
1293
+ 4089,
1294
+ 4086,
1295
+ 5323,
1296
+ 5330,
1297
+ 5323,
1298
+ 5324,
1299
+ 4734,
1300
+ 4732,
1301
+ 4732,
1302
+ 4700,
1303
+ 4705,
1304
+ 4065,
1305
+ 4068,
1306
+ 6630,
1307
+ 6628,
1308
+ 6624,
1309
+ 6626,
1310
+ 4687,
1311
+ 4681,
1312
+ 4681,
1313
+ 4681,
1314
+ 2247,
1315
+ 2254,
1316
+ 2252,
1317
+ 3083,
1318
+ 3083,
1319
+ 3085,
1320
+ 3083,
1321
+ 3089,
1322
+ 3085,
1323
+ 6165,
1324
+ 6169,
1325
+ 8331,
1326
+ 8336,
1327
+ 8329,
1328
+ 4388,
1329
+ 4389,
1330
+ 4398,
1331
+ 4394,
1332
+ 4387,
1333
+ 5780,
1334
+ 5780,
1335
+ 5784,
1336
+ 8566,
1337
+ 8565,
1338
+ 8569,
1339
+ 8568,
1340
+ 1127,
1341
+ 2902,
1342
+ 2906,
1343
+ 2906,
1344
+ 2901,
1345
+ 9027,
1346
+ 9025,
1347
+ 9029,
1348
+ 2142,
1349
+ 2141,
1350
+ 2142,
1351
+ 1924,
1352
+ 1925,
1353
+ 1924,
1354
+ 5633,
1355
+ 5629,
1356
+ 5632,
1357
+ 5344,
1358
+ 5343,
1359
+ 8741,
1360
+ 8746,
1361
+ 8746,
1362
+ 6040,
1363
+ 6045,
1364
+ 6038,
1365
+ 1303,
1366
+ 1302,
1367
+ 1302,
1368
+ 1301,
1369
+ 4192,
1370
+ 4207,
1371
+ 4198,
1372
+ 4202,
1373
+ 3193,
1374
+ 3195,
1375
+ 3191,
1376
+ 3190,
1377
+ 3191,
1378
+ 3189,
1379
+ 3191,
1380
+ 3191,
1381
+ 3189,
1382
+ 4008,
1383
+ 4009,
1384
+ 4291,
1385
+ 4285,
1386
+ 4291,
1387
+ 4289,
1388
+ 2603,
1389
+ 2606,
1390
+ 2606,
1391
+ 2602,
1392
+ 2604,
1393
+ 4724,
1394
+ 4722,
1395
+ 4723,
1396
+ 4723,
1397
+ 2524,
1398
+ 2529,
1399
+ 5882,
1400
+ 5880,
1401
+ 5883,
1402
+ 5881,
1403
+ 5880,
1404
+ 5879,
1405
+ 3867,
1406
+ 3859,
1407
+ 6677,
1408
+ 6674,
1409
+ 6678,
1410
+ 6677,
1411
+ 6681,
1412
+ 7845,
1413
+ 7842,
1414
+ 5504,
1415
+ 5499,
1416
+ 5654,
1417
+ 5650,
1418
+ 4914,
1419
+ 4916,
1420
+ 3199,
1421
+ 3197,
1422
+ 3200,
1423
+ 3200,
1424
+ 3197,
1425
+ 2928,
1426
+ 2927,
1427
+ 2926,
1428
+ 5112,
1429
+ 5113,
1430
+ 5110,
1431
+ 5115,
1432
+ 6886,
1433
+ 6891,
1434
+ 6885,
1435
+ 6895,
1436
+ 6891,
1437
+ 2794,
1438
+ 2796,
1439
+ 2796,
1440
+ 1483,
1441
+ 1486,
1442
+ 4629,
1443
+ 4628,
1444
+ 4628,
1445
+ 3537,
1446
+ 3531,
1447
+ 6720,
1448
+ 6724,
1449
+ 6718,
1450
+ 6491,
1451
+ 6489,
1452
+ 6487,
1453
+ 2942,
1454
+ 2948,
1455
+ 3826,
1456
+ 3827,
1457
+ 3877,
1458
+ 3878,
1459
+ 15066,
1460
+ 15064,
1461
+ 15064,
1462
+ 9732,
1463
+ 9746,
1464
+ 9735,
1465
+ 5568,
1466
+ 5565,
1467
+ 5568,
1468
+ 5567,
1469
+ 5576,
1470
+ 1437,
1471
+ 1436,
1472
+ 1438,
1473
+ 1438,
1474
+ 1438,
1475
+ 5750,
1476
+ 5747,
1477
+ 5750,
1478
+ 5750,
1479
+ 5748,
1480
+ 3338,
1481
+ 3339,
1482
+ 11847,
1483
+ 11853,
1484
+ 11857,
1485
+ 11847,
1486
+ 2241,
1487
+ 2238,
1488
+ 2246,
1489
+ 3751,
1490
+ 3758,
1491
+ 3757,
1492
+ 2891,
1493
+ 2891,
1494
+ 2890,
1495
+ 16384,
1496
+ 9942,
1497
+ 1548,
1498
+ 4793,
1499
+ 4793,
1500
+ 4789,
1501
+ 4790,
1502
+ 1767,
1503
+ 1767,
1504
+ 1770,
1505
+ 3794,
1506
+ 3795,
1507
+ 13007,
1508
+ 13014,
1509
+ 13005,
1510
+ 13009,
1511
+ 3176,
1512
+ 3179,
1513
+ 3185,
1514
+ 3178,
1515
+ 3178,
1516
+ 3178,
1517
+ 5356,
1518
+ 5355,
1519
+ 5355,
1520
+ 6301,
1521
+ 6306,
1522
+ 4021,
1523
+ 4023,
1524
+ 4023,
1525
+ 4029,
1526
+ 4024,
1527
+ 4567,
1528
+ 4566,
1529
+ 8079,
1530
+ 8077,
1531
+ 8078,
1532
+ 8080,
1533
+ 2661,
1534
+ 2660,
1535
+ 1454,
1536
+ 7845,
1537
+ 7828,
1538
+ 8313,
1539
+ 8317,
1540
+ 8312,
1541
+ 8317,
1542
+ 5162,
1543
+ 5165,
1544
+ 5159,
1545
+ 5161,
1546
+ 7341,
1547
+ 7343,
1548
+ 7338,
1549
+ 7344,
1550
+ 5199,
1551
+ 5196,
1552
+ 5198,
1553
+ 5196,
1554
+ 4082,
1555
+ 4085,
1556
+ 4081,
1557
+ 4081,
1558
+ 3258,
1559
+ 3263,
1560
+ 3257,
1561
+ 7372,
1562
+ 4943,
1563
+ 4938,
1564
+ 4946,
1565
+ 4944,
1566
+ 2477,
1567
+ 2479,
1568
+ 9025,
1569
+ 9025,
1570
+ 9024,
1571
+ 5528,
1572
+ 5529,
1573
+ 5530,
1574
+ 7757,
1575
+ 7737,
1576
+ 7739,
1577
+ 7734,
1578
+ 7736
1579
+ ],
1580
+ "xaxis": "x",
1581
+ "yaxis": "y"
1582
+ }
1583
+ ],
1584
+ "layout": {
1585
+ "barmode": "relative",
1586
+ "legend": {
1587
+ "title": {
1588
+ "text": "variable"
1589
+ },
1590
+ "tracegroupgap": 0
1591
+ },
1592
+ "template": {
1593
+ "data": {
1594
+ "bar": [
1595
+ {
1596
+ "error_x": {
1597
+ "color": "#2a3f5f"
1598
+ },
1599
+ "error_y": {
1600
+ "color": "#2a3f5f"
1601
+ },
1602
+ "marker": {
1603
+ "line": {
1604
+ "color": "#E5ECF6",
1605
+ "width": 0.5
1606
+ },
1607
+ "pattern": {
1608
+ "fillmode": "overlay",
1609
+ "size": 10,
1610
+ "solidity": 0.2
1611
+ }
1612
+ },
1613
+ "type": "bar"
1614
+ }
1615
+ ],
1616
+ "barpolar": [
1617
+ {
1618
+ "marker": {
1619
+ "line": {
1620
+ "color": "#E5ECF6",
1621
+ "width": 0.5
1622
+ },
1623
+ "pattern": {
1624
+ "fillmode": "overlay",
1625
+ "size": 10,
1626
+ "solidity": 0.2
1627
+ }
1628
+ },
1629
+ "type": "barpolar"
1630
+ }
1631
+ ],
1632
+ "carpet": [
1633
+ {
1634
+ "aaxis": {
1635
+ "endlinecolor": "#2a3f5f",
1636
+ "gridcolor": "white",
1637
+ "linecolor": "white",
1638
+ "minorgridcolor": "white",
1639
+ "startlinecolor": "#2a3f5f"
1640
+ },
1641
+ "baxis": {
1642
+ "endlinecolor": "#2a3f5f",
1643
+ "gridcolor": "white",
1644
+ "linecolor": "white",
1645
+ "minorgridcolor": "white",
1646
+ "startlinecolor": "#2a3f5f"
1647
+ },
1648
+ "type": "carpet"
1649
+ }
1650
+ ],
1651
+ "choropleth": [
1652
+ {
1653
+ "colorbar": {
1654
+ "outlinewidth": 0,
1655
+ "ticks": ""
1656
+ },
1657
+ "type": "choropleth"
1658
+ }
1659
+ ],
1660
+ "contour": [
1661
+ {
1662
+ "colorbar": {
1663
+ "outlinewidth": 0,
1664
+ "ticks": ""
1665
+ },
1666
+ "colorscale": [
1667
+ [
1668
+ 0,
1669
+ "#0d0887"
1670
+ ],
1671
+ [
1672
+ 0.1111111111111111,
1673
+ "#46039f"
1674
+ ],
1675
+ [
1676
+ 0.2222222222222222,
1677
+ "#7201a8"
1678
+ ],
1679
+ [
1680
+ 0.3333333333333333,
1681
+ "#9c179e"
1682
+ ],
1683
+ [
1684
+ 0.4444444444444444,
1685
+ "#bd3786"
1686
+ ],
1687
+ [
1688
+ 0.5555555555555556,
1689
+ "#d8576b"
1690
+ ],
1691
+ [
1692
+ 0.6666666666666666,
1693
+ "#ed7953"
1694
+ ],
1695
+ [
1696
+ 0.7777777777777778,
1697
+ "#fb9f3a"
1698
+ ],
1699
+ [
1700
+ 0.8888888888888888,
1701
+ "#fdca26"
1702
+ ],
1703
+ [
1704
+ 1,
1705
+ "#f0f921"
1706
+ ]
1707
+ ],
1708
+ "type": "contour"
1709
+ }
1710
+ ],
1711
+ "contourcarpet": [
1712
+ {
1713
+ "colorbar": {
1714
+ "outlinewidth": 0,
1715
+ "ticks": ""
1716
+ },
1717
+ "type": "contourcarpet"
1718
+ }
1719
+ ],
1720
+ "heatmap": [
1721
+ {
1722
+ "colorbar": {
1723
+ "outlinewidth": 0,
1724
+ "ticks": ""
1725
+ },
1726
+ "colorscale": [
1727
+ [
1728
+ 0,
1729
+ "#0d0887"
1730
+ ],
1731
+ [
1732
+ 0.1111111111111111,
1733
+ "#46039f"
1734
+ ],
1735
+ [
1736
+ 0.2222222222222222,
1737
+ "#7201a8"
1738
+ ],
1739
+ [
1740
+ 0.3333333333333333,
1741
+ "#9c179e"
1742
+ ],
1743
+ [
1744
+ 0.4444444444444444,
1745
+ "#bd3786"
1746
+ ],
1747
+ [
1748
+ 0.5555555555555556,
1749
+ "#d8576b"
1750
+ ],
1751
+ [
1752
+ 0.6666666666666666,
1753
+ "#ed7953"
1754
+ ],
1755
+ [
1756
+ 0.7777777777777778,
1757
+ "#fb9f3a"
1758
+ ],
1759
+ [
1760
+ 0.8888888888888888,
1761
+ "#fdca26"
1762
+ ],
1763
+ [
1764
+ 1,
1765
+ "#f0f921"
1766
+ ]
1767
+ ],
1768
+ "type": "heatmap"
1769
+ }
1770
+ ],
1771
+ "heatmapgl": [
1772
+ {
1773
+ "colorbar": {
1774
+ "outlinewidth": 0,
1775
+ "ticks": ""
1776
+ },
1777
+ "colorscale": [
1778
+ [
1779
+ 0,
1780
+ "#0d0887"
1781
+ ],
1782
+ [
1783
+ 0.1111111111111111,
1784
+ "#46039f"
1785
+ ],
1786
+ [
1787
+ 0.2222222222222222,
1788
+ "#7201a8"
1789
+ ],
1790
+ [
1791
+ 0.3333333333333333,
1792
+ "#9c179e"
1793
+ ],
1794
+ [
1795
+ 0.4444444444444444,
1796
+ "#bd3786"
1797
+ ],
1798
+ [
1799
+ 0.5555555555555556,
1800
+ "#d8576b"
1801
+ ],
1802
+ [
1803
+ 0.6666666666666666,
1804
+ "#ed7953"
1805
+ ],
1806
+ [
1807
+ 0.7777777777777778,
1808
+ "#fb9f3a"
1809
+ ],
1810
+ [
1811
+ 0.8888888888888888,
1812
+ "#fdca26"
1813
+ ],
1814
+ [
1815
+ 1,
1816
+ "#f0f921"
1817
+ ]
1818
+ ],
1819
+ "type": "heatmapgl"
1820
+ }
1821
+ ],
1822
+ "histogram": [
1823
+ {
1824
+ "marker": {
1825
+ "pattern": {
1826
+ "fillmode": "overlay",
1827
+ "size": 10,
1828
+ "solidity": 0.2
1829
+ }
1830
+ },
1831
+ "type": "histogram"
1832
+ }
1833
+ ],
1834
+ "histogram2d": [
1835
+ {
1836
+ "colorbar": {
1837
+ "outlinewidth": 0,
1838
+ "ticks": ""
1839
+ },
1840
+ "colorscale": [
1841
+ [
1842
+ 0,
1843
+ "#0d0887"
1844
+ ],
1845
+ [
1846
+ 0.1111111111111111,
1847
+ "#46039f"
1848
+ ],
1849
+ [
1850
+ 0.2222222222222222,
1851
+ "#7201a8"
1852
+ ],
1853
+ [
1854
+ 0.3333333333333333,
1855
+ "#9c179e"
1856
+ ],
1857
+ [
1858
+ 0.4444444444444444,
1859
+ "#bd3786"
1860
+ ],
1861
+ [
1862
+ 0.5555555555555556,
1863
+ "#d8576b"
1864
+ ],
1865
+ [
1866
+ 0.6666666666666666,
1867
+ "#ed7953"
1868
+ ],
1869
+ [
1870
+ 0.7777777777777778,
1871
+ "#fb9f3a"
1872
+ ],
1873
+ [
1874
+ 0.8888888888888888,
1875
+ "#fdca26"
1876
+ ],
1877
+ [
1878
+ 1,
1879
+ "#f0f921"
1880
+ ]
1881
+ ],
1882
+ "type": "histogram2d"
1883
+ }
1884
+ ],
1885
+ "histogram2dcontour": [
1886
+ {
1887
+ "colorbar": {
1888
+ "outlinewidth": 0,
1889
+ "ticks": ""
1890
+ },
1891
+ "colorscale": [
1892
+ [
1893
+ 0,
1894
+ "#0d0887"
1895
+ ],
1896
+ [
1897
+ 0.1111111111111111,
1898
+ "#46039f"
1899
+ ],
1900
+ [
1901
+ 0.2222222222222222,
1902
+ "#7201a8"
1903
+ ],
1904
+ [
1905
+ 0.3333333333333333,
1906
+ "#9c179e"
1907
+ ],
1908
+ [
1909
+ 0.4444444444444444,
1910
+ "#bd3786"
1911
+ ],
1912
+ [
1913
+ 0.5555555555555556,
1914
+ "#d8576b"
1915
+ ],
1916
+ [
1917
+ 0.6666666666666666,
1918
+ "#ed7953"
1919
+ ],
1920
+ [
1921
+ 0.7777777777777778,
1922
+ "#fb9f3a"
1923
+ ],
1924
+ [
1925
+ 0.8888888888888888,
1926
+ "#fdca26"
1927
+ ],
1928
+ [
1929
+ 1,
1930
+ "#f0f921"
1931
+ ]
1932
+ ],
1933
+ "type": "histogram2dcontour"
1934
+ }
1935
+ ],
1936
+ "mesh3d": [
1937
+ {
1938
+ "colorbar": {
1939
+ "outlinewidth": 0,
1940
+ "ticks": ""
1941
+ },
1942
+ "type": "mesh3d"
1943
+ }
1944
+ ],
1945
+ "parcoords": [
1946
+ {
1947
+ "line": {
1948
+ "colorbar": {
1949
+ "outlinewidth": 0,
1950
+ "ticks": ""
1951
+ }
1952
+ },
1953
+ "type": "parcoords"
1954
+ }
1955
+ ],
1956
+ "pie": [
1957
+ {
1958
+ "automargin": true,
1959
+ "type": "pie"
1960
+ }
1961
+ ],
1962
+ "scatter": [
1963
+ {
1964
+ "fillpattern": {
1965
+ "fillmode": "overlay",
1966
+ "size": 10,
1967
+ "solidity": 0.2
1968
+ },
1969
+ "type": "scatter"
1970
+ }
1971
+ ],
1972
+ "scatter3d": [
1973
+ {
1974
+ "line": {
1975
+ "colorbar": {
1976
+ "outlinewidth": 0,
1977
+ "ticks": ""
1978
+ }
1979
+ },
1980
+ "marker": {
1981
+ "colorbar": {
1982
+ "outlinewidth": 0,
1983
+ "ticks": ""
1984
+ }
1985
+ },
1986
+ "type": "scatter3d"
1987
+ }
1988
+ ],
1989
+ "scattercarpet": [
1990
+ {
1991
+ "marker": {
1992
+ "colorbar": {
1993
+ "outlinewidth": 0,
1994
+ "ticks": ""
1995
+ }
1996
+ },
1997
+ "type": "scattercarpet"
1998
+ }
1999
+ ],
2000
+ "scattergeo": [
2001
+ {
2002
+ "marker": {
2003
+ "colorbar": {
2004
+ "outlinewidth": 0,
2005
+ "ticks": ""
2006
+ }
2007
+ },
2008
+ "type": "scattergeo"
2009
+ }
2010
+ ],
2011
+ "scattergl": [
2012
+ {
2013
+ "marker": {
2014
+ "colorbar": {
2015
+ "outlinewidth": 0,
2016
+ "ticks": ""
2017
+ }
2018
+ },
2019
+ "type": "scattergl"
2020
+ }
2021
+ ],
2022
+ "scattermapbox": [
2023
+ {
2024
+ "marker": {
2025
+ "colorbar": {
2026
+ "outlinewidth": 0,
2027
+ "ticks": ""
2028
+ }
2029
+ },
2030
+ "type": "scattermapbox"
2031
+ }
2032
+ ],
2033
+ "scatterpolar": [
2034
+ {
2035
+ "marker": {
2036
+ "colorbar": {
2037
+ "outlinewidth": 0,
2038
+ "ticks": ""
2039
+ }
2040
+ },
2041
+ "type": "scatterpolar"
2042
+ }
2043
+ ],
2044
+ "scatterpolargl": [
2045
+ {
2046
+ "marker": {
2047
+ "colorbar": {
2048
+ "outlinewidth": 0,
2049
+ "ticks": ""
2050
+ }
2051
+ },
2052
+ "type": "scatterpolargl"
2053
+ }
2054
+ ],
2055
+ "scatterternary": [
2056
+ {
2057
+ "marker": {
2058
+ "colorbar": {
2059
+ "outlinewidth": 0,
2060
+ "ticks": ""
2061
+ }
2062
+ },
2063
+ "type": "scatterternary"
2064
+ }
2065
+ ],
2066
+ "surface": [
2067
+ {
2068
+ "colorbar": {
2069
+ "outlinewidth": 0,
2070
+ "ticks": ""
2071
+ },
2072
+ "colorscale": [
2073
+ [
2074
+ 0,
2075
+ "#0d0887"
2076
+ ],
2077
+ [
2078
+ 0.1111111111111111,
2079
+ "#46039f"
2080
+ ],
2081
+ [
2082
+ 0.2222222222222222,
2083
+ "#7201a8"
2084
+ ],
2085
+ [
2086
+ 0.3333333333333333,
2087
+ "#9c179e"
2088
+ ],
2089
+ [
2090
+ 0.4444444444444444,
2091
+ "#bd3786"
2092
+ ],
2093
+ [
2094
+ 0.5555555555555556,
2095
+ "#d8576b"
2096
+ ],
2097
+ [
2098
+ 0.6666666666666666,
2099
+ "#ed7953"
2100
+ ],
2101
+ [
2102
+ 0.7777777777777778,
2103
+ "#fb9f3a"
2104
+ ],
2105
+ [
2106
+ 0.8888888888888888,
2107
+ "#fdca26"
2108
+ ],
2109
+ [
2110
+ 1,
2111
+ "#f0f921"
2112
+ ]
2113
+ ],
2114
+ "type": "surface"
2115
+ }
2116
+ ],
2117
+ "table": [
2118
+ {
2119
+ "cells": {
2120
+ "fill": {
2121
+ "color": "#EBF0F8"
2122
+ },
2123
+ "line": {
2124
+ "color": "white"
2125
+ }
2126
+ },
2127
+ "header": {
2128
+ "fill": {
2129
+ "color": "#C8D4E3"
2130
+ },
2131
+ "line": {
2132
+ "color": "white"
2133
+ }
2134
+ },
2135
+ "type": "table"
2136
+ }
2137
+ ]
2138
+ },
2139
+ "layout": {
2140
+ "annotationdefaults": {
2141
+ "arrowcolor": "#2a3f5f",
2142
+ "arrowhead": 0,
2143
+ "arrowwidth": 1
2144
+ },
2145
+ "autotypenumbers": "strict",
2146
+ "coloraxis": {
2147
+ "colorbar": {
2148
+ "outlinewidth": 0,
2149
+ "ticks": ""
2150
+ }
2151
+ },
2152
+ "colorscale": {
2153
+ "diverging": [
2154
+ [
2155
+ 0,
2156
+ "#8e0152"
2157
+ ],
2158
+ [
2159
+ 0.1,
2160
+ "#c51b7d"
2161
+ ],
2162
+ [
2163
+ 0.2,
2164
+ "#de77ae"
2165
+ ],
2166
+ [
2167
+ 0.3,
2168
+ "#f1b6da"
2169
+ ],
2170
+ [
2171
+ 0.4,
2172
+ "#fde0ef"
2173
+ ],
2174
+ [
2175
+ 0.5,
2176
+ "#f7f7f7"
2177
+ ],
2178
+ [
2179
+ 0.6,
2180
+ "#e6f5d0"
2181
+ ],
2182
+ [
2183
+ 0.7,
2184
+ "#b8e186"
2185
+ ],
2186
+ [
2187
+ 0.8,
2188
+ "#7fbc41"
2189
+ ],
2190
+ [
2191
+ 0.9,
2192
+ "#4d9221"
2193
+ ],
2194
+ [
2195
+ 1,
2196
+ "#276419"
2197
+ ]
2198
+ ],
2199
+ "sequential": [
2200
+ [
2201
+ 0,
2202
+ "#0d0887"
2203
+ ],
2204
+ [
2205
+ 0.1111111111111111,
2206
+ "#46039f"
2207
+ ],
2208
+ [
2209
+ 0.2222222222222222,
2210
+ "#7201a8"
2211
+ ],
2212
+ [
2213
+ 0.3333333333333333,
2214
+ "#9c179e"
2215
+ ],
2216
+ [
2217
+ 0.4444444444444444,
2218
+ "#bd3786"
2219
+ ],
2220
+ [
2221
+ 0.5555555555555556,
2222
+ "#d8576b"
2223
+ ],
2224
+ [
2225
+ 0.6666666666666666,
2226
+ "#ed7953"
2227
+ ],
2228
+ [
2229
+ 0.7777777777777778,
2230
+ "#fb9f3a"
2231
+ ],
2232
+ [
2233
+ 0.8888888888888888,
2234
+ "#fdca26"
2235
+ ],
2236
+ [
2237
+ 1,
2238
+ "#f0f921"
2239
+ ]
2240
+ ],
2241
+ "sequentialminus": [
2242
+ [
2243
+ 0,
2244
+ "#0d0887"
2245
+ ],
2246
+ [
2247
+ 0.1111111111111111,
2248
+ "#46039f"
2249
+ ],
2250
+ [
2251
+ 0.2222222222222222,
2252
+ "#7201a8"
2253
+ ],
2254
+ [
2255
+ 0.3333333333333333,
2256
+ "#9c179e"
2257
+ ],
2258
+ [
2259
+ 0.4444444444444444,
2260
+ "#bd3786"
2261
+ ],
2262
+ [
2263
+ 0.5555555555555556,
2264
+ "#d8576b"
2265
+ ],
2266
+ [
2267
+ 0.6666666666666666,
2268
+ "#ed7953"
2269
+ ],
2270
+ [
2271
+ 0.7777777777777778,
2272
+ "#fb9f3a"
2273
+ ],
2274
+ [
2275
+ 0.8888888888888888,
2276
+ "#fdca26"
2277
+ ],
2278
+ [
2279
+ 1,
2280
+ "#f0f921"
2281
+ ]
2282
+ ]
2283
+ },
2284
+ "colorway": [
2285
+ "#636efa",
2286
+ "#EF553B",
2287
+ "#00cc96",
2288
+ "#ab63fa",
2289
+ "#FFA15A",
2290
+ "#19d3f3",
2291
+ "#FF6692",
2292
+ "#B6E880",
2293
+ "#FF97FF",
2294
+ "#FECB52"
2295
+ ],
2296
+ "font": {
2297
+ "color": "#2a3f5f"
2298
+ },
2299
+ "geo": {
2300
+ "bgcolor": "white",
2301
+ "lakecolor": "white",
2302
+ "landcolor": "#E5ECF6",
2303
+ "showlakes": true,
2304
+ "showland": true,
2305
+ "subunitcolor": "white"
2306
+ },
2307
+ "hoverlabel": {
2308
+ "align": "left"
2309
+ },
2310
+ "hovermode": "closest",
2311
+ "mapbox": {
2312
+ "style": "light"
2313
+ },
2314
+ "paper_bgcolor": "white",
2315
+ "plot_bgcolor": "#E5ECF6",
2316
+ "polar": {
2317
+ "angularaxis": {
2318
+ "gridcolor": "white",
2319
+ "linecolor": "white",
2320
+ "ticks": ""
2321
+ },
2322
+ "bgcolor": "#E5ECF6",
2323
+ "radialaxis": {
2324
+ "gridcolor": "white",
2325
+ "linecolor": "white",
2326
+ "ticks": ""
2327
+ }
2328
+ },
2329
+ "scene": {
2330
+ "xaxis": {
2331
+ "backgroundcolor": "#E5ECF6",
2332
+ "gridcolor": "white",
2333
+ "gridwidth": 2,
2334
+ "linecolor": "white",
2335
+ "showbackground": true,
2336
+ "ticks": "",
2337
+ "zerolinecolor": "white"
2338
+ },
2339
+ "yaxis": {
2340
+ "backgroundcolor": "#E5ECF6",
2341
+ "gridcolor": "white",
2342
+ "gridwidth": 2,
2343
+ "linecolor": "white",
2344
+ "showbackground": true,
2345
+ "ticks": "",
2346
+ "zerolinecolor": "white"
2347
+ },
2348
+ "zaxis": {
2349
+ "backgroundcolor": "#E5ECF6",
2350
+ "gridcolor": "white",
2351
+ "gridwidth": 2,
2352
+ "linecolor": "white",
2353
+ "showbackground": true,
2354
+ "ticks": "",
2355
+ "zerolinecolor": "white"
2356
+ }
2357
+ },
2358
+ "shapedefaults": {
2359
+ "line": {
2360
+ "color": "#2a3f5f"
2361
+ }
2362
+ },
2363
+ "ternary": {
2364
+ "aaxis": {
2365
+ "gridcolor": "white",
2366
+ "linecolor": "white",
2367
+ "ticks": ""
2368
+ },
2369
+ "baxis": {
2370
+ "gridcolor": "white",
2371
+ "linecolor": "white",
2372
+ "ticks": ""
2373
+ },
2374
+ "bgcolor": "#E5ECF6",
2375
+ "caxis": {
2376
+ "gridcolor": "white",
2377
+ "linecolor": "white",
2378
+ "ticks": ""
2379
+ }
2380
+ },
2381
+ "title": {
2382
+ "x": 0.05
2383
+ },
2384
+ "xaxis": {
2385
+ "automargin": true,
2386
+ "gridcolor": "white",
2387
+ "linecolor": "white",
2388
+ "ticks": "",
2389
+ "title": {
2390
+ "standoff": 15
2391
+ },
2392
+ "zerolinecolor": "white",
2393
+ "zerolinewidth": 2
2394
+ },
2395
+ "yaxis": {
2396
+ "automargin": true,
2397
+ "gridcolor": "white",
2398
+ "linecolor": "white",
2399
+ "ticks": "",
2400
+ "title": {
2401
+ "standoff": 15
2402
+ },
2403
+ "zerolinecolor": "white",
2404
+ "zerolinewidth": 2
2405
+ }
2406
+ }
2407
+ },
2408
+ "title": {
2409
+ "text": "Histogram of question + context token lengths (total=1451)"
2410
+ },
2411
+ "xaxis": {
2412
+ "anchor": "y",
2413
+ "domain": [
2414
+ 0,
2415
+ 1
2416
+ ],
2417
+ "title": {
2418
+ "text": "value"
2419
+ }
2420
+ },
2421
+ "yaxis": {
2422
+ "anchor": "x",
2423
+ "domain": [
2424
+ 0,
2425
+ 1
2426
+ ],
2427
+ "title": {
2428
+ "text": "count"
2429
+ }
2430
+ }
2431
+ }
2432
+ }
2433
+ },
2434
+ "metadata": {},
2435
+ "output_type": "display_data"
2436
+ }
2437
+ ],
2438
+ "source": [
2439
+ "# plot histogram of full_text lengths\n",
2440
+ "question_token_lengths = []\n",
2441
+ "for inst in instances:\n",
2442
+ " question_token_lengths.append(len(inst['question_with_context']))\n",
2443
+ " \n",
2444
+ "px.histogram(question_token_lengths, \n",
2445
+ " title=f'Histogram of question + context token lengths (total={len(question_token_lengths)})')"
2446
+ ]
2447
+ },
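A quick numeric summary pairs well with the histogram above; a minimal sketch, assuming the `question_token_lengths` list built in that cell and `numpy` available in the environment (the 4096-token cutoff is an arbitrary example, not something the notebook uses):

```python
import numpy as np

lengths = np.array(question_token_lengths)

# Percentiles of question + context token lengths; useful when picking a
# maximum sequence length for the reader model.
for q in (50, 90, 95, 99, 100):
    print(f"p{q}: {np.percentile(lengths, q):.0f} tokens")

# Fraction of instances that a hypothetical 4096-token limit would truncate.
print(f"over 4096 tokens: {(lengths > 4096).mean():.1%}")
```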
2448
+ {
2449
+ "cell_type": "markdown",
2450
+ "metadata": {},
2451
+ "source": [
2452
+ "## Save instances to JSON"
2453
+ ]
2454
+ },
2455
+ {
2456
+ "cell_type": "code",
2457
+ "execution_count": 7,
2458
+ "metadata": {},
2459
+ "outputs": [],
2460
+ "source": [
2461
+ "# recursively check the type of each field in the dataset\n",
2462
+ "def check_type(data, level=0):\n",
2463
+ " tabs = '\\t' * level\n",
2464
+ " if isinstance(data, dict):\n",
2465
+ " for key, value in data.items():\n",
2466
+ " print(f'{tabs}{key}: {type(value)}')\n",
2467
+ " check_type(value, level + 1)\n",
2468
+ " elif isinstance(data, list):\n",
2469
+ " for i in data:\n",
2470
+ " check_type(i, level + 1)\n",
2471
+ " else:\n",
2472
+ " pass\n",
2473
+ " # print(f'{tabs}{type(data)}')\n",
2474
+ " # if isinstance(data, BatchEncoding):\n",
2475
+ " # print(f'{tabs}{data[:100]}')"
2476
+ ]
2477
+ },
2478
+ {
2479
+ "cell_type": "code",
2480
+ "execution_count": 8,
2481
+ "metadata": {},
2482
+ "outputs": [
2483
+ {
2484
+ "name": "stdout",
2485
+ "output_type": "stream",
2486
+ "text": [
2487
+ "question_with_context: <class 'list'>\n",
2488
+ "paragraph_indices: <class 'list'>\n",
2489
+ "evidence: <class 'list'>\n",
2490
+ "answer: <class 'list'>\n",
2491
+ "metadata: <class 'dict'>\n",
2492
+ "\tquestion: <class 'str'>\n",
2493
+ "\tquestion_tokens: <class 'list'>\n",
2494
+ "\tparagraphs: <class 'list'>\n",
2495
+ "\tcontext_tokens: <class 'list'>\n",
2496
+ "\tquestion_id: <class 'str'>\n",
2497
+ "\tarticle_id: <class 'str'>\n",
2498
+ "\tall_answers: <class 'list'>\n",
2499
+ "\t\t\ttext: <class 'str'>\n",
2500
+ "\t\t\ttype: <class 'str'>\n",
2501
+ "\t\t\ttext: <class 'str'>\n",
2502
+ "\t\t\ttype: <class 'str'>\n",
2503
+ "\t\t\ttext: <class 'str'>\n",
2504
+ "\t\t\ttype: <class 'str'>\n",
2505
+ "\t\t\ttext: <class 'str'>\n",
2506
+ "\t\t\ttype: <class 'str'>\n",
2507
+ "\t\t\ttext: <class 'str'>\n",
2508
+ "\t\t\ttype: <class 'str'>\n",
2509
+ "\t\t\ttext: <class 'str'>\n",
2510
+ "\t\t\ttype: <class 'str'>\n",
2511
+ "\tall_evidence: <class 'list'>\n",
2512
+ "\tall_evidence_masks: <class 'list'>\n"
2513
+ ]
2514
+ }
2515
+ ],
2516
+ "source": [
2517
+ "check_type(instances[0])"
2518
+ ]
2519
+ },
2520
+ {
2521
+ "cell_type": "code",
2522
+ "execution_count": 9,
2523
+ "metadata": {},
2524
+ "outputs": [],
2525
+ "source": [
2526
+ "# save instances to file\n",
2527
+ "with open(os.path.join(output_path, f'{split}_instances.json'), 'w') as f:\n",
2528
+ " # pretty print json\n",
2529
+ " json.dump(instances, f, indent=4)"
2530
+ ]
2531
+ }
2539
+ ],
2540
+ "metadata": {
2541
+ "kernelspec": {
2542
+ "display_name": "arxiv-agent",
2543
+ "language": "python",
2544
+ "name": "python3"
2545
+ },
2546
+ "language_info": {
2547
+ "codemirror_mode": {
2548
+ "name": "ipython",
2549
+ "version": 3
2550
+ },
2551
+ "file_extension": ".py",
2552
+ "mimetype": "text/x-python",
2553
+ "name": "python",
2554
+ "nbconvert_exporter": "python",
2555
+ "pygments_lexer": "ipython3",
2556
+ "version": "3.12.0"
2557
+ }
2558
+ },
2559
+ "nbformat": 4,
2560
+ "nbformat_minor": 2
2561
+ }
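For downstream use, the `{split}_instances.json` file written above can be loaded back directly; a minimal sketch, where `output_path` and `split` are placeholders standing in for whatever the notebook defined upstream:

```python
import json
import os

output_path = "qasper_preprocessed"  # hypothetical; match the notebook's output_path
split = "dev"                        # hypothetical; match the notebook's split

# Read the preprocessed instances written by the notebook's final save cell.
with open(os.path.join(output_path, f"{split}_instances.json")) as f:
    instances = json.load(f)

# Each instance mirrors the structure printed by check_type(instances[0]).
print(len(instances))
print(instances[0]["metadata"]["question"])
```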