|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T13:29:40.320935Z" |
|
}, |
|
"title": "From Dataset Recycling to Multi-Property Extraction and Beyond", |
|
"authors": [ |
|
{ |
|
"first": "Tomasz", |
|
"middle": [], |
|
"last": "Dwojak", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Adam Mickiewicz University", |
|
"location": { |
|
"settlement": "Poznan" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Micha\u0142", |
|
"middle": [], |
|
"last": "Pietruszka", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Jagiellonian University", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Borchmann", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Poznan University of Technology", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jakub", |
|
"middle": [], |
|
"last": "Ch\u0142\u0119dowski", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Jagiellonian University", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Filip", |
|
"middle": [], |
|
"last": "Grali\u0144ski", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Adam Mickiewicz University", |
|
"location": { |
|
"settlement": "Poznan" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Applica", |
|
"middle": [], |
|
"last": "Ai", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper investigates various Transformer architectures on the WikiReading Information Extraction and Machine Reading Comprehension dataset. The proposed dual-source model outperforms the current state-of-theart by a large margin. Next, we introduce WikiReading Recycled-a newly developed public dataset, and the task of multipleproperty extraction. It uses the same data as WikiReading but does not inherit its predecessor's identified disadvantages. In addition, we provide a human-annotated test set with diagnostic subsets for a detailed analysis of model performance.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper investigates various Transformer architectures on the WikiReading Information Extraction and Machine Reading Comprehension dataset. The proposed dual-source model outperforms the current state-of-theart by a large margin. Next, we introduce WikiReading Recycled-a newly developed public dataset, and the task of multipleproperty extraction. It uses the same data as WikiReading but does not inherit its predecessor's identified disadvantages. In addition, we provide a human-annotated test set with diagnostic subsets for a detailed analysis of model performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The emergence of attention-based models has revolutionized Natural Language Processing (Young et al., 2018) . Pretraining these models on large corpora like BookCorpus (Zhu et al., 2015) has been shown to yield a reliable and robust base for downstream tasks. These include Natural Language Inference (Bowman et al., 2015) , Question Answering (Rajpurkar et al., 2016) , Named Entity Recognition (Yadav and Bethard, 2018; Goyal et al., 2018; , and Property Extraction (Hewlett et al., 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 87, |
|
"end": 107, |
|
"text": "(Young et al., 2018)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 168, |
|
"end": 186, |
|
"text": "(Zhu et al., 2015)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 301, |
|
"end": 322, |
|
"text": "(Bowman et al., 2015)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 344, |
|
"end": 368, |
|
"text": "(Rajpurkar et al., 2016)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 396, |
|
"end": 421, |
|
"text": "(Yadav and Bethard, 2018;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 422, |
|
"end": 441, |
|
"text": "Goyal et al., 2018;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 468, |
|
"end": 490, |
|
"text": "(Hewlett et al., 2016)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The creation of large supervised datasets often comes with trade-offs, such as one between the quality and quantity of data. For instance, the WikiReading dataset (Hewlett et al., 2016) has been created in such a way that WikiData annotations were treated as the expected answers for related Wikipedia articles. However, the above datasets were created separately, and the information content of both sources overlaps only partially. Hence, the resulting dataset may contain noise.", |
|
"cite_spans": [ |
|
{ |
|
"start": 163, |
|
"end": 185, |
|
"text": "(Hewlett et al., 2016)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The best models can achieve results better than the human baseline across many NLP datasets such as MSCQAs (Wang et al., 2018) , STS-B, QNLI (Raffel et al., 2020) , CoLA or MRPC . However, as a consequence of different kinds of noise in the data, they rarely maximize the score metric (Stanislawek et al., 2019) . While current work in NLP is focused on preparing new datasets, we regard recycling the current ones as equally important as creating a new one. Thus, after outperforming previous state-of-the-art on WikiReading, we investigated the dataset's weaknesses and created an entirely new, more challenging Multi-Property Extraction task with improved data splits and a reliable, human-annotated test set.", |
|
"cite_spans": [ |
|
{ |
|
"start": 107, |
|
"end": 126, |
|
"text": "(Wang et al., 2018)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 141, |
|
"end": 162, |
|
"text": "(Raffel et al., 2020)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 285, |
|
"end": 311, |
|
"text": "(Stanislawek et al., 2019)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Contribution. The specific contributions of this work are the following. We analyzed the WikiReading dataset and pointed out its weaknesses. We introduced a Multi-Property Extraction task by creating a new dataset: WikiReading Recycled. Our dataset contains a human-annotated test set, with multiple subsets aimed to benchmark qualities such as generalization on unseen properties. We introduced a Mean-Multi-Property-F 1 score suited for the new Multi-Property Extraction task. We evaluated previously used architectures on both datasets. Furthermore, we showed that pretrained transformer models (Dual-Source RoBERTa and T5) beat all other baselines. The new dataset and all the models mentioned in the present paper were made publicly available on GitHub. 1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Early work in relation extraction revolves around problems crafted using distant supervision methods, which are semi-supervised methods that automatically label pools of unlabeled data (Craven and Kumlien, 1999) . In contrast, many QA datasets were created through crowd-sourcing, where annotators were asked to formulate questions with Dataset Task Input Output SNLI Natural Language Inference two sentences relation between the sentences SQUAD Question Answering article, question answer to the question WiNER Named Entity Recognition article annotated named entities WR Property Extraction article, property value of the property WRR (ours) Multi-Property Extraction article, properties values of the properties Table 1 : Comparison of NLP tasks on text comprehension and information extraction. More differences between WR and WRR were outlined in Table 3. answers that require knowledge retrieval and information synthesis. One of the most popular QA datasets is Wikipedia-based SQUAD, where an instance consists of a human-formulated question, and an encyclopedic reading passage used to base the answer on (Rajpurkar et al., 2018) . Another crowd-sourced dataset that profoundly influenced Natural Language Inference research is SNLI (Bowman et al., 2015)-a three-way semantics-based classification of a relation between two different sentences. Both SQUAD and SNLI are large-scale Machine Reading Comprehension (MRC) tasks, but they cannot be treated as Property Extraction as defined in Section 3; hence they are not considered in this paper. Similarly, some MRC problems framed in TREC tracks, such as Conversational Assistance or Question Answering, are beyond the scope of this paper (Dalton et al., 2020; Dang et al., 2007) . Hewlett et al. (2016) proposed the WikiReading dataset that consists of a Wikipedia article and related WikiData statement. No additional annotation work was performed, yet the resulting dataset was of presumably high reliability. Nevertheless, we consider an additional human annotation to be desired (Section 4.3). Alongside the dataset, a property extraction task was introduced. The idea behind it is to read an article given a property name and to infer the associated value from the article. The property extraction paradigm is described in detail in Section 3, whereas a brief comparison to related datasets is presented in Table 1 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 185, |
|
"end": 211, |
|
"text": "(Craven and Kumlien, 1999)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 1135, |
|
"end": 1159, |
|
"text": "(Rajpurkar et al., 2018)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 1718, |
|
"end": 1739, |
|
"text": "(Dalton et al., 2020;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 1740, |
|
"end": 1758, |
|
"text": "Dang et al., 2007)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 1761, |
|
"end": 1782, |
|
"text": "Hewlett et al. (2016)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 345, |
|
"end": 744, |
|
"text": "Task Input Output SNLI Natural Language Inference two sentences relation between the sentences SQUAD Question Answering article, question answer to the question WiNER Named Entity Recognition article annotated named entities WR Property Extraction article, property value of the property WRR (ours) Multi-Property Extraction article, properties values of the properties Table 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 874, |
|
"end": 882, |
|
"text": "Table 3.", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 2392, |
|
"end": 2399, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Initially, the best-performing model used placeholders to allow rewriting out-of-vocabulary words to the output. Next, Choi et al. (2017) presented a reinforcement learning approach that improved results on a challenging subset of the 10% longest articles. This framework was extended by Wang and Jin (2019) with a self-correcting action that removes the inaccurate answer from the answer generation module and continues to read. hold the state-of-the-art on WikiReading with their proposition of SWEAR that attends over a sliding window's representations to reduce documents to one vector from which another GRU network generates the answer (Chung et al., 2014) . Additionally, they evaluated a strong semi-supervised solution on a randomly sampled 1% subset of WikiReading.", |
|
"cite_spans": [ |
|
{ |
|
"start": 119, |
|
"end": 137, |
|
"text": "Choi et al. (2017)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 288, |
|
"end": 307, |
|
"text": "Wang and Jin (2019)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 642, |
|
"end": 662, |
|
"text": "(Chung et al., 2014)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "To the best of our knowledge, no authors validated Transformer-based models on WikiReading and pretrained encoders.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Let a property denote any query for which a system is expected to return an answer from given text. Examples include country of citizenship for a biography provided as an input text, or architect name for an article regarding the opening of a new building. Contrary to QA problems, a query is not formulated as a question in natural language but rather as a phrase or keyword. We use the term value when referring to a valid answer for the stated query. Some properties have multiple valid answers; thus, multiple values are expected. Examine the case of Johann Sebastian Bach's biography for which property sister has eight values. We will refer to any task consisting of a tuple (properties, text) for which values are to be provided as a property extraction task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Property Extraction", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The biggest publicly available dataset for property extraction is WikiReading (Hewlett et al., 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 78, |
|
"end": 100, |
|
"text": "(Hewlett et al., 2016)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Property Extraction", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The dataset combines articles from Wikipedia with Wikidata information. The dataset is of great value; however, several flaws can be identified. First, more than 95% of articles in the test set appeared in the train set (Table 2) . Second, the unjustifiably large size of the test set is a substantial obstacle for running experiments. For instance, it takes 50 hours to process the test set using a Transformer model such as T5 SMALL on a single NVidia V100 GPU. Finally, WikiReading assumes that every value in the test set can be determined on the basis of a given article. As shown later, this is not the case for 28% of values.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 220, |
|
"end": 229, |
|
"text": "(Table 2)", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Property Extraction", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In the Multi-Property Extraction (MPE) scenario we propose, the system is expected to return values for multiple properties at once. Hence, can be considered a generalization of a single-property extraction task as it can be easily formulated as such. Thus, MPE is reverse-compatible with the single-property extraction, and it is still possible to evaluate models trained in the single-property setting. Many arguments can be considered in favor of framing the problem as MPE. In a typical business scenario, multiple properties are expected to be extracted from a given document. The bulk inference requires a lower computational budget by a factor proportional to the mean number of properties per article, which makes MPE preferable. Moreover, one can expect that systems trained in such a way will manifest emergent properties resulting from the interaction between properties themselves. Consider the set of property-value pairs: date of birth: 1915-01-12, date of death: 1979-05-02, place of birth: Saint Petersburg already predicted by an autoregressive model. It is in principle possible to answer: country of citizenship: Russian Empire, country of citizenship: Soviet Union using the earlier predicted pairs only. This phenomenon emerges if the model (or person) learned the relationships between years, administrative boundaries of the city, and the transformation of the Russian Empire into a communist state that occurred in the meantime. Although no such reasoning is required and the problem can be solved by memorizing related co-occurrence patterns, we intend to achieve the mentioned emergent properties. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Towards Multi-Property Extraction", |
|
"sec_num": "3.1" |
|
}, |
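To make the MPE format concrete, below is a minimal sketch of how a single training instance could be serialized. The field names, the ": " delimiter, and the " | " separator (standing in for the special symbol mentioned in Section 5) are illustrative assumptions, not the dataset's actual schema.

```python
# Sketch of one MPE instance (hypothetical field names).
instance = {
    "article": "Johann Sebastian Bach was a German composer ...",
    "properties": ["date of birth", "place of birth", "sister"],
}

def serialize_target(pairs, sep=" | "):
    """Linearize the expected (property, value) pairs into a single target
    sequence; `sep` stands in for the special separator symbol."""
    return sep.join(f"{prop}: {value}" for prop, value in pairs)

target = serialize_target([
    ("date of birth", "1685-03-31"),
    ("place of birth", "Eisenach"),
    ("sister", "Maria Salome Bach"),
])
# -> "date of birth: 1685-03-31 | place of birth: Eisenach | sister: Maria Salome Bach"
```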
|
{ |
|
"text": "The comparison to existing datasets and shared tasks is briefly presented in Table 1 , whereas Table 3 focuses on selected differences between WikiReading Recycled and WikiReading.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 77, |
|
"end": 84, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "WikiReading Recycled: Novel Dataset for Multi-Property Extraction", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Our set of desiderata is based on the following intentions. We wished to introduce the problem of Multi-Property Extraction to evaluate systems that extract any number of given properties at once from the same source text. Our second objective was to ensure that an article may appear in precisely one data split. The third core intention was to introduce an article-centered data objective instead of a property-centric one. Note that an instance of data should be an article with multiple properties. The fourth objective was to ensure that all properties in the test set can be extracted or inferred. The fifth was to keep the validation and test sets within a reasonable size. Moreover, we aim to provide a test set of the highest quality, lacking noise that could arise from automatic processing. Finally, we intended to benchmark the model generalization abilities -the test set contains properties not seen during training, posing a challenge for current state-of-the-art systems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Desiderata", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The WikiReading Recycled and WikiReading are based on the same data, yet differ in how they are arranged. Instances from the original WikiReading dataset were merged to produce over 4M samples in the MPE paradigm. Instead of performing a random split, we carefully divide the data assuming that 20% of properties should appear solely in the test set (more precisely, not seen before in train and validation sets). Around one thousand articles containing properties not seen in the remaining subsets were drafted to achieve the mentioned objective. Similarly, properties unique for the validation set were introduced to enable approximation of the test set performance without disclosing particular labels. Additionally, test and validation sets share 10% of the properties that do not appear in the train set, increasing the size of these subsets by 2,000 articles each. Another 2,000 articles containing the same properties as the train set were added to each of the validation and test sets. All the remaining articles were used to produce the training set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Collection and Split", |
|
"sec_num": "4.2" |
|
}, |
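As a rough illustration of the property-held-out idea described above, the sketch below reserves a fraction of property names and drafts the articles that contain them. The authors' actual procedure additionally balanced validation-only properties, shared rare properties, and subset sizes, so this is an assumption-laden simplification, not the published split algorithm.

```python
import random

def held_out_split(articles, unseen_fraction=0.2, seed=0):
    """Reserve a fraction of property names for the test side, then draft
    every article that mentions one of them; the rest become training data."""
    rng = random.Random(seed)
    properties = sorted({p for a in articles for p in a["properties"]})
    rng.shuffle(properties)
    unseen = set(properties[: int(unseen_fraction * len(properties))])
    test = [a for a in articles if unseen & set(a["properties"])]
    train = [a for a in articles if not (unseen & set(a["properties"]))]
    return train, test
```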
|
{ |
|
"text": "To sum up, we achieved a design where as much as 50% of the properties cannot be seen in the training split, while the remaining 50% of the properties can appear in any split. We chose these properties carefully so that the size of the test and validation sets does not exceed 5,000 articles.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Collection and Split", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The quality of test sets plays a pivotal role in reasoning about a system's performance. Therefore, a group of annotators went through the instances of the test set and assessed whether the value either appeared in the article or can be inferred from it. To make further analysis possible, we provide both datasets, before (test-A) and after (test-B) annotation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human Annotation", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "The annotation process was non-trivial due to vagueness of the inferability definition, and the scientific character of the considered text. It was required to understand advanced encyclopedic articles e.g., about chemistry, biology, or astronomy, to answer domain-specific properties (scientific classifications or biological taxonomy), which are only possible with deep knowledge about the world and with the ability to learn during the process. Moreover, linguistic skills were required to transliterate and transcribe first and last names. Note that we consider the value which appears in a different writing script as inferable. Due to the stated issues, we decided to rely on highly trained linguists as annotators.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human Annotation", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "The process was supported by several heuristics. In particular, the approximate string matching was used to highlight fragments of presumably high importance. Nevertheless, it took seven linguists more than 100 hours in total to complete. On average, two minutes and thirty second were required to verify data assigned to one Wikipedia article.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human Annotation", |
|
"sec_num": "4.3" |
|
}, |
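The paper does not specify the matching heuristic; a minimal sketch of one plausible approach, using Python's standard difflib to surface article fragments similar to an expected value, might look as follows.

```python
from difflib import SequenceMatcher

def best_fragment(value, article, window=50, stride=25):
    """Slide a window over the article and return the span most similar to
    the expected value, so an annotator can jump straight to it."""
    best_score, best_span = 0.0, ""
    for i in range(0, max(1, len(article) - window + 1), stride):
        span = article[i:i + window]
        score = SequenceMatcher(None, value.lower(), span.lower()).ratio()
        if score > best_score:
            best_score, best_span = score, span
    return best_score, best_span

score, fragment = best_fragment("Saint Petersburg", "He was born in St. Petersburg in 1915.")
```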
|
{ |
|
"text": "The relevance of annotation mentioned above can be demonstrated by the fact that 28% of the property-value pairs were marked as unanswerable and removed. As it will be shown later, the Mean-Multi-Property-F 1 on a pre-verified test-A was approximately 20 points lower, and 8% of articles were removed entirely from the test-B during the annotation process.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human Annotation", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "We determined auxiliary validation subsets with specific qualities, not only to help improve data analysis but also to provide additional information at different stages of development of a system. The qualities we measure and the definition is provided below.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Diagnostic Subsets", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "Rare, unseen. Rare and unseen properties were distinguished depending on their frequency. The number of occurrences in the train set was below a threshold of 4000 for each in rare and was precisely 0 for the unseen category.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Diagnostic Subsets", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "Categorical, relational. We denote a property as categorical if its value set contains a limited number of values; otherwise, it is relational. We apply normalized entropy with a threshold of 0.7 to obtain properties that belong to the categorical subset. For instance, the continent property occurs 20060 times, but with 13 possible values, its normalized entropy equals 0.43; hence it is marked as categorical. This splitting method is not ideal, but we wanted to use the same method as in (Hewlett et al., 2016) . For example, if the distribution of continents was uniform, the property would have been classified as relational. However, in practice, it almost never happens.", |
|
"cite_spans": [ |
|
{ |
|
"start": 492, |
|
"end": 514, |
|
"text": "(Hewlett et al., 2016)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Diagnostic Subsets", |
|
"sec_num": "4.4" |
|
}, |
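The exact estimator is not spelled out in the paper; assuming the plug-in Shannon entropy of the empirical value distribution, normalized by the logarithm of the number of distinct values, a sketch of the categorical test could be:

```python
import math
from collections import Counter

def normalized_entropy(values):
    """Shannon entropy of the empirical value distribution divided by
    log(k), where k is the number of distinct values; 0.0 when k <= 1."""
    counts = Counter(values)
    total = sum(counts.values())
    if len(counts) <= 1:
        return 0.0
    entropy = -sum((c / total) * math.log(c / total) for c in counts.values())
    return entropy / math.log(len(counts))

# A property is marked categorical when its normalized entropy falls below 0.7.
def is_categorical(values, threshold=0.7):
    return normalized_entropy(values) < threshold
```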
|
{ |
|
"text": "Exact match. The exact match category applies to cases where expected value is mentioned directly in the source text.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Diagnostic Subsets", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "Long articles. Instances with articles longer than 695 words (threshold qualifying to the top 15% longest articles in the train set) constitute the long articles diagnostic set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Diagnostic Subsets", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "Characteristics of different systems can be compared qualitatively by evaluating on these subsets. For instance, the long articles subset is challenging for systems that consume truncated inputs. Unseen is precisely constructed to assess systems' ability to extract previously not seen properties. On the other hand, rare can be viewed as an approximation of the system's performance on a lower-resource downstream extraction task. The categorical subset is useful in assessing approaches featuring a classifier, whereas it is suboptimal to use such systems for relational due to richer output space. Similarly, the exact match can be approached with sequence tagging solutions. The share of each diagnostic subset is presented in Table 4 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 731, |
|
"end": 738, |
|
"text": "Table 4", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Diagnostic Subsets", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "We evaluate different model architectures on the WikiReading Recycled dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architectures", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We reimplemented the previously best performing WikiReading model, finetuned pretrained Transformer models, and applied a dual-source model. Their competitiveness can be demonstrated by the fact that we were able to outperform the previous state-of-the-art on the WikiReading by a far margin.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architectures", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Basic seq2seq. A straightforward approach to single-property extraction is to use an LSTM sequence-to-sequence model where the input consists of a property name concatenated with the considered input text. To compare with the previous results, we reproduced the basic sequence-tosequence model proposed by Hewlett et al. (2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 306, |
|
"end": 327, |
|
"text": "Hewlett et al. (2016)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architectures", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Vanilla Transformer. A more up-to-date solution is to use the Transformer architecture (Vaswani et al., 2017) instead of an RNN, and a subword tokenization method, such as unigram LM tokenization (Kudo, 2018) . We use the term vanilla to denote a model trained from scratch.", |
|
"cite_spans": [ |
|
{ |
|
"start": 87, |
|
"end": 109, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 196, |
|
"end": 208, |
|
"text": "(Kudo, 2018)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architectures", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Vanilla Dual-Source Transformer. The Transformer architecture was extended to support two inputs and successfully applied in Automatic Post-Editing . We propose to reuse this Dual-Source Transformer architecture in the property extraction tasks. The architecture consists of two encoders that share parameters and a single decoder. Moreover, the encoders and decoder share embeddings and vocabulary. In our approach, the first encoder is fed with the text of an article, and the second one takes the names of properties (Figure 1) . The model is trained to generate a sequence of pairs: (property, value) separated with a special symbol.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 520, |
|
"end": 530, |
|
"text": "(Figure 1)", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model Architectures", |
|
"sec_num": "5" |
|
}, |
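A minimal PyTorch sketch of this wiring is given below. Concatenating the two encoder memories for the decoder's cross-attention is one simple realization and an assumption on our part (the APE-style original may combine the sources differently), and positional encodings are omitted for brevity.

```python
import torch
import torch.nn as nn

class DualSourceTransformer(nn.Module):
    """Sketch: one parameter-shared encoder reads both the article and the
    property names; a single decoder attends over both memories at once."""

    def __init__(self, vocab_size, d_model=512, nhead=8, num_layers=4):
        super().__init__()
        # Embeddings are shared by both encoders and the decoder.
        self.embed = nn.Embedding(vocab_size, d_model)
        enc_layer = nn.TransformerEncoderLayer(d_model, nhead, batch_first=True)
        self.encoder = nn.TransformerEncoder(enc_layer, num_layers)
        dec_layer = nn.TransformerDecoderLayer(d_model, nhead, batch_first=True)
        self.decoder = nn.TransformerDecoder(dec_layer, num_layers)
        self.out = nn.Linear(d_model, vocab_size)
        self.out.weight = self.embed.weight  # tie output projection and embeddings

    def forward(self, article_ids, property_ids, target_ids):
        # The same encoder (shared parameters) encodes both sources.
        article_mem = self.encoder(self.embed(article_ids))
        property_mem = self.encoder(self.embed(property_ids))
        memory = torch.cat([article_mem, property_mem], dim=1)
        tgt = self.embed(target_ids)
        causal = nn.Transformer.generate_square_subsequent_mask(target_ids.size(1))
        return self.out(self.decoder(tgt, memory, tgt_mask=causal))
```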
|
{ |
|
"text": "Dual-Source RoBERTa. Recent research shows that pretrained language models can improve performance on downstream tasks (Radford et al., 2018) . Therefore, we experimented with the pretrained RoBERTa language model as an encoder. RoBERTa models were developed as a hyperoptimized version of BERT with a byte-level BPE and a considerably larger dictionary (Liu et al., 2019; Devlin et al., 2019 ). All the model parameters, including the RoBERTa weights, were further optimized on the WikiReading Recycled task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 119, |
|
"end": 141, |
|
"text": "(Radford et al., 2018)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 354, |
|
"end": 372, |
|
"text": "(Liu et al., 2019;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 373, |
|
"end": 392, |
|
"text": "Devlin et al., 2019", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architectures", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": ". Recently proposed T5 model (Raffel et al., 2020 ) is a Transformer model pretrained on a cleaned version of CommonCrawl. T5 is famous for achieving excellent performance on the Super-GLUE benchmark . To create a model input, we concatenate a property name and an article. In the case of MPE, we reduce the dataset to the single property setting, as used by the T5 model's authors.", |
|
"cite_spans": [ |
|
{ |
|
"start": 29, |
|
"end": 49, |
|
"text": "(Raffel et al., 2020", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "T5", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this section, we describe the evaluation of previously proposed architectures on both WikiReading and WikiReading Recycled datasets. We would like to highlight that the results are not comparable between the two datasets, as they are based on different train/validation/test splits.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The performance of systems is evaluated using the F1 metric, adapted for the WikiReading Recycled format. For WikiReading, Mean-F 1 follows the originally proposed micro-averaged metric and assesses F1 scores for each property instance, averaged over the whole test set. Let E denote a set of expected property-value pairs and O model-generated property-value pairs. Assuming |\u2022| stands for set cardinality, precision and recall can be formulated as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Metrics", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "P (E, O) = |E \u2229 O| |O| , R(E, O) = |E \u2229 O| |E|", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Metrics", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "Then F 1 is computed as a harmonic mean:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Metrics", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "F 1 (E, O) = 2 \u2022 P (E, O) \u2022 R(E, O) P (E, O) + R(E, O)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Metrics", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "Given a sequence E = {E 1 , E 2 , .., E n } of expected answers for n test instances, and associated sequence of predictions O = {O 1 , O 2 , .., O n }, we calculate Mean-F 1 as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Metrics", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "Mean-F 1 (E, O) = 1 n \u2022 i\u2208[1,n] F 1 (E i , O i )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Metrics", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "In WikiReading Recycled, we adjust the metric to handle many properties in a single test instance. To do that, the E i and O i sets contain values from many properties at once and n is equal to the number of articles. Note that in the case of the M-F 1 properties are considered as instances. We call our article-centric metric Mean-Multi-Property-F 1 or in short MMP-F 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Metrics", |
|
"sec_num": "6.1" |
|
}, |
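Putting the definitions above together, a reference implementation of MMP-F1 over sets of (property, value) pairs could look as follows; the handling of empty sets is our assumption, as the paper does not specify it.

```python
def f1(expected, predicted):
    """Set-based F1 between expected and predicted (property, value) pairs."""
    expected, predicted = set(expected), set(predicted)
    if not expected or not predicted:
        return 0.0  # assumption: score 0 when either side is empty
    overlap = len(expected & predicted)
    precision = overlap / len(predicted)
    recall = overlap / len(expected)
    if precision + recall == 0:
        return 0.0
    return 2 * precision * recall / (precision + recall)

def mmp_f1(expected_per_article, predicted_per_article):
    """Mean-Multi-Property-F1: F1 per article over all of its (property,
    value) pairs at once, averaged over articles."""
    scores = [f1(e, p) for e, p in zip(expected_per_article, predicted_per_article)]
    return sum(scores) / len(scores)

# Example: one article with two properties, one of which has two values.
gold = [{("country", "Poland"), ("sister", "Anna"), ("sister", "Maria")}]
pred = [{("country", "Poland"), ("sister", "Anna")}]
print(mmp_f1(gold, pred))  # 0.8
```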
|
{ |
|
"text": "Since the basic seq2seq model description missed some essential details, they had to be assumed before model training. For example, we supposed that the model consisted of unidirectional LSTMs and truecasing was applied to the output. The rest of the parameters followed the description provided by the authors. An extensive hyperparameter search was conducted for both Dual-Source Transformers on the WikiReading Recycled task. In the case of the Dual-Source Transformer evaluated on WikiReading we restricted ourselves to hyperparameters following the default values specified in the Marian NMT Toolkit ). The only difference was the reduction of encoder and decoder depths to 4.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training Details", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "For the Vanilla Dual-Source Transformer evaluation, both WikiReading and WikiReading Recycled datasets were processed with a SentencePiece model (Kudo, 2018) trained on a concatenated corpus of inputs and outputs with a vocabulary size of 32,000. Dual-Source RoBERTa model is initialized with RoBERTa BASE (consisting of 12 encoder layers and a dictionary of 50,000 subword units).", |
|
"cite_spans": [ |
|
{ |
|
"start": 145, |
|
"end": 157, |
|
"text": "(Kudo, 2018)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training Details", |
|
"sec_num": "6.2" |
|
}, |
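As a pointer for reproduction, training a unigram-LM SentencePiece model of that size could look like the following sketch; the file names are placeholders, not the authors' actual paths.

```python
import sentencepiece as spm

# Train a 32k unigram-LM vocabulary on the concatenated inputs and outputs
# ("corpus.txt" is a placeholder path).
spm.SentencePieceTrainer.train(
    input="corpus.txt", model_prefix="wrr", vocab_size=32000, model_type="unigram"
)

sp = spm.SentencePieceProcessor(model_file="wrr.model")
pieces = sp.encode("country of citizenship", out_type=str)
```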
|
{ |
|
"text": "In the case of the T5 model, we keep hyperparameters as close as possible to those used during pretraining. The training continues with restored AdaFactor parameters. We finetuned the small version of the model in a supervised-only manner.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training Details", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "We truncate the input to the first 512 tokens for all our models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training Details", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Hyperparameter Optimization. Hyperparameters for WikiReading Recycled were optimized", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training Details", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Mean-F 1 Basic s2s (Hewlett et al., 2016) 70.8 Placeholder s2s (Choi et al., 2017) 75.6 SWEAR 76 using the Tree-structured Parzen Estimator algorithm (Bergstra et al., 2011) with additional heuristics and Gaussian priors resulting from the default settings proposed for this sampler in the Optuna framework (Akiba et al., 2019) . An evaluation was performed every 8,000 steps, and the validationbased early stopping was applied when no progress was achieved in 3 consecutive validations. The total number of 250 trials was performed for each architecture. Intermediate results of each trial were monitored and used to ensure only the top 10% trials were allowed to continue. Details of the hyperparameter optimization are presented in Appendix A.", |
|
"cite_spans": [ |
|
{ |
|
"start": 19, |
|
"end": 41, |
|
"text": "(Hewlett et al., 2016)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 63, |
|
"end": 82, |
|
"text": "(Choi et al., 2017)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 150, |
|
"end": 173, |
|
"text": "(Bergstra et al., 2011)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 307, |
|
"end": 327, |
|
"text": "(Akiba et al., 2019)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
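The described setup maps closely onto Optuna's API; a hedged sketch is given below, where the search-space values and the train_and_validate helper are illustrative assumptions, not the authors' exact configuration.

```python
import optuna

def objective(trial):
    # Search space values here are illustrative, not the paper's exact ranges.
    lr = trial.suggest_float("learning_rate", 1e-5, 1e-2, log=True)
    batch_size = 2 ** trial.suggest_int("log2_batch_size", 6, 9)
    # train_and_validate is a hypothetical helper: it should call
    # trial.report(score, step) after every evaluation (every 8,000 steps)
    # and raise optuna.TrialPruned() when trial.should_prune() is True.
    return train_and_validate(lr, batch_size, trial)

study = optuna.create_study(
    direction="maximize",
    sampler=optuna.samplers.TPESampler(),      # Tree-structured Parzen Estimator
    pruner=optuna.pruners.PercentilePruner(    # keep only the top 10% of trials
        10.0, n_startup_trials=5               # disabled until five trials finish
    ),
)
study.optimize(objective, n_trials=250)
```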
|
{ |
|
"text": "Although the main focus of our evaluation is the WikiReading Recycled dataset; we additionally evaluate whether the Vanilla Dual-Source Transformer can improve the state-of-the-art on WikiReading.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results on WikiReading", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "We reproduced the Basic seq2seq model. It achieved a Mean-F 1 score of 74.8, which is 4 points higher than reported by Hewlett et al. (2016) . The difference may be caused by poor optimization in the original work. Our dual-source solution achieves 82.4 and outperforms the previous stateof-the-art model by 5.6 Mean-F 1 points. To measure the impact of using two encoders instead of one, we evaluated the Vanilla Single-source Transformer, which takes a concatenated pair of article and property as its input. Our dual-source model outperformed its single-source counterpart by 3.1 points. Table 6 presents the final results.", |
|
"cite_spans": [ |
|
{ |
|
"start": 119, |
|
"end": 140, |
|
"text": "Hewlett et al. (2016)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 591, |
|
"end": 598, |
|
"text": "Table 6", |
|
"ref_id": "TABREF9" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results on WikiReading", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "The results on WikiReading show that the Dual-Source Transformer is beneficial to the Property Extraction task. On WikiReading Recycled, we supplement the evaluation with pretrained models:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results on WikiReading Recycled", |
|
"sec_num": "6.4" |
|
}, |
|
{ |
|
"text": "Dual-Source RoBERTa and T5. Table 7 presents Mean-Multi-Property-F 1 scores on the annotated test set (test-B). All the transformer-based models outperform the Basic seq2seq. The Dual-Source Transformer achieved 77.5 Mean-Multi-Property-F 1 . Its pretrained version, Dual-Source RoBERTa, improves the result by 1.4 points. As the T5 model beats the Vanilla Dual-Source Transformer, we may conclude that even though the WikiReading Recycled dataset is very large, the pretraining is crucial for this MPE task. It is worth remembering that the results on WikiReading and WikiReading Recycled are not comparable due to the dissimilarities in metrics and datasets. We will elaborate on that in section 7.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 28, |
|
"end": 35, |
|
"text": "Table 7", |
|
"ref_id": "TABREF11" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results on WikiReading Recycled", |
|
"sec_num": "6.4" |
|
}, |
|
{ |
|
"text": "The final scores of transformer-based models differ slightly on WikiReading Recycled. In order to get more insight, we analyze the models on diagnostic sets described in Section 4.4.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion and Analysis", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Impact of Property Frequency. We provide two diagnostic sets related to property frequency: unseen and rare. Both dual-source models failed on the unseen subset. These models ignored the unseen properties from the input and did not generate any answer. The best result was achieved by the T5 model (10.9 points), albeit it still does not meet expectations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion and Analysis", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "The results on the rare subset show that the pretraining makes a difference if properties are infrequent in the train set ( Figure 2 ). Impact of Property Type. The extraction of some properties may be treated as a classification task since the set of their valid values is limited. In this case, all models perform similarly and achieve approximately 85 Mean-Multi-Property-F 1 . The difficulty of the task increases proportionally to the normalized entropy value, which may lead to the divergence of model performances. This phenomenon is visible in the case of our Basic seq2seq, where the weakness is evident above the 0. Exact Match and Long Articles. The results from the exact match and long articles subsets are correlated with the scores attained on the test-B set; however, the absolute values achieved differ substantially. This is because the long article subset is more challenging, as the chance of an answer appearing in the constant-length prefix decreases with the length of the article. The use of recently introduced models like LongFormer (Beltagy et al., 2020) and BigBird (Zaheer et al., 2020) might decrease the gap in scores between long and average-length articles. On the other hand, system performance should increase when the answer is provided directly in the text, as can be found in the exact match subset.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1094, |
|
"end": 1115, |
|
"text": "(Zaheer et al., 2020)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 124, |
|
"end": 132, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussion and Analysis", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Difficulty of Test Sets. To compare the difficulty of the WikiReading and WikiReading Recycled test sets, we converted the outputs from the nonannotated WikiReading Recycled test set (test-A) to WikiReading format, and calculated the Mean-F 1 . With the Vanilla Dual-Source Transformer, we obtained 54.0 Mean-F 1 , 28.4 points less than on WikiReading. This considerable decrease in score shows that the WikiReading Recycled test-A set is more difficult than WikiReading. The reason behind this is that we removed leakage of articles between splits, and we also added more infrequent properties that are harder to answer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion and Analysis", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Impact of Human Annotation. The Vanilla Dual-Source Transformer was evaluated on both WikiReading Recycled test sets. It obtained Mean-Multi-Property-F 1 of 62.6 on the non annotated test-A set, while achieving 77.5 on the annotated test-B. This discrepancy suggests that the linguists indeed succeeded to remove non-inferable properties. We anticipate that cleaning the train set in a similar fashion could improve the stability of the training and the overall results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion and Analysis", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "We introduced WikiReading Recycled-the first Multi-Property Extraction dataset with a humanannotated test set. We provided strong baselines that improved the current state-of-the-art on WikiReading by a large margin. The bestperforming architecture was successfully adapted from Automatic Post-Editing systems. We show that using pretrained language models increases the performance on the WikiReading Recycled dataset significantly, despite its large size. Additionally, we created diagnostic subsets to qualitatively assess model performance. The results on a challenging subset of unseen properties reveal that despite high overall scores, the evaluated systems fail to provide satisfactory performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Summary", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Low scores indicate an opportunity to improve, as these properties were verified by annotators and are expected to be answerable. We look forward to seeing models closing this gap and leading to remarkable progress in Machine Reading Comprehension.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Summary", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "The dataset and models, as well as their detailed configurations required for reproducibility, are publicly available.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Summary", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "https://github.com/applicaai/multi-p roperty-extraction", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/fnl/syntok", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "The Smart Growth Operational Programme supported this research under project no. POIR.01.01.01-00-0877/19 (A universal platform for robotic automation of processes requiring text comprehension, with a unique level of implementation and service automation).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Search space Vanilla Dual-source RoBERTa batch size 2 {6,7,8,9} 2 9 2 9 learning rate 1e-5, 5e-5,.., 1e-2 5e-4 5e-5 lr scheduler inverse sqrt, linear decay linear linearReLU, GELU ReLU GELU learned positional emb * true, false false share all emb true, false false - Table 8 : Search space considered and hyperparameters determined as optimal when the validation set of WRR is considered. The * symbol denotes tied hyperparameters set to the same values for both encoder and decoder where applicable. The use of pretrained RoBERTa model resulted in the necessity to stick with several architectural choices signalized by -character.A Hyperparameter Search Table 8 summarizes search space considered and hyperparameters determined as optimal when the validation set of WRR is considered. Hyperparameters for WRR were optimized using the Tree-structured Parzen Estimator with additional heuristics and Gaussian priors resulting from the default settings proposed for this sampler in the Optuna framework. An evaluation was performed every 8,000 steps, and the validation-based early stopping was applied when no progress was achieved in three consecutive validations. Intermediate results of each trial (results from every validation) were monitored and used to stop unpromising training earlier.The trial was pruned in the case its best intermediate value was in the bottom 90 percentiles among trials at the same step (only the top 10% of trials were allowed to continue the training). This process was disabled until five trials finished.The total number of 250 trials was performed for each architecture.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 267, |
|
"end": 274, |
|
"text": "Table 8", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 656, |
|
"end": 663, |
|
"text": "Table 8", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Parameter", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Since the basic seq2seq model description missed some essential details, they had to be assumed before model training. For example, we supposed that the model consisted of unidirectional LSTMs. It was trained with mean (per word) cross-entropy loss until no progress was observed for 10 consecutive validations occurring every 10,000 updates. Input and output sequences were tokenized and lowercased. Besides, and truecasing was applied to the output. We use syntok 2 tokenizer and a simple RNN-based truecaser proposed by Susanto et al. (2016) . During inference, we used a beam size of 8. The rest of the parameters followed the description provided by the authors.", |
|
"cite_spans": [ |
|
{ |
|
"start": 523, |
|
"end": 544, |
|
"text": "Susanto et al. (2016)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B Basic seq2seq Replication Details", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Optuna: A next-generation hyperparameter optimization framework", |
|
"authors": [ |
|
{ |
|
"first": "Takuya", |
|
"middle": [], |
|
"last": "Akiba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shotaro", |
|
"middle": [], |
|
"last": "Sano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Toshihiko", |
|
"middle": [], |
|
"last": "Yanase", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Takeru", |
|
"middle": [], |
|
"last": "Ohta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Masanori", |
|
"middle": [], |
|
"last": "Koyama", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, KDD '19", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2623--2631", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3292500.3330701" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Takuya Akiba, Shotaro Sano, Toshihiko Yanase, Takeru Ohta, and Masanori Koyama. 2019. Op- tuna: A next-generation hyperparameter optimiza- tion framework. In Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, KDD '19, pages 2623- 2631, New York, NY, USA. Association for Com- puting Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Longformer: The long-document transformer", |
|
"authors": [ |
|
{ |
|
"first": "Iz", |
|
"middle": [], |
|
"last": "Beltagy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arman", |
|
"middle": [], |
|
"last": "Cohan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Computing Research Repository", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2004.05150" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Iz Beltagy, Matthew E. Peters, and Arman Cohan. 2020. Longformer: The long-document transformer. Com- puting Research Repository, arXiv:2004.05150.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Algorithms for hyper-parameter optimization", |
|
"authors": [ |
|
{ |
|
"first": "James", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Bergstra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R\u00e9mi", |
|
"middle": [], |
|
"last": "Bardenet", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bal\u00e1zs", |
|
"middle": [], |
|
"last": "K\u00e9gl", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "24", |
|
"issue": "", |
|
"pages": "2546--2554", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James S. Bergstra, R\u00e9mi Bardenet, Yoshua Bengio, and Bal\u00e1zs K\u00e9gl. 2011. Algorithms for hyper-parameter optimization. In J. Shawe-Taylor, R. S. Zemel, P. L. Bartlett, F. Pereira, and K. Q. Weinberger, editors, Advances in Neural Information Processing Systems 24, pages 2546-2554. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "A large annotated corpus for learning natural language inference", |
|
"authors": [ |
|
{

"first": "Samuel",

"middle": [

"R"

],

"last": "Bowman",

"suffix": ""

},

{

"first": "Gabor",

"middle": [],

"last": "Angeli",

"suffix": ""

},

{

"first": "Christopher",

"middle": [],

"last": "Potts",

"suffix": ""

},

{

"first": "Christopher",

"middle": [

"D"

],

"last": "Manning",

"suffix": ""

}
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "632--642", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D15-1075" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Samuel R. Bowman, Gabor Angeli, Christopher Potts, and Christopher D. Manning. 2015. A large anno- tated corpus for learning natural language inference. In Proceedings of the 2015 Conference on Empiri- cal Methods in Natural Language Processing, pages 632-642, Lisbon, Portugal. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Coarse-to-Fine Question Answering for Long Documents", |
|
"authors": [ |
|
{ |
|
"first": "Eunsol", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Hewlett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "209--220", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P17-1020" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eunsol Choi, Daniel Hewlett, Jakob Uszkoreit, Illia Polosukhin, Alexandre Lacoste, and Jonathan Be- rant. 2017. Coarse-to-Fine Question Answering for Long Documents. In Proceedings of the 55th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 209- 220, Vancouver, Canada. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Empirical evaluation of gated recurrent neural networks on sequence modeling", |
|
"authors": [ |
|
{ |
|
"first": "Junyoung", |
|
"middle": [], |
|
"last": "Chung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caglar", |
|
"middle": [], |
|
"last": "Gulcehre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "NIPS 2014 Workshop on Deep Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Junyoung Chung, Caglar Gulcehre, Kyunghyun Cho, and Yoshua Bengio. 2014. Empirical evaluation of gated recurrent neural networks on sequence model- ing. In NIPS 2014 Workshop on Deep Learning.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Constructing biological knowledge bases by extracting information from text sources", |
|
"authors": [ |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Craven", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johan", |
|
"middle": [], |
|
"last": "Kumlien", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Proceedings of the Seventh International Conference on Intelligent Systems for Molecular Biology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "77--86", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mark Craven and Johan Kumlien. 1999. Constructing biological knowledge bases by extracting informa- tion from text sources. In Proceedings of the Sev- enth International Conference on Intelligent Systems for Molecular Biology, pages 77-86. AAAI Press.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "TREC CAsT 2019: The conversational assistance track overview. Computing Research Repository", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dalton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chenyan", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jamie", |
|
"middle": [], |
|
"last": "Callan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Dalton, Chenyan Xiong, and Jamie Callan. 2020. TREC CAsT 2019: The conversational assis- tance track overview. Computing Research Reposi- tory, Arxiv:2003.13624.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Overview of the TREC 2007 question answering track", |
|
"authors": [ |
|
{ |
|
"first": "Hoa", |
|
"middle": [ |
|
"Trang" |
|
], |
|
"last": "Dang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diane", |
|
"middle": [], |
|
"last": "Kelly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Trec", |
|
"volume": "7", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hoa Trang Dang, Diane Kelly, and Jimmy J Lin. 2007. Overview of the TREC 2007 question answering track. In Trec, volume 7.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Recent named entity recognition and classification techniques: A systematic review", |
|
"authors": [ |
|
{ |
|
"first": "Archana", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vishal", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manish", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Computer Science Review", |
|
"volume": "29", |
|
"issue": "", |
|
"pages": "21--43", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.cosrev.2018.06.001" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Archana Goyal, Vishal Gupta, and Manish Kumar. 2018. Recent named entity recognition and classi- fication techniques: A systematic review. Computer Science Review, 29:21-43.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Accurate supervised and semisupervised machine reading for long documents", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Hewlett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandre", |
|
"middle": [], |
|
"last": "Lacoste", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Izzeddin", |
|
"middle": [], |
|
"last": "Gur", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2011--2020", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D17-1214" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Hewlett, Llion Jones, Alexandre Lacoste, and Izzeddin Gur. 2017. Accurate supervised and semi- supervised machine reading for long documents. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 2011-2020, Copenhagen, Denmark. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "WikiReading: A novel large-scale language understanding task over Wikipedia", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Hewlett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandre", |
|
"middle": [], |
|
"last": "Lacoste", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Fandrianto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jay", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Kelcey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Berthelot", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1535--1545", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P16-1145" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Hewlett, Alexandre Lacoste, Llion Jones, Illia Polosukhin, Andrew Fandrianto, Jay Han, Matthew Kelcey, and David Berthelot. 2016. WikiReading: A novel large-scale language understanding task over Wikipedia. In Proceedings of the 54th Annual Meet- ing of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1535-1545, Berlin, Germany. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "MS-UEdin submission to the WMT2018 APE shared task: Dual-source transformer for automatic post-editing", |
|
"authors": [ |
|
{ |
|
"first": "Marcin", |
|
"middle": [], |
|
"last": "Junczys", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "-", |
|
"middle": [], |
|
"last": "Dowmunt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roman", |
|
"middle": [], |
|
"last": "Grundkiewicz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Third Conference on Machine Translation: Shared Task Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "822--826", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-6467" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcin Junczys-Dowmunt and Roman Grundkiewicz. 2018. MS-UEdin submission to the WMT2018 APE shared task: Dual-source transformer for automatic post-editing. In Proceedings of the Third Confer- ence on Machine Translation: Shared Task Papers, pages 822-826, Belgium, Brussels. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Marian: Fast neural machine translation in C++", |
|
"authors": [ |
|
{ |
|
"first": "Marcin", |
|
"middle": [], |
|
"last": "Junczys-Dowmunt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roman", |
|
"middle": [], |
|
"last": "Grundkiewicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomasz", |
|
"middle": [], |
|
"last": "Dwojak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hieu", |
|
"middle": [], |
|
"last": "Hoang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenneth", |
|
"middle": [], |
|
"last": "Heafield", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Neckermann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frank", |
|
"middle": [], |
|
"last": "Seide", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ulrich", |
|
"middle": [], |
|
"last": "Germann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alham", |
|
"middle": [], |
|
"last": "Fikri Aji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikolay", |
|
"middle": [], |
|
"last": "Bogoychev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Andr\u00e9", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Martins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of ACL 2018, System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "116--121", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcin Junczys-Dowmunt, Roman Grundkiewicz, Tomasz Dwojak, Hieu Hoang, Kenneth Heafield, Tom Neckermann, Frank Seide, Ulrich Germann, Alham Fikri Aji, Nikolay Bogoychev, Andr\u00e9 F. T. Martins, and Alexandra Birch. 2018. Marian: Fast neural machine translation in C++. In Proceedings of ACL 2018, System Demonstrations, pages 116- 121, Melbourne, Australia. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Subword regularization: Improving neural network translation models with multiple subword candidates", |
|
"authors": [ |
|
{ |
|
"first": "Taku", |
|
"middle": [], |
|
"last": "Kudo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "66--75", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-1007" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Taku Kudo. 2018. Subword regularization: Improving neural network translation models with multiple sub- word candidates. In Proceedings of the 56th Annual Meeting of the Association for Computational Lin- guistics (Volume 1: Long Papers), pages 66-75, Mel- bourne, Australia. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "A survey on deep learning for named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "IEEE Transactions on Knowledge and Data Engineering", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--1", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/TKDE.2020.2981314" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Li, A. Sun, J. Han, and C. Li. 2020. A survey on deep learning for named entity recognition. IEEE Transactions on Knowledge and Data Engineering, pages 1-1.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "RoBERTa: A robustly optimized BERT pretraining approach", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfei", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Computing Research Repository", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.11692" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. RoBERTa: A robustly optimized BERT pre- training approach. Computing Research Repository, arXiv:1907.11692.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Improving language understanding by generative pre-training. Technical report", |
|
"authors": [ |
|
{ |
|
"first": "Alec", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karthik", |
|
"middle": [], |
|
"last": "Narasimhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Salimans", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. 2018. Improving language under- standing by generative pre-training. Technical re- port, OpenAI.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Exploring the limits of transfer learning with a unified text-totext transformer", |
|
"authors": [ |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Raffel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Roberts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katherine", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sharan", |
|
"middle": [], |
|
"last": "Narang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Matena", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanqi", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "21", |
|
"issue": "140", |
|
"pages": "1--67", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Colin Raffel, Noam Shazeer, Adam Roberts, Kather- ine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. 2020. Exploring the limits of transfer learning with a unified text-to- text transformer. Journal of Machine Learning Re- search, 21(140):1-67.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Know what you don't know: Unanswerable questions for SQuAD", |
|
"authors": [ |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Rajpurkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "784--789", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-2124" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pranav Rajpurkar, Robin Jia, and Percy Liang. 2018. Know what you don't know: Unanswerable ques- tions for SQuAD. In Proceedings of the 56th An- nual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 784- 789, Melbourne, Australia. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "SQuAD: 100,000+ questions for machine comprehension of text", |
|
"authors": [ |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Rajpurkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Konstantin", |
|
"middle": [], |
|
"last": "Lopyrev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2383--2392", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D16-1264" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. 2016. SQuAD: 100,000+ questions for machine comprehension of text. In Proceedings of the 2016 Conference on Empirical Methods in Natu- ral Language Processing, pages 2383-2392, Austin, Texas. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Named entity recognition -is there a glass ceiling", |
|
"authors": [ |
|
{ |
|
"first": "Tomasz", |
|
"middle": [], |
|
"last": "Stanislawek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Wr\u00f3blewska", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alicja", |
|
"middle": [], |
|
"last": "W\u00f3jcicka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Ziembicki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Przemyslaw", |
|
"middle": [], |
|
"last": "Biecek", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "624--633", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/K19-1058" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomasz Stanislawek, Anna Wr\u00f3blewska, Alicja W\u00f3j- cicka, Daniel Ziembicki, and Przemyslaw Biecek. 2019. Named entity recognition -is there a glass ceiling? In Proceedings of the 23rd Confer- ence on Computational Natural Language Learning (CoNLL), pages 624-633, Hong Kong, China. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Learning to capitalize with characterlevel recurrent neural networks: An empirical study", |
|
"authors": [ |
|
{ |
|
"first": "Raymond Hendy", |
|
"middle": [], |
|
"last": "Susanto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hai", |
|
"middle": [ |
|
"Leong" |
|
], |
|
"last": "Chieu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2090--2095", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D16-1225" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Raymond Hendy Susanto, Hai Leong Chieu, and Wei Lu. 2016. Learning to capitalize with character- level recurrent neural networks: An empirical study. In Proceedings of the 2016 Conference on Empiri- cal Methods in Natural Language Processing, pages 2090-2095, Austin, Texas. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "30", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141 ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Pro- cessing Systems 30, pages 5998-6008. Curran Asso- ciates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "SuperGLUE: A stickier benchmark for general-purpose language understanding systems", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yada", |
|
"middle": [], |
|
"last": "Pruksachatkun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikita", |
|
"middle": [], |
|
"last": "Nangia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanpreet", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "3266--3280", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Wang, Yada Pruksachatkun, Nikita Nangia, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel Bowman. 2019. SuperGLUE: A stickier benchmark for general-purpose language understanding systems. In Advances in Neural In- formation Processing Systems 32, pages 3266-3280. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "GLUE: A multi-task benchmark and analysis platform for natural language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanpreet", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 EMNLP Workshop Black-boxNLP: Analyzing and Interpreting Neural Networks for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "353--355", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-5446" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Wang, Amanpreet Singh, Julian Michael, Fe- lix Hill, Omer Levy, and Samuel Bowman. 2018. GLUE: A multi-task benchmark and analysis plat- form for natural language understanding. In Pro- ceedings of the 2018 EMNLP Workshop Black- boxNLP: Analyzing and Interpreting Neural Net- works for NLP, pages 353-355, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Struct-BERT: Incorporating language structures into pretraining for deep language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bin", |
|
"middle": [], |
|
"last": "Bi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chen", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiangnan", |
|
"middle": [], |
|
"last": "Xia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zuyi", |
|
"middle": [], |
|
"last": "Bao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liwei", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luo", |
|
"middle": [], |
|
"last": "Si", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "8th International Conference on Learning Representations", |
|
"volume": "2020", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei Wang, Bin Bi, Ming Yan, Chen Wu, Jiangnan Xia, Zuyi Bao, Liwei Peng, and Luo Si. 2020. Struct- BERT: Incorporating language structures into pre- training for deep language understanding. In 8th International Conference on Learning Representa- tions, ICLR 2020, Addis Ababa, Ethiopia, April 26- 30, 2020.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "A deep reinforcement learning based multi-step coarse to fine question answering (MSCQA) system", |
|
"authors": [ |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongxia", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "33", |
|
"issue": "", |
|
"pages": "7224--7232", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1609/aaai.v33i01.33017224" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yu Wang and Hongxia Jin. 2019. A deep reinforce- ment learning based multi-step coarse to fine ques- tion answering (MSCQA) system. In Proceedings of the AAAI Conference on Artificial Intelligence, vol- ume 33, pages 7224-7232.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "A survey on recent advances in named entity recognition from deep learning models", |
|
"authors": [ |
|
{ |
|
"first": "Vikas", |
|
"middle": [], |
|
"last": "Yadav", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Bethard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2145--2158", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vikas Yadav and Steven Bethard. 2018. A survey on re- cent advances in named entity recognition from deep learning models. In Proceedings of the 27th Inter- national Conference on Computational Linguistics, pages 2145-2158, Santa Fe, New Mexico, USA. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Recent trends in deep learning based natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Young", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Hazarika", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Poria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Cambria", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/MCI.2018.2840738" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "T. Young, D. Hazarika, S. Poria, and E. Cambria. 2018. Recent trends in deep learning based natural lan- guage processing [review article].", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Big Bird: Transformers for longer sequences", |
|
"authors": [ |
|
{ |
|
"first": "Manzil", |
|
"middle": [], |
|
"last": "Zaheer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guru", |
|
"middle": [], |
|
"last": "Guruganesh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Avinava", |
|
"middle": [], |
|
"last": "Dubey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joshua", |
|
"middle": [], |
|
"last": "Ainslie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Alberti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Santiago", |
|
"middle": [], |
|
"last": "Ontanon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [], |
|
"last": "Pham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anirudh", |
|
"middle": [], |
|
"last": "Ravula", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qifan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amr", |
|
"middle": [], |
|
"last": "Ahmed", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Computing Research Repository", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2007.14062" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, and Amr Ahmed. 2020. Big Bird: Transformers for longer sequences. Computing Research Repository, arXiv:2007.14062.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Aligning books and movies: Towards story-like visual explanations by watching movies and reading books", |
|
"authors": [ |
|
{ |
|
"first": "Yukun", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Kiros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rich", |
|
"middle": [], |
|
"last": "Zemel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raquel", |
|
"middle": [], |
|
"last": "Urtasun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Torralba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanja", |
|
"middle": [], |
|
"last": "Fidler", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 IEEE International Conference on Computer Vision (ICCV), ICCV '15", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "19--27", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/ICCV.2015.11" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yukun Zhu, Ryan Kiros, Rich Zemel, Ruslan Salakhut- dinov, Raquel Urtasun, Antonio Torralba, and Sanja Fidler. 2015. Aligning books and movies: Towards story-like visual explanations by watching movies and reading books. In Proceedings of the 2015 IEEE International Conference on Computer Vision (ICCV), ICCV '15, pages 19-27, USA. IEEE Com- puter Society.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "The architecture of Dual-Source Transformer as proposed by for Automatic Post-Editing. In the case of WikiReading Recycled and WikiReading, the encoder transforms an article and the corresponding properties separately." |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "The relation of property frequency and Mean-Multi-Property-F 1 . Both RoBERTa and Vanilla refer to Dual-Source Transformers." |
|
}, |
|
"FIGREF3": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "5 threshold. The details are presented in Figure 3. The relation of property normalized entropy and Mean-Multi-Property-F 1 . Both RoBERTa and Vanilla refer to Dual-Source Transformers." |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"text": "", |
|
"type_str": "table", |
|
"content": "<table><tr><td>: The size of WikiReading splits (Size) and num-</td></tr><tr><td>ber of articles leaked from the train set as an absolute</td></tr><tr><td>value or percentage.</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF3": { |
|
"html": null, |
|
"text": "Selected differences between WR and WRR. Both metrics are described in Section 6.", |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"num": null |
|
}, |
|
"TABREF5": { |
|
"html": null, |
|
"text": "An average per-article size of the corresponding subsets as a percent of a total number of properties.", |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"num": null |
|
}, |
|
"TABREF7": { |
|
"html": null, |
|
"text": "Comparison of evaluated models. The T5 model can be considered as a pretrained equivalent of Vanilla Transformer, and our RoBERTa-based model can be viewed as a partially-pretrained Vanilla Dual-Source Transformer. Basic seq2seq is an RNN counterpart of both T5 and Vanilla Transformer.", |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"num": null |
|
}, |
|
"TABREF9": { |
|
"html": null, |
|
"text": "Results on WikiReading (test set).", |
|
"type_str": "table", |
|
"content": "<table><tr><td>Basic s2s</td></tr><tr><td>denotes the re-implemented model described in Sec-</td></tr><tr><td>tion 6.2.</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF11": { |
|
"html": null, |
|
"text": "Results on WikiReading Recycled human-annotated test set supplemented with scores on diagnostics subsets. All scores are Mean-Multi-Property-F 1 .", |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |