|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:56:51.230537Z" |
|
}, |
|
"title": "BREAK It Down: A Question Understanding Benchmark", |
|
"authors": [ |
|
{ |
|
"first": "Tomer", |
|
"middle": [], |
|
"last": "Wolfson", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Tel Aviv University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Mor", |
|
"middle": [], |
|
"last": "Geva", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Tel Aviv University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Ankit", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Tel Aviv University", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Allen Institute for AI", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Ilan University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Deutch", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Tel Aviv University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Berant", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Tel Aviv University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Understanding natural language questions entails the ability to break down a question into the requisite steps for computing its answer. In this work, we introduce a Question Decomposition Meaning Representation (QDMR) for questions. QDMR constitutes the ordered list of steps, expressed through natural language, that are necessary for answering a question. We develop a crowdsourcing pipeline, showing that quality QDMRs can be annotated at scale, and release the BREAK dataset, containing over 83K pairs of questions and their QDMRs. We demonstrate the utility of QDMR by showing that (a) it can be used to improve open-domain question answering on the HOTPOTQA dataset, (b) it can be deterministically converted to a pseudo-SQL formal language, which can alleviate annotation in semantic parsing applications. Last, we use BREAK to train a sequence-to-sequence model with copying that parses questions into QDMR structures, and show that it substantially outperforms several natural baselines.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Understanding natural language questions entails the ability to break down a question into the requisite steps for computing its answer. In this work, we introduce a Question Decomposition Meaning Representation (QDMR) for questions. QDMR constitutes the ordered list of steps, expressed through natural language, that are necessary for answering a question. We develop a crowdsourcing pipeline, showing that quality QDMRs can be annotated at scale, and release the BREAK dataset, containing over 83K pairs of questions and their QDMRs. We demonstrate the utility of QDMR by showing that (a) it can be used to improve open-domain question answering on the HOTPOTQA dataset, (b) it can be deterministically converted to a pseudo-SQL formal language, which can alleviate annotation in semantic parsing applications. Last, we use BREAK to train a sequence-to-sequence model with copying that parses questions into QDMR structures, and show that it substantially outperforms several natural baselines.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Recently, increasing work has been devoted to models that can reason and integrate information from multiple parts of an input. This includes reasoning over images (Antol et al., 2015; Johnson et al., 2017; Suhr et al., 2019; Hudson and Manning, 2019) , paragraphs (Dua et al., 2019) , documents (Welbl et al., 2018; Talmor and Berant, 2018; , tables (Pasupat and Liang, 2015) , and more. Question answering (QA) is commonly used to test the ability to reason, where a complex natural language question is posed, and is to be answered given a particular context (text, image, etc.) . Although questions often share structure across tasks and modalities, understanding the language of complex questions has thus far been addressed within each task in isolation. Consider the questions in Figure 1 , all of which express operations such as fact chaining and counting. Additionally, humans can take a complex question and break it down into a sequence of simpler questions even when they are unaware of what or where the answer is. This ability, to compose and decompose questions, lies at the heart of human language (Pelletier, 1994) and allows us to tackle previously unseen problems. Thus, better question understanding models should improve performance and generalization in tasks that require multi-step reasoning or that do not have access to substantial amounts of data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 164, |
|
"end": 184, |
|
"text": "(Antol et al., 2015;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 185, |
|
"end": 206, |
|
"text": "Johnson et al., 2017;", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 207, |
|
"end": 225, |
|
"text": "Suhr et al., 2019;", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 226, |
|
"end": 251, |
|
"text": "Hudson and Manning, 2019)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 265, |
|
"end": 283, |
|
"text": "(Dua et al., 2019)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 296, |
|
"end": 316, |
|
"text": "(Welbl et al., 2018;", |
|
"ref_id": "BIBREF48" |
|
}, |
|
{ |
|
"start": 317, |
|
"end": 341, |
|
"text": "Talmor and Berant, 2018;", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 351, |
|
"end": 376, |
|
"text": "(Pasupat and Liang, 2015)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
|
{ |
|
"start": 1115, |
|
"end": 1132, |
|
"text": "(Pelletier, 1994)", |
|
"ref_id": "BIBREF41" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 787, |
|
"end": 795, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this work we propose question understanding as a standalone language understanding task. We introduce a formalism for representing the meaning of questions that relies on question decomposition, and is agnostic to the information source. Our formalism, Question Decomposition Meaning Representation (QDMR), is inspired by database query languages (SQL; SPARQL), and by semantic parsing (Zelle and Mooney, 1996; Zettlemoyer and Collins, 2005; Clarke et al., 2010) , in which questions are given full meaning representations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 389, |
|
"end": 413, |
|
"text": "(Zelle and Mooney, 1996;", |
|
"ref_id": "BIBREF53" |
|
}, |
|
{ |
|
"start": 414, |
|
"end": 444, |
|
"text": "Zettlemoyer and Collins, 2005;", |
|
"ref_id": "BIBREF54" |
|
}, |
|
{ |
|
"start": 445, |
|
"end": 465, |
|
"text": "Clarke et al., 2010)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We express complex questions via simple (''atomic'') questions that can be executed in sequence to answer the original question. Each atomic question can be mapped into a small set of formal operations, where each operation either selects a set of entities, retrieves information about their attributes, or aggregates information over entities. While this has been formalized in knowledge-base (KB) query languages (Chamberlin and Boyce, 1974) , the same intuition can be applied to other modalities, such as images and text. QDMR abstracts away the context needed to answer the question, allowing in principle to query multiple sources for the same question.", |
|
"cite_spans": [ |
|
{ |
|
"start": 415, |
|
"end": 443, |
|
"text": "(Chamberlin and Boyce, 1974)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In contrast to semantic parsing, QDMR operations are expressed through natural language, facilitating annotation at scale by non-experts. Figure 1 presents examples of complex questions on three different modalities. The middle box lists the natural language decompositions provided for each question, and the bottom box displays their corresponding formal queries.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 138, |
|
"end": 146, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "QDMR serves as the formalism for creating BREAK, a question decomposition dataset of 83,978 questions over ten datasets and three modalities. BREAK is collected via crowdsourcing, with a user interface that allows us to train crowd-workers to produce quality decompositions ( \u00a73). Validating the quality of annotated structures reveals 97.4% to be correct ( \u00a74).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We demonstrate the utility of QDMR in two setups. First, we regard the task of open-domain QA over multi-hop questions from the HOTPOTQA dataset. Combining QDMR structures in BREAK with a reading comprehension (RC) model (Min et al., 2019b) improves F 1 from 43.3 to 52.4 ( \u00a75). Second, we show that decompositions in BREAK possess high annotation consistency, which indicates that annotators produce high-quality QDMRs ( \u00a74.3). In \u00a76 we discuss how these QDMRs can be used as a strong proxy for full logical forms in semantic parsing.", |
|
"cite_spans": [ |
|
{ |
|
"start": 221, |
|
"end": 240, |
|
"text": "(Min et al., 2019b)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We use BREAK to train a neural QDMR parser that maps questions into QDMR representations, based on a sequence-to-sequence model with copying (Gu et al., 2016) . Manual analysis of generated structures reveals an accuracy of 54%, showing that automatic QDMR parsing is possible, though still far from human performance ( \u00a77).", |
|
"cite_spans": [ |
|
{ |
|
"start": 141, |
|
"end": 158, |
|
"text": "(Gu et al., 2016)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To conclude, our contributions are:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Proposing the task of question understanding and introducing the QDMR formalism for representing the meaning of questions ( \u00a72)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 The BREAK dataset, which consists of 83,978 examples sampled from 10 datasets over three distinct information sources ( \u00a73) \u2022 Showing how QDMR can be used to improve open-domain question answering ( \u00a75), as well as alleviate the burden of annotating logical forms in semantic parsing ( \u00a76)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 A QDMR parser based on a sequence-tosequence model with copying mechanism ( \u00a77)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The BREAK dataset, models, and entire codebase are publicly available at: https://github. com/tomerwolgithub/Break.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this section we define the QDMR formalism for domain agnostic question decomposition.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Question Decomposition Formalism", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "QDMR is primarily inspired by SQL (Codd, 1970; Chamberlin and Boyce, 1974) . However, while SQL was designed for relational databases, QDMR also aims to capture the meaning of questions over unstructured sources such as text and images. Thus, our formalism abstracts away from SQL by assuming an underlying ''idealized'' KB, which contains all entities and relations expressed in the question. This abstraction enables QDMR to be unrestricted to a particular modality, with its operators to be executed also against text QDMR Definition Given a question x, its QDMR is a sequence of n steps, s = s 1 , ..., s n , where each step s i corresponds to a single query 1 A system could potentially answer ''Name the political parties of the most densely populated country'', by retrieving ''the most densely populated country'' using a database query, and ''the political parties of #1'' via an RC model. operator f i (see Table 1 ). A step, s i is a sequence of tokens,", |
|
"cite_spans": [ |
|
{ |
|
"start": 34, |
|
"end": 46, |
|
"text": "(Codd, 1970;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 47, |
|
"end": 74, |
|
"text": "Chamberlin and Boyce, 1974)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 663, |
|
"end": 664, |
|
"text": "1", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 917, |
|
"end": 924, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Question Decomposition Formalism", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "s i = (s i 1 , ..., s i m i ), where a token s i k", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Question Decomposition Formalism", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "is either a word from a predefined lexicon L x (details in \u00a73) or a reference token, referring to the result of a previous step s j , where j < i. The last step, s n returns the answer to x.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Question Decomposition Formalism", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Decomposition Graph QDMR structures can be represented as a directed acyclic graph (DAG), used for evaluating QDMR parsing models ( \u00a77.1). Given QDMR, s = s 1 , ..., s n , each step s i is a node in the graph, labeled by its sequence of tokens and index i. Edges in the graph are induced by reference tokens to previous steps. Node s i is connected by an incoming edge (s", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Question Decomposition Formalism", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "j , s i ), if ref [s j ] \u2208 (s i 1 , ..., s i m i ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Question Decomposition Formalism", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "That is, if one of the tokens in s i is a reference to s j . Figure 2 displays a sequence of QDMR steps, represented as a DAG.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 61, |
|
"end": 69, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Question Decomposition Formalism", |
|
"sec_num": "2" |
|
}, |
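To make the step/reference-token structure above concrete, here is a minimal Python sketch (ours, not part of the paper's codebase; the ';' step delimiter and the '#j' reference format are assumptions that follow the conventions described in this section):

```python
import re

def parse_qdmr(decomposition: str):
    """Split a QDMR string into its steps (assumed ';'-delimited)."""
    return [step.strip() for step in decomposition.split(";")]

def build_dag(steps):
    """Return edges (j, i): step i has an incoming edge from step j
    whenever it contains the reference token '#j' (steps are 1-indexed)."""
    edges = []
    for i, step in enumerate(steps, start=1):
        for ref in re.findall(r"#(\d+)", step):
            j = int(ref)
            assert j < i, "reference tokens must point to previous steps"
            edges.append((j, i))
    return edges

steps = parse_qdmr(
    "return papers about question decomposition; "
    "return authors of #1; "
    "return the number of #2"
)
print(build_dag(steps))  # [(1, 2), (2, 3)]
```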
|
{ |
|
"text": "QDMR Operators A QDMR step corresponds to one of 13 query operators. We designed the operators to be expressive enough to represent the meaning of questions from a diverse set of datasets ( \u00a73). QDMR assumes an underlying KB, K, which contains all of the entities and relations expressed in its steps. A relation, r, is a function mapping two arguments to whether r holds in K: [[r(x, y) ]] K \u2208 {true, false}. The operators operate over: (i) sets of objects S o , where objects o, are either numbers n, boolean values b, or entities e in K; (ii) a closed set of phrases w op , describing logical operations; and (iii) natural language phrases w, representing entities and relations in K. We assume the existence of grounding functions that map a phrase w to concrete constants in K. Table 2 describes the aforementioned constructs. In addition, we define the function map K (S e , S o ) which maps entity e \u2208 S e to the set of corresponding objects from S o . Each o \u2208 S o corresponds to an e \u2208 S e by being contained in the result of a sequence of PROJECT and GROUP operations applied to e:", |
|
"cite_spans": [ |
|
{ |
|
"start": 378, |
|
"end": 387, |
|
"text": "[[r(x, y)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 783, |
|
"end": 790, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Question Decomposition Formalism", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "2 map K (S e , S o ) = { e, o | e \u2208 S e , o \u2208 S o , o \u2208 op k \u2022 ... \u2022 op 1 (e)}.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Question Decomposition Formalism", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We now formally define each QDMR operator and provide concrete examples in Table 1. \u2022 SELECT: Computes the set of entities in K corresponding to w: select(w) = ground e K (w).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 75, |
|
"end": 83, |
|
"text": "Table 1.", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Question Decomposition Formalism", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "2 The sequence of operations op 1 , . . . , op k is traced using the references to previous steps in the QDMR structure.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Question Decomposition Formalism", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Given a phrase wagg which describes an aggregate operation, agg denotes the corresponding operation. Either max, min, count , sum or avg . sup Given wsup describing a superlative, it denotes the corresponding function. Either arg max or arg min. com Given wcom describing a comparison, it denotes the corresponding relation out of: <, \u2264, >, \u2265, =, =. ari Given w ari describing an arithmetic operation, it denotes the corresponding operation out of: +, \u2212, * , /. ground e K (w) Given a natural language phrase w, it returns the set of corresponding KB entities, Se. ground r K (w) Given a natural language phrase w, it returns the corresponding KB relation, r. \u2022 FILTER: Filters a set of objects so that it follows the condition expressed by w:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "filter(S o , w) = S o \u2229 {o | [[r(e, o)]] K \u2261 true},", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "where r = ground r K (w), e = ground e K (w)}.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 PROJECT: Computes the objects that relate to input entities S e with the relation expressed by w,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "proj(w, S e ) = {o | [[r(e, o)]] K \u2261 true, e \u2208 S e },", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "where r = ground r K (w).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 AGGREGATE: The result of applying an aggregate operation:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "aggregate(w agg , S o ) = {agg (S o )}.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 GROUP: Receives a set of ''keys'', S e , and a set of corresponding ''values'', S o . It outputs a set of numbers, each corresponding to a key e \u2208 S e . Each number results from applying aggregate, w agg to the subset of values corresponding to e.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "group(w agg , S o , S e ) = {agg (V o (e)) | e \u2208 S e },", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "where V o (e) = {o | e, o \u2208 map K (S e , S o )}.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 SUPERLATIVE: Receives entity set S e and number set S n . Each number n \u2208 S n is the result of a mapping from an entity e \u2208 S e . It returns a subset of S e for which the corresponding number is either highest/lowest as indicated by w sup .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "super(S e , S n , w sup ) = {sup (map K (S e , S n ))}.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 COMPARATIVE: Receives entity set S e and number set S n . Each n \u2208 S n is the result of a mapping from an e \u2208 S e . It returns a subset of S e for which the comparison with n \u2032 , represented by w com , holds.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "comparative(S e , S n , w com , n \u2032 ) = {e | e, n \u2208 map K (S e , S n ), com(n, n \u2032 ) \u2261 true}.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 UNION: Denotes the union of object sets:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "union(S 1 o , S 2 o ) = S 1 o \u222a S 2 o .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 DISCARD: Denotes the set difference of two object sets:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "discard(S 1 o , S 2 o ) = S 1 o \\ S 2 o .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 INTERSECTION: Computes the intersection of its entity sets and returns all objects which relate to the entities with the relation expressed by w.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "intersect(w, S 1 e , S 2 e ) = {o | e \u2208 S 1 e \u2229 S 2 e , [[r(e, o)]] K \u2261 true, r = ground r K (w)}.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 SORT: Orders a set of entities according to a corresponding set of numbers. Each number n i is the result of a mapping from entity e i .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "sort(S e , S n ) = { e i 1 ...e i m | e i j , n i j \u2208 map K (S e , S n ), n i 1 \u2264 ... \u2264 n i m }.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 BOOLEAN: Returns whether the relation expressed by w holds between the input objects:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "boolean(S 1 o , w, S 2 o ) = {[[r(o 1 , o 2 )]] K }, where r = ground r K (w) and S 1 o , S 2 o are singleton sets containing o 1 , o 2 respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 ARITHMETIC: Computes the application of an arithmetic operation:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "arith(w ari , S 1 n , S 2 n ) = {ari(n 1 , n 2 )}, where S 1 n , S 2 n are singleton sets containing n 1 , n 2 respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
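To illustrate the set-based semantics of these operators, the following toy sketch (ours; the triple-store KB and the trivial grounding by name match are illustrative assumptions, not the paper's implementation) executes a small SELECT / PROJECT / AGGREGATE chain over a hypothetical ''idealized'' KB:

```python
# Hypothetical idealized KB as (entity, relation, value) triples.
KB = {
    ("France", "population", 67_000_000),
    ("Germany", "population", 83_000_000),
    ("France", "capital", "Paris"),
    ("Germany", "capital", "Berlin"),
}

def select(phrase):
    # select(w) = ground^e_K(w); here, grounding is a trivial name match.
    return {e for (e, r, v) in KB if e.lower() == phrase.lower()}

def project(relation_phrase, entities):
    # proj(w, S_e) = {o | [[r(e, o)]]_K is true, e in S_e}
    return {v for (e, r, v) in KB if r == relation_phrase and e in entities}

def aggregate(w_agg, objects):
    # aggregate(w_agg, S_o) = {agg(S_o)}
    agg = {"count": len, "max": max, "min": min, "sum": sum}[w_agg]
    return {agg(objects)}

# "return countries; return population of #1; return the highest of #2"
step1 = select("France") | select("Germany")
step2 = project("population", step1)
step3 = aggregate("max", step2)
print(step3)  # {83000000}
```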
|
{ |
|
"text": "High-level Decompositions In QDMR, each step corresponds to a single logical operator. In certain contexts, a less granular decomposition might be desirable, where sub-structures containing multiple operators could be collapsed to a single node. This can be easily achieved in QDMR by merging certain adjacent nodes in its DAG structure. When examining existing RC datasets Dua et al., 2019) , we observed that long spans in the question often match long spans in the text, due to existing practices of generating questions Step #1 merges together SELECT and multiple FILTER steps. via crowdsourcing. In such cases, decomposing the long spans into multiple steps and having an RC model process each step independently, increases the probability of error. Thus, to promote the usefulness of QDMR for current RC datasets and models, we introduce high-level QDMR, by merging the following operators:", |
|
"cite_spans": [ |
|
{ |
|
"start": 374, |
|
"end": 391, |
|
"text": "Dua et al., 2019)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 SELECT + PROJECT on named entities:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For the question, ''What is the birthdate of Jane?'' its high-level QDMR would be ''return the birthdate of Jane'' as opposed to the more granular, ''return Jane; return birthdate of #1''.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 SELECT + FILTER: Consider the first step of the example in Figure 3 . It contains both a SELECT operator (''return actress'') as well as two FILTER conditions (''that played...'', ''on the TV sitcom...'').", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 61, |
|
"end": 69, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 FILTER + GROUP + COMPARATIVE: Certain high-level FILTER steps contain implicit grouping and comparison operations. E.g., ''return yard line scores in the fourth quarter; return #1 that both teams scored from''.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Step #2 contains an implicit GROUP of team per yard line and a COMPARATIVE returning the lines where exactly two teams scored.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
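A minimal sketch of the node-merging idea referenced above (ours; this simplified rule only collapses a step into its predecessor when the step begins with a reference to it, a rough stand-in for the SELECT + FILTER merge):

```python
import re

def merge_into_predecessor(steps):
    """Collapse step i into step i-1 when it starts with '#(i-1)',
    then rewrite all reference tokens to the new indices."""
    merged, new_index = [], {}
    for i, step in enumerate(steps, start=1):
        prev_ref = f"#{i - 1}"
        if merged and step.startswith(prev_ref):
            # append the filter condition to the previous (merged) step
            merged[-1] += " " + step[len(prev_ref):].strip()
        else:
            merged.append(step)
        new_index[i] = len(merged)  # old step index -> new step index
    return [re.sub(r"#(\d+)", lambda m: f"#{new_index[int(m.group(1))]}", s)
            for s in merged]

steps = ["return actress",
         "#1 that played Martha Alston",
         "return spouse of #2"]
print(merge_into_predecessor(steps))
# ['return actress that played Martha Alston', 'return spouse of #1']
```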
|
{ |
|
"text": "We provide both granular and high-level QDMRs for a random subset of RC questions (see Table 3 ). The concrete utility of high-level QDMR to open-domain QA is presented in \u00a75.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 87, |
|
"end": 94, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Function Description agg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Our annotation pipeline for generating BREAK consisted of three phases. First, we collected complex questions from existing QA benchmarks. Second, we crowdsourced the QDMR annotation of these questions. Finally, we validated worker annotations in order to maintain their quality. Question Collection Questions in BREAK were randomly sampled from ten QA datasets over the following tasks (Table 3) :", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 387, |
|
"end": 396, |
|
"text": "(Table 3)", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data Collection", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Semantic Parsing: Mapping natural language utterances into formal queries, to be executed on a target KB (Price, 1990; Zelle and Mooney, 1996; Yu et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 107, |
|
"end": 120, |
|
"text": "(Price, 1990;", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 121, |
|
"end": 144, |
|
"text": "Zelle and Mooney, 1996;", |
|
"ref_id": "BIBREF53" |
|
}, |
|
{ |
|
"start": 145, |
|
"end": 161, |
|
"text": "Yu et al., 2018)", |
|
"ref_id": "BIBREF52" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Collection", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Reading Comprehension (RC): Questions that require understanding of a text passage by reasoning over multiple sentences (Talmor and Berant, 2018; Dua et al., 2019; Abujabal et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 122, |
|
"end": 147, |
|
"text": "(Talmor and Berant, 2018;", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 148, |
|
"end": 165, |
|
"text": "Dua et al., 2019;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 166, |
|
"end": 188, |
|
"text": "Abujabal et al., 2019)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Collection", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Visual Question Answering (VQA): Questions over images that require both visual and numerical reasoning skills (Johnson et al., 2017; Suhr et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 113, |
|
"end": 135, |
|
"text": "(Johnson et al., 2017;", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 136, |
|
"end": 154, |
|
"text": "Suhr et al., 2019)", |
|
"ref_id": "BIBREF46" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Collection", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "All questions collected were composed by human annotators. 3 HOTPOTQA questions were all sampled from the hard split of the dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Collection", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "QDMR Annotation A key question is whether it is possible to train non-expert annotators to produce high-quality QDMRs. We designed an annotation interface (Figure 4) , where workers are first given explanations and examples on how to identify and phrase each of the operators in Table 1 . Then, workers decompose questions into a list of 3 Except for COMPLEXWEBQUESTIONS (CWQ), where annotators paraphrased automatically generated questions. steps, where they are only allowed to use words from a lexicon L x , which contains: (a) words appearing in the question (or their automatically computed inflections), (b) words from a small pre-defined list of 66 function word such as, 'if ', 'on', 'for each', or (c) reference tokens that refer to the results of a previous step. This ensures that the language used by workers is consistent across examples, while being expressive enough for the decomposition. Our annotation interface presents workers with the question only, so they are agnostic to the original modality of the question. The efficacy of this process is explored in \u00a74.2.", |
|
"cite_spans": [ |
|
{ |
|
"start": 683, |
|
"end": 710, |
|
"text": "', 'on', 'for each', or (c)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 155, |
|
"end": 165, |
|
"text": "(Figure 4)", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 279, |
|
"end": 286, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data Collection", |
|
"sec_num": "3" |
|
}, |
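A minimal sketch of how the allowed lexicon L_x could be assembled per question (ours; the inflection rule and the sample of function words below are illustrative, and the actual list of 66 function words is not reproduced here):

```python
FUNCTION_WORDS = {"return", "if", "on", "for each", "of", "that", "is",
                  "the", "number", "and", "or", "where"}  # sample only

def build_lexicon(question: str, max_steps: int = 20):
    tokens = {t.lower().strip("?.,") for t in question.split()}
    # crude stand-in for "automatically computed inflections"
    inflections = {t + "s" for t in tokens} | {t.rstrip("s") for t in tokens}
    references = {f"#{i}" for i in range(1, max_steps + 1)}
    return tokens | inflections | FUNCTION_WORDS | references

lex = build_lexicon("What is the birthdate of Jane?")
print("birthdates" in lex, "#3" in lex, "banana" in lex)  # True True False
```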
|
{ |
|
"text": "We used Amazon Mechanical Turk to crowdsource QDMR annotation. In each task, workers decomposed questions, paying them $0.40 per question, which amounts to an average pay of $12 per hour. Overall, we collected 83,978 examples using 64 distinct workers. The dataset was partitioned into train/development/test sets following the partitions in the original datasets. During partition, we made sure that development and test samples do not share the same context.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Collection", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "To ensure worker quality, we initially published qualification tasks, open to all workers in the United States. The task required workers to carefully review the annotation instructions and decompose 10 example questions. The examples were selected so that each QDMR operation should appear in at least one of their decompositions (Table 1) . In total, 64 workers were able to correctly decompose at least 8 examples and were qualified as annotators. To validate worker performance over time, we conducted random validations of annotations. Over 9K annotations were reviewed by experts throughout the annotation process. Only workers who consistently produced correct QDMRs for at least 90% of their tasks were allowed to continue as annotators.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 331, |
|
"end": 340, |
|
"text": "(Table 1)", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Worker Validation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "This section examines the properties of collected QDMRs in BREAK and analyzes their quality.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset Analysis", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Overall, BREAK contains 83,978 decompositions, including 60,150 QDMRs and 23,828 examples with high-level QDMRs, which are exclusive to text modalities. Table 3 shows that data is proportionately distributed between questions over structured (DB) and unstructured modalities (text, images). The distribution of QDMR operators is presented in Table 4 , detailing the prevalence of each query operator 4 (we automatically compute this distribution, as explained in \u00a74.3). SELECT and PROJECT are the most common operators. Additionally, at least 10% of QDMRs contain operators such as GROUP and COMPARATIVE, which entail complex reasoning, in contrast to high-level QDMRs, where such operations are rare. This distinction sheds light on the reasoning types required for answering RC datasets (highlevel QDMR) compared with more structured tasks (QDMR). include 3-6 steps, whereas high-level QDMRs are much shorter, as a single SELECT often finds an entity described by a long noun phrase (see \u00a72).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 153, |
|
"end": 160, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 342, |
|
"end": 349, |
|
"text": "Table 4", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Quantitative Analysis", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We describe the process of estimating the correctness of collected QDMR annotations. Similar to previous works (Yu et al., 2018; Kwiatkowski et al., 2019) we use expert judgments, where the experts had prepared the guidelines for the annotation task. Given a question and its annotated QDMR, (q, s) the expert determines the correctness of s using one of the following categories:", |
|
"cite_spans": [ |
|
{ |
|
"start": 111, |
|
"end": 128, |
|
"text": "(Yu et al., 2018;", |
|
"ref_id": "BIBREF52" |
|
}, |
|
{ |
|
"start": 129, |
|
"end": 154, |
|
"text": "Kwiatkowski et al., 2019)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Quality Analysis", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "\u2022 Correct (C): If s constitutes a list of QDMR operations that lead to correctly answering q.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Quality Analysis", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "\u2022 Granular (C G ): If s is correct and none of its operators can be further decomposed. 5", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Quality Analysis", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "\u2022 Incorrect (I): If s is in neither C nor C G .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Quality Analysis", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Examples of these expert judgments are shown in Figure 5 . To estimate expert judgment of correctness, we manually reviewed a random sample of 500 QDMRs from BREAK. We classified 93.8% of the samples in C G and another 3.6% in C. Thus, 97.4% of the samples constitute a correct decomposition of the original question. Workers have somewhat struggled with decomposing superlatives (e.g., ''biggest sphere''), as evident from the first question in Figure 5 . Collected QDMRs displayed similar estimates of C, C G , and I, regardless of their modality (DB, text, or image).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 48, |
|
"end": 56, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF4" |
|
}, |
|
{ |
|
"start": 446, |
|
"end": 454, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Quality Analysis", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "As QDMR is expressed using natural language, it introduces variability into its annotations. We wish to validate the consistency of collected QDMRs, that is, whether we can correctly infer the formal QDMR operator (f i ) and its arguments from each step (s i ). To infer these formal representations, we developed an algorithm that goes over the QDMR structure step-by-step, and for each step s i , uses a set of predefined templates to identify f i and its arguments, expressed in s i . This results in an execution graph (Figure 2) , where the execution result of a parent node serves as input to its child. Figure 1 presents three QDMR decompositions along with the formal graphs output by our algorithm (lower box). Each node lists its operator (e.g., GROUP), its constant input listed in brackets (e.g., count) and its dynamic input, which are the execution results of its parent nodes.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 523, |
|
"end": 533, |
|
"text": "(Figure 2)", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 610, |
|
"end": 618, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Annotation Consistency", |
|
"sec_num": "4.3" |
|
}, |
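The template matching can be pictured with a few regular expressions; this is a simplified sketch (ours; the paper's actual rule set is larger and more careful about argument extraction):

```python
import re

TEMPLATES = [  # checked in order; patterns are illustrative, not exhaustive
    ("AGGREGATE", re.compile(r"^return (the )?(number|count|sum|average|highest|lowest) of #\d+$")),
    ("FILTER",    re.compile(r"^return #\d+ ")),
    ("PROJECT",   re.compile(r"^return .+ of #\d+$")),
    ("SELECT",    re.compile(r"^return (?!.*#\d+).+$")),
]

def op_type(step: str) -> str:
    for op, pattern in TEMPLATES:
        if pattern.search(step.lower()):
            return op
    return "UNKNOWN"

for s in ["return cubes", "return #1 that are blue", "return the number of #2"]:
    print(s, "->", op_type(s))
# return cubes -> SELECT
# return #1 that are blue -> FILTER
# return the number of #2 -> AGGREGATE
```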
|
{ |
|
"text": "Overall, 99.5% of QDMRs had all their steps mapped into pseudo-logical forms by our algorithm. To evaluate the correctness of the mapping algorithm, we randomly sampled 350 logical forms, and examined the structure of the formulas, assuming that words copied from the question correspond to entities and relations in an idealized KB (see \u00a72). Of this sample, 99.4% of its examples had all of their steps, s i , correctly mapped to the corresponding f i . Overall, 93.1% of the examples were of fully accurate logical forms, with errors being due to QDMRs that were either incorrect or not fully decomposed (I, C in \u00a74.2). Thus, a rule-based algorithm can map more than 93% of the annotations into a correct formal representation. This shows that our annotators produced consistent and high-quality QDMRs. Moreover, it suggests that non-experts can annotate questions with pseudo-logical forms, which can be used as Algorithm 1 BREAKRC 1: procedure BREAKRC(s: QDMR) 2:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotation Consistency", |
|
"sec_num": "4.3" |
|
}, |
|
{

"text": "Algorithm 1 BREAKRC: 1: procedure BREAKRC(s: QDMR) 2: ansrs \u2190 [] 3: for s^i in s = s^1, ..., s^n do 4: op \u2190 OPTYPE(s^i) 5: refs \u2190 REFERENCEDSTEPS(s^i) 6: if op is SELECT then 7: ans \u2190 ANSWER(s^i) 8: else if op is FILTER then 9: \u015d^i \u2190 EXTRACTQUESTION(s^i) 10: ans_tmp \u2190 ANSWER(\u015d^i) 11: ...",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Annotation Consistency",

"sec_num": "4.3"

},
|
{ |
|
"text": "A natural setup for QDMR is in answering complex questions that require multiple reasoning steps. We compare models that exploit question decompositions to baselines that do not. We use the open-domain QA (''full-wiki\") setting of the HOTPOTQA dataset : Given a question, the QA model retrieves the relevant Wikipedia paragraphs and answers the question using these paragraphs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "QDMR for Open-domain QA", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We compare BREAKRC, a model that utilizes question decomposition to BERTQA, a standard QA model, based on BERT , and present COMBINED, an approach that enjoys the benefits of both models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "BREAKRC Algorithm 1 describes the BREAKRC model, which uses high-level QDMR structures for answering open-domain multi-hop questions. We assume access to an Information Retrieval (IR) model and an RC model, and denote by ANSWER(\u2022) a function that takes a question as input, runs the IR model to obtain paragraphs, and then feeds those paragraphs as context for an RC model that returns a distribution over answers. Given an input QDMR, s = s 1 , ..., s n , iterate over s step-by-step and perform the following. First, we extract the operation (line 4) and the previous steps referenced by s i (line 5). Then, we compute the answer to s i conditioned on the extracted operator. For SELECT steps, we simply run the ANSWER(\u2022) function. For PROJECT steps, we substitute the reference to the previous step in s i with its already computed answer, and then run ANSWER(\u2022). For FILTER steps, 6 we use a simple rule to extract a ''normalized question'',\u015d i from s i and get an intermediate answer ans tmp with ANSWER(\u015d i ). We then ''intersect'' ans tmp with the referenced answer by multiplying the probabilities provided by the RC model and normalizing. For COMPARISON steps, we compare, with a discrete operation, the numbers returned by the referenced steps. The final answer is the highest probability answer of step s n .", |
|
"cite_spans": [ |
|
{ |
|
"start": 885, |
|
"end": 886, |
|
"text": "6", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "5.1" |
|
}, |
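A condensed Python rendering of this procedure (a sketch under stated assumptions: `retrieve`, `rc_model`, and `op_type` stand in for the TF-IDF retriever, the SQuAD-trained RC model, and the operator classifier; COMPARISON and the remaining operators are omitted):

```python
import re

def answer(question, retrieve, rc_model):
    """ANSWER(.): retrieve paragraphs, then return {answer: probability}."""
    return rc_model(question, retrieve(question))

def break_rc(steps, op_type, retrieve, rc_model):
    answers = []  # one answer distribution per QDMR step
    for step in steps:
        op = op_type(step)
        if op == "SELECT":
            dist = answer(step, retrieve, rc_model)
        elif op == "PROJECT":
            # substitute the referenced answer into the step text
            ref = int(re.search(r"#(\d+)", step).group(1))
            best = max(answers[ref - 1], key=answers[ref - 1].get)
            dist = answer(step.replace(f"#{ref}", best), retrieve, rc_model)
        elif op == "FILTER":
            ref = int(re.search(r"#(\d+)", step).group(1))
            tmp = answer(step.replace(f"#{ref}", "").strip(), retrieve, rc_model)
            # "intersect": multiply probabilities with the referenced answer
            dist = {a: p * tmp.get(a, 0.0) for a, p in answers[ref - 1].items()}
            z = sum(dist.values()) or 1.0
            dist = {a: p / z for a, p in dist.items()}
        else:
            raise NotImplementedError(op)  # COMPARISON etc. use discrete ops
        answers.append(dist)
    return max(answers[-1], key=answers[-1].get)  # answer of the last step
```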
|
{ |
|
"text": "As our IR model we use bigram TF-IDF, proposed by Chen et al. (2017) . Because the RC model is run on single-hop questions, we use the BERTbased RC model from Min et al. (2019b) , trained solely on SQuAD (Rajpurkar et al., 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 50, |
|
"end": 68, |
|
"text": "Chen et al. (2017)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 159, |
|
"end": 177, |
|
"text": "Min et al. (2019b)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 204, |
|
"end": 228, |
|
"text": "(Rajpurkar et al., 2016)", |
|
"ref_id": "BIBREF44" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "BERTQA Baseline As BREAKRC exploits question decompositions, we compare it with a model that does not. BERTQA receives as input the original natural language question, x. It uses the same IR model as BREAKRC to retrieve paragraphs for x. For a fair comparison, we set its number of retrieved paragraphs such that it is identical to BREAKRC (namely, 10 paragraphs for each QDMR step that involves IR). Similar to BREAKRC, retrieved paragraphs are fed to a pretrained BERT-based RC model (Min et al., 2019b) to answer x. In contrast to BREAKRC, that is trained on SQUAD, BERTQA is trained on the target dataset (HOTPOTQA), giving it an advantage over BREAKRC.", |
|
"cite_spans": [ |
|
{ |
|
"start": 486, |
|
"end": 505, |
|
"text": "(Min et al., 2019b)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "A COMBINED Approach Last, we present an approach that combines the strengths of BREAKRC and BERTQA. In this approach, we use the QDMR decomposition to improve retrieval only. Given a question x and its QDMR s, we run BREAKRC on s, but in addition to storing answers, we also store all the paragraphs retrieved by the IR model. We then run BERTQA on the question x and the top-10 paragraphs retrieved by BREAKRC, sorted by their IR ranking. This approach resembles that of Qi et al. (2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 472, |
|
"end": 488, |
|
"text": "Qi et al. (2019)", |
|
"ref_id": "BIBREF43" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "5.1" |
|
}, |
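Schematically (a sketch; `run_breakrc_collecting_paragraphs` and `bert_qa` are assumed helpers standing in for the components described above):

```python
def combined(question, qdmr_steps, run_breakrc_collecting_paragraphs, bert_qa, k=10):
    """Use the decomposition only for retrieval: gather every paragraph
    BREAKRC retrieved (with its IR score), keep the top-k by IR ranking,
    and let the end-to-end RC model answer the original question."""
    scored = run_breakrc_collecting_paragraphs(qdmr_steps)  # [(score, paragraph)]
    top_k = [p for _, p in sorted(scored, key=lambda x: x[0], reverse=True)[:k]]
    return bert_qa(question, top_k)
```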
|
{ |
|
"text": "The advantage of COMBINED is that we do not need to develop an answering procedure for each QDMR operator separately, which involves dif- 6 INTERSECTION steps are handled in a manner similar to FILTER, but we omit the exact description for brevity. Table 6 : Open-domain QA results on HOTPOTQA.", |
|
"cite_spans": [ |
|
{ |
|
"start": 138, |
|
"end": 139, |
|
"text": "6", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 249, |
|
"end": 256, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "ferent discrete operations such as comparison and intersection. Instead, we use BREAKRC to retrieve contexts, and an end-to-end approach to learn how to answer the question directly. This can often handle operators not implemented in BREAKRC, like BOOLEAN and UNION.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "DATASET To evaluate our models, we use all 2,765 QDMR annotated examples of the HOTPOTQA development set found in BREAK. PROJECT and COMPARISON type questions account for 48% and 7% of examples respectively. Table 6 shows model performance on HOTPOTQA. We report EM and F 1 using the official HOTPOTQA evaluation script. IR measures the percentage of examples in which the IR model successfully retrieved both of the ''gold paragraphs'' necessary for answering the multi-hop question. To assess the potential utility of QDMR, we report results for BREAKRC G , which uses gold QDMRs, and BREAKRC P , which uses QDMRs predicted by a COPYNET parser ( \u00a77.2).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 208, |
|
"end": 215, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Retrieving paragraphs with decomposed questions substantially improves the IR metric from 46.3 to 59.2 (BREAKRC G ), or 52.5 (BREAKRC P ). This leads to substantial gains in EM and F 1 for COMBINED G (43.3 to 52.4) and COMBINED P (43.3 to 49.3). The EM and F 1 of BREAKRC G are only slightly higher than BERTQA because BREAKRC does not handle certain operators, such as BOOLEAN steps (9.4% of the examples).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "The majority of questions in HOTPOTQA combine SELECT operations with either PROJECT (also called ''bridge'' questions), COMPARISON, or FILTER. PROJECT and COMPARISON questions ( Figure 6 ) were shown to be less susceptible ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 178, |
|
"end": 186, |
|
"text": "Figure 6", |
|
"ref_id": "FIGREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "PROJECT COMPARISON EM F 1 IR EM F 1 IR BERTQA 22.8 31.0 31.6 42.9 51.7 75.8 BREAKRC P 25.4 33.7 52.9 34.7 50.4 68.9 BREAKRC G 32.2 41.9 59.8 44.5 57.6 78.0 to reasoning shortcuts, i.e. they necessitate multistep reasoning (Chen and Durrett, 2019; Jiang and Bansal, 2019; Min et al., 2019a) . In Table 7 we report BREAKRC results on these question types, where it notably outperforms BERTQA.", |
|
"cite_spans": [ |
|
{ |
|
"start": 222, |
|
"end": 246, |
|
"text": "(Chen and Durrett, 2019;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 247, |
|
"end": 270, |
|
"text": "Jiang and Bansal, 2019;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 271, |
|
"end": 289, |
|
"text": "Min et al., 2019a)", |
|
"ref_id": "BIBREF38" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 295, |
|
"end": 302, |
|
"text": "Table 7", |
|
"ref_id": "TABREF11" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Ablations In BREAKRC, multiple IR queries are issued, one at each step. To examine whether these multiple queries were the cause for performance gains, we built IR-NP, a model that issues multiple IR queries, one for each noun phrase in the question. Similar to COMBINED, the question and union of retrieved paragraphs are given as input to BERTQA. We observe that COMBINED substantially outperforms IR-NP, indicating that the structure of QDMR, rather than multiple IR queries, has led to improved performance. 7 To test whether QDMR is better than a simple rule-based decomposition algorithm, we developed a model that decomposes a question by applying a set of predefined rules over the dependency tree of the question (full details in \u00a77.2). COMBINED and BREAKRC were compared to COMBINED R and BREAKRC R , which use the rulebased decompositions. We observe that QDMR lead to substantially higher performance when compared to the rule-based decompositions. 7 Issuing an IR query over each ''content word'' in the question, instead of each noun phrase, led to poor results.", |
|
"cite_spans": [ |
|
{ |
|
"start": 512, |
|
"end": 513, |
|
"text": "7", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 961, |
|
"end": 962, |
|
"text": "7", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "As QDMR structures can be easily annotated at scale, a natural question is how far are they from fully executable queries (known to be expensive to annotate). As shown in \u00a74.3, QDMRs can be mapped to pseudo-logical forms with high precision (93.1%) by extracting formal operators and arguments from their steps. The pseudo-logical form differs from an executable query in the lack of grounding of its arguments (entities and relations) in KB constants. This stems from the design of QDMR as a domain-agnostic meaning representation ( \u00a72). QDMR abstracts away from a concrete KB schema by assuming an underlying ''idealized'' KB, which contains all of its arguments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "QDMR for Semantic Parsing", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Thus, QDMR can be viewed as an intermediate representation between a natural language question and an executable query. Such intermediate representations have already been discussed in prior work on semantic parsing. Kwiatkowski et al. (2013) and Choi et al. (2015) used underspecified logical forms as an intermediate representation. Guo et al. (2019) proposed a two-stage approach, separating between learning an intermediate text-to-SQL representation and the actual mapping to schema items. Works in the database community have particularly targeted the mapping of intermediate query representations into DB grounded queries, using schema mapping and join path inference (Androutsopoulos et al., 1995; Baik et al., 2019) . We argue that QDMR can be used as an easy-to-annotate representation in such semantic parsers, bridging between natural language and full logical forms.", |
|
"cite_spans": [ |
|
{ |
|
"start": 217, |
|
"end": 242, |
|
"text": "Kwiatkowski et al. (2013)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 247, |
|
"end": 265, |
|
"text": "Choi et al. (2015)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 335, |
|
"end": 352, |
|
"text": "Guo et al. (2019)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 675, |
|
"end": 705, |
|
"text": "(Androutsopoulos et al., 1995;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 706, |
|
"end": 724, |
|
"text": "Baik et al., 2019)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "QDMR for Semantic Parsing", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We now present evaluation metrics and models for mapping questions into QDMR structures.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "QDMR Parsing", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Task Definition Given a question x we wish to map it to its QDMR steps, s = s 1 , ... , s n . One can frame this as a sequence-to-sequence problem where x is mapped to a string representing its decomposition. We add a special separating token SEP , and define the target string to be s 1 1 , ... , s 1 m 1 , SEP , s 2 1 , ... , s 2 m 2 , SEP , ... , s n m n , where m 1 , ... , m n are the number of tokens in each decomposition step. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "QDMR Parsing", |
|
"sec_num": "7" |
|
}, |
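Concretely, the conversion between a step list and the seq2seq target string looks as follows (a sketch; the literal separator symbol used in training is an implementation detail, so a placeholder token is shown):

```python
SEP = "@@SEP@@"  # placeholder separator token (assumed, not the actual symbol)

def to_target(steps):
    """Join QDMR steps into a single target string with SEP between steps."""
    return f" {SEP} ".join(steps)

def from_target(target):
    """Recover the step list from a decoded target string."""
    return [s.strip() for s in target.split(SEP)]

steps = ["return cubes", "return #1 that are blue", "return number of #2"]
target = to_target(steps)
assert from_target(target) == steps
print(target)
```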
|
{ |
|
"text": "We wish to assess the quality of a predicted QDMR,\u015d to a gold standard, s. Figure 7 lists various properties by which question decompositions may differ, such as granularity (e.g., steps 1-3 of decomposition 1 are merged into the first step of decomposition 2), ordering (e.g., the last two steps are swapped) and wording (e.g., using ''from '' instead of ''on'') . While such differences do not affect the overall semantics, the second decomposition can be further decomposed. To measure such variations, we introduce two types of evaluation metrics. Sequence-based metrics treat the decomposition as a sequence of tokens, applying standard text generation metrics. As such metrics ignore the QDMR graph structure, we also use graph-based metrics that compare the predicted graph G\u015d to the gold QDMR graph G s (see \u00a72).", |
|
"cite_spans": [ |
|
{ |
|
"start": 342, |
|
"end": 363, |
|
"text": "'' instead of ''on'')", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 75, |
|
"end": 83, |
|
"text": "Figure 7", |
|
"ref_id": "FIGREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "7.1" |
|
}, |
|
{ |
|
"text": "Sequence-based scores, where higher values are better, are denoted by \u21d1. Graph-based scores, where lower values are better, are denoted by \u21d3.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "7.1" |
|
}, |
|
{ |
|
"text": "\u2022 Exact Match \u21d1: Measures exact match between s and\u015d, either 0 or 1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "7.1" |
|
}, |
|
{ |
|
"text": "\u2022 SARI \u21d1 (Xu et al., 2016) : SARI is commonly used in tasks such as text simplification. Given s, we consider the sets of added, deleted, and kept n-grams when mapping the question x to s. We compute these three sets for both s and\u015d using the standard of up to 4-grams, then average (a) the F 1 for added n-grams between s and\u015d, (b) the F 1 for kept n-grams, and (c) the precision for the deleted n-grams.", |
|
"cite_spans": [ |
|
{ |
|
"start": 9, |
|
"end": 26, |
|
"text": "(Xu et al., 2016)", |
|
"ref_id": "BIBREF49" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "7.1" |
|
}, |
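The following is a minimal single-reference sketch of the computation described above. The official SARI of Xu et al. (2016) supports multiple references and differs in some details; this version only illustrates the add/keep/delete decomposition over n-grams of length up to 4.

```python
from collections import Counter

def ngrams(tokens, n):
    return Counter(tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1))

def f1(p, r):
    return 2 * p * r / (p + r) if p + r else 0.0

def sari(source, reference, prediction, max_n=4):
    """Average of add-F1, keep-F1, and deletion precision over 1..4-grams."""
    scores = []
    for n in range(1, max_n + 1):
        src, ref, pred = (ngrams(t.split(), n) for t in (source, reference, prediction))
        add_ref, add_pred = ref - src, pred - src     # n-grams added to the source
        keep_ref, keep_pred = ref & src, pred & src   # n-grams kept from the source
        del_ref, del_pred = src - ref, src - pred     # n-grams deleted from the source

        def pr(sys, gold):
            tp = sum((sys & gold).values())
            p = tp / sum(sys.values()) if sys else 0.0
            r = tp / sum(gold.values()) if gold else 0.0
            return p, r

        add_p, add_r = pr(add_pred, add_ref)
        keep_p, keep_r = pr(keep_pred, keep_ref)
        del_p, _ = pr(del_pred, del_ref)
        scores.append((f1(add_p, add_r) + f1(keep_p, keep_r) + del_p) / 3)
    return sum(scores) / len(scores)

q = "how many objects besides the cylinders"
print(round(sari(q, "objects @@SEP@@ cylinders @@SEP@@ #1 besides #2",
                    "objects @@SEP@@ cylinders @@SEP@@ number of #1"), 3))
```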
|
{ |
|
"text": "\u2022 Graph Edit Distance (GED) \u21d3: A graph edit path is a sequence of node and edge edit operations (addition, deletion, and substitution), where each operation has a predefined cost. GED computes the minimal-cost graph edit path required for transitioning from G s to G\u015d (and vice versa), normalized by max(|G s |, |G\u015d|). Operation costs are 1 for insertion and deletion of nodes and edges. The substitution cost of two nodes u, v is set to be 1\u2212Align (u, v) , where Align (u, v) is the ratio of aligned tokens between these steps.", |
|
"cite_spans": [ |
|
{ |
|
"start": 449, |
|
"end": 455, |
|
"text": "(u, v)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 470, |
|
"end": 476, |
|
"text": "(u, v)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "7.1" |
|
}, |
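A sketch of this metric using networkx is shown below. Two details are assumptions, since the paper does not spell them out: the token alignment is taken to be a simple bag-of-words overlap, and the graph size |G| is taken to be nodes plus edges. Exact GED is exponential in the worst case, so this is practical only for small graphs.

```python
import networkx as nx

def align_ratio(u_text, v_text):
    # Assumed alignment: bag-of-words overlap between two steps.
    a, b = set(u_text.split()), set(v_text.split())
    return len(a & b) / max(len(a | b), 1)

def normalized_ged(gs: nx.DiGraph, gp: nx.DiGraph):
    """Minimal-cost edit path between gold and predicted QDMR graphs,
    normalized by the size of the larger graph. Nodes are assumed to carry
    a 'text' attribute holding the step's tokens."""
    cost = nx.graph_edit_distance(
        gs, gp,
        node_subst_cost=lambda u, v: 1 - align_ratio(u["text"], v["text"]),
        node_del_cost=lambda u: 1, node_ins_cost=lambda v: 1,
        edge_del_cost=lambda e: 1, edge_ins_cost=lambda e: 1,
    )
    size = max(gs.number_of_nodes() + gs.number_of_edges(),
               gp.number_of_nodes() + gp.number_of_edges())
    return cost / max(size, 1)

gs = nx.DiGraph(); gs.add_node(1, text="objects"); gs.add_node(2, text="number of #1"); gs.add_edge(1, 2)
gp = nx.DiGraph(); gp.add_node(1, text="objects"); gp.add_node(2, text="count of #1"); gp.add_edge(1, 2)
print(normalized_ged(gs, gp))
```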
|
{ |
|
"text": "\u2022 GED+ \u21d3: Comparing the QDMR graphs in Figure 8 , we consider the splitting and merging of graph nodes. We implement GED+, a variant of GED with additional operations to merge (split) a set of nodes (node), based on the A* algorithm (Hart et al., 1968 ). 8", |
|
"cite_spans": [ |
|
{ |
|
"start": 233, |
|
"end": 251, |
|
"text": "(Hart et al., 1968", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 39, |
|
"end": 47, |
|
"text": "Figure 8", |
|
"ref_id": "FIGREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "7.1" |
|
}, |
|
{ |
|
"text": "We present models for QDMR parsing, built over AllenNLP (Gardner et al., 2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 56, |
|
"end": 78, |
|
"text": "(Gardner et al., 2017)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "QDMR Parsing Models", |
|
"sec_num": "7.2" |
|
}, |
|
{ |
|
"text": "\u2022 COPY: A model that copies the input question x, without introducing any modifications.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "QDMR Parsing Models", |
|
"sec_num": "7.2" |
|
}, |
|
{ |
|
"text": "\u2022 RULEBASED: We defined 12 decomposition rules, to be applied over the dependency tree of the question, augmented with coreference relations. A rule is a regular expression over the question dependency tree, which invokes a decomposition operation when matched (Table 8 ). For example, the rule for relative clauses (relcl) breaks the question at the relative pronoun ''that'', while adding a reference to the preceding part of the sentence. A full decomposition is obtained by recursively applying the rules until no rule is matched.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 261, |
|
"end": 269, |
|
"text": "(Table 8", |
|
"ref_id": "TABREF12" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "QDMR Parsing Models", |
|
"sec_num": "7.2" |
|
}, |
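Below is an illustrative sketch of a single rule in this spirit, written with spaCy's dependency labels; it assumes the en_core_web_sm model is installed, covers only a simplified relcl case, and is not the paper's actual rule set.

```python
import spacy

nlp = spacy.load("en_core_web_sm")  # assumed available

def relcl_rule(question: str):
    """Break a question at a relative clause headed by ''that'',
    replacing the modified noun phrase with a reference to step #1."""
    doc = nlp(question)
    for tok in doc:
        # tok is the verb of a relative clause, e.g. "has" in
        # "the claim that has the largest total settlement amount"
        if tok.dep_ == "relcl":
            that = next((c for c in tok.children if c.lower_ == "that"), None)
            if that is None:
                continue
            head = tok.head  # the noun the clause modifies
            step1 = doc[head.left_edge.i : head.i + 1].text          # "the claim"
            step2 = "#1 " + doc[that.i + 1 : tok.right_edge.i + 1].text
            return [step1, step2]
    return [question]  # no rule matched

print(relcl_rule("the claim that has the largest total settlement amount"))
# expected (up to parser behavior): ['the claim', '#1 has the largest total settlement amount']
```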
|
{ |
|
"text": "\u2022 SEQ2SEQ: A sequence-to-sequence neural model with a 5-layer LSTM encoder and attention at decoding time. \u2022 S2SDYNAMIC: SEQ2SEQ with a dynamic output vocabulary restricted to the closed set of tokens L x available to crowd-workers (see \u00a73).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "QDMR Parsing Models", |
|
"sec_num": "7.2" |
|
}, |
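As an illustration of the dynamic-vocabulary restriction, the sketch below masks decoder logits outside a per-question allowed set; the actual S2SDYNAMIC implementation may realize this differently.

```python
import torch

def restrict_logits(logits: torch.Tensor, allowed_ids) -> torch.Tensor:
    """Set every logit outside the closed lexicon L_x to -inf,
    so the softmax only distributes mass over allowed output tokens."""
    mask = torch.full_like(logits, float("-inf"))
    mask[..., torch.tensor(allowed_ids)] = 0.0
    return logits + mask

# allowed_ids would be the ids of the question's tokens plus the fixed
# function words shown to crowd-workers (an assumption for illustration).
logits = torch.randn(2, 10)                       # (batch, vocab)
probs = restrict_logits(logits, [0, 3, 7]).softmax(dim=-1)
print(probs)                                      # mass only on ids 0, 3, 7
```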
|
{ |
|
"text": "\u2022 COPYNET: SEQ2SEQ with an added copy mechanism that allows copying tokens from the input sequence (Gu et al., 2016) . Table 9 presents model performance on BREAK. Neural models outperform the RULEBASED baseline and perform reasonably well, with COPYNET obtaining the best scores across all metrics. This can be attributed to most of the tokens in a QDMR parse being copied from the original question.", |
|
"cite_spans": [ |
|
{ |
|
"start": 99, |
|
"end": 116, |
|
"text": "(Gu et al., 2016)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 119, |
|
"end": 126, |
|
"text": "Table 9", |
|
"ref_id": "TABREF14" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "QDMR Parsing Models", |
|
"sec_num": "7.2" |
|
}, |
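The sketch below illustrates the copy idea with a pointer-generator-style mixture; CopyNet (Gu et al., 2016) computes its copy scores somewhat differently, so this shows the mechanism rather than the exact model used here.

```python
import torch

def copy_mixture(p_vocab, attn, src_token_ids, p_gen):
    """Mix a generation distribution with attention mass copied from the source.

    p_vocab:       (batch, vocab)   decoder's generation distribution
    attn:          (batch, src_len) attention over source tokens
    src_token_ids: (batch, src_len) vocabulary id of each source token
    p_gen:         (batch, 1)       probability of generating vs. copying
    """
    p_copy = torch.zeros_like(p_vocab)
    # Route each source position's attention weight to its token id.
    p_copy.scatter_add_(1, src_token_ids, attn)
    return p_gen * p_vocab + (1.0 - p_gen) * p_copy

# Toy usage: two source tokens, vocabulary of 5.
p_vocab = torch.softmax(torch.randn(1, 5), dim=-1)
attn = torch.tensor([[0.7, 0.3]])
src_ids = torch.tensor([[2, 4]])
print(copy_mixture(p_vocab, attn, src_ids, p_gen=torch.tensor([[0.5]])))
```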
|
{ |
|
"text": "Error Analysis To judge the quality of predicted QDMRs we sampled 100 predictions of COPYNET (Table 10) half of them being high-level QDMRs. For standard QDMR, 24% of the sampled predictions were an exact match, with an additional 30% being fully decomposed and semantically equivalent to the gold decompositions. For example, in the first row of Table 10 , the gold decomposition first discards the number of cylinders, then counts the remaining objects. Instead, COPYNET opted to count both groups, then subtract the number of cylinders from the number of objects. This illustrates how different QDMRs may be equivalent.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 93, |
|
"end": 103, |
|
"text": "(Table 10)", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 347, |
|
"end": 355, |
|
"text": "Table 10", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "7.3" |
|
}, |
|
{ |
|
"text": "For high-level examples (from RC datasets), as questions are often less structured, they require a deeper semantic understating from the decomposition model. Only 8% of the predictions were an exact match, with an additional 46% being semantically equivalent to the gold. The remaining 46% were of erroneous predictions (see Table 10 ).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 325, |
|
"end": 333, |
|
"text": "Table 10", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "7.3" |
|
}, |
|
{ |
|
"text": "Question Decomposition Recent work on QA through question decomposition has focused mostly on single modalities (Gupta and Lewis, 2018; Guo et al., 2019; Min et al., 2019b) . QA using neural modular networks has been suggested for both KBs and images by Andreas et al. (2016) and Hu et al. (2017) . Question decomposition over text was proposed by Talmor and Berant (2018) , however over a much more limited set of questions than in BREAK. Iyyer et al. (2017) have also decomposed questions to create a ''sequential question answering'' task. Their annotators viewed a web table and performed actions over it to retrieve the cells that constituted the answer. Conversely, we provided annotators only with the question, as QDMR is agnostic to the original context.", |
|
"cite_spans": [ |
|
{ |
|
"start": 112, |
|
"end": 135, |
|
"text": "(Gupta and Lewis, 2018;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 136, |
|
"end": 153, |
|
"text": "Guo et al., 2019;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 154, |
|
"end": 172, |
|
"text": "Min et al., 2019b)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 254, |
|
"end": 275, |
|
"text": "Andreas et al. (2016)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 280, |
|
"end": 296, |
|
"text": "Hu et al. (2017)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 348, |
|
"end": 372, |
|
"text": "Talmor and Berant (2018)", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 440, |
|
"end": 459, |
|
"text": "Iyyer et al. (2017)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "An opposite annotation cycle to ours was presented in Cheng et al. (2018) . The authors generate sequences of simple questions which crowdworkers paraphrase into a compositional question. Questions in BREAK are composed by humans, and are then decomposed to QDMR.", |
|
"cite_spans": [ |
|
{ |
|
"start": 54, |
|
"end": 73, |
|
"text": "Cheng et al. (2018)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Labeling corpora with a semantic formalism has often been reserved for expert annotators (Dahl et al., 1994; Zelle and Mooney, 1996; Abend and Rappoport, 2013; Yu et al., 2018) . Recent work has focused on cheaply eliciting quality annotations from nonexperts through crowdsourcing (He et al., 2016; Iyer et al., 2017; . FitzGerald et al. (2018) facilitated non-expert annotation by introducing a formalism expressed in natural language for semantic-role-labeling. This mirrors QDMR, as both are expressed in natural language.", |
|
"cite_spans": [ |
|
{ |
|
"start": 89, |
|
"end": 108, |
|
"text": "(Dahl et al., 1994;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 109, |
|
"end": 132, |
|
"text": "Zelle and Mooney, 1996;", |
|
"ref_id": "BIBREF53" |
|
}, |
|
{ |
|
"start": 133, |
|
"end": 159, |
|
"text": "Abend and Rappoport, 2013;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 160, |
|
"end": 176, |
|
"text": "Yu et al., 2018)", |
|
"ref_id": "BIBREF52" |
|
}, |
|
{ |
|
"start": 282, |
|
"end": 299, |
|
"text": "(He et al., 2016;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 300, |
|
"end": 318, |
|
"text": "Iyer et al., 2017;", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Formalism Annotation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Relation to Other Formalisms QDMR is related to Dependency-based Compositional Semantics (Liang et al., 2013) , as both focus on question representations. However, QDMR is designed (1) objects; (2) cylinders; (3) #1 besides #2; (4) number of #3.", |
|
"cite_spans": [ |
|
{ |
|
"start": 89, |
|
"end": 109, |
|
"text": "(Liang et al., 2013)", |
|
"ref_id": "BIBREF36" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Formalism Annotation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "(1) objects; (2) cylinders; (3) number of #1; (4) number of #2; (5) difference of #3 and #4. sem. equiv. (30%) ''Where is the youngest teacher from?''", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Formalism Annotation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "(1) teachers;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Formalism Annotation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "(2) the youngest of #1;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Formalism Annotation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "(3) where is #2 from.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Formalism Annotation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "(1) youngest teacher;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Formalism Annotation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "(2) where is #1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Formalism Annotation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "(1) company that Kyle York is the Chief Strategy Officer of; (2) corporation that acquired #1 in 2016.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "incorrect (46%) ''Kyle York is the Chief Strategy Officer of a company acquired by what corporation in 2016?''", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "(1) company that Kyle York is the Chief Strategy Officer of; (2) corporation in 2016 that #1 was acquired by. (1) MASH star that Dayton 's Devils had a cameo from;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "incorrect (46%) ''Kyle York is the Chief Strategy Officer of a company acquired by what corporation in 2016?''", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "(2) role that #1 played on the show.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "incorrect (46%) ''Kyle York is the Chief Strategy Officer of a company acquired by what corporation in 2016?''", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "(1) the MASH that Dayton 's Devils had a cameo;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "incorrect (46%) ''Kyle York is the Chief Strategy Officer of a company acquired by what corporation in 2016?''", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "(2) what role on the show star of #1 played. incorrect (46%) to facilitate annotations, while Dependencybased Compositional Semantics is centered on paralleling syntax. Domain-independent intermediate representations for semantic parsers were proposed by Kwiatkowski et al. (2013) and Reddy et al. (2016) . As there is no consensus on the ideal meaning representation for semantic parsing, representations are often chosen based on the particular execution setup: SQL is used for relational databases (Yu et al., 2018) , SPARQL for graph KBs (Yih et al., 2016) , while other ad-hoc languages are used based on the task at hand. We frame QDMR as an easy-to-annotate formalism that can be potentially converted to other representations, depending on the task. Last, AMR (Banarescu et al., 2013 ) is a meaning representation for sentences. Instead of representing general language, QDMR represents questions, which are important for QA systems, and for probing models for reasoning.", |
|
"cite_spans": [ |
|
{ |
|
"start": 255, |
|
"end": 280, |
|
"text": "Kwiatkowski et al. (2013)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 285, |
|
"end": 304, |
|
"text": "Reddy et al. (2016)", |
|
"ref_id": "BIBREF45" |
|
}, |
|
{ |
|
"start": 501, |
|
"end": 518, |
|
"text": "(Yu et al., 2018)", |
|
"ref_id": "BIBREF52" |
|
}, |
|
{ |
|
"start": 538, |
|
"end": 560, |
|
"text": "KBs (Yih et al., 2016)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 768, |
|
"end": 791, |
|
"text": "(Banarescu et al., 2013", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "incorrect (46%) ''Kyle York is the Chief Strategy Officer of a company acquired by what corporation in 2016?''", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this paper, we presented a formalism for question understanding. We have shown it is possible to train crowd-workers to produce such representations with high quality at scale, and created BREAK, a benchmark for question decomposition with over 83K decompositions of questions from 10 datasets and 3 modalities (DB, images, text). We presented the utility of QDMR for both open-domain question answering and semantic parsing, and constructed a QDMR parser with reasonable performance. QDMR proposes a promising direction for modeling question understanding, which we believe will be useful for multiple tasks in which reasoning is probed through questions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "Regarding the three merged operators of high-level QDMRs ( \u00a72), the first two operators are treated as SELECT, while the third is considered a FILTER.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For high-level QDMRs, the merged operators ( \u00a72) are considered to be fully decomposed.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Because of its exponential worst-case complexity, we compute GED+ only for graphs with up to 5 nodes, covering 75.2% of the examples in the development set of BREAK.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work was completed in partial fulfillment for the PhD of Tomer Wolfson. This research was partially supported by The Israel Science Foundation (grants 942/16 and 978/17), and The Yandex Initiative for Machine Learning and the European Research Council (ERC) under the European Union Horizons 2020 research and innovation programme (grant ERC DELPHI 802800).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Example be-root How many objects smaller than the matte object are silver [objects smaller than the matte object, How many #1 silver] be-auxpass Find the average rating star for each movie that are not reviewed by Brittany Harris.[Brittany Harris, the average rating star for each movie that not reviewed by #1] do-subj Year did the team with Baltimore Fight Find the first names of students studying in 108.[students, #1 studying in 108, first names of #2] sent-coref Find the claim that has the largest total settlement amount. Return the effective date of the claim.[the claim that has the largest total settlement amount, the effective date of #1]", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 353, |
|
"end": 358, |
|
"text": "Fight", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Structure", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Universal conceptual cognitive annotation (UCCA)", |
|
"authors": [ |
|
{ |
|
"first": "Omri", |
|
"middle": [], |
|
"last": "Abend", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ari", |
|
"middle": [], |
|
"last": "Rappoport", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Omri Abend and Ari Rappoport. 2013. Universal conceptual cognitive annotation (UCCA). In Association for Computational Linguistics (ACL).", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "ComQA: A community-sourced dataset for complex factoid question answering with paraphrase clusters", |
|
"authors": [ |
|
{ |
|
"first": "Abdalghani", |
|
"middle": [], |
|
"last": "Abujabal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rishiraj", |
|
"middle": [], |
|
"last": "Saha Roy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohamed", |
|
"middle": [], |
|
"last": "Yahya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gerhard", |
|
"middle": [], |
|
"last": "Weikum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "North American Association for Computational Linguistics (NAACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abdalghani Abujabal, Rishiraj Saha Roy, Mohamed Yahya, and Gerhard Weikum. 2019. ComQA: A community-sourced dataset for complex factoid question answering with para- phrase clusters. In North American Association for Computational Linguistics (NAACL).", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Learning to compose neural networks for question answering", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Andreas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcus", |
|
"middle": [], |
|
"last": "Rohrbach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trevor", |
|
"middle": [], |
|
"last": "Darrell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Human Language Technology and North American Association for Computational Linguistics (HLT/NAACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Andreas, Marcus Rohrbach, Trevor Darrell, and Dan Klein. 2016. Learning to compose neu- ral networks for question answering. In Human Language Technology and North American Association for Computational Linguistics (HLT/NAACL).", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Natural language interfaces to databases -an introduction", |
|
"authors": [ |
|
{ |
|
"first": "Ion", |
|
"middle": [], |
|
"last": "Androutsopoulos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graeme", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Ritchie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Thanisch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Journal of Natural Language Engineering", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "29--81", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ion Androutsopoulos, Graeme D. Ritchie, and Peter Thanisch. 1995. Natural language inter- faces to databases -an introduction. Journal of Natural Language Engineering, 1:29-81.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Vqa: Visual question answering", |
|
"authors": [ |
|
{ |
|
"first": "Lawrence", |
|
"middle": [], |
|
"last": "Zitnick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devi", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "International Conference on Computer Vision (ICCV)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2425--2433", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lawrence Zitnick, and Devi Parikh. 2015. Vqa: Visual question answering. In International Conference on Computer Vision (ICCV), pages 2425-2433.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Bridging the semantic gap with SQL query logs in natural language interfaces to databases", |
|
"authors": [ |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Baik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hosagrahar", |
|
"middle": [], |
|
"last": "Visvesvaraya Jagadish", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yunyao", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "IEEE 35th International Conference on Data Engineering (ICDE)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "374--385", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christopher Baik, Hosagrahar Visvesvaraya Jagadish, and Yunyao Li. 2019. Bridging the semantic gap with SQL query logs in natural language interfaces to databases. 2019 IEEE 35th International Conference on Data Engi- neering (ICDE), pages 374-385.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Abstract meaning representation for sembanking", |
|
"authors": [ |
|
{ |
|
"first": "Laura", |
|
"middle": [], |
|
"last": "Banarescu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Bonial", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shu", |
|
"middle": [], |
|
"last": "Cai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Madalina", |
|
"middle": [], |
|
"last": "Georgescu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kira", |
|
"middle": [], |
|
"last": "Griffitt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ulf", |
|
"middle": [], |
|
"last": "Hermjakob", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "7th Linguistic Annotation Workshop and Interoperability with Discourse", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Laura Banarescu, Claire Bonial, Shu Cai, Madalina Georgescu, Kira Griffitt, Ulf Hermjakob, Kevin Knight, Philipp Koehn, Martha Palmer, and Nathan Schneider. 2013. Abstract meaning representation for sembank- ing. In 7th Linguistic Annotation Workshop and Interoperability with Discourse.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "SEQUEL: A structured English query language", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Donald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raymond", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Chamberlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Boyce", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1974, |
|
"venue": "Proceedings of the 1974 ACM SIGFIDET (now SIGMOD) Workshop on Data Description, Access and Control", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "249--264", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Donald D. Chamberlin and Raymond F. Boyce. 1974. SEQUEL: A structured English query language. In Proceedings of the 1974 ACM SIGFIDET (now SIGMOD) Workshop on Data Description, Access and Control, pages 249-264. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Reading Wikipedia to answer open-domain questions", |
|
"authors": [ |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Fisch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bordes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Danqi Chen, Adam Fisch, Jason Weston, and Antoine Bordes. 2017. Reading Wikipedia to answer open-domain questions. In Association for Computational Linguistics (ACL).", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Understanding dataset design choices for multi-hop reasoning", |
|
"authors": [ |
|
{ |
|
"first": "Jifan", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Durrett", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jifan Chen and Greg Durrett. 2019. Understanding dataset design choices for multi-hop reasoning.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Association for Computational Linguistics (ACL)", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "In Association for Computational Linguistics (ACL).", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Building a neural semantic parser from a domain ontology", |
|
"authors": [ |
|
{ |
|
"first": "Jianpeng", |
|
"middle": [], |
|
"last": "Cheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Siva", |
|
"middle": [], |
|
"last": "Reddy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "ArXiv", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jianpeng Cheng, Siva Reddy, and Mirella Lapata. 2018. Building a neural semantic parser from a domain ontology. ArXiv, abs/1812.10037. Version 1.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Scalable semantic parsing with partial ontologies", |
|
"authors": [ |
|
{ |
|
"first": "Eunsol", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Kwiatkowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eunsol Choi, Tom Kwiatkowski, and Luke Zettlemoyer. 2015. Scalable semantic parsing with partial ontologies. In Association for Computational Linguistics (ACL).", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Driving semantic parsing from the world's response", |
|
"authors": [ |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Clarke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Goldwasser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [ |
|
"Roth" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Computational Natural Language Learning (CoNLL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "18--27", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James Clarke, Dan Goldwasser, Ming-Wei Chang, and Dan Roth. 2010. Driving semantic parsing from the world's response. In Compu- tational Natural Language Learning (CoNLL), pages 18-27.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "A relational model of data for large shared data banks", |
|
"authors": [ |
|
{ |
|
"first": "Edgar", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Codd", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1970, |
|
"venue": "Communications of the ACM", |
|
"volume": "13", |
|
"issue": "6", |
|
"pages": "377--387", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Edgar F. Codd. 1970. A relational model of data for large shared data banks. Communications of the ACM, 13(6):377-387.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Expanding the scope of the ATIS task: The ATIS-3 corpus", |
|
"authors": [ |
|
{ |
|
"first": "Deborah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Dahl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Madeleine", |
|
"middle": [], |
|
"last": "Bates", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Brown", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Fisher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kate", |
|
"middle": [], |
|
"last": "Hunicke-Smith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Pallett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christine", |
|
"middle": [], |
|
"last": "Pao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"I" |
|
], |
|
"last": "Rudnicky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elizabeth", |
|
"middle": [], |
|
"last": "Shriberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "Workshop on Human Language Technology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "43--48", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Deborah A. Dahl, Madeleine Bates, Michael Brown, William M. Fisher, Kate Hunicke- Smith, David S. Pallett, Christine Pao, Alexander I. Rudnicky, and Elizabeth Shriberg. 1994. Expanding the scope of the ATIS task: The ATIS-3 corpus. In Workshop on Human Language Technology, pages 43-48.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "North American Association for Computational Linguistics (NAACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understanding. In North American Association for Computational Linguistics (NAACL).", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "DROP: A reading comprehension benchmark requiring discrete reasoning over paragraphs", |
|
"authors": [ |
|
{ |
|
"first": "Dheeru", |
|
"middle": [], |
|
"last": "Dua", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yizhong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pradeep", |
|
"middle": [], |
|
"last": "Dasigi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gabriel", |
|
"middle": [], |
|
"last": "Stanovsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Human Language Technology and North American Association for Computational Linguistics (HLT/NAACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dheeru Dua, Yizhong Wang, Pradeep Dasigi, Gabriel Stanovsky, Sameer Singh, and Matt Gardner. 2019. DROP: A reading comprehen- sion benchmark requiring discrete reasoning over paragraphs. In Human Language Tech- nology and North American Association for Computational Linguistics (HLT/NAACL).", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Largescale QA-SRL parsing", |
|
"authors": [ |
|
{ |
|
"first": "Nicholas", |
|
"middle": [], |
|
"last": "Fitzgerald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luheng", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nicholas FitzGerald, Julian Michael, Luheng He, and Luke S. Zettlemoyer. 2018. Large- scale QA-SRL parsing. In Association for Computational Linguistics (ACL).", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "AllenNLP: A deep semantic natural language processing platform", |
|
"authors": [ |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joel", |
|
"middle": [], |
|
"last": "Grus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Neumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oyvind", |
|
"middle": [], |
|
"last": "Tafjord", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pradeep", |
|
"middle": [], |
|
"last": "Dasigi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nelson", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Schmitz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matt Gardner, Joel Grus, Mark Neumann, Oyvind Tafjord, Pradeep Dasigi, Nelson F. Liu, Matthew Peters, Michael Schmitz, and Luke S. Zettlemoyer. 2017. In AllenNLP: A deep semantic natural language processing platform, arXiv, abs/1803.07640v2.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Incorporating copying mechanism in sequence-to-sequence learning", |
|
"authors": [ |
|
{ |
|
"first": "Jiatao", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhengdong", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Victor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiatao Gu, Zhengdong Lu, Hang Li, and Victor O. K. Li. 2016. Incorporating copying mechanism in sequence-to-sequence learning. In Association for Computational Linguistics (ACL).", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Towards complex text-to-SQL in cross-domain database with intermediate representation", |
|
"authors": [ |
|
{ |
|
"first": "Jiaqi", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zecheng", |
|
"middle": [], |
|
"last": "Zhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yan", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yan", |
|
"middle": [], |
|
"last": "Xiao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian-Guang", |
|
"middle": [], |
|
"last": "Lou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ting", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dongmei", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiaqi Guo, Zecheng Zhan, Yan Gao, Yan Xiao, Jian-Guang Lou, Ting Liu, and Dongmei Zhang. 2019. Towards complex text-to-SQL in cross-domain database with intermediate rep- resentation. In Association for Computational Linguistics (ACL).", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Neural compositional denotational semantics for question answering", |
|
"authors": [ |
|
{ |
|
"first": "Nitish", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nitish Gupta and Mike Lewis. 2018. Neural com- positional denotational semantics for question answering. In Empirical Methods in Natural Language Processing (EMNLP).", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "A formal basis for the heuristic determination of minimum cost paths", |
|
"authors": [ |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Hart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nils", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Nilsson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bertram", |
|
"middle": [], |
|
"last": "Raphael", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1968, |
|
"venue": "IEEE Transactions on Systems Science and Cybernetics", |
|
"volume": "4", |
|
"issue": "2", |
|
"pages": "100--107", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter E. Hart, Nils J. Nilsson, and Bertram Raphael. 1968. A formal basis for the heuristic determination of minimum cost paths. IEEE Transactions on Systems Science and Cyber- netics, 4(2):100-107.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Human-in-the-loop parsing", |
|
"authors": [ |
|
{ |
|
"first": "Luheng", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Luheng He, Julian Michael, Mike Lewis, and Luke Zettlemoyer. 2016. Human-in-the-loop parsing. In Empirical Methods in Natural Language Processing (EMNLP).", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Learning to reason: End-to-end module networks for visual question answering", |
|
"authors": [ |
|
{ |
|
"first": "Ronghang", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Andreas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcus", |
|
"middle": [], |
|
"last": "Rohrbach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trevor", |
|
"middle": [], |
|
"last": "Darrell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kate", |
|
"middle": [], |
|
"last": "Saenko", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "International Conference on Computer Vision (ICCV)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ronghang Hu, Jacob Andreas, Marcus Rohrbach, Trevor Darrell, and Kate Saenko. 2017. Learn- ing to reason: End-to-end module networks for visual question answering. In International Conference on Computer Vision (ICCV).", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "GQA: A new dataset for real-world visual reasoning and compositional question answering", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Drew", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Hudson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Christopher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Drew A. Hudson and Christopher D. Manning. 2019. GQA: A new dataset for real-world visual reasoning and compositional question an- swering. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR).", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Learning a neural semantic parser from user feedback", |
|
"authors": [ |
|
{ |
|
"first": "Srini", |
|
"middle": [], |
|
"last": "Iyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ioannis", |
|
"middle": [], |
|
"last": "Konstas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alvin", |
|
"middle": [], |
|
"last": "Cheung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jayant", |
|
"middle": [], |
|
"last": "Krishnamurthy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Srini Iyer, Ioannis Konstas, Alvin Cheung, Jayant Krishnamurthy, and Luke Zettlemoyer. 2017. Learning a neural semantic parser from user feedback. In Association for Computational Linguistics (ACL).", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Search-based neural structured learning for sequential question answering", |
|
"authors": [ |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Iyyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yih", |
|
"middle": [], |
|
"last": "Wen-Tau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1821--1831", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohit Iyyer, Wen-tau Yih, and Ming-Wei Chang. 2017. Search-based neural structured learning for sequential question answering. In Pro- ceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1821-1831.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Avoiding reasoning shortcuts: Adversarial evaluation, training, and model development for multi-hop QA", |
|
"authors": [ |
|
{ |
|
"first": "Yichen", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yichen Jiang and Mohit Bansal. 2019. Avoiding reasoning shortcuts: Adversarial evaluation, training, and model development for multi-hop QA. In Association for Computational Lin- guistics (ACL).", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Clevr: A diagnostic dataset for compositional language and elementary visual reasoning", |
|
"authors": [ |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bharath", |
|
"middle": [], |
|
"last": "Hariharan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laurens", |
|
"middle": [], |
|
"last": "Van Der Maaten", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Fei-Fei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"Lawrence" |
|
], |
|
"last": "Zitnick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ross", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Girshick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Computer Vision and Pattern Recognition (CVPR)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Justin Johnson, Bharath Hariharan, Laurens van der Maaten, Li Fei-Fei, C. Lawrence Zitnick, and Ross B. Girshick. 2017. Clevr: A diagnostic dataset for compositional language and elementary visual reasoning. In Computer Vision and Pattern Recognition (CVPR).", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Scaling semantic parsers with on-the-fly ontology matching", |
|
"authors": [ |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Kwiatkowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eunsol", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Artzi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tom Kwiatkowski, Eunsol Choi, Yoav Artzi, and Luke Zettlemoyer. 2013. Scaling semantic parsers with on-the-fly ontology matching. In Empirical Methods in Natural Language Processing (EMNLP).", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Natural questions: A benchmark for question answering research", |
|
"authors": [ |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Kwiatkowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jennimaria", |
|
"middle": [], |
|
"last": "Palomaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Olivia", |
|
"middle": [], |
|
"last": "Redfield", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Collins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ankur", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Alberti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danielle", |
|
"middle": [], |
|
"last": "Epstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "7", |
|
"issue": "", |
|
"pages": "453--466", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tom Kwiatkowski, Jennimaria Palomaki, Olivia Redfield, Michael Collins, Ankur Parikh, Chris Alberti, Danielle Epstein, Illia Polosukhin, Jacob Devlin, Kenton Lee, et al. 2019. Natural questions: A benchmark for question answering research. Transactions of the Association for Computational Linguistics, 7:453-466.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "NaLIR: An interactive natural language interface for querying relational databases", |
|
"authors": [ |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hosagrahar", |
|
"middle": [], |
|
"last": "Visvesvaraya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jagadish", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "International Conference on Management of Data, SIGMOD", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fei Li and Hosagrahar Visvesvaraya Jagadish. 2014. NaLIR: An interactive natural language interface for querying relational databases. In International Conference on Management of Data, SIGMOD.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Schema-free SQL", |
|
"authors": [ |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tianyin", |
|
"middle": [], |
|
"last": "Pan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hosagrahar Visvesvaraya", |
|
"middle": [], |
|
"last": "Jagadish", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "International Conference on Management of Data, SIGMOD", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1051--1062", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fei Li, Tianyin Pan, and Hosagrahar Visvesvaraya Jagadish. 2014. Schema-free SQL. In Interna- tional Conference on Management of Data, SIGMOD, pages 1051-1062.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Learning dependency-based compositional semantics", |
|
"authors": [ |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [ |
|
"I" |
|
], |
|
"last": "Jordan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Computational Linguistics", |
|
"volume": "39", |
|
"issue": "", |
|
"pages": "389--446", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Percy Liang, Michael I. Jordan, and Dan Klein. 2013. Learning dependency-based composi- tional semantics. Computational Linguistics, 39:389-446.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Crowdsourcing question-answer meaning representations", |
|
"authors": [ |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gabriel", |
|
"middle": [], |
|
"last": "Stanovsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luheng", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "North American Association for Computational Linguistics (NAACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Julian Michael, Gabriel Stanovsky, Luheng He, Ido Dagan, and Luke Zettlemoyer. 2018. Crowdsourcing question-answer meaning rep- resentations. In North American Association for Computational Linguistics (NAACL).", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Compositional questions do not necessitate multi-hop reasoning", |
|
"authors": [ |
|
{ |
|
"first": "Sewon", |
|
"middle": [], |
|
"last": "Min", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Wallace", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hannaneh", |
|
"middle": [], |
|
"last": "Hajishirzi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sewon Min, Eric Wallace, Sameer Singh, Matt Gardner, Hannaneh Hajishirzi, and Luke Zettlemoyer. 2019a. Compositional questions do not necessitate multi-hop reasoning. In Asso- ciation for Computational Linguistics (ACL).", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Multihop reading comprehension through question decomposition and rescoring", |
|
"authors": [ |
|
{ |
|
"first": "Sewon", |
|
"middle": [], |
|
"last": "Min", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Zhong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hannaneh", |
|
"middle": [], |
|
"last": "Hajishirzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sewon Min, Victor Zhong, Luke Zettlemoyer, and Hannaneh Hajishirzi. 2019b. Multi- hop reading comprehension through question decomposition and rescoring. In Association for Computational Linguistics (ACL).", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Compositional semantic parsing on semi-structured tables", |
|
"authors": [ |
|
{ |
|
"first": "Panupong", |
|
"middle": [], |
|
"last": "Pasupat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Panupong Pasupat and Percy Liang. 2015. Com- positional semantic parsing on semi-structured tables. In Association for Computational Lin- guistics (ACL).", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "The principle of semantic compositionality", |
|
"authors": [ |
|
{ |
|
"first": "Francis Jeffry", |
|
"middle": [], |
|
"last": "Pelletier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "", |
|
"volume": "13", |
|
"issue": "", |
|
"pages": "11--24", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Francis Jeffry Pelletier. 1994. The principle of semantic compositionality. Topoi, 13(1):11-24.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Evaluation of spoken language systems: The ATIS domain", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Price", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1990, |
|
"venue": "Proceedings of the Third DARPA Speech and Natural Language Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "91--95", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "P. J. Price. 1990. Evaluation of spoken language systems: The ATIS domain. In Proceedings of the Third DARPA Speech and Natural Lan- guage Workshop, pages 91-95.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Answering complex open-domain questions through iterative query generation", |
|
"authors": [ |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Qi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaowen", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leo", |
|
"middle": [], |
|
"last": "Mehr", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zijian", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2590--2602", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peng Qi, Xiaowen Lin, Leo Mehr, Zijian Wang, and Christopher D. Manning. 2019. Answering complex open-domain questions through iter- ative query generation. In Empirical Methods in Natural Language Processing (EMNLP), pages 2590-2602. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "SQuAD: 100,000+ questions for machine comprehension of text", |
|
"authors": [ |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Rajpurkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Konstantin", |
|
"middle": [], |
|
"last": "Lopyrev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. 2016. SQuAD: 100,000+ questions for machine comprehen- sion of text. In Empirical Methods in Natural Language Processing (EMNLP).", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "Transforming dependency structures to logical forms for semantic parsing", |
|
"authors": [ |
|
{ |
|
"first": "Siva", |
|
"middle": [], |
|
"last": "Reddy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oscar", |
|
"middle": [], |
|
"last": "T\u00e4ckstr\u00f6m", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Collins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Kwiatkowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dipanjan", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Steedman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Siva Reddy, Oscar T\u00e4ckstr\u00f6m, Michael Collins, Tom Kwiatkowski, Dipanjan Das, Mark Steedman, and Mirella Lapata. 2016. Trans- forming dependency structures to logical forms for semantic parsing. In Association for Com- putational Linguistics (ACL).", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "A corpus for reasoning about natural language grounded in photographs", |
|
"authors": [ |
|
{ |
|
"first": "Alane", |
|
"middle": [], |
|
"last": "Suhr", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephanie", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iris", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Huajun", |
|
"middle": [], |
|
"last": "Bai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Artzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alane Suhr, Stephanie Zhou, Iris Zhang, Huajun Bai, and Yoav Artzi. 2019. A corpus for reasoning about natural language grounded in photographs. Association for Computational Linguistics (ACL).", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "The web as knowledge-base for answering complex questions", |
|
"authors": [ |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Talmor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Berant", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "North American Association for Computational Linguistics (NAACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alon Talmor and Jonathan Berant. 2018. The web as knowledge-base for answering complex questions. In North American Association for Computational Linguistics (NAACL).", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "Constructing datasets for multihop reading comprehension across documents", |
|
"authors": [ |
|
{ |
|
"first": "Johannes", |
|
"middle": [], |
|
"last": "Welbl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pontus", |
|
"middle": [], |
|
"last": "Stenetorp", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Riedel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "6", |
|
"issue": "", |
|
"pages": "287--302", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Johannes Welbl, Pontus Stenetorp, and Sebastian Riedel. 2018. Constructing datasets for multi- hop reading comprehension across documents. Transactions of the Association for Computa- tional Linguistics, 6:287-302.", |
|
"links": null |
|
}, |
|
"BIBREF49": { |
|
"ref_id": "b49", |
|
"title": "Optimizing statistical machine translation for text simplification", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Courtney", |
|
"middle": [], |
|
"last": "Napoles", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ellie", |
|
"middle": [], |
|
"last": "Pavlick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quanze", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "401--415", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei Xu, Courtney Napoles, Ellie Pavlick, Quanze Chen, and Chris Callison-Burch. 2016. Opti- mizing statistical machine translation for text simplification. Transactions of the Association for Computational Linguistics, 4:401-415.", |
|
"links": null |
|
}, |
|
"BIBREF50": { |
|
"ref_id": "b50", |
|
"title": "Hotpotqa: A dataset for diverse, explainable multi-hop question answering", |
|
"authors": [ |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Qi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Saizheng", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Cohen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhilin Yang, Peng Qi, Saizheng Zhang, Yoshua Bengio, William W. Cohen, Ruslan R. Salakhutdinov, and Christopher D. Manning. 2018. Hotpotqa: A dataset for diverse, explain- able multi-hop question answering. In Empir- ical Methods in Natural Language Processing (EMNLP).", |
|
"links": null |
|
}, |
|
"BIBREF51": { |
|
"ref_id": "b51", |
|
"title": "The value of semantic parse labeling for knowledge base question answering", |
|
"authors": [ |
|
{ |
|
"first": "Wen-tau", |
|
"middle": [], |
|
"last": "Yih", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Richardson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Meek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jina", |
|
"middle": [], |
|
"last": "Suh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wen-tau Yih, Matthew Richardson, Christopher Meek, Ming-Wei Chang, and Jina Suh. 2016. The value of semantic parse labeling for knowl- edge base question answering. In Association for Computational Linguistics (ACL).", |
|
"links": null |
|
}, |
|
"BIBREF52": { |
|
"ref_id": "b52", |
|
"title": "Spider: A large-scale human-labeled dataset for complex and cross-domain semantic parsing and textto-SQL task", |
|
"authors": [ |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rui", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michihiro", |
|
"middle": [], |
|
"last": "Yasunaga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dongxu", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zifan", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Irene", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qingning", |
|
"middle": [], |
|
"last": "Yao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shanelle", |
|
"middle": [], |
|
"last": "Roman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zilin", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dragomir", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Radev", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tao Yu, Rui Zhang, Kai Yang, Michihiro Yasunaga, Dongxu Wang, Zifan Li, James Ma, Irene Li, Qingning Yao, Shanelle Roman, Zilin Zhang, and Dragomir R. Radev. 2018. Spider: A large-scale human-labeled dataset for complex and cross-domain semantic parsing and text- to-SQL task. In Empirical Methods in Natural Language Processing (EMNLP).", |
|
"links": null |
|
}, |
|
"BIBREF53": { |
|
"ref_id": "b53", |
|
"title": "Learning to parse database queries using inductive logic programming", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Zelle", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raymond", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Mooney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "Association for the Advancement of Artificial Intelligence (AAAI)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1050--1055", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John M. Zelle and Raymond J. Mooney. 1996. Learning to parse database queries using induc- tive logic programming. In Association for the Advancement of Artificial Intelligence (AAAI), pages 1050-1055.", |
|
"links": null |
|
}, |
|
"BIBREF54": { |
|
"ref_id": "b54", |
|
"title": "Learning to map sentences to logical form: Structured classification with probabilistic categorial grammars", |
|
"authors": [ |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Collins", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Uncertainty in Artificial Intelligence (UAI)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "658--666", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Luke Zettlemoyer and Michael Collins. 2005. Learning to map sentences to logical form: Structured classification with probabilistic cate- gorial grammars. In Uncertainty in Artificial Intelligence (UAI), pages 658-666.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "Questions over different sources share a similar compositional structure. Natural language questions from multiple sources (top) are annotated with the QDMR formalism (middle) and deterministically mapped into a pseudo-formal language (bottom)." |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "QDMR of the question ''Return the keywords which have been contained by more than 100 ACL papers.'', represented as a decomposition graph." |
|
}, |
|
"FIGREF2": { |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "Example of a high-level QDMR." |
|
}, |
|
"FIGREF3": { |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "User interface for decomposing a complex question that uses a closed lexicon of tokens." |
|
}, |
|
"FIGREF4": { |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "Examples and justifications of expert judgment on collected QDMRs in BREAK." |
|
}, |
|
"FIGREF5": { |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "Examples of PROJECT and COMPARISON questions in HOTPOTQA (high-level QDMR)." |
|
}, |
|
"FIGREF6": { |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "Differences in granularity, step order, and wording between two decompositions." |
|
}, |
|
"FIGREF7": { |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "Graph edit operations between the graphs of the two QDMRs inFigure 7." |
|
}, |
|
"FIGREF8": { |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "'s Devils had a cameo from the 'MASH' star who played what role on the show?''" |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"text": "The 13 operator types of QDMR steps. Listed are, the natural language template used to express the operator, the operator signature, and an example question that uses the query operator in its decomposition.", |
|
"content": "<table><tr><td>and images, while allowing in principle to query</td></tr><tr><td>multiple modalities for the same question. 1</td></tr></table>", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF2": { |
|
"num": null, |
|
"text": "Functions used for grounding natural language phrases in numerical operators or KB entities.", |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"text": "Return me the total citations of all the papers in the VLDB conference.", |
|
"content": "<table><tr><td>Dataset</td><td>Example</td><td>Original</td><td>BREAK</td></tr><tr><td>ACADEMIC</td><td/><td>195</td><td>195</td></tr><tr><td>(DB)</td><td/><td/><td/></tr><tr><td>ATIS</td><td>What is the first flight from Atlanta</td><td>5,283</td><td>4,906</td></tr><tr><td>(DB)</td><td>to Baltimore that serves lunch?</td><td/><td/></tr><tr><td>GEOQUERY</td><td>How high is the highest point in the</td><td>880</td><td>877</td></tr><tr><td>(DB)</td><td>largest state?</td><td/><td/></tr><tr><td>SPIDER</td><td>How many transactions correspond</td><td>10,181</td><td>7,982</td></tr><tr><td>(DB)</td><td>to each invoice number?</td><td/><td/></tr><tr><td>CLEVR-</td><td>What is the number of cylinders</td><td>32,164</td><td>13,935</td></tr><tr><td>HUMANS</td><td>divided by the number of cubes?</td><td/><td/></tr><tr><td>(Images)</td><td/><td/><td/></tr><tr><td>NLVR2</td><td>If there are only two dogs pulling</td><td>29,680</td><td>13,517</td></tr><tr><td>(IMAGES)</td><td>one of the sleds?</td><td/><td/></tr><tr><td>COMQA</td><td>What was Gandhi's occupation</td><td>11,214</td><td>5,520</td></tr><tr><td>(TEXT)</td><td>before becoming a freedom fighter?</td><td/><td/></tr><tr><td>CWQ</td><td>Robert E Jordan is part of the</td><td>34,689</td><td>2,988,</td></tr><tr><td>(TEXT)</td><td>organization started by whom?</td><td/><td>2,991 high</td></tr><tr><td>DROP</td><td>Approximately how many years did</td><td>96,567</td><td>10,230,</td></tr><tr><td>(TEXT)</td><td>the churches built in 1909 survive?</td><td/><td>10,262 high</td></tr><tr><td>HOTPOTQA-</td><td>Benjamin Halfpenny was a foot-</td><td>23,066</td><td>10,575 high</td></tr><tr><td>HARD</td><td>baller for a club that plays its home</td><td/><td/></tr><tr><td>(TEXT)</td><td>matches where?</td><td/><td/></tr><tr><td>Total:</td><td/><td/><td>83,978</td></tr></table>", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF4": { |
|
"num": null, |
|
"text": "The QA datasets in BREAK. Lists the number of examples in the original dataset and in BREAK. Numbers of high-level QDMRs are denoted by high.", |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF6": { |
|
"num": null, |
|
"text": "Operator prevalence in BREAK. Lists the percentage of QDMRs where the operator appears.", |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF7": { |
|
"num": null, |
|
"text": "details the distribution of QDMR sequence length. Most decompositions in QDMR", |
|
"content": "<table><tr><td>Steps</td><td>QDMR</td><td>QDMR high</td></tr><tr><td>1-2</td><td>10.7%</td><td>59.8%</td></tr><tr><td>3-4</td><td>44.9%</td><td>31.6%</td></tr><tr><td>5-6</td><td>27.0%</td><td>7.9%</td></tr><tr><td>7-8</td><td>10.1%</td><td>0.6%</td></tr><tr><td>9+</td><td>7.4%</td><td>0.2%</td></tr></table>", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF8": { |
|
"num": null, |
|
"text": "The distribution over QDMR sequence length.", |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF11": { |
|
"num": null, |
|
"text": "Results on PROJECT and COMPARI-SON questions from HOTPOTQA development set.", |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF12": { |
|
"num": null, |
|
"text": "The decomposition rules of RULEBASED. Rules are based on dependency labels, part-ofspeech tags and coreference edges. Text fragments used for decomposition are in boldface.", |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF14": { |
|
"num": null, |
|
"text": "Performance of QDMR parsing models on the development and test set. GED+ is computed only for the subset of QDMR graphs with up to 5 nodes, covering 66.1% of QDMRs and 97.6% of high-level data.", |
|
"content": "<table><tr><td>Question</td><td>Gold</td><td>Prediction (COPYNET)</td><td>Analysis</td></tr><tr><td>''How many objects other than cylinders are</td><td/><td/><td/></tr><tr><td>there?''</td><td/><td/><td/></tr></table>", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF15": { |
|
"num": null, |
|
"text": "Manual error analysis of the COPYNET model predictions. Lower examples are of high-level QDMRs.", |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |