|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:55:59.039851Z" |
|
}, |
|
"title": "AMR-To-Text Generation with Graph Transformer", |
|
"authors": [ |
|
{ |
|
"first": "Tianming", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "The MOE Key Laboratory of Computational Linguistics", |
|
"institution": "Peking University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Xiaojun", |
|
"middle": [], |
|
"last": "Wan", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "The MOE Key Laboratory of Computational Linguistics", |
|
"institution": "Peking University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Hanqi", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "The MOE Key Laboratory of Computational Linguistics", |
|
"institution": "Peking University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "meaning representation (AMR)-totext generation is the challenging task of generating natural language texts from AMR graphs, where nodes represent concepts and edges denote relations. The current state-of-the-art methods use graph-to-sequence models; however, they still cannot significantly outperform the previous sequence-to-sequence models or statistical approaches. In this paper, we propose a novel graph-to-sequence model (Graph Transformer) to address this task. The model directly encodes the AMR graphs and learns the node representations. A pairwise interaction function is used for computing the semantic relations between the concepts. Moreover, attention mechanisms are used for aggregating the information from the incoming and outgoing neighbors, which help the model to capture the semantic information effectively. Our model outperforms the state-of-the-art neural approach by 1.5 BLEU points on LDC2015E86 and 4.8 BLEU points on LDC2017T10 and achieves new state-of-the-art performances.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "meaning representation (AMR)-totext generation is the challenging task of generating natural language texts from AMR graphs, where nodes represent concepts and edges denote relations. The current state-of-the-art methods use graph-to-sequence models; however, they still cannot significantly outperform the previous sequence-to-sequence models or statistical approaches. In this paper, we propose a novel graph-to-sequence model (Graph Transformer) to address this task. The model directly encodes the AMR graphs and learns the node representations. A pairwise interaction function is used for computing the semantic relations between the concepts. Moreover, attention mechanisms are used for aggregating the information from the incoming and outgoing neighbors, which help the model to capture the semantic information effectively. Our model outperforms the state-of-the-art neural approach by 1.5 BLEU points on LDC2015E86 and 4.8 BLEU points on LDC2017T10 and achieves new state-of-the-art performances.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Abstract meaning representation (AMR) is a semantic formalism that abstracts away from the syntactic realization of a sentence, and encodes its definition as a rooted, directed, and acyclic graph. In the graph, the nodes represent the concepts, and edges denote the relations between the concepts. The root of an AMR binds its contents to a single traversable graph and serves as a rudimentary representation of the overall focus. The existence of co-references and control structures results in nodes with multiple incoming edges, called reentrancies, and causes an AMR to possess a graph structure, instead of a tree structure. Numerous natural language processing (NLP) tasks can benefit from using AMR, such as machine translation (Jones et al., 2012; Song et al., 2019) , question answering (Mitra and Baral, 2016) , summarization (Liu et al., 2015; Takase et al., 2016) , and event extraction (Huang et al., 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 735, |
|
"end": 755, |
|
"text": "(Jones et al., 2012;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 756, |
|
"end": 774, |
|
"text": "Song et al., 2019)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 796, |
|
"end": 819, |
|
"text": "(Mitra and Baral, 2016)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 836, |
|
"end": 854, |
|
"text": "(Liu et al., 2015;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 855, |
|
"end": 875, |
|
"text": "Takase et al., 2016)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 899, |
|
"end": 919, |
|
"text": "(Huang et al., 2016)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "AMR-to-text generation is the task of recovering a text representing the same definition as a given AMR graph. Because the function words and structures are abstracted away, the AMR graph can correspond to multiple realizations. Numerous important details are underspecified, including tense, number, and definiteness, which makes this task extremely challenging (Flanigan et al., 2016) . Figure 1 shows an example AMR graph and its corresponding sentence.", |
|
"cite_spans": [ |
|
{ |
|
"start": 363, |
|
"end": 386, |
|
"text": "(Flanigan et al., 2016)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 389, |
|
"end": 397, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Early works relied on grammar-based or statistical approaches (Flanigan et al., 2016; Pourdamghani et al., 2016; Lampouras and Vlachos, 2017; Gruzitis et al., 2017) . Such approaches generally require alignments between the graph nodes and surface tokens, which are automatically generated and can lead to error accumulation. In recent research, the graphs are first transformed into linear sequences, and then the text is generated from the inputs (Konstas et al., 2017) . Such a method may lose information from the graph structure. The current state-of-theart neural methods are graph-to-sequence models and hybrid variants (Beck et al., 2018; Song et al., 2018; Damonte and Cohen, 2019) . These methods use a graph state long short-term memory (LSTM) network, gated graph neural network (GGNN), or graph convolution network (GCN) to encode AMR graphs directly, and they can explicitly utilize the information provided by the graph structure. However, these graph encoders still cannot significantly outperform sequence encoders. The AMR-to-text generation task can be regarded as a distinct translation task, and basing it on the concepts of off-the-shelf methods Figure 1 : An example AMR graph and its corresponding sentence. The graph is rooted by ''expect-01'', which means the AMR is about the expecting. The node ''create'' is a reentrance and it plays two roles simultaneously (i.e., .", |
|
"cite_spans": [ |
|
{ |
|
"start": 62, |
|
"end": 85, |
|
"text": "(Flanigan et al., 2016;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 86, |
|
"end": 112, |
|
"text": "Pourdamghani et al., 2016;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 113, |
|
"end": 141, |
|
"text": "Lampouras and Vlachos, 2017;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 142, |
|
"end": 164, |
|
"text": "Gruzitis et al., 2017)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 449, |
|
"end": 471, |
|
"text": "(Konstas et al., 2017)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 627, |
|
"end": 646, |
|
"text": "(Beck et al., 2018;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 647, |
|
"end": 665, |
|
"text": "Song et al., 2018;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 666, |
|
"end": 690, |
|
"text": "Damonte and Cohen, 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 828, |
|
"end": 833, |
|
"text": "(GCN)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1168, |
|
"end": 1176, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "for neural machine translation can be helpful. The Transformer model (Vaswani et al., 2017) is a stacked attention architecture and has shown its effectiveness in translation tasks; however, applying it to AMR-to-text generation has a major problem: It can only deal with sequential inputs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 69, |
|
"end": 91, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To address these issues, we propose a novel graph network (Graph Transformer) for AMRto-text generation. Graph Transformer is an adaptation of the Transformer model, and it has a stacked attention-based encoder-decoder architecture. The encoder considers the AMR graph as the input and learns the node representations from the node attributes by the aggregation of the neighborhood information. The global semantic information is captured by stacked graph attention layers, which allow a node to deal with the hidden states of the neighbor nodes and their corresponding relations. Multiple stacked graph attention layers enable the nodes to utilize the information of those nodes that are not directly adjacent, allowing the global information to propagate. We consider that the AMR graph is a directed graph in which the directions hold extremely important information. Therefore, for encoding the information from the incoming and outgoing edges, we use two individual graph attentions in each layer. Then we utilize a fusion layer to incorporate the information from the incoming and outgoing relations, followed by a feed-forward network. Residual connections are used for connecting adjacent layers. The final node representations are formed by concatenating the two individual representations encoded by multiple layers. The decoder is similar to the original decoder in Transformer, performing multi-head attentions and self-attentions over the representations of the nodes in the encoder and over the hidden states of the decoder, respectively. For the decoder stack, we adopt a copy mechanism to generate the texts, which can help copy low-frequency tokens, such as named entities and numbers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We perform experiments on two benchmark datasets (LDC2015E86 and LDC2017T10). Our model significantly outperforms the prior methods and achieves a new state-of-the-art performance. Without external data, our model improves the BLEU scores of the state-of-the-art and a mostly recently proposed neural model (i.e., g-GCNSEQ [Damonte and Cohen, 2019] ) by 1.5 points on LDC2015E86 and 4.8 points on LDC2017T10. When using the Gigaword corpus as the additional training data, which is automatically labeled by a pre-trained AMR parser, our model achieves a BLEU score of 36.4 on LDC2015E86, which is the highest result on the dataset. The experimental result also shows that the improved structural representation encoding by our proposed graph encoder is most useful when the amount of training data is small. The variations in our model are evaluated to verify its robustness as well as the importance of the proposed modules. In addition, we study the performances of our model and baselines under different structures of the input graphs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 323, |
|
"end": 348, |
|
"text": "[Damonte and Cohen, 2019]", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our contributions can be summarized as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 For AMR-to-text generation, we propose Graph Transformer, a novel graph-tosequence model based on the attention mechanism. Our model uses a pairwise interaction function to compute the semantic relations and uses separate graph attentions on the incoming and outgoing neighbors, which help in enhanced capturing of the semantic information provided in the graph. The code is available at https://github. com/sodawater/GraphTransformer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 The experimental results show that our model achieves a new state-of-the-art performance on benchmark datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "2 Related Work", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Early work on AMR-to-text generation focused on statistical methods. Flanigan et al. (2016) transformed AMR graphs to appropriate spanning trees and applied tree-to-string transducers to generate texts. Song et al. (2016) partitioned an AMR graph into small fragments and generated the translations for all the fragments, whose order was finally decided by solving an asymmetric generalized traveling salesman problem. Song et al. (2017) used synchronous node replacement grammar to parse AMR graphs and generate output sentences. Pourdamghani et al. (2016) adopted a phrase-based machine translation model on the input of a linearized graph. Recent works propose using neural networks for generation. Konstas et al. (2017) used a sequence-to-sequence model to generate texts, leveraging an LSTM for encoding a linearized AMR structure. Graph-to-sequence models outperform sequence-to-sequence models, including a graph state LSTM (Song et al., 2018) and GGNN (Beck et al., 2018) . A most recently developed hybrid neural model achieved the stateof-the-art performance by applying a BiLSTM on the output of a graph encoder GCN, to utilize both structural and sequential information (Damonte and Cohen, 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 69, |
|
"end": 91, |
|
"text": "Flanigan et al. (2016)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 203, |
|
"end": 221, |
|
"text": "Song et al. (2016)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 419, |
|
"end": 437, |
|
"text": "Song et al. (2017)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 531, |
|
"end": 557, |
|
"text": "Pourdamghani et al. (2016)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 702, |
|
"end": 723, |
|
"text": "Konstas et al. (2017)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 931, |
|
"end": 950, |
|
"text": "(Song et al., 2018)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 960, |
|
"end": 979, |
|
"text": "(Beck et al., 2018)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 1182, |
|
"end": 1207, |
|
"text": "(Damonte and Cohen, 2019)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "AMR-to-Text Generation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Neural network methods for processing the data represented in graph domains have been studied for several years. Graph neural networks (GNNs) have also been proposed, which are an extension of recursive neural networks and can be applied to most of the practically useful types of graphs (Gori et al., 2005; Scarselli et al., 2009) . GCNs are the main alternatives for neural-based graph representations, and are widely used to address various problems (Bruna et al., 2014; Duvenaud et al., 2015; Kipf and Welling, 2017) . Li et al. (2015) further extended a GNN and modified it to use gated recurrent units for processing the data represented in graphs; this method is known as a GGNN. Beck et al. (2018) followed their concept and applied a GGNN to string generation. Another neural architecture based on gated units is the graph state LSTM (Song et al., 2018) , which uses an LSTM structure for encoding graph-level semantics. Our model is most similar to graph attention networks (GATs) (Velickovic et al., 2018) ; it incorporates the attention mechanism in the information aggregation. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 288, |
|
"end": 307, |
|
"text": "(Gori et al., 2005;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 308, |
|
"end": 331, |
|
"text": "Scarselli et al., 2009)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 453, |
|
"end": 473, |
|
"text": "(Bruna et al., 2014;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 474, |
|
"end": 496, |
|
"text": "Duvenaud et al., 2015;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 497, |
|
"end": 520, |
|
"text": "Kipf and Welling, 2017)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 523, |
|
"end": 539, |
|
"text": "Li et al. (2015)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 687, |
|
"end": 705, |
|
"text": "Beck et al. (2018)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 843, |
|
"end": 862, |
|
"text": "(Song et al., 2018)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 991, |
|
"end": 1016, |
|
"text": "(Velickovic et al., 2018)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Networks for Graphs", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "The overall architecture of Graph Transformer is shown in Figure 2 , with an example AMR graph and its corresponding sentence. We begin by providing the formal definition of the AMRto-text generation and the notations we use, and then reviewing the Transformer model. Then we introduce the graph encoder and sentence decoder used in our model. Finally, we describe the training and decoding procedures.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 58, |
|
"end": 66, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Graph Transformer", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Given an AMR graph, G, our goal is to generate a natural language sentence that represents the same definition as G. Our model is trained to maximize the probability, P (S|G), where S is the gold sentence. In the following, we define the notations used in this study. We assume a directed graph,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation and Notations", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "G = (V, E), where V is a set of N nodes, E", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation and Notations", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "is a set of M edges, and N and M are the numbers of nodes and edges, respectively. Each edge in E can be represented as (i, j, l) , where i and j are the indices of the source and target nodes, respectively, and l is the edge label. We further denote the incoming neighborhoods (i.e., reached by an incoming edge) of Figure 2 : Left: Graph attention mechanism. We take the node ''accelerate'' in Figure 1 as an example. Head representation is marked with yellow and tail representation is marked with blue. The node ''accelerate'' has one incoming relation and two outgoing relations to be attend respectively; Right: The overall architecture of our proposed Graph Transformer.", |
|
"cite_spans": [ |
|
{ |
|
"start": 120, |
|
"end": 129, |
|
"text": "(i, j, l)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 317, |
|
"end": 325, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 396, |
|
"end": 404, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Problem Formulation and Notations", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "node v i \u2208 V", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation and Notations", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "as N in i and outgoing neighborhoods (i.e., reached by an outgoing edge) as N out i . The corresponding sentence is S = {s 1 , s 2 , ..., s T }, where s i is the i-th token of the sentence and T is the number of the tokens.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation and Notations", |
|
"sec_num": "3.1" |
|
}, |
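
{

"text": "To make the notation concrete, the following is a minimal Python sketch (ours, not part of the paper) of one way to store the directed graph G = (V, E) together with the incoming and outgoing neighborhoods; the class name AMRGraph and the example edge labels are illustrative assumptions.\nfrom collections import defaultdict\n\nclass AMRGraph:\n    # Directed, labeled graph: nodes are concepts, edges are triples (i, j, label).\n    def __init__(self, concepts):\n        self.concepts = list(concepts)     # node index -> concept\n        self.in_nbrs = defaultdict(list)   # j -> [(i, label), ...]  (N^in_j)\n        self.out_nbrs = defaultdict(list)  # i -> [(j, label), ...]  (N^out_i)\n\n    def add_edge(self, i, j, label):\n        self.out_nbrs[i].append((j, label))\n        self.in_nbrs[j].append((i, label))\n\n# Example: a fragment of Figure 1 (edge labels are only illustrative).\ng = AMRGraph(['expect-01', 'create', 'accelerate-01'])\ng.add_edge(0, 1, 'ARG1')   # expect-01 -> create\ng.add_edge(2, 1, 'ARG1')   # accelerate-01 -> create (reentrancy: two incoming edges)\nprint(len(g.in_nbrs[1]))   # 2",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Problem Formulation and Notations",

"sec_num": "3.1"

},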
|
{ |
|
"text": "Our model is adapted from the Transformer model, and here, we briefly review this model. The original Transformer network uses an encoderdecoder architecture, with each layer consisting of a multi-head attention mechanism and a feedforward network. Both the components are described here.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transformer", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The multi-head attention mechanism builds on scaled dot-product attention, which operates on a package of queries Q and keys K of dimension", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transformer", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "d k and values V of dimension d v , Attention(Q, K, V ) = softmax( QK \u221a d k )V (1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transformer", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The multi-head attention linearly projects d model -dimensional queries, keys, and values d h times with different projections, and it performs scaled dot-product attention on each projected pair. The outputs of the attention are concatenated and again projected, resulting in the final output,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transformer", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "head x = Attention(QW x q , KW x k , V W x v ) MultiHead(Q, K, V ) = d h x=1 head x W o", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Transformer", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "where denotes the concatenation of", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transformer", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "the d h attention heads. Projection matrices W x q \u2208 R d k \u00d7d model , W x k \u2208 R d k \u00d7d model , W x v \u2208 R d v \u00d7d model , and W o \u2208 R d h * d v \u00d7d model . d k = d v = d model /d h .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transformer", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The other component of each layer is a feed-forward network. It consists of two linear transformations, with a ReLU activation in between.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transformer", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "= max(0, xW 1 + b 1 )W 2 + b 2 (3)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "FFN(x)", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For constructing a deep network and regularization, a residual connection and layer normalization are used to connect adjacent layers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "FFN(x)", |
|
"sec_num": null |
|
}, |
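
{

"text": "As a concrete reference for Eqs. (1)-(3), here is a small self-contained NumPy sketch (ours, not the authors' code) of scaled dot-product attention, its multi-head variant with d_k = d_v = d_model / d_h, and the feed-forward network; the layer normalization and residual connections are omitted for brevity.\nimport numpy as np\n\ndef softmax(x, axis=-1):\n    e = np.exp(x - x.max(axis=axis, keepdims=True))\n    return e / e.sum(axis=axis, keepdims=True)\n\ndef attention(Q, K, V):\n    # Eq. (1): softmax(Q K^T / sqrt(d_k)) V\n    return softmax(Q @ K.T / np.sqrt(Q.shape[-1])) @ V\n\ndef multi_head(Q, K, V, Wq, Wk, Wv, Wo):\n    # Eq. (2): project per head, attend, concatenate, project again.\n    heads = [attention(Q @ wq, K @ wk, V @ wv) for wq, wk, wv in zip(Wq, Wk, Wv)]\n    return np.concatenate(heads, axis=-1) @ Wo\n\ndef ffn(x, W1, b1, W2, b2):\n    # Eq. (3): two linear transformations with a ReLU in between.\n    return np.maximum(0.0, x @ W1 + b1) @ W2 + b2\n\nd_model, d_h = 256, 2\nd_k = d_model // d_h\nrng = np.random.default_rng(0)\nWq = [rng.normal(size=(d_model, d_k)) for _ in range(d_h)]\nWk = [rng.normal(size=(d_model, d_k)) for _ in range(d_h)]\nWv = [rng.normal(size=(d_model, d_k)) for _ in range(d_h)]\nWo = rng.normal(size=(d_h * d_k, d_model))\nx = rng.normal(size=(5, d_model))                  # 5 input positions\nprint(multi_head(x, x, x, Wq, Wk, Wv, Wo).shape)   # (5, 256)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Transformer",

"sec_num": "3.2"

},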
|
{ |
|
"text": "Our model also has an encoder-decoder architecture. In our model, the graph encoder is composed of a stack of L 1 identical graph layers that use different parameters from layer to layer. Each layer has three sub-layers: a graph attention mechanism, fusion layer, and feed-forward network. The encoder takes the nodes as the input and learns the node representations by aggregating the neighborhood information. Considering that an AMR graph is a directed graph, our model learns two distinct representations for each node. The first is a head representation, which represents a node when it works as a head node (i.e., a source node) in a semantic relation and only aggregates the information from the outgoing edges and corresponding nodes. The second is a tail representation, which represents a node when it works as a tail node (i.e., a target node) and only aggregates the information from the outgoing edges and corresponding nodes. Specifically, we denote \u2212 \u2192 h t i and \u2190 \u2212 h t i as the head representation and tail representation of each node v i at the t-th layer, respectively. The embedding of each node (i.e., the word embedding of the concept) is fed to the graph encoder as the initial hidden state of the node,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graph Encoder", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u2212 \u2192 h 0 i = \u2190 \u2212 h 0 i = e i W e + b e", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Graph Encoder", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "where e i is the embedding of Different from previous methods, we propose using graph attention as the aggregator, instead of a gated unit or pooling layer. In an AMR graph, the semantic representation of a node is determined by its own concept definition and relations to other concepts. Graph attention is used for capturing such global semantic information in a graph. Specifically, it allows each node to deal with the triples that are composed of the embeddings of the neighbor nodes, embeddings of the corresponding edges, and its own embedding. We represent the triple of two adjacent nodes connected by edge", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graph Encoder", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "node v i , W e \u2208 R d", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graph Encoder", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "(i, j, l) as r t ij = \u2212 \u2192 h t\u22121 i e l \u2190 \u2212 h t\u22121 j W r + b r (5) r t ij = \u2212 \u2192 h t\u22121 i e l \u2190 \u2212 h t\u22121 j W r + b r ,", |
|
"eq_num": "(6)" |
|
} |
|
], |
|
"section": "Graph Encoder", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graph Encoder", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "e l \u2208 R d model is the embedding of edge label l and \u2212 \u2192 h t\u22121 i e l \u2190 \u2212 h t\u22121 j", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graph Encoder", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "is the concatenation of these three representations. W r \u2208 R 3d model \u00d7d model and b r \u2208 R d model are the parameters. r t ij is the representations of the triple, which will be deal with both source", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graph Encoder", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "node v i and target node v j .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graph Encoder", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Using such a pairwise-interaction function to compute a relation has three advantages: 1) it does not encounter the parameter explosion problem (Beck et al., 2018) because the linear transformation for the triple is independent of the edge label, 2) the edge information is encoded by edge embedding so that there is no loss of information, and 3) the representation incorporates the context information of the nodes. Then we perform graph attentions over the incoming and outgoing relations (i.e., incoming and outgoing edges and the corresponding nodes). The multihead graph attentions for", |
|
"cite_spans": [ |
|
{ |
|
"start": 144, |
|
"end": 163, |
|
"text": "(Beck et al., 2018)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graph Encoder", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "node v i are computed as \u2212 \u2192 g t i = d h x=1 \u239b \u239d j\u2208N out i \u03b1 x ij r t ij W x v \u239e \u23a0 W o \u03b1 x ij = exp \u2212 \u2192 h t\u22121 i W x q \u2022(r t ij W x k ) \u221a d k z\u2208N out i exp \u2212 \u2192 h t\u22121 i W x q \u2022(r t iz W x k ) \u221a d k (7)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graph Encoder", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "where \u2212 \u2192 g t i is the output of the graph attention on the outgoing relations for", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graph Encoder", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "node v i . Similarly, \u2190 \u2212 g t i", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graph Encoder", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "is computed over all the incoming relations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graph Encoder", |
|
"sec_num": "3.3" |
|
}, |
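
{

"text": "A simplified, single-head NumPy sketch of our reading of Eqs. (5) and (7) (ours, not the released implementation): the triple representation r_ij is built from the head representation of the source, the edge-label embedding, and the tail representation of the target, and node i then attends over its outgoing relations; the multi-head version would concatenate several such heads and apply W_o, and the incoming side is symmetric.\nimport numpy as np\n\ndef softmax(x):\n    e = np.exp(x - x.max())\n    return e / e.sum()\n\ndef triple_rep(h_head_i, e_l, h_tail_j, W_r, b_r):\n    # Eq. (5): r_ij = [h_head_i ; e_l ; h_tail_j] W_r + b_r\n    return np.concatenate([h_head_i, e_l, h_tail_j]) @ W_r + b_r\n\ndef attend_outgoing(i, out_nbrs, H_head, H_tail, E_lab, W_r, b_r, W_q, W_k, W_v):\n    # Single-head form of Eq. (7): attention of node i over its outgoing relations.\n    triples = [triple_rep(H_head[i], E_lab[l], H_tail[j], W_r, b_r) for j, l in out_nbrs[i]]\n    d_k = W_k.shape[1]\n    scores = np.array([(H_head[i] @ W_q) @ (r @ W_k) / np.sqrt(d_k) for r in triples])\n    alpha = softmax(scores)               # attention weights over the outgoing relations\n    return sum(a * (r @ W_v) for a, r in zip(alpha, triples))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Graph Encoder",

"sec_num": "3.3"

},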
|
{ |
|
"text": "Following the graph attention sub-layer, we use a fusion layer to incorporate the information aggregated from the incoming and outgoing relations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graph Encoder", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "s t i = sigmoid \u2212 \u2192 g t i \u2190 \u2212 g t i W s + b s g t i = s t i * \u2212 \u2192 g t i + (1 \u2212 s t i ) * \u2190 \u2212 g t i (8)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graph Encoder", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graph Encoder", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "W s \u2208 R 2 * d model \u00d71 and b s \u2208 R 1 are the parameters.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graph Encoder", |
|
"sec_num": "3.3" |
|
}, |
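
{

"text": "A minimal sketch of the gated fusion in Eq. (8), assuming plain NumPy vectors: a scalar sigmoid gate mixes the outgoing-side and incoming-side aggregations of a node (variable names are ours).\nimport numpy as np\n\ndef fuse(g_out, g_in, W_s, b_s):\n    # Eq. (8): s = sigmoid([g_out ; g_in] W_s + b_s), then a convex combination.\n    s = 1.0 / (1.0 + np.exp(-(np.concatenate([g_out, g_in]) @ W_s + b_s)))\n    return s * g_out + (1.0 - s) * g_in\n\nd_model = 256\nrng = np.random.default_rng(0)\ng_out, g_in = rng.normal(size=d_model), rng.normal(size=d_model)\nW_s, b_s = rng.normal(size=2 * d_model), 0.0   # W_s plays the role of the R^{2 d_model x 1} matrix\nprint(fuse(g_out, g_in, W_s, b_s).shape)       # (256,)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Graph Encoder",

"sec_num": "3.3"

},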
|
{ |
|
"text": "The last sub-layer is a fully connected feedforward network, which is applied to each node separately and identically. We use a GeLU activation function instead of the standard ReLU activation. The dimensions of the input, inner layer, and output are d model , 4 * d model , and 2 * d model , respectively. The output is divided into two parts to obtain the head and tail representations, respectively. In addition, a residual connection is used to connect adjacent layers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graph Encoder", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "O t \u2190 \u2212 O t = F F N(G t ) \u2212 \u2192 H t = LayerNorm( \u2212 \u2192 O t + \u2212 \u2192 H t\u22121 ) \u2190 \u2212 H t = LayerNorm( \u2190 \u2212 O t + \u2190 \u2212 H t\u22121 )", |
|
"eq_num": "(9)" |
|
} |
|
], |
|
"section": "\u2212 \u2192", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "where G t i is the package of outputs g t i .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2212 \u2192", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2212 \u2192 H t and \u2190 \u2212 H t are the packages of head representation \u2212 \u2192 h t i and tail representation \u2190 \u2212 h t i , respectively. LayerN orm is the layer normalization. Note that using a residual connection and layer normalization around each layer in the graph encoder is more effective than using them around each of the three sub-layers for our model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2212 \u2192", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The final node representation is obtained by concatenating the forward and backward representations. A linear transformation layer is also used for compressing the dimension. For convenience, we denote h i as the final representation of", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2212 \u2192", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "node v i , h i = \u2212 \u2192 h L 1 i \u2190 \u2212 h L 1 i W h (10)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2212 \u2192", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "where W h \u2208 R 2d model \u00d7d emb is a parameter and L 1 is the number of layers of the encoder stack.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2212 \u2192", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In our model, the decoder has an architecture similar to that in the original Transformer model, which is composed of L 2 identical layers. Each layer has three sub-layers: a multi-head self-attention mechanism, multi-head attention mechanism over the output of the encoder stack, and position-wise feed-forward network. A residual connection is used for connecting adjacent sub-layers. The decoder generates the natural language sentence, and we denote the hidden state at position i of the t-th layer in the decoder stack as\u0125 t i . Different from the input representation of the encoder, the position information is added and the sum of the embedding and position encoding is fed as the input,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Decoder", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "h 0 i = e i W e + b e + pe i", |
|
"eq_num": "(11)" |
|
} |
|
], |
|
"section": "Sentence Decoder", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "where e i and pe i \u2208 R d model are the embedding and positional encoding of the token at position i, respectively. The self-attention sub-layer is used for encoding the information of the decoded subsequences. We use masking to ensure that the attention and prediction for position i depend only on the known words at positions preceding i,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Decoder", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "A t = MultiHead(\u0124 t\u22121 ,\u0124 t\u22121 ,\u0124 t\u22121 ) B t = LayerNorm(A t +\u0124 t\u22121 )", |
|
"eq_num": "(12)" |
|
} |
|
], |
|
"section": "Sentence Decoder", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "where\u0124 t\u22121 is the package of hidden states\u0125 t\u22121 i in the decoder. Next, the output of the self-attention is further fed into the multi-head attention and feed-forward network, expressed as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Decoder", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "A t = MultiHead(B t , H, H) B t = LayerNorm(\u00c2 t + B t ) O t = FFN(B t ) H t = LayerNorm(\u00d4 t +B t ) (13)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Decoder", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "where H is the package of final node representations h i encoded by the graph encoder.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Decoder", |
|
"sec_num": "3.4" |
|
}, |
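
{

"text": "To illustrate the masking used in the decoder self-attention of Eq. (12), here is a stripped-down NumPy sketch (ours; single head, no projections): position i can only attend to positions up to i.\nimport numpy as np\n\ndef masked_self_attention(H):\n    # H: (T, d) decoder states; future positions are masked out before the softmax.\n    T, d = H.shape\n    scores = H @ H.T / np.sqrt(d)\n    future = np.triu(np.ones((T, T), dtype=bool), k=1)\n    scores = np.where(future, -1e9, scores)\n    w = np.exp(scores - scores.max(axis=-1, keepdims=True))\n    w /= w.sum(axis=-1, keepdims=True)\n    return w @ H\n\nprint(masked_self_attention(np.random.default_rng(0).normal(size=(4, 8))).shape)  # (4, 8)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Sentence Decoder",

"sec_num": "3.4"

},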
|
{ |
|
"text": "For convenience, we denote the final hidden state of the decoder at position i as\u0125 i . Considering that numerous low-frequency open-class tokens such as named entities and numbers in an AMR graph appear in the corresponding sentence, we adopt the copy mechanism (Gu et al., 2016) to solve the problem. A gate is used over the decoder stack for controlling the generation of words from the vocabulary or directly copying them from the graph, expressed as", |
|
"cite_spans": [ |
|
{ |
|
"start": 262, |
|
"end": 279, |
|
"text": "(Gu et al., 2016)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Decoder", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03b8 i = \u03c3(\u0125 i W \u03b8 + b \u03b8 )", |
|
"eq_num": "(14)" |
|
} |
|
], |
|
"section": "Sentence Decoder", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Decoder", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "W \u03b8 \u2208 R d model \u00d71", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Decoder", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "and b \u03b8 \u2208 R 1 are the parameters. Probability distribution p g i of the words to be directly generated at time-step i is computed as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Decoder", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p g i = softmax(\u0125 i W g + b g )", |
|
"eq_num": "(15)" |
|
} |
|
], |
|
"section": "Sentence Decoder", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "where ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Decoder", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "W g \u2208 R d model \u00d7d vocab and b g \u2208 R d", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Decoder", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "p c i = N i * =1 exp \u0125 i \u2022 h i * N j * =1 exp \u0125 i \u2022 h j * z i * (16)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Decoder", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "where z i * is the one-hot vector of", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Decoder", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "node v i * .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Decoder", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "The final probability distribution of the words at time-step i is the interpolation of two probabilities,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Decoder", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p i = \u03b8 i * p g i + (1 \u2212 \u03b8 i ) * p c i", |
|
"eq_num": "(17)" |
|
} |
|
], |
|
"section": "Sentence Decoder", |
|
"sec_num": "3.4" |
|
}, |
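
{

"text": "An illustrative NumPy sketch (ours) of how Eqs. (14)-(17) fit together: a sigmoid gate interpolates the vocabulary (generation) distribution with a copy distribution over the graph nodes, where the one-hot rows of Z map each node to its vocabulary index.\nimport numpy as np\n\ndef softmax(x):\n    e = np.exp(x - x.max())\n    return e / e.sum()\n\ndef output_distribution(h_dec, H_nodes, Z, W_theta, b_theta, W_g, b_g):\n    # Eq. (14): copy/generate gate.\n    theta = 1.0 / (1.0 + np.exp(-(h_dec @ W_theta + b_theta)))\n    # Eq. (15): generation distribution over the vocabulary.\n    p_gen = softmax(h_dec @ W_g + b_g)\n    # Eq. (16): copy distribution; attention over node representations, scattered\n    # to vocabulary positions by the one-hot matrix Z of shape (N, vocab).\n    p_copy = softmax(H_nodes @ h_dec) @ Z\n    # Eq. (17): interpolation of the two distributions.\n    return theta * p_gen + (1.0 - theta) * p_copy",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Sentence Decoder",

"sec_num": "3.4"

},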
|
{ |
|
"text": "For the training, we aim to maximize the likelihood of each gold-standard output sequence, S, given the graph, G.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training and Decoding", |
|
"sec_num": "3.5" |
|
}, |
|
{ |
|
"text": "l(S|G) = T i=1 log P (s i |s i\u22121 , ..., s 1 , G, \u03b8) (18)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training and Decoding", |
|
"sec_num": "3.5" |
|
}, |
|
{ |
|
"text": "where\u03b8is the model parameter.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training and Decoding", |
|
"sec_num": "3.5" |
|
}, |
|
{ |
|
"text": "P (s i |s i\u22121 , ..., s 1 , G, \u03b8)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training and Decoding", |
|
"sec_num": "3.5" |
|
}, |
|
{ |
|
"text": "corresponds to the probability score of word s i in p i computed by Eq. (16). We use the beam search to generate the target sentence during the decoding stage.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training and Decoding", |
|
"sec_num": "3.5" |
|
}, |
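
{

"text": "A small sketch of the training objective in Eq. (18), assuming a hypothetical step_fn(graph, prefix) that returns the distribution p_i of Eq. (17) for the next position; in practice the loss is the negative of this log-likelihood, summed over the batch.\nimport numpy as np\n\ndef sentence_log_likelihood(step_fn, graph, target_ids):\n    # Eq. (18): sum_i log P(s_i | s_<i, G).\n    ll, prefix = 0.0, []\n    for tok in target_ids:\n        p = step_fn(graph, prefix)        # distribution over the vocabulary\n        ll += np.log(p[tok] + 1e-12)      # epsilon added for numerical stability\n        prefix.append(tok)\n    return ll",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Training and Decoding",

"sec_num": "3.5"

},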
|
{ |
|
"text": "In this section, we compare our proposed graph encoders with the existing ones presented in prior works.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison to Prior Graph Encoders", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Most models, including a GCN (Damonte and Cohen, 2019), GGNN (Beck et al., 2018) , and GraphLSTM (Song et al., 2018) , use a nonpairwise interaction function to represent the information to be aggregated from the neighborhoods. Specifically, they ignore the receiver node (i.e., the node to be updated), operating only on the sender node (i.e., the neighbor node) and the edge attribute (Battaglia et al., 2018) . They add a self-loop edge for each node so that its own information can be considered. In our model, we compute the pairwise interactions using Eq. 5; hence, no self-loop edge is required.", |
|
"cite_spans": [ |
|
{ |
|
"start": 61, |
|
"end": 80, |
|
"text": "(Beck et al., 2018)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 97, |
|
"end": 116, |
|
"text": "(Song et al., 2018)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 387, |
|
"end": 411, |
|
"text": "(Battaglia et al., 2018)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison to Prior Graph Encoders", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In our model, the graph attention mechanism is similar to GAT (Velickovic et al., 2018) . The main differences are that GAT is designed for undirected graphs and neither directions nor labels of edges are considered. We propose using two distinct representations (i.e., head representation and tail representation) for each node and utilizing graph attentions on the incoming and outgoing relations. Accordingly, the model can consider the differences in the incoming and outgoing relations, and the results presented in the next section verify the effectiveness of this proposed modification. In addition, GAT adopts additive attention and uses averages of the outputs of the multi-head attention in the final layer. In our model, we use a scaled dot-product attention for all the attention layers.", |
|
"cite_spans": [ |
|
{ |
|
"start": 62, |
|
"end": 87, |
|
"text": "(Velickovic et al., 2018)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison to Prior Graph Encoders", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We used two standard AMR corpora (LDC2015E86 and LDC2017T10) as our experiment datasets. The LDC2015E86 dataset contains 16,833 instances for the training, 1,368 for the development, and 1,371 for the test. The LDC2017T10 dataset is the latest AMR corpus release, which contains 36,521 instances for the training and the same instances for the development and test as in LDC2015E86. Most prior works evaluate their models on the former dataset. Because prior approaches during the same period achieve the state-of-the-art performances on LDC2015E86 and LDC2017T10, respectively, we performed experiments on both the datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data and Preprocessing", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Following Konstas et al. (2017) , we supplemented the gold data with large-scale external data. We used the Gigaword corpus 1 released by Song et al. (2018) as the external data, which was automatically parsed by the JAMR. For the training on both the gold data and automatically labeled data, the same training strategy as that of Konstas et al. (2017) was adopted, which was fine-tuning the model on the gold data after each epoch of the pre-training on the Gigaword data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 31, |
|
"text": "Konstas et al. (2017)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 138, |
|
"end": 156, |
|
"text": "Song et al. (2018)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 332, |
|
"end": 353, |
|
"text": "Konstas et al. (2017)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data and Preprocessing", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We set our model parameters based on preliminary experiments on the development set. d model is set to 256 and d emb is set to 300. The head number of attention is set to 2. The numbers (L 1 and L 2 ) of layers of the encoder and decoder are set to 8 and 6, respectively. The batch size is set to 64. We extract a vocabulary from the training set, which is shared by both the encoder and the decoder. The word embeddings are initialized from GloVe word embeddings (Pennington et al., 2014) . We use the Adam optimizer (Kingma and Ba, 2015) with lr = 0.0002, \u03b2 1 = 0.9, \u03b2 2 = 0.98, and = 10 \u22129 . Learning rate is halved every time perplexity on the development set does not improve for two epochs. We apply dropout to the output of each attention sub-layer and the input embeddings, and use a rate of P drop = 0.3. Beam search with beam size to 6 is used for decoding. During training, we filter out instances with more than 100 nodes in graph or 100 words in sentence for speeding up. Note that d model is set to 512, the head number is set to 4, and the learning rate is set to 0.0001 when training on both gold data and automatically labeled data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 464, |
|
"end": 489, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter Settings and Training Details", |
|
"sec_num": "5.2" |
|
}, |
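
{

"text": "For reference, the hyper-parameters above can be collected into a small configuration object; this is our own illustration (the Config class and the halving helper are not the released code), with the values reported for training on the gold data only.\nfrom dataclasses import dataclass\n\n@dataclass\nclass Config:\n    d_model: int = 256\n    d_emb: int = 300\n    num_heads: int = 2        # attention heads\n    enc_layers: int = 8       # L1\n    dec_layers: int = 6       # L2\n    batch_size: int = 64\n    lr: float = 2e-4          # Adam with beta1=0.9, beta2=0.98, eps=1e-9\n    dropout: float = 0.3      # P_drop\n    beam_size: int = 6\n\ndef maybe_halve_lr(lr, dev_ppl_history, patience=2):\n    # Halve the learning rate when dev-set perplexity has not improved for `patience` epochs.\n    recent = dev_ppl_history[-(patience + 1):]\n    if len(recent) == patience + 1 and min(recent[1:]) >= recent[0]:\n        return lr / 2.0\n    return lr",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Parameter Settings and Training Details",

"sec_num": "5.2"

},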
|
{ |
|
"text": "Following existing works, we evaluate the results with the BLEU metric (Papineni et al., 2002) . We also report the results using CHRF++ (Popovi\u0107, 2017) , similar to Beck et al. (2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 71, |
|
"end": 94, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 137, |
|
"end": 152, |
|
"text": "(Popovi\u0107, 2017)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 166, |
|
"end": 184, |
|
"text": "Beck et al. (2018)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Metrics and Baselines", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Our direct baseline is the original Transformer, which takes a linearized graph as the input. We use the same linearization as that by Konstas et al. (2017) . We also compare our model with prior statistical approaches (PBMT, Tree2Str, and TSP), sequence-to-sequence approaches (S2S+Anon and S2S+Copy), the current state-of-the-art Methods BLEU PBMT (Pourdamghani et al., 2016) 26.9 Tree2Str (Flanigan et al., 2016) 23.0 TSP (Song et al., 2016) 22.4 S2S+Anon (Konstas et al., 2017) 22.0 GraphLSTM (Song et al., 2018) 23.3 t-GCNSEQ (Damonte and Cohen, 2019) 23.9 g-GCNSEQ (Damonte and Cohen, 2019) 24.4 Transformer 17.7 Graph Transformer 25.9 S2S+Anon (2M) (Konstas et al., 2017) 32.3 S2S+Anon (20M) (Konstas et al., 2017) 33.8 S2S+Copy (2M) (Song et al., 2018) 31.7 GraphLSTM (2M) (Song et al., 2018) 33.6 Transformer (2M) 35.1 Graph Transformer (2M)", |
|
"cite_spans": [ |
|
{ |
|
"start": 135, |
|
"end": 156, |
|
"text": "Konstas et al. (2017)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 350, |
|
"end": 377, |
|
"text": "(Pourdamghani et al., 2016)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 392, |
|
"end": 415, |
|
"text": "(Flanigan et al., 2016)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 425, |
|
"end": 444, |
|
"text": "(Song et al., 2016)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 459, |
|
"end": 481, |
|
"text": "(Konstas et al., 2017)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 497, |
|
"end": 516, |
|
"text": "(Song et al., 2018)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 531, |
|
"end": 556, |
|
"text": "(Damonte and Cohen, 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 571, |
|
"end": 596, |
|
"text": "(Damonte and Cohen, 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 656, |
|
"end": 678, |
|
"text": "(Konstas et al., 2017)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 699, |
|
"end": 721, |
|
"text": "(Konstas et al., 2017)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 741, |
|
"end": 760, |
|
"text": "(Song et al., 2018)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 781, |
|
"end": 800, |
|
"text": "(Song et al., 2018)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Metrics and Baselines", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "36.4 Table 1 : Test results of models. ''(2M)'' / ''(20M)'' denotes using the corresponding number of automatically labeled Gigaword data instances as additional training data.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 5, |
|
"end": 12, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Metrics and Baselines", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "graph-to-sequence approaches (GraphLSTM and GGNN), and hybrid approaches (t-GCNSEQ and g-GCNSEQ). PBMT (Pourdamghani et al., 2016) adopts a phrased-based machine translation model with the input of a linearized AMR graph. Tree2Str (Flanigan et al., 2016) converts AMR graphs into trees by splitting the reentrants and applies a treeto-string transducer to generate text. TSP (Song et al., 2016) solves the generation problem as a traveling salesman problem. S2S+Anon (Konstas et al., 2017 ) is a multi-layer attention-based bidirectional LSTM model, which is trained with anonymized data. S2S+Copy (Song et al., 2018) is also an attention-based LSTM model, but it instead uses the copy mechanism. GGNN (Beck et al., 2018 ) uses a gated graph neural network to encode the AMR graph and an RNN-based decoder to generate the text. GraphLSTM (Song et al., 2018) utilizes a graph state LSTM as the graph encoder and uses the copy mechanism instead of anonymization. T-GCNSEQ (Damonte and Cohen, 2019) also splits the reentrancies and applies stacking of the encoders to encode the tree, in which BiLSTM networks are used on top of the GCN for utilizing both the structure and sequential information. G-GCNSEQ has the same architecture as t-GCNSEQ, but it directly encodes the graph rather than the tree. Tree2Str, TSP, S2S+Anon, S2S+Copy, and GraphLSTM have been trained on LDC2015E86. PBMT has been trained on a previous release of the corpus (LDC2014T12). 2 Note that PBMT, Tree2Str, and TSP also train and use a language model based on an additional Gigaword corpus. GGNN has been trained on LDC2017T10. T-GCNSEQ and g-GCNSEQ have been trained on both LDC2015E86 and LDC2017T10. Table 1 summarizes the results of the models using LDC2015E86 as the gold training data. When trained only on the gold training data, our model achieves the best BLEU score of 25.9 among all the neural models and outperforms S2S+Anon by 3.9 BLEU points. Compared with the graph-tosequence model, GraphLSTM, our model is 2.6 BLEU points higher, which shows the superiority of our proposed architecture. Our model also outperforms hybrid models t-GCNSEQ and g-GCNSEQ by 2.0 points and 1.5 points, respectively. Comparing the two sequence-to-sequence neural models, Transformer underperforms the RNN-based model (S2S+Anon). This is in plain contrast to their performances in machine translation. The reason is attributed to the possible extreme length of the linearized AMR graph and difficulty in performing self-attention to obtain a good context representation of each token with a small training data. Our proposed Graph Transformer does not encounter this problem, which is significantly better than Transformer, and improves the BLEU score by more than 8 points. It also shows that our proposed deep architecture is even effective with a small training data. The results of statistical approaches PBMT, Tree2Str, and TSP are not strictly comparable because they use an additional Gigaword corpus to train the language model. Our model still outperforms the Tree2Str and TSP and performs close to the PBMT. Following the approach of Konstas et al. (2017) , we also evaluate our model using automatically labeled Gigaword data as additional training data. When using external data, the performance of our model is improved significantly. Utilizing 2M gigaword data, the performance of our model improves by 10.5. With the 2M additional data, our model achieves the new state-of-the-art BLEU score of 36.4 points, which is 4.7 and 2.8 points", |
|
"cite_spans": [ |
|
{ |
|
"start": 103, |
|
"end": 130, |
|
"text": "(Pourdamghani et al., 2016)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 231, |
|
"end": 254, |
|
"text": "(Flanigan et al., 2016)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 375, |
|
"end": 394, |
|
"text": "(Song et al., 2016)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 467, |
|
"end": 488, |
|
"text": "(Konstas et al., 2017", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 598, |
|
"end": 617, |
|
"text": "(Song et al., 2018)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 702, |
|
"end": 720, |
|
"text": "(Beck et al., 2018", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 838, |
|
"end": 857, |
|
"text": "(Song et al., 2018)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 3112, |
|
"end": 3133, |
|
"text": "Konstas et al. (2017)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1677, |
|
"end": 1684, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Metrics and Baselines", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "GGNN (Beck et al., 2018) 23.3 50.4 GGNN(ensemble) (Beck et al., 2018) 27.5 53.5 t-GCNSEQ (Damonte and Cohen, 2019) 24.1 \u2212 g-GCNSEQ (Damonte and Cohen, 2019) 24 higher than those of S2S+Copy and GraphLSTM using the same training data, respectively. Transformer achieves a BLEU score of 35.1, which is much higher compared with that achieved with the one trained on the gold data. This verifies the effectiveness of a deep neural model when the training dataset is sufficiently big. With 20M external data, the S2S+Anon obtains a BLEU score of 33.8, which is much worse than our model score. We speculate the performance can be further improved with a relatively larger number of external data; however, we do not attempt this owing to hardware limitations. Note that the CHRF++ score is not reported for these approaches in previous works; therefore, we do not compare it in this experiment. Table 2 lists the results of the models trained on LDC2017T10. Our model strongly outperforms GGNN, and improves the BLEU score by 6.0 points and the CHRF++ score by 8.6 points. Hybrid models t-GCNSEQ and g-GCNSEQ achieve BLEU scores of 24.1 and 24.5, which are 5.2 and 4.8 points lower than those of our model, respectively. Compared with the same model with smaller gold training data in Table 1 , the BLEU score of our model is also improved by 3.4 points and the scores of t-GCNSEQ and g-GCNSEQ are improved by only 0.2 and 0.1 points, respectively. This indicates that the performance of our model can easily benefit from more gold training data. Beck et al. (2018) also reported the scores of GGNN ensemble, which achieves a BLEU score of 27.5 and a CHRF++ score of 53.5; these scores are even much worse than those of our single model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 5, |
|
"end": 24, |
|
"text": "(Beck et al., 2018)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 50, |
|
"end": 69, |
|
"text": "(Beck et al., 2018)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 89, |
|
"end": 114, |
|
"text": "(Damonte and Cohen, 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 131, |
|
"end": 156, |
|
"text": "(Damonte and Cohen, 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 1543, |
|
"end": 1561, |
|
"text": "Beck et al. (2018)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 891, |
|
"end": 898, |
|
"text": "Table 2", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 1281, |
|
"end": 1288, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "BLEU CHRF++", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To evaluate the importance of the different components of our proposed Graph Transformer, we vary our model and perform both hyper-parameter and ablation studies. We train the models on both LDC2015E86 and LDC2017T10 and measure the performance changes on the development set, and the results are listed in Table 3 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 307, |
|
"end": 314, |
|
"text": "Table 3", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model Variations", |
|
"sec_num": "5.5" |
|
}, |
|
{ |
|
"text": "In Table 3 (A), we vary the number of transition steps (i.e., number of layers), L 1 , in the graph encoder. As we can see, the performance of our model increases as L 1 increases; however, it starts decreasing gradually when L 1 becomes larger than 8. Our model achieves the best performance when L 1 equals 8. This shows that incorporating the information from the nodes with a long distance can help improve capture of the global semantic information. The reason for this performance drop when L 1 is larger than 8 may be attributed to the over-fitting because the amount of training data is not large. In addition, we also compare the BLEU scores of our model and GraphLSTM with the same number of transition steps. These models are only trained on LDC2015E86. Results on the development set are shown in Figure 3 . Compared with the performance of the GraphLSTM, our model performs consistently and significantly better when L 1 , which varies from 1 to 10. This indicates that our proposed graph encoder has a stronger ability of utilizing both local and global semantic information.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 3", |
|
"ref_id": "TABREF6" |
|
}, |
|
{ |
|
"start": 809, |
|
"end": 817, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Hyper-Parameter Tuning", |
|
"sec_num": "5.5.1" |
|
}, |
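The role of the transition steps L1 discussed above can be illustrated with a toy aggregation loop: after L1 neighbor-aggregation steps, a node's state depends on nodes up to L1 hops away. The sketch below substitutes uniform neighbor averaging for the learned graph attention and is purely illustrative, not the paper's implementation.

```python
# Illustrative only: uniform averaging stands in for the learned graph attention.
import numpy as np

def transition_steps(adj, features, num_steps):
    """adj: (n, n) 0/1 adjacency (undirected here); features: (n, d)."""
    a = adj + np.eye(adj.shape[0])            # self-loops keep a node's own state
    a = a / a.sum(axis=1, keepdims=True)      # row-normalise: uniform "attention"
    h = features
    for _ in range(num_steps):
        h = a @ h                             # aggregate one-hop neighbours
    return h

# 4-node chain 0-1-2-3: node 3 only receives information from node 0 after 3 steps.
adj = np.zeros((4, 4))
for i, j in [(0, 1), (1, 2), (2, 3)]:
    adj[i, j] = adj[j, i] = 1
feats = np.eye(4)                             # one-hot features identify each node
for L1 in (1, 2, 3):
    h = transition_steps(adj, feats, L1)
    print(f"L1={L1}: node 3 sees node 0? {h[3, 0] > 0}")
```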
|
{ |
|
"text": "In Table 3 (B), we vary the number of layers in the decoder, L 2 . Our model achieves the best performance when L 2 equals 6, and its performance drops significantly when L 2 decreases. With few layers, the decoder might not be able to utilize the information provided by the graph encoder and generate fluent sentences. An extremely large L 2 also adversely affects the performance, particularly when training on the smaller dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 3", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Hyper-Parameter Tuning", |
|
"sec_num": "5.5.1" |
|
}, |
|
|
{ |
|
"text": "In Table 3 (C), we observe that larger models do not lead to better performance. We attribute the reason to the number of training pairs being quite small. In Table 3 (D), we observe that the models, trained on a small dataset, are extremely sensitive to the number of heads, d h . The singlehead attention is 1.9 BLEU points worse than the best setting. The performance also deceases with too many heads. Using more training data, our model becomes more stable and insensitive to d h . In Table 3 (E), we can see that a suitable rate of dropout is extremely helpful for avoiding over-fitting.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 3", |
|
"ref_id": "TABREF6" |
|
}, |
|
{ |
|
"start": 159, |
|
"end": 166, |
|
"text": "Table 3", |
|
"ref_id": "TABREF6" |
|
}, |
|
{ |
|
"start": 490, |
|
"end": 497, |
|
"text": "Table 3", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Hyper-Parameter Tuning", |
|
"sec_num": "5.5.1" |
|
}, |
|
{ |
|
"text": "We further perform an ablation study on two datasets to investigate the influence of the modules in the graph encoder. We fix the sentence decoder of our model because it is similar to the original one in Transformer. The modules in the graph encoder are tested by two methods: using a single representation for each node (i.e., the head representation and tail representation are updated with shared parameters), and using a single representation and performing the inseparate graph attention over the incoming and outgoing relations simultaneously (i.e., the output of the attention in Eq. 7", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ablation Study", |
|
"sec_num": "5.5.2" |
|
}, |
|
{ |
|
"text": "is g t i = d h x=1 ( j\u2208N in i N out i \u03b1 x ij r t ij W x v", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ablation Study", |
|
"sec_num": "5.5.2" |
|
}, |
|
{ |
|
"text": ")W o and the fusion layer is discarded). These modifications test the effectiveness of the separate graph attentions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ablation Study", |
|
"sec_num": "5.5.2" |
|
}, |
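To make the contrast concrete, the following single-head numpy sketch compares the separate graph attentions with a fusion layer against the inseparate variant of Eq. 7, which attends once over the union of incoming and outgoing relations with a single set of parameters. Dimensions, the scoring function, and the random parameters are illustrative assumptions, not the paper's actual implementation; in the full model, d_h such heads would be computed and concatenated before the output projection.

```python
# Illustrative single-head contrast: separate attentions over incoming and
# outgoing relations plus a fusion layer, versus one "inseparate" attention
# over N_i^in U N_i^out (all parameters here are random and for illustration).
import numpy as np

rng = np.random.default_rng(0)
d = 8

def softmax(x):
    e = np.exp(x - x.max())
    return e / e.sum()

def attend(query, relations, W_v):
    """relations: (k, d) vectors r_ij for one node's neighbourhood."""
    scores = softmax(relations @ query)        # simplified pairwise scores
    return scores @ (relations @ W_v)          # weighted, projected values

# toy neighbourhood of node i: 2 incoming and 3 outgoing relation vectors
r_in  = rng.normal(size=(2, d))
r_out = rng.normal(size=(3, d))
q     = rng.normal(size=d)

# --- separate attentions + fusion (as in the full model) --------------------
W_v_in, W_v_out = rng.normal(size=(d, d)), rng.normal(size=(d, d))
W_fuse = rng.normal(size=(2 * d, d))
g_in  = attend(q, r_in,  W_v_in)
g_out = attend(q, r_out, W_v_out)
g_sep = np.concatenate([g_in, g_out]) @ W_fuse     # fusion layer

# --- inseparate attention (ablation): one attention over the union ----------
W_v = rng.normal(size=(d, d))
g_insep = attend(q, np.concatenate([r_in, r_out], axis=0), W_v)

print(g_sep.shape, g_insep.shape)                  # both (8,)
```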
|
{ |
|
"text": "The results are presented in Table 3 (F). We can see that using a single representation for each node results in a loss of 0.4 and 0.5 BLEU points on the two datasets, respectively. It indicates that learning the head representation and tail representation for each node is helpful. We further observe that without separated graph attentions (i.e., in inseparate graphattention), the performance of our model drops, suffering a loss of 2.4 BLEU points on the LDC2015E86 dataset and 1.2 on LDC2017T10. We consider that the relations represented by the incoming edges and outgoing edges are different. Moreover, projecting them into the same space for the graph attentions might cause confusion, particularly when the number of training data is small. Separate graph attentions can help the model better capture the semantics.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 29, |
|
"end": 36, |
|
"text": "Table 3", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ablation Study", |
|
"sec_num": "5.5.2" |
|
}, |
|
{ |
|
"text": "Compared with the prior methods, there are two changes in our model: the graph encoder and the Transformer decoder. To study the influences of the different encoders and decoders, we implement three encoders (RNN encoder, Transformer +3.9 +2.1 +0.7 Graph Transformer +6.0 +4.5 +3.7 Table 5 : Counts of AMR graphs with different depth for the test split and the BLEU scores of different models on these graphs. encoder, and our graph encoder) and two decoders (RNN decoder and Transformer decoder). We also perform a study of their combinations. Table 4 presents the results. We find an interesting phenomenon that simply mixing Transformer-based networks with RNNs can lead to a large decrease in the performance. Irrespective of replacing the RNN decoder with the Transformer decoder in S2S or replacing the Transformer decoder with the RNN decoder in Transformer and Graph Transformer, the replaced models perform much worse than the original ones. This indicates that there is a mismatch in using an RNN (or Transformer) to decode a sentence from the representations encoded by Transformer-based networks (or RNNs). The superior performance of our model is owing to the interplay of the Transformer-based graph encoder and the Transformer decoder.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1003, |
|
"end": 1023, |
|
"text": "RNN (or Transformer)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 282, |
|
"end": 289, |
|
"text": "Table 5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 545, |
|
"end": 552, |
|
"text": "Table 4", |
|
"ref_id": "TABREF8" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ablation Study", |
|
"sec_num": "5.5.2" |
|
}, |
|
{ |
|
"text": "To study the advantages of our proposed model against prior sequence-to-sequence models and graph models further, we compare the results of the models on different sizes and structures of the AMR graphs. All the models are trained on LDC2015E86. We consider the size and structure of a graph in three approaches: depth, number of edges, and number of reentrancies. The depth of an AMR graph is defined as the longest distance between the AMR node and its root. The deeper the graph, the longer the dependency. Table 5 lists the counts of the AMR graphs with different depths for the test split and the results of different models on these graphs. We can see that the graph models outperform the sequence-to-sequence model, but the gap narrows when the depth increases. GraphLSTM outperforms the S2S by 3.9 points when the depth is less than 6, and the gap is only 0.7 points when the depth is larger than 10. Compared with GraphLSTM, Graph Transformer returns better performance on deeper graphs, which shows that our model is more powerful for capturing long-term dependencies.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 510, |
|
"end": 517, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Performance Against Size and Structure of AMR Graph", |
|
"sec_num": "5.6" |
|
}, |
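The depth statistic used here, the longest distance from the root to any node, can be computed directly from an AMR's concept-to-concept edges. The sketch below hand-encodes the edges of the first example AMR in Table 9 for illustration; the helper name and edge encoding are assumptions.

```python
# Depth of an AMR graph = longest root-to-node distance over the (acyclic)
# concept-to-concept edges. The edge list hand-encodes the have-condition-91 example.
from functools import lru_cache

def amr_depth(root, edges):
    adj = {}
    for s, t in edges:
        adj.setdefault(s, []).append(t)

    @lru_cache(maxsize=None)
    def longest(u):
        children = adj.get(u, [])
        return 0 if not children else 1 + max(longest(v) for v in children)

    return longest(root)

edges = [("h", "w"), ("w", "y"), ("w", "h2"),                  # :ARG1 work-01 and its arguments
         ("h", "w2"), ("w2", "y"), ("w2", "o"), ("w2", "r")]   # :ARG2 want-01 and its arguments
print(amr_depth("h", edges))   # -> 2
```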
|
{ |
|
"text": "The edges in an AMR graph represent the semantic relations of the concepts. The more the edges in the graph, the more semantic information is represented and usually the larger the graph. the corresponding BLEU scores of the different models. We observe that all the models have much better performances on small graphs than on large ones. Similar to the phenomena based on Table 4 , our model shows a stronger ability in dealing with more semantic relations than GraphLSTM.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 374, |
|
"end": 381, |
|
"text": "Table 4", |
|
"ref_id": "TABREF8" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Performance Against Size and Structure of AMR Graph", |
|
"sec_num": "5.6" |
|
}, |
|
{ |
|
"text": "Following Damonte and Cohen (2019) , we also study the influence of the reentrancies in the graph. Reentrancies represent the co-references and control structures in AMR and make it a graph rather than a tree. A graph with more reentrancies is typically more complex. From Table 7 , we can see that the performance of all the models drop significantly when the number of reentrancies increases. With more reentrancies, the lead of the graph-to-sequence models over the sequenceto-sequence model also narrows. We consider that this is because the reentrancies increase the complexity of the graph structure and make the graph models difficult to learn the semantic representation. Our model exhibits an extremely strong performance when the input degrades into a tree. This is because we use two graph attentions over the incoming and outgoing edges, respectively, and only one incoming edge makes the model easy to learn and train. In addition, our model outperforms S2S by 2.9 points when the input graphs have more than 3 reentrancies. In comparison, GraphLSTM achieves nearly an identical result to that of S2S, which indicates that our proposed encoder is also better in dealing with complex graphs than GraphLSTM.", |
|
"cite_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 34, |
|
"text": "Damonte and Cohen (2019)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 273, |
|
"end": 280, |
|
"text": "Table 7", |
|
"ref_id": "TABREF10" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Performance Against Size and Structure of AMR Graph", |
|
"sec_num": "5.6" |
|
}, |
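Reentrancies can likewise be counted as nodes with more than one incoming edge, which is how graphs would be bucketed for an analysis like Table 7. The sketch re-uses the hand-encoded edge list from the depth example and is illustrative only.

```python
# A reentrancy is a node with more than one incoming edge (here, "y" / you,
# which is the ARG0 of both work-01 and want-01).
from collections import Counter

def count_reentrancies(edges):
    indegree = Counter(t for _, t in edges)
    return sum(1 for _, k in indegree.items() if k > 1)

edges = [("h", "w"), ("w", "y"), ("w", "h2"),
         ("h", "w2"), ("w2", "y"), ("w2", "o"), ("w2", "r")]
print(count_reentrancies(edges))   # -> 1
```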
|
{ |
|
"text": "We perform case studies to provide a better understanding of the model performance. We compare the outputs of S2S, GraphLSTM, and our Graph Transformer trained on the gold data of LDC2015E86. We observe that there are several common error types in the outputs of these systems: 1) generating unnatural language or unreadable sentences; 2) missing information from the input graph; 3) generating words or tokens inconsistent with the given semantic representation (i.e., mistranslation of the nodes in the graph); 4) mixing the semantic relations between the entities (i.e., mistranslation of the edges in the graph).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Case Study", |
|
"sec_num": "5.7" |
|
}, |
|
{ |
|
"text": "To exhibit how systematic these errors are explicitly, we manually evaluate 50 randomly sampled outputs from each compared system, and count the ratio of the outputs with each error type. Note that these four types of errors are not mutually exclusive. Table 8 lists the results. We can clearly see that these four types of errors occur in all the three systems, and Graph Transformer performs the best by comparison. Compared with S2S and GraphLSTM, our model Graph Transformer significantly covers more information from the input graph. All the models make more mistakes on the fluency aspect than on other three aspects. This is because both the missing information from the input graph and the mistranslation of the nodes and edges can cause a generated sentence to be unnatural or unreadable.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 253, |
|
"end": 260, |
|
"text": "Table 8", |
|
"ref_id": "TABREF12" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Case Study", |
|
"sec_num": "5.7" |
|
}, |
|
{ |
|
"text": "In addition, we present several example outputs in Table 9 . AMR denotes the input graph and Ref denotes the reference output sentence.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 51, |
|
"end": 58, |
|
"text": "Table 9", |
|
"ref_id": "TABREF13" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Case Study", |
|
"sec_num": "5.7" |
|
}, |
|
{ |
|
"text": "In the first case, we can see that S2S fails to generate a fluent sentence. It also omits the concept, work, and therefore, adversely affects the semantic relation between you and hard. GraphLSTM omits the adverb hard and generates an adverb really for verb work, which is only supposed to modify verb want. Graph Transformer generates a basically correct answer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Case Study", |
|
"sec_num": "5.7" |
|
}, |
|
{ |
|
"text": "In the second case, the AMR graph is more complex. S2S mistranslates the concept, stand, as take away and omits adjective passive. The verb, plant, is also omitted, which might be caused by the long distance between plant and pressure in the linearized input. Moreover, the entire sentence is unreadable owing to numerous grammar mistakes. GraphLSTM also omits the concept, passive, and fails to generate the clause headed by stand. In addition, it makes a mistake at the conjunction between the pressure and AMR: (h / have-condition-91 :ARG1 (w / work-01 :ARG0 (y / you) :ARG1-of (h2 / hard-02)) :ARG2 (w2 / want-01 :ARG0 y :ARG2 (o / out) :mod (r / really)))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Case Study", |
|
"sec_num": "5.7" |
|
}, |
|
{ |
|
"text": "Ref:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Case Study", |
|
"sec_num": "5.7" |
|
}, |
|
{ |
|
"text": "Work hard if you really want out. S2S: If you really want to want out you are hard. GraphLSTM: If you really want out, you really work. Graph Transformer: If you really want out, then you 'll work hard. AMR: (a / and :op1 (s / stand-11 :ARG0 (c / criminal-organization :wiki ''Taliban'' :name (n / name :op1 ''Taliban'')) :ARG1 (p / passive) :ARG2 (c2 / cultivate-01) :time (y / year :mod (t2 / this))) :op2 (p2 / pressure-01 :ARG0 c :ARG1 (p5 / person :mod (c3 / country :wiki ''Afghanistan'' :name (n2 / name :op1 ''Afghanistan'')) :ARG0-of (f / farm-01)) :ARG2 (p3 / plant-01 :ARG0 p5 :ARG1 (p4 / poppy :mod (o / opium))) :degree (l / less))) Ref:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Case Study", |
|
"sec_num": "5.7" |
|
}, |
|
{ |
|
"text": "The Taliban this year are taking a passive stance toward cultivation and putting less pressure on Afghan farmers to plant opium poppy. S2S: the Taliban will take away with cultivation of cultivate this year and pressure on Afghan farmers less with opium poppy in them. GraphLSTM: The Taliban has been standing in the cultivation this year and less pressure from the Afghan farmers to plant opium poppies. Graph Transformer: The Taliban has stood passive in cultivation this year and has less pressured Afghan farmers to plant opium poppies. AMR: (p / participate-01 :ARG0 (p2 / person :quant 40 :ARG1-of (e / employ-01 :ARG0 (c / company :mod (o / oil) :mod (s / state))) :accompanier (s2 / soldier :quant 1200)) :ARG1 (e2 / exercise :ARG1-of (r / resemble-01)) :time (d / date-entity :year 2005 :month 6)) Ref: 40 employees of the state oil company participated in a similar exercise with 1200 soldiers in 050600. S2S: in June 2005 40 people's state company with 1200 soldiers were part of a similar exercise. GraphLSTM: 40 state of oil companies using 1200 soldiers have participated in similar exercises in 6. Graph Transformer: In June 2005 40 oil companies have participated in similar exercises with 1200 soldiers. f armers. Our model does not omit any concept in the graph, but the generated sentence is not highly fluent. It treats the stand and pressure as predicates and fails to generate take and put because they are explicitly given in the graph.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Case Study", |
|
"sec_num": "5.7" |
|
}, |
|
{ |
|
"text": "In the last case, all three models fail to capture the concept, employ, and disturbs the relations between person, employ, and company. S2S omits the adjective, oil, and mistranslates the concept, participate, as part of. GraphLSTM is completely confused in this case and even fails to generate the word, June, from the relation, :month 6. Our Graph Transformer correctly generates the sentence constituents other than the subject.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Case Study", |
|
"sec_num": "5.7" |
|
}, |
|
{ |
|
"text": "Specifically, the four types of errors occur in all the three models, particularly when the input graph is complex. Compared with S2S and GraphLSTM, our model is less likely to miss the information from the input, and it can generate sentences with high quality, in terms of the fluency and fidelity to the input semantics.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Case Study", |
|
"sec_num": "5.7" |
|
}, |
|
{ |
|
"text": "In this study, we present a novel graph network (Graph Transformer) for AMR-to-text generation. Our model is solely based on the attention mechanism. Our proposed graph attentions over the neighbor nodes, and the corresponding edges are used for learning the representations of the nodes and capturing global information. The experimental results shows that our model significantly outperforms the prior neural models and achieves a new state-of-the-art performance on benchmark datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "In future work, we will incorporate BERT embeddings and multi-task learnings to improve the performance further. We will also apply Graph Transformer to other related text generation tasks like MRS-to-text generation, data-to-text generation, and image captioning.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "https://www.cs.rochester.edu/\u223clsong10/ downloads/2m.json.gz.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The LDC2014T12 dataset contains 10,313 instances for the training and the same instances for the development and test as in case of LDC2015E86.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work was supported by National Natural Science Foundation of China (61772036) and Key Laboratory of Science, Technology and Standard in Press Industry (Key Laboratory of Intelligent Press Media Technology). We thank the anonymous reviewers for their helpful comments. Xiaojun Wan is the corresponding author.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Relational inductive biases, deep learning, and graph networks", |
|
"authors": [ |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Battaglia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jessica", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Hamrick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Bapst", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alvaro", |
|
"middle": [], |
|
"last": "Sanchezgonzalez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vinicius", |
|
"middle": [], |
|
"last": "Flores Zambaldi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mateusz", |
|
"middle": [], |
|
"last": "Malinowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrea", |
|
"middle": [], |
|
"last": "Tacchetti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Raposo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Santoro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Faulkner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter Battaglia, Jessica B. Hamrick, Victor Bapst, Alvaro Sanchezgonzalez, Vinicius Flores Zambaldi, Mateusz Malinowski, Andrea Tacchetti, David Raposo, Adam Santoro, Ryan Faulkner, and others. 2018. Relational induc- tive biases, deep learning, and graph networks. arXiv preprint: 1806.01261.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Graph-to-sequence learning using gated graph neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Beck", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gholamreza", |
|
"middle": [], |
|
"last": "Haffari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trevor", |
|
"middle": [], |
|
"last": "Cohn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "273--283", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Beck, Gholamreza Haffari, and Trevor Cohn. 2018. Graph-to-sequence learning using gated graph neural networks. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), volume 1, pages 273-283.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Spectral networks and locally connected networks on graphs. International Conference on Learning Representations", |
|
"authors": [ |
|
{ |
|
"first": "Joan", |
|
"middle": [], |
|
"last": "Bruna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wojciech", |
|
"middle": [], |
|
"last": "Zaremba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arthur", |
|
"middle": [], |
|
"last": "Szlam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yann", |
|
"middle": [], |
|
"last": "Lecun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joan Bruna, Wojciech Zaremba, Arthur Szlam, and Yann Lecun. 2014. Spectral networks and locally connected networks on graphs. International Conference on Learning Repre- sentations.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Structural neural encoders for amr-to-text generation", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Damonte", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shay", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Cohen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1903.11410v1" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Damonte and Shay B. Cohen. 2019. Struc- tural neural encoders for amr-to-text generation. arXiv preprint arXiv:1903.11410v1.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Bert: Pretraining of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.04805" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre- training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Convolutional networks on graphs for learning molecular fingerprints", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Duvenaud", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dougal", |
|
"middle": [], |
|
"last": "Maclaurin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jorge", |
|
"middle": [], |
|
"last": "Iparraguirre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rafael", |
|
"middle": [], |
|
"last": "Bombarell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Hirzel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Al\u00e1n", |
|
"middle": [], |
|
"last": "Aspuru-Guzik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Adams", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2224--2232", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David K. Duvenaud, Dougal Maclaurin, Jorge Iparraguirre, Rafael Bombarell, Timothy Hirzel, Al\u00e1n Aspuru-Guzik, and Ryan P. Adams. 2015. Convolutional networks on graphs for learning molecular fingerprints. In Advances in Neural Information Processing Systems, pages 2224-2232.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Generation from abstract meaning representation using tree transducers", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Flanigan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "731--739", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Flanigan, Chris Dyer, Noah A. Smith, and Jaime Carbonell. 2016. Generation from abstract meaning representation using tree transducers. In Proceedings of the 2016 Con- ference of the North American Chapter of the Association for Computational Linguistics: Hu- man Language Technologies, pages 731-739.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "A new model for learning in graph domains", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Gori", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gabriele", |
|
"middle": [], |
|
"last": "Monfardini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Franco", |
|
"middle": [], |
|
"last": "Scarselli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings. 2005 IEEE International Joint Conference on Neural Networks", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "729--734", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Gori, Gabriele Monfardini, and Franco Scarselli. 2005. A new model for learning in graph domains. In Proceedings. 2005 IEEE International Joint Conference on Neural Networks, 2005., volume 2, pages 729-734. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Rigotrio at SemEval-2017 task 9: combining machine learning and grammar engineering for amr parsing and generation", |
|
"authors": [ |
|
{ |
|
"first": "Normunds", |
|
"middle": [], |
|
"last": "Gruzitis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Didzis", |
|
"middle": [], |
|
"last": "Gosko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guntis", |
|
"middle": [], |
|
"last": "Barzdins", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 11th International Workshop on Semantic Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Normunds Gruzitis, Didzis Gosko, and Guntis Barzdins. 2017. Rigotrio at SemEval-2017 task 9: combining machine learning and grammar engineering for amr parsing and generation. In Proceedings of the 11th International Work- shop on Semantic Evaluation (SemEval-2017). Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Non-autoregressive neural machine translation. International Conference on Learning Representations", |
|
"authors": [ |
|
{ |
|
"first": "Jiatao", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Bradbury", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caiming", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Victor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiatao Gu, James Bradbury, Caiming Xiong, Victor O. K. Li, and Richard Socher. 2018. Non-autoregressive neural machine transla- tion. International Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Incorporating copying mechanism in sequence-to-sequence learning", |
|
"authors": [ |
|
{ |
|
"first": "Jiatao", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhengdong", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Victor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1603.06393" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiatao Gu, Zhengdong Lu, Hang Li, and Victor O. K. Li. 2016. Incorporating copying mechanism in sequence-to-sequence learning. arXiv preprint arXiv:1603.06393.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Liberal event extraction and event schema induction", |
|
"authors": [ |
|
{ |
|
"first": "Lifu", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Taylor", |
|
"middle": [], |
|
"last": "Cassidy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaocheng", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heng", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clare", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Voss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiawei", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Avirup", |
|
"middle": [], |
|
"last": "Sil", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "258--268", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lifu Huang, Taylor Cassidy, Xiaocheng Feng, Heng Ji, Clare R. Voss, Jiawei Han, and Avirup Sil. 2016. Liberal event extraction and event schema induction. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), volume 1, pages 258-268.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Semantics-based machine translation with hyperedge replacement grammars", |
|
"authors": [ |
|
{ |
|
"first": "Bevan", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Andreas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Bauer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karl", |
|
"middle": [ |
|
"Moritz" |
|
], |
|
"last": "Hermann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Knight", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of COLING 2012", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1359--1376", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bevan Jones, Jacob Andreas, Daniel Bauer, Karl Moritz Hermann, and Kevin Knight. 2012. Semantics-based machine translation with hyperedge replacement grammars. Proceedings of COLING 2012, pages 1359-1376.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Diederik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P. Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. Interna- tional Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Semisupervised classification with graph convolutional networks", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Thomas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Max", |
|
"middle": [], |
|
"last": "Kipf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Welling", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas N. Kipf and Max Welling. 2017. Semi- supervised classification with graph convolu- tional networks. International Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Neural amr: Sequence-to-sequence models for parsing and generation", |
|
"authors": [ |
|
{ |
|
"first": "Ioannis", |
|
"middle": [], |
|
"last": "Konstas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Srinivasan", |
|
"middle": [], |
|
"last": "Iyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Yatskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "146--157", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ioannis Konstas, Srinivasan Iyer, Mark Yatskar, Yejin Choi, and Luke Zettlemoyer. 2017. Neural amr: Sequence-to-sequence models for parsing and generation. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), volume 1, pages 146-157.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Sheffield at SemEval-2017 task 9: Transition-based language generation from AMR", |
|
"authors": [ |
|
{ |
|
"first": "Gerasimos", |
|
"middle": [], |
|
"last": "Lampouras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Vlachos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 11th International Workshop on Semantic Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "586--591", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gerasimos Lampouras and Andreas Vlachos. 2017. Sheffield at SemEval-2017 task 9: Transition-based language generation from AMR. In Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval- 2017), pages 586-591.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Gated graph sequence neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Yujia", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Tarlow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc", |
|
"middle": [], |
|
"last": "Brockschmidt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Zemel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1511.05493" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yujia Li, Daniel Tarlow, Marc Brockschmidt, and Richard Zemel. 2015. Gated graph sequence neural networks. arXiv preprint arXiv:1511.05493.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Toward abstractive summarization using semantic representations", |
|
"authors": [ |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Flanigan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Thomson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Norman", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Sadeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "North American Chapter of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1077--1086", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fei Liu, Jeffrey Flanigan, Sam Thomson, Norman M. Sadeh, and Noah A. Smith. 2015. Toward abstractive summarization using se- mantic representations. North American Chapter of the Association for Computational Lin- guistics, pages 1077-1086.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Addressing a question answering challenge by combining statistical methods with inductive rule learning and reasoning", |
|
"authors": [ |
|
{ |
|
"first": "Arindam", |
|
"middle": [], |
|
"last": "Mitra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chitta", |
|
"middle": [], |
|
"last": "Baral", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Thirtieth AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arindam Mitra and Chitta Baral. 2016. Addressing a question answering challenge by combining statistical methods with inductive rule learning and reasoning. In Thirtieth AAAI Conference on Artificial Intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "BLEU: A method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "311--318", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. BLEU: A method for automatic evaluation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pages 311-318. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "GloVe: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. GloVe: Global vectors for word representation. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1532-1543.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "CHRF++: Words helping character n-grams", |
|
"authors": [ |
|
{ |
|
"first": "Maja", |
|
"middle": [], |
|
"last": "Popovi\u0107", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Second Conference on Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "612--618", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maja Popovi\u0107. 2017. CHRF++: Words helping character n-grams. In Proceedings of the Second Conference on Machine Translation, pages 612-618.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Generating English from abstract meaning representations", |
|
"authors": [ |
|
{ |
|
"first": "Nima", |
|
"middle": [], |
|
"last": "Pourdamghani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Knight", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ulf", |
|
"middle": [], |
|
"last": "Hermjakob", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 9th International Natural Language Generation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "21--25", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nima Pourdamghani, Kevin Knight, and Ulf Hermjakob. 2016. Generating English from abstract meaning representations. In Proceed- ings of the 9th International Natural Language Generation Conference, pages 21-25.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "The graph neural network model", |
|
"authors": [ |
|
{ |
|
"first": "Franco", |
|
"middle": [], |
|
"last": "Scarselli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Gori", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ah", |
|
"middle": [], |
|
"last": "Chung Tsoi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Markus", |
|
"middle": [], |
|
"last": "Hagenbuchner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gabriele", |
|
"middle": [], |
|
"last": "Monfardini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "IEEE Transactions on Neural Networks", |
|
"volume": "20", |
|
"issue": "1", |
|
"pages": "61--80", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Franco Scarselli, Marco Gori, Ah Chung Tsoi, Markus Hagenbuchner, and Gabriele Monfardini. 2009. The graph neural network model. IEEE Transactions on Neural Networks, 20(1):61-80.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Semantic neural machine translation using AMR", |
|
"authors": [ |
|
{ |
|
"first": "Linfeng", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Gildea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiguo", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jinsong", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "7", |
|
"issue": "", |
|
"pages": "19--31", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Linfeng Song, Daniel Gildea, Yue Zhang, Zhiguo Wang, and Jinsong Su. 2019. Semantic neural machine translation using AMR. Transactions of the Association for Compu- tational Linguistics, 7:19-31.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "AMR-to-text generation with synchronous node replacement grammar", |
|
"authors": [ |
|
{ |
|
"first": "Linfeng", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaochang", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiguo", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Gildea", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "7--13", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Linfeng Song, Xiaochang Peng, Yue Zhang, Zhiguo Wang, and Daniel Gildea. 2017. AMR-to-text generation with synchronous node replacement grammar. Meeting of the Association for Computational Linguistics, 2:7-13.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Amrto-text generation as a traveling salesman problem", |
|
"authors": [ |
|
{ |
|
"first": "Linfeng", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaochang", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiguo", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Gildea", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2084--2089", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Linfeng Song, Yue Zhang, Xiaochang Peng, Zhiguo Wang, and Daniel Gildea. 2016. Amr- to-text generation as a traveling salesman problem. In Proceedings of the 2016 Confer- ence on Empirical Methods in Natural Lan- guage Processing, pages 2084-2089.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "A graph-to-sequence model for AMR-to-text generation", |
|
"authors": [ |
|
{ |
|
"first": "Linfeng", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiguo", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Gildea", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1616--1626", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Linfeng Song, Yue Zhang, Zhiguo Wang, and Daniel Gildea. 2018. A graph-to-sequence model for AMR-to-text generation. In Proceed- ings of the 56th Annual Meeting of the Asso- ciation for Computational Linguistics (Volume 1: Long Papers), volume 1, pages 1616-1626.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Neural headline generation on abstract meaning representation", |
|
"authors": [ |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Sho Takase", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naoaki", |
|
"middle": [], |
|
"last": "Suzuki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tsutomu", |
|
"middle": [], |
|
"last": "Okazaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Masaaki", |
|
"middle": [], |
|
"last": "Hirao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Nagata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1054--1059", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sho Takase, Jun Suzuki, Naoaki Okazaki, Tsutomu Hirao, and Masaaki Nagata. 2016. Neural headline generation on abstract meaning representation. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 1054-1059.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Processing Systems, pages 5998-6008.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Graph attention networks. International Conference on Learning Representations", |
|
"authors": [ |
|
{ |
|
"first": "Petar", |
|
"middle": [], |
|
"last": "Velickovic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillem", |
|
"middle": [], |
|
"last": "Cucurull", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arantxa", |
|
"middle": [], |
|
"last": "Casanova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adriana", |
|
"middle": [], |
|
"last": "Romero", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pietro", |
|
"middle": [], |
|
"last": "Lio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Petar Velickovic, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Lio, and Yoshua Bengio. 2018. Graph attention net- works. International Conference on Learn- ing Representations.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Improving the transformer translation model with document-level context", |
|
"authors": [ |
|
{ |
|
"first": "Jiacheng", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Huanbo", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Feifei", |
|
"middle": [], |
|
"last": "Zhai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfang", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Min", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "533--542", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiacheng Zhang, Huanbo Luan, Maosong Sun, Feifei Zhai, Jingfang Xu, Min Zhang, and Yang Liu. 2018. Improving the transformer translation model with document-level context. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 533-542.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Development results of Graph Transformer and GraphLSTM against transition steps in the graph encoder.", |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"text": "emb \u00d7d model and b e \u2208 R d model are the parameters, and d emb is the dimension of the embedding.", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF4": { |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"text": "Test results of models trained on LDC2017T10.", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF6": { |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"text": "Development results of the variations on Graph Transformer. Unlisted values are identical to those of the base model. Both models trained on LDC2015E86 and LDC2017T10 are evaluated.", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF8": { |
|
"type_str": "table", |
|
"content": "<table><tr><td colspan=\"4\">A+B represents the model with A encoder and</td></tr><tr><td colspan=\"4\">B decoder. RNN encoder and decoder are abbre-</td></tr><tr><td colspan=\"4\">viated to RNN, Transformer encoder and decoder</td></tr><tr><td colspan=\"4\">are abbreviated to TFM and our Graph encoder are</td></tr><tr><td>abbreviated to Graph.</td><td/><td/><td/></tr><tr><td>Model</td><td colspan=\"3\">Depth 1\u22125 6\u221210 11\u2212</td></tr><tr><td>#count</td><td>278</td><td>828</td><td>265</td></tr><tr><td>S2S</td><td colspan=\"3\">37.2 21.2 19.3</td></tr><tr><td>GraphLSTM</td><td/><td/><td/></tr></table>", |
|
"text": "Development results of models with three different encoders and two different decoders.", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF10": { |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"text": "Counts of AMR graphs with different number of reentrancies for the test split and the BLEU scores of different models on these graphs.", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF11": { |
|
"type_str": "table", |
|
"content": "<table><tr><td>Model</td><td colspan=\"4\">Unnatural Language % Missing Information % Node Mistranslation % Edge Mistranslation %</td></tr><tr><td>S2S</td><td>62.0</td><td>60.0</td><td>34.0</td><td>52.0</td></tr><tr><td>GraphLSTM</td><td>54.0</td><td>54.0</td><td>28.0</td><td>46.0</td></tr><tr><td>Graph Transformer</td><td>48.0</td><td>44.0</td><td>28.0</td><td>40.0</td></tr></table>", |
|
"text": "lists the counts of the AMR graphs with different number of edges for the test split and", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF12": { |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"text": "The ratio of outputs with each error type for each compared system. Lower percentage is better.", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF13": { |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"text": "Example outputs of different systems are compared, including S2S, GraphLSTM, and Graph Transformer.", |
|
"num": null, |
|
"html": null |
|
} |
|
} |
|
} |
|
} |