|
{ |
|
"paper_id": "D08-1010", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T16:29:47.420616Z" |
|
}, |
|
"title": "Maximum Entropy based Rule Selection Model for Syntax-based Statistical Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Qun", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Chinese Academy of Sciences", |
|
"location": { |
|
"postCode": "100190", |
|
"settlement": "Beijing", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Zhongjun", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Chinese Academy of Sciences", |
|
"location": { |
|
"postCode": "100190", |
|
"settlement": "Beijing", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Chinese Academy of Sciences", |
|
"location": { |
|
"postCode": "100190", |
|
"settlement": "Beijing", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Shouxun", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Chinese Academy of Sciences", |
|
"location": { |
|
"postCode": "100190", |
|
"settlement": "Beijing", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper proposes a novel maximum entropy based rule selection (MERS) model for syntax-based statistical machine translation (SMT). The MERS model combines local contextual information around rules and information of sub-trees covered by variables in rules. Therefore, our model allows the decoder to perform context-dependent rule selection during decoding. We incorporate the MERS model into a state-of-the-art linguistically syntax-based SMT model, the treeto-string alignment template model. Experiments show that our approach achieves significant improvements over the baseline system.", |
|
"pdf_parse": { |
|
"paper_id": "D08-1010", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper proposes a novel maximum entropy based rule selection (MERS) model for syntax-based statistical machine translation (SMT). The MERS model combines local contextual information around rules and information of sub-trees covered by variables in rules. Therefore, our model allows the decoder to perform context-dependent rule selection during decoding. We incorporate the MERS model into a state-of-the-art linguistically syntax-based SMT model, the treeto-string alignment template model. Experiments show that our approach achieves significant improvements over the baseline system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Syntax-based statistical machine translation (SMT) models (Liu et al., 2006; Galley et al., 2006; Huang et al., 2006) capture long distance reorderings by using rules with structural and linguistical information as translation knowledge. Typically, a translation rule consists of a source-side and a target-side. However, the source-side of a rule usually corresponds to multiple target-sides in multiple rules. Therefore, during decoding, the decoder should select a correct target-side for a source-side. We call this rule selection.", |
|
"cite_spans": [ |
|
{ |
|
"start": 58, |
|
"end": 76, |
|
"text": "(Liu et al., 2006;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 77, |
|
"end": 97, |
|
"text": "Galley et al., 2006;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 98, |
|
"end": 117, |
|
"text": "Huang et al., 2006)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Rule selection is of great importance to syntaxbased SMT systems. Comparing with word selection in word-based SMT and phrase selection in phrase-based SMT, rule selection is more generic and important. This is because that a rule not only contains terminals (words or phrases), but also con-", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "NP DNP NP X 1 DEG NPB NN X 2 NN NP DNP NP X 1 DEG NPB NN X 2 NN X 1 X 2 levels X 2 standard of X 1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Figure 1: Example of translation rules tains nonterminals and structural information. Terminals indicate lexical translations, while nonterminals and structural information can capture short or long distance reorderings. See rules in Figure 1 for illustration. These two rules share the same syntactic tree on the source side. However, on the target side, either the translations for terminals or the phrase reorderings for nonterminals are quite different. During decoding, when a rule is selected and applied to a source text, both lexical translations (for terminals) and reorderings (for nonterminals) are determined. Therefore, rule selection affects both lexical translation and phrase reordering. However, most of the current syntax-based systems ignore contextual information when they selecting rules during decoding, especially the information of sub-trees covered by nonterminals. For example, the information of X 1 and X 2 is not recorded when the rules in Figure 1 extracted from the training examples in Figure 2 . This makes the decoder hardly distinguish the two rules. Intuitively, information of sub-trees covered by nonterminals as well as contextual information of rules are believed to be helpful for rule selection. Recent research showed that contextual information can help perform word or phrase selection. Carpuat and Wu (2007b) and Chan et al. (2007) showed improvents by integrating wordsense-disambiguation (WSD) system into a phrasebased (Koehn, 2004 ) and a hierarchical phrasebased (Chiang, 2005) SMT system, respectively. Similar to WSD, Carpuat and Wu (2007a) used contextual information to solve the ambiguity problem for phrases. They integrated a phrase-sensedisambiguation (PSD) model into a phrase-based SMT system and achieved improvements.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1333, |
|
"end": 1355, |
|
"text": "Carpuat and Wu (2007b)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 1360, |
|
"end": 1378, |
|
"text": "Chan et al. (2007)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1469, |
|
"end": 1481, |
|
"text": "(Koehn, 2004", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1515, |
|
"end": 1529, |
|
"text": "(Chiang, 2005)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1572, |
|
"end": 1594, |
|
"text": "Carpuat and Wu (2007a)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 234, |
|
"end": 242, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 970, |
|
"end": 978, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1019, |
|
"end": 1027, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we propose a novel solution for rule selection for syntax-based SMT. We use the maximum entropy approach to combine rich contextual information around a rule and the information of sub-trees covered by nonterminals in a rule. For each ambiguous source-side of translation rules, a maximum entropy based rule selection (MERS) model is built. Thus the MERS models can help the decoder to perform a context-dependent rule selection.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Comparing with WSD (or PSD), there are some advantages of our approach:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Our approach resolves ambiguity for rules with multi-level syntactic structure, while WSD resolves ambiguity for strings that have no structures;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Our approach can help the decoder perform both lexical selection and phrase reorderings, while WSD can help the decoder only perform lexical selection;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Our method takes WSD as a special case, since a rule may only consists of terminals.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In our previous work (He et al., 2008) , we reported improvements by integrating a MERS model into a formally syntax-based SMT model, the hierarchical phrase-based model (Chiang, 2005) . In this paper, we incorporate the MERS model into a stateof-the-art linguistically syntax-based SMT model, the tree-to-string alignment template (TAT) model (Liu et al., 2006) . The basic differences are:", |
|
"cite_spans": [ |
|
{ |
|
"start": 21, |
|
"end": 38, |
|
"text": "(He et al., 2008)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 170, |
|
"end": 184, |
|
"text": "(Chiang, 2005)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 344, |
|
"end": 362, |
|
"text": "(Liu et al., 2006)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 The MERS model here combines rich information of source syntactic tree as features since the translation model is linguistically syntaxbased. He et al. 2008did not use this information.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 In this paper, we build MERS models for all ambiguous source-sides, including lexicalized (source-side which only contains terminals), partially lexicalized (source-side which contains both terminals and nonterminals), and unlexicalized (source-side which only contains nonterminals). He et al. 2008only built MERS models for partially lexicalized sourcesides.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In the TAT model, a TAT can be considered as a translation rule which describes correspondence between source syntactic tree and target string. TAT can capture linguistically motivated reorderings at short or long distance. Experiments show that by incorporating MERS model, the baseline system achieves statistically significant improvement. This paper is organized as follows: Section 2 reviews the TAT model; Section 3 introduces the MERS model and describes feature definitions; Section 4 demonstrates a method to incorporate the MERS model into the translation model; Section 5 reports and analyzes experimental results; Section 6 gives conclusions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our baseline system is Lynx (Liu et al., 2006) , which is a linguistically syntax-based SMT system. For translating a source sentence f J 1 = f 1 ...f j ...f J , Lynx firstly employs a parser to produce a source syntactic tree T (f J 1 ), and then uses the source syntactic tree as the input to search translations:", |
|
"cite_spans": [ |
|
{ |
|
"start": 28, |
|
"end": 46, |
|
"text": "(Liu et al., 2006)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline System", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "e I 1 = argmax e I 1 P r(e I 1 |f J 1 ) (1) = argmax e I 1 P r(T (f J 1 )|f J 1 )P r(e I 1 |T (f J 1 ))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline System", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In doing this, Lynx uses tree-to-string alignment template to build relationship between source syntactic tree and target string. A TAT is actually a translation rule: the source-side is a parser tree with leaves consisting of words and nonterminals, the target-side is a target string consisting of words and nonterminals.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline System", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "TAT can be learned from word-aligned, sourceparsed parallel corpus. Figure 4 shows three types of TATs extracted from the training example in Figure 3: lexicalized (the left), partially lexicalized (the middle), unlexicalized (the right). Lexicalized TAT contains only terminals, which is similar to phrase-to-phrase translation in phrase-based model except that it is constrained by a syntactic tree on the source-side. Partially lexicalized TAT contains both terminals and non-terminals, which can be used for both lexical translation and phrase reordering. Unlexicalized TAT contains only nonterminals and can only be used for phrase reordering.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 68, |
|
"end": 76, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF4" |
|
}, |
|
{ |
|
"start": 142, |
|
"end": 148, |
|
"text": "Figure", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Baseline System", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Lynx builds translation model in a log-linear framework (Och and Ney, 2002):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline System", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "P (e I 1 |T (f J 1 )) = (2) exp[ m \u03bb m h m (e I 1 , T (f J 1 ))] e exp[ m \u03bb m h m (e I 1 , T (f J 1 ))]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline System", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Following features are used:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline System", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "\u2022 Translation probabilities: P ( e| T ) and P ( T | e);", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline System", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "\u2022 Lexical weights: P w ( e| T ) and P w ( T | e);", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline System", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "\u2022 TAT penalty: exp(1), which is analogous to phrase penalty in phrase-based model;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline System", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "\u2022 Language model P lm (e I 1 );", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline System", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "\u2022 Word penalty I.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline System", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In Lynx, rule selection mainly depends on translation probabilities and lexical weights. These four scores describe how well a source tree links to a target string, which are estimated on the training corpus according to occurrence times of e and T . There are no features in Lynx that can capture contextual information during decoding, except for the n-gram language model which considers the left and right neighboring n-1 target words. But this information it very limited.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline System", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "NN NPB NN X 1 NN NN NPB NN X 1 NN X 2 NN X 3 city and village incomes of X 1 resident X 3 X 1 X 2", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline System", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In this paper, we focus on using contextual information to help the TAT model perform contextdependent rule selection. We consider the rule selection task as a multi-class classification task: for a source syntactic tree T , each corresponding target string e is a label. Thus during decoding, when a TAT T , e is selected, T is classified into label e , actually.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The model", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "A good way to solve the classification problem is the maximum entropy approach:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The model", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "P rs ( e| T , T (X k )) = (3) exp[ i \u03bb i h i ( e, C( T ), T (X k ))] e exp[ i \u03bb i h i ( e , C( T ), T (X k ))]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The model", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "where T and e are the source tree and target string of a TAT, respectively. h i is a binary feature functions and \u03bb i is the feature weight of h i . C( T ) defines local contextual information of T . X k is a nonterminal in the source tree T , where k is an index. T (X k ) is the source sub-tree covered by X k .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The model", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The advantage of the MERS model is that it uses rich contextual information to compute posterior probability for e given T . However, the translation probabilities and lexical weights in Lynx ignore these information.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The model", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Note that for each ambiguous source tree, we build a MERS model. That means, if there are N source trees extracted from the training corpus are ambiguous (the source tree which corresponds to multiple translations), thus for each ambiguous source tree", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The model", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "T i (i = 1, ..., N ), a MERS model M i (i = 1, ..., N )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The model", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "is built. Since a source tree may correspond to several hundreds of target translations at most, the feature space of a MERS model is not prohibitively large. Thus the complexity for training a MERS model is low.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The model", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Let T , e be a translation rule in the TAT model. We use f ( T ) to represent the source phrase covered by T . To build a MERS model for the source tree T , we explore various features listed below.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Definition", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "These features are defined on source words. Specifically, there are two kinds of lexical features: external features f \u22121 and f +1 , which are the source words immediately to the left and right of f ( T ), respectively; internal features f L (T (X k )) and f R (T (X k )), which are the left most and right most boundary words of the source phrase covered by T (X k ), respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lexical Features (LF)", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "See Figure 5 (a) for illustration. In this example,", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 4, |
|
"end": 13, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Lexical Features (LF)", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "f \u22121 =t\u00edg\u0101o, f +1 =zh\u00ecz\u00e0o, f L (T (X 1 ))=g\u014dngy\u00e8, f R (T (X 1 ))=ch\u01cenp\u01d0n.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lexical Features (LF)", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "These features are the POS tags of the source words defined in the lexical features: spectively. POS tags can generalize over all training examples.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parts-of-speech (POS) Features (POSF)", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "P \u22121 , P +1 , P L (T (X k )), P R (T (X k )) are the POS tags of f \u22121 , f +1 , f L (T (X k )), f R (T (X k )),", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parts-of-speech (POS) Features (POSF)", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "Figure 5 (b) shows POS features.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parts-of-speech (POS) Features (POSF)", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "P \u22121 =VV, P +1 =NN, P L (T (X 1 ))=NN, P R (T (X 1 ))=NN.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parts-of-speech (POS) Features (POSF)", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "These features are the length of the source phrase f (T (X k )) covered by T (X k ). In Liu's TAT model, the knowledge learned from a short span can be used for a larger span. This is not reliable. Thus we use span features to allow the MERS model to learn a preference for short or large span.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Span Features (SPF)", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "In Figure 5 (c), the span of X 1 is 2.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 11, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Span Features (SPF)", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "The parent node of T in the parser tree of the source sentence. The same source sub-tree may have different parent nodes in different training examples. Therefore, this feature may provide information for distinguishing source sub-trees. Figure 5 (d) shows that the parent is a NP node.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 238, |
|
"end": 250, |
|
"text": "Figure 5 (d)", |
|
"ref_id": "FIGREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Parent Feature (PF)", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "The siblings of the root of T . This feature considers neighboring nodes which share the same parent node.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sibling Features (SBF)", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "In Figure 5 (e), the source tree has one sibling node NPB.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 11, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sibling Features (SBF)", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "Those features make use of rich information around a rule, including the contextual information of a rule and the information of sub-trees covered by nonterminals. They are never used in Liu's TAT model. Figure 5 shows features for a partially lexicalized source tree. Furthermore, we also build MERS models for lexicalized and unlexicalized source trees. Note that for lexicalized tree, features do not include the information of sub-trees since there is no nonterminals.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 204, |
|
"end": 212, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sibling Features (SBF)", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "The features can be easily obtained by modifying the TAT extraction algorithm described in (Liu et al., 2006) . When a TAT is extracted from a word-aligned, source-parsed parallel sentence, we just record the contextual features and the features of the sub-trees. Then we use the toolkit implemented by Zhang (2004) to train MERS models for the ambiguous source syntactic trees separately. We set the iteration number to 100 and Gaussian prior to 1.", |
|
"cite_spans": [ |
|
{ |
|
"start": 91, |
|
"end": 109, |
|
"text": "(Liu et al., 2006)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sibling Features (SBF)", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "We integrate the MERS models into the TAT model during the translation of each source sentence. Thus the MERS models can help the decoder perform context-dependent rule selection during decoding. For integration, we add two new features into the log-linear translation model:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Integrating the MERS Models into the Translation Model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 P rs ( e| T , T (X k ))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Integrating the MERS Models into the Translation Model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": ". This feature is computed by the MERS model according to equation 3, which gives a probability that the model selecting a target-side e given an ambiguous sourceside T , considering rich contextual information.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Integrating the MERS Models into the Translation Model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 P ap = exp(1). During decoding, if a source tree has multiple translations, this feature is set to exp(1), otherwise it is set to exp(0). Since the MERS models are only built for ambiguous source trees, the first feature P rs ( e| T , T (X k )) for non-ambiguous source tree will be set to 1.0. Therefore, the decoder will prefer to use non-ambiguous TATs. However, nonambiguous TATs usually occur only once in the training corpus, which are not reliable. Thus we use this feature to reward ambiguous TATs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Integrating the MERS Models into the Translation Model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The advantage of our integration is that we need not change the main decoding algorithm of Lynx. Furthermore, the weights of the new features can be trained together with other features of the translation model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Integrating the MERS Models into the Translation Model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We carry out experiments on Chinese-to-English translation. The training corpus is the FBIS corpus, which contains 239k sentence pairs with 6.9M Chinese words and 8.9M English words. For the language model, we use SRI Language Modeling Toolkit (Stolcke, 2002) with modified Kneser-Ney smoothing (Chen and Goodman, 1998) Papineni et al., 2002) , as calculated by mteval-v11b.pl with case-insensitive matching of n-grams, where n = 4.", |
|
"cite_spans": [ |
|
{ |
|
"start": 244, |
|
"end": 259, |
|
"text": "(Stolcke, 2002)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 295, |
|
"end": 319, |
|
"text": "(Chen and Goodman, 1998)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 320, |
|
"end": 342, |
|
"text": "Papineni et al., 2002)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Corpus", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "To train the translation model, we first run GIZA++ (Och and Ney, 2000) to obtain word alignment in both translation directions. Then the word alignment is refined by performing \"grow-diag-final\" method (Koehn et al., 2003) . We use a Chinese parser developed by Deyi Xiong (Xiong et al., 2005) to parse the Chinese sentences of the training corpus. Our TAT extraction algorithm is similar to Liu et al. (2006) , except that we make some tiny modifications to extract contextual features for MERS models. To extract TAT, we set the maximum height of the source sub-tree to h = 3, the maximum number of direct descendants of a node of sub-tree to c = 5. See (Liu et al., 2006) for specific definitions of these parameters. Table 1 shows statistical information of TATs which are filtered by the two test sets. For each type (lexicalized, partially lexicalized, unlexicalized) of TATs, a great portion of the source trees are ambiguous. The number of ambiguous source trees ac-counts for 78.34% of the total source trees. This indicates that the TAT model faces serious rule selection problem during decoding.", |
|
"cite_spans": [ |
|
{ |
|
"start": 52, |
|
"end": 71, |
|
"text": "(Och and Ney, 2000)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 203, |
|
"end": 223, |
|
"text": "(Koehn et al., 2003)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 250, |
|
"end": 294, |
|
"text": "developed by Deyi Xiong (Xiong et al., 2005)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 404, |
|
"end": 410, |
|
"text": "(2006)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 657, |
|
"end": 675, |
|
"text": "(Liu et al., 2006)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 722, |
|
"end": 729, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We use Lynx as the baseline system. Then the MERS models are incorporated into Lynx, and the system is called Lynx+MERS. To run the decoder, Lynx and Lynx+MERS share the same settings: tatTable-limit=30, tatTable-threshold=0, stack-limit=100, stack-threshold=0.00001. The meanings of the pruning parameters are the same to Liu et al. (2006) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 323, |
|
"end": 340, |
|
"text": "Liu et al. (2006)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We perform minimum error rate training (Och, 2003) to tune the feature weights for the log-linear model to maximize the systems's BLEU score on the development set. The weights are shown in Table 2 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 39, |
|
"end": 50, |
|
"text": "(Och, 2003)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 190, |
|
"end": 197, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "These weights are then used to run Lynx and Lynx+MERS on the test sets. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "The baseline system only uses four features for rule selection: the translation probabilities P ( e| T ) and P ( T | e); and the lexical weights P w ( e| T ) and P w ( T | e). These features are estimated on the training corpus by the maximum likelihood approach, which does not allow the decoder to perform a context dependent rule selection. Although Lynx uses language model as feature, the n-gram language model only considers the left and right n-1 neighboring target words. The MERS model combines rich contextual information as features to help the decoder perform rule selection. Table 4 shows the effect of different feature sets. We test two classes of feature sets: the single feature (the top four rows of Table 4 ) and the combination of features (the bottom five rows of Table 4). For the single feature set, the POS tags are the most useful and stable features. Using this feature, Lynx+MERS achieves improvements on both the test sets. The reason is that POS tags can be generalized over all training examples, which can alleviate the data sparseness problem.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 588, |
|
"end": 595, |
|
"text": "Table 4", |
|
"ref_id": "TABREF5" |
|
}, |
|
{ |
|
"start": 718, |
|
"end": 725, |
|
"text": "Table 4", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Analysis", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "Although we find that some single features may hurt the BLEU score, they are useful in combination of features. This is because one of the strengths of the maximum entropy model is that it can incorporate various features to perform classification. Therefore, using all features defined in Section 3.2, we obtain statistically significant improvements (the last row of Table 4 ). In order to know how the MERS models improve translation quality, we inspect the 1-best outputs of Lynx and Lynx+MERS. We find that the first way that the MERS models help the decoder is that they can perform better selection for words or phrases, similar to the effect of WSD or PSD. This is because lexicalized and partially lexicalized TATs contain terminals. Consider the following examples:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 369, |
|
"end": 376, |
|
"text": "Table 4", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Analysis", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "\u2022 Source:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Analysis", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "\u2022 Reference: Malta is located in southern Europe", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Analysis", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "\u2022 Lynx: Malta in southern Europe", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Analysis", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "\u2022 Lynx+MERS: Malta is located in southern Europe Here the Chinese word \" \" is incorrectly translated into \"in\" by the baseline system. Lynx+MERS produces the correct translation \"is located in\". That is because the MERS model considers more contextual information for rule selection. In the MERS model, P rs (in| ) = 0.09, which is smaller than P rs (is located in| ) = 0.14. Therefore, the MERS model prefers the translation \"is located in\". Note that here the source tree (VV ) is lexicalized, and the role of the MERS model is actually the same as WSD.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Analysis", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "The second way that the MERS models help the decoder is that they can perform better phrase reorderings. Considering the following examples: The syntactic tree of the Chinese phrase \" \" is shown in Figure 6 . However, there are two TATs which can be applied to the source tree, as shown in Figure 7 . The baseline system selects the left TAT and produces a monotone translation of the subtrees \"X 1 :PP\" and \"X 2 :NPB\". However, Lynx+MERS uses the right TAT and performs correct phrase reordering by swapping the two source phrases. Here the source tree is partially lexicalized, and both the contextual information and the information of sub-trees covered by nonterminals are considered by the MERS model.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 198, |
|
"end": 206, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 290, |
|
"end": 298, |
|
"text": "Figure 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Analysis", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "In this paper, we propose a maximum entropy based rule selection model for syntax-based SMT. We use two kinds of information as features: the local contextual information of a rule and the information of sub-trees matched by nonterminals in a rule. During decoding, these features allow the decoder to perform a context-dependent rule selection. However, this information is never used in most of the current syntax-based SMT models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The advantage of the MERS model is that it can help the decoder not only perform lexical selection, but also phrase reorderings. We demonstrate one way to incorporate the MERS models into a stateof-the-art linguistically syntax-based SMT model, the tree-to-string alignment model. Experiments show that by incorporating the MERS models, the baseline system achieves statistically significant improvements.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We find that rich contextual information can improve translation quality for a syntax-based SMT system. In future, we will explore more sophisticated features for the MERS model. Moreover, we will test the performance of the MERS model on a large-scale corpus. Figure 6 : Syntactic tree of the source phrase \" \". Figure 7 : TATs which can be used for the source phrase \" \".", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 261, |
|
"end": 269, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 313, |
|
"end": 321, |
|
"text": "Figure 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "NP DNP PP X 1 DEG NPB X 2 NP DNP PP X 1 DEG NPB X 2 X 1 X 2 X 2 X 1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to thank Yajuan Lv for her valuable suggestions. This work was supported by the National Natural Science Foundation of China (NO. 60573188 and 60736014), and the High Technology Research and Development Program of China (NO. 2006AA010108).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": " Linguistics, pages 33-40. Stanley F. Chen and Joshua Goodman. 1998 2004. Maximum entropy modeling toolkit for python and c++. available at http://homepages.inf.ed.ac.uk/s0450736/maxent toolkit.html.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1, |
|
"end": 67, |
|
"text": "Linguistics, pages 33-40. Stanley F. Chen and Joshua Goodman. 1998", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Meeting of the Association for Computational", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "How phrase sense disambiguation outperforms word sense disambiguation for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Marine", |
|
"middle": [], |
|
"last": "Carpuat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dekai", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "11th Conference on Theoretical and Methodological Issues in Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "43--52", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marine Carpuat and Dekai Wu. 2007a. How phrase sense disambiguation outperforms word sense disam- biguation for statistical machine translation. In 11th Conference on Theoretical and Methodological Issues in Machine Translation, pages 43-52.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Improving statistical machine translation using word sense disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Marine", |
|
"middle": [], |
|
"last": "Carpuat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dekai", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of EMNLP-CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "61--72", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marine Carpuat and Dekai Wu. 2007b. Improving sta- tistical machine translation using word sense disam- biguation. In Proceedings of EMNLP-CoNLL 2007, pages 61-72.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Word sense disambiguation improves statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Yee", |
|
"middle": [], |
|
"last": "Seng Chan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hwee Tou", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Chiang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 45th Annual", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yee Seng Chan, Hwee Tou Ng, and David Chiang. 2007. Word sense disambiguation improves statistical ma- chine translation. In Proceedings of the 45th Annual", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF1": { |
|
"text": "Training examples for rules inFigure 1", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"text": "city and village resident continued to grow", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF3": { |
|
"text": "Word-aligned, source-parsed training example.", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF4": { |
|
"text": "TATs learned from the training example in Figure 3.", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF5": { |
|
"text": "Illustration of features of the MERS model. The source tree of the TAT is DNP(NP X 1 ) (DEG de) . Gray nodes denote information included in the feature.", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF1": { |
|
"content": "<table/>", |
|
"num": null, |
|
"type_str": "table", |
|
"html": null, |
|
"text": "Feature weights obtained by minimum error rate training on the development set. The first 8 features are used by Lynx. TP=TAT penalty, WP=word penalty, AP=ambiguous TAT penalty. Note that in fact, the positive weight for WP and AP indicate a reward. the training corpus and the Xinhua portion of the Gigaword corpus, respectively. NIST MT 2002 test set is used as the development set. NIST MT 2003 and NIST MT 2005 test sets are used as the test sets." |
|
}, |
|
"TABREF2": { |
|
"content": "<table><tr><td>shows the</td></tr></table>", |
|
"num": null, |
|
"type_str": "table", |
|
"html": null, |
|
"text": "" |
|
}, |
|
"TABREF3": { |
|
"content": "<table/>", |
|
"num": null, |
|
"type_str": "table", |
|
"html": null, |
|
"text": "BLEU-4 scores (case-insensitive) on the test sets." |
|
}, |
|
"TABREF5": { |
|
"content": "<table/>", |
|
"num": null, |
|
"type_str": "table", |
|
"html": null, |
|
"text": "BLEU-4 scores on different feature sets." |
|
}, |
|
"TABREF6": { |
|
"content": "<table><tr><td>\u2022 Source:</td><td>[</td><td>] 1 [</td><td>] 2</td></tr><tr><td>...</td><td/><td/><td/></tr><tr><td colspan=\"4\">\u2022 Lynx+MERS: According to the [development</td></tr><tr><td colspan=\"3\">strategy] 2 [in the Chinese market] 1</td><td/></tr></table>", |
|
"num": null, |
|
"type_str": "table", |
|
"html": null, |
|
"text": "Reference: According to its [development strategy] 2 [in the Chinese market] 1 ... \u2022 Lynx: Accordance with [the Chinese market] 1 [development strategy] 2 ..." |
|
} |
|
} |
|
} |
|
} |