|
{ |
|
"paper_id": "P10-1017", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T09:20:11.845228Z" |
|
}, |
|
"title": "Hierarchical Search for Word Alignment", |
|
"authors": [ |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Riesa", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Information Sciences Institute Viterbi School of Engineering University of Southern California", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Marcu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Information Sciences Institute Viterbi School of Engineering University of Southern California", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We present a simple yet powerful hierarchical search algorithm for automatic word alignment. Our algorithm induces a forest of alignments from which we can efficiently extract a ranked k-best list. We score a given alignment within the forest with a flexible, linear discriminative model incorporating hundreds of features, and trained on a relatively small amount of annotated data. We report results on Arabic-English word alignment and translation tasks. Our model outperforms a GIZA++ Model-4 baseline by 6.3 points in F-measure, yielding a 1.1 BLEU score increase over a state-of-the-art syntax-based machine translation system.", |
|
"pdf_parse": { |
|
"paper_id": "P10-1017", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We present a simple yet powerful hierarchical search algorithm for automatic word alignment. Our algorithm induces a forest of alignments from which we can efficiently extract a ranked k-best list. We score a given alignment within the forest with a flexible, linear discriminative model incorporating hundreds of features, and trained on a relatively small amount of annotated data. We report results on Arabic-English word alignment and translation tasks. Our model outperforms a GIZA++ Model-4 baseline by 6.3 points in F-measure, yielding a 1.1 BLEU score increase over a state-of-the-art syntax-based machine translation system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Automatic word alignment is generally accepted as a first step in training any statistical machine translation system. It is a vital prerequisite for generating translation tables, phrase tables, or syntactic transformation rules. Generative alignment models like IBM Model-4 (Brown et al., 1993) have been in wide use for over 15 years, and while not perfect (see Figure 1 ), they are completely unsupervised, requiring no annotated training data to learn alignments that have powered many current state-of-the-art translation system.", |
|
"cite_spans": [ |
|
{ |
|
"start": 268, |
|
"end": 296, |
|
"text": "Model-4 (Brown et al., 1993)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 365, |
|
"end": 373, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Today, there exist human-annotated alignments and an abundance of other information for many language pairs potentially useful for inducing accurate alignments. How can we take advantage of all of this data at our fingertips? Using feature functions that encode extra information is one good way. Unfortunately, as Moore (2005) points out, it is usually difficult to extend a given generative model with feature functions without changing the entire generative story. This difficulty has motivated much recent work in discriminative modeling for word alignment (Moore, 2005; Ittycheriah and Roukos, 2005; Liu et al., 2005; Taskar et al., 2005; Blunsom and Cohn, 2006; Lacoste-Julien et al., 2006; Moore et al., 2006) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 315, |
|
"end": 327, |
|
"text": "Moore (2005)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 561, |
|
"end": 574, |
|
"text": "(Moore, 2005;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 575, |
|
"end": 604, |
|
"text": "Ittycheriah and Roukos, 2005;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 605, |
|
"end": 622, |
|
"text": "Liu et al., 2005;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 623, |
|
"end": 643, |
|
"text": "Taskar et al., 2005;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 644, |
|
"end": 667, |
|
"text": "Blunsom and Cohn, 2006;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 668, |
|
"end": 696, |
|
"text": "Lacoste-Julien et al., 2006;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 697, |
|
"end": 716, |
|
"text": "Moore et al., 2006)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We present in this paper a discriminative alignment model trained on relatively little data, with a simple, yet powerful hierarchical search procedure. We borrow ideas from both k-best parsing (Klein and Manning, 2001; Huang and Chiang, 2005; Huang, 2008) and forest-based, and hierarchical phrase-based translation (Huang and Chiang, 2007; Chiang, 2007) , and apply them to word alignment.", |
|
"cite_spans": [ |
|
{ |
|
"start": 193, |
|
"end": 218, |
|
"text": "(Klein and Manning, 2001;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 219, |
|
"end": 242, |
|
"text": "Huang and Chiang, 2005;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 243, |
|
"end": 255, |
|
"text": "Huang, 2008)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 316, |
|
"end": 340, |
|
"text": "(Huang and Chiang, 2007;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 341, |
|
"end": 354, |
|
"text": "Chiang, 2007)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Using a foreign string and an English parse tree as input, we formulate a bottom-up search on the parse tree, with the structure of the tree as a backbone for building a hypergraph of possible alignments. Our algorithm yields a forest of Figure 2 : Example of approximate search through a hypergraph with beam size = 5. Each black square implies a partial alignment. Each partial alignment at each node is ranked according to its model score. In this figure, we see that the partial alignment implied by the 1-best hypothesis at the leftmost NP node is constructed by composing the best hypothesis at the terminal node labeled \"the\" and the 2ndbest hypothesis at the terminal node labeled \"man\". (We ignore terminal nodes in this toy example.) Hypotheses at the root node imply full alignment structures.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 238, |
|
"end": 246, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "word alignments, from which we can efficiently extract the k-best. We handle an arbitrary number of features, compute them efficiently, and score alignments using a linear model. We train the parameters of the model using averaged perceptron (Collins, 2002) modified for structured outputs, but can easily fit into a max-margin or related framework. Finally, we use relatively little training data to achieve accurate word alignments. Our model can generate arbitrary alignments and learn from arbitrary gold alignments.", |
|
"cite_spans": [ |
|
{ |
|
"start": 242, |
|
"end": 257, |
|
"text": "(Collins, 2002)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Algorithm input The input to our alignment algorithm is a sentence-pair (e n 1 , f m 1 ) and a parse tree over one of the input sentences. In this work, we parse our English data, and for each sentence E = e n 1 , let T be its syntactic parse. To generate parse trees, we use the Berkeley parser (Petrov et al., 2006) , and use Collins head rules (Collins, 2003) to head-out binarize each tree.", |
|
"cite_spans": [ |
|
{ |
|
"start": 296, |
|
"end": 317, |
|
"text": "(Petrov et al., 2006)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 347, |
|
"end": 362, |
|
"text": "(Collins, 2003)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Alignment as a Hypergraph", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Overview We present a brief overview here and delve deeper in Section 2.1. Word alignments are built bottom-up on the parse tree. Each node v in the tree holds partial alignments sorted by score. In this example, k = 3. Each box represents the combination of two partial alignments to create a larger one. The score in each box is the sum of the scores of the child alignments plus a combination cost.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Alignment as a Hypergraph", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Each partial alignment comprises the columns of the alignment matrix for the e-words spanned by v, and each is scored by a linear combination of feature functions. See Figure 2 for a small example.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 168, |
|
"end": 176, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Word Alignment as a Hypergraph", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Initial partial alignments are enumerated and scored at preterminal nodes, each spanning a single column of the word alignment matrix. To speed up search, we can prune at each node, keeping a beam of size k. In the diagram depicted in Figure 2 , the beam is size k = 5.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 235, |
|
"end": 243, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Word Alignment as a Hypergraph", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "From here, we traverse the tree nodes bottomup, combining partial alignments from child nodes until we have constructed a single full alignment at the root node of the tree. If we are interested in the k-best, we continue to populate the root node until we have k alignments. 1 We use one set of feature functions for preterminal nodes, and another set for nonterminal nodes. This is analogous to local and nonlocal feature functions for parse-reranking used by Huang (2008) . Using nonlocal features at a nonterminal node emits a combination cost for composing a set of child partial alignments.", |
|
"cite_spans": [ |
|
{ |
|
"start": 276, |
|
"end": 277, |
|
"text": "1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 462, |
|
"end": 474, |
|
"text": "Huang (2008)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Alignment as a Hypergraph", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Because combination costs come into play, we use cube pruning (Chiang, 2007) to approximate the k-best combinations at some nonterminal node v. Inference is exact when only local features are used.", |
|
"cite_spans": [ |
|
{ |
|
"start": 62, |
|
"end": 76, |
|
"text": "(Chiang, 2007)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Alignment as a Hypergraph", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Assumptions There are certain assumptions related to our search algorithm that we must make: 1 We use approximate dynamic programming to store alignments, keeping only scored lists of pointers to initial single-column spans. Each item in the list is a derivation that implies a partial alignment.", |
|
"cite_spans": [ |
|
{ |
|
"start": 93, |
|
"end": 94, |
|
"text": "1", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Alignment as a Hypergraph", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "(1) that using the structure of 1-best English syntactic parse trees is a reasonable way to frame and drive our search, and (2) that F-measure approximately decomposes over hyperedges.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Alignment as a Hypergraph", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We perform an oracle experiment to validate these assumptions. We find the oracle for a given (T ,e, f ) triple by proceeding through our search algorithm, forcing ourselves to always select correct links with respect to the gold alignment when possible, breaking ties arbitrarily. The the F 1 score of our oracle alignment is 98.8%, given this \"perfect\" model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Alignment as a Hypergraph", |
|
"sec_num": "2" |
|
}, |
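The F-measure here is computed over alignments viewed as sets of links in the alignment matrix. A minimal sketch of that computation, with illustrative names not taken from the paper:

```python
# Sketch: balanced F1 between a hypothesis and a gold alignment, each
# represented as a set of (english_index, foreign_index) links.

def alignment_f1(hyp: set, gold: set) -> float:
    """Balanced F-measure over link sets; 0.0 when either side is empty."""
    if not hyp or not gold:
        return 0.0
    correct = len(hyp & gold)
    precision = correct / len(hyp)
    recall = correct / len(gold)
    if precision + recall == 0:
        return 0.0
    return 2 * precision * recall / (precision + recall)

gold = {(0, 0), (1, 2), (2, 1)}
hyp = {(0, 0), (1, 2), (2, 3)}
print(round(alignment_f1(hyp, gold), 3))  # 0.667: 2/3 precision and recall
```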
|
{ |
|
"text": "Initial alignments We can construct a word alignment hierarchically, bottom-up, by making use of the structure inherent in syntactic parse trees. We can think of building a word alignment as filling in an M \u00d7N matrix ( Figure 1 ), and we begin by visiting each preterminal node in the tree. Each of these nodes spans a single e word. (Line 2 in Algorithm 1).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 219, |
|
"end": 227, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Hierarchical search", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "From here we can assign links from each e word to zero or more f words (Lines 6-14). At this level of the tree the span size is 1, and the partial alignment we have made spans a single column of the matrix. We can make many such partial alignments depending on the links selected. Lines 5 through 9 of Algorithm 1 enumerate either the null alignment, single-link alignments, or two-link alignments. Each partial alignment is scored and stored in a sorted heap (Lines 9 and 13).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hierarchical search", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "In practice enumerating all two-link alignments can be prohibitive for long sentence pairs; we set a practical limit and score only pairwise combina- A k-best list of alignments over e n 1 and f m 1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hierarchical search", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "1 function A\uf76c\uf769\uf767\uf76e(e n 1 , f m 1 , T ) 2 for v \u2208 T in bottom-up order do 3 \u03b1 v \u2190 \u2205 4 if \uf769\uf773-P\uf772\uf765\uf774\uf765\uf772\uf76d\uf769\uf76e\uf761\uf76cN\uf76f\uf764\uf765(v) then 5 i \u2190 index-of(v) 6 for j = 0 to m do 7 links \u2190 (i, j) 8 score \u2190 w \u2022 h(links, v, e n 1 , f m 1 ) 9 P\uf775\uf773\uf768(\u03b1 v , score, links , k ) 10 for k = j + 1 to m do 11 links \u2190 (i, j), (i, k) 12 score \u2190 w \u2022 h(links, v, e n 1 , f m 1 ) 13 P\uf775\uf773\uf768(\u03b1 v , score, links , k ) 14 end 15 end 16 else 17 \u03b1 v \u2190 G\uf772\uf76f\uf777S\uf770\uf761\uf76e(children(v), k) 18 end 19 end 20 end 21 function G\uf772\uf76f\uf777S\uf770\uf761\uf76e( u 1 , u 2 , k) 22 return C\uf775\uf762\uf765P\uf772\uf775\uf76e\uf769\uf76e\uf767( \u03b1 u 1 , \u03b1 u 2 , k,w,h) 23 end", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hierarchical search", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "tions of the top n = max | f | 2 , 10 scoring singlelink alignments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hierarchical search", |
|
"sec_num": "2.1" |
|
}, |
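A rough Python sketch of this preterminal enumeration step of Algorithm 1, including the cap n = max(|f|/2, 10) on pairwise combinations. Here `score_links` and `push` stand in for w · h(·) and the beam insertion, and encoding the null alignment as j = 0 is our choice for the sketch, not something the paper prescribes:

```python
# Illustrative sketch, not the paper's code. For the e-word at column i,
# enumerate the null alignment, all single-link alignments, and pairwise
# combinations of the top-n scoring single links.

def enumerate_preterminal(i, m, score_links, push, k):
    singles = []
    for j in range(0, m + 1):  # j = 0 encodes "e_i unaligned" here
        links = frozenset() if j == 0 else frozenset([(i, j)])
        s = score_links(links)
        push(s, links, k)
        if links:
            singles.append((s, links))
    # Restrict two-link alignments to pairwise combinations of the
    # top-n single-link alignments to keep enumeration tractable.
    n = max(m // 2, 10)
    top = sorted(singles, key=lambda x: x[0], reverse=True)[:n]
    for a in range(len(top)):
        for b in range(a + 1, len(top)):
            links = top[a][1] | top[b][1]
            push(score_links(links), links, k)
```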
|
{ |
|
"text": "We limit the number of total partial alignments \u03b1 v kept at each node to k. If at any time we wish to push onto the heap a new partial alignment when the heap is full, we pop the current worst off the heap and replace it with our new partial alignment if its score is better than the current worst.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hierarchical search", |
|
"sec_num": "2.1" |
|
}, |
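One concrete way to realize this size-k beam with replace-worst behavior is a min-heap keyed on score, sketched here as an assumption about the data structure rather than a description of the authors' implementation:

```python
import heapq
import itertools

# Min-heap beam of size k: heap[0] is always the current worst item.
# The counter breaks score ties so the payloads are never compared.
_counter = itertools.count()

def push(heap, score, links, k):
    item = (score, next(_counter), links)
    if len(heap) < k:
        heapq.heappush(heap, item)
    elif score > heap[0][0]:           # better than the current worst
        heapq.heapreplace(heap, item)  # pop the worst, insert the new item

beam = []
for s in [0.2, 0.9, 0.5, 0.7, 0.1]:
    push(beam, s, frozenset([(1, 1)]), k=3)
print(sorted(item[0] for item in beam))  # [0.5, 0.7, 0.9]
```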
|
{ |
|
"text": "Building the hypergraph We now visit internal nodes (Line 16) in the tree in bottom-up order. At each nonterminal node v we wish to combine the partial alignments of its children u 1 , . . . , u c . We use cube pruning (Chiang, 2007; Huang and Chiang, 2007) to select the k-best combinations of the partial alignments of u 1 , . . . , u c (Line 19). Note that Algorithm 1 assumes a binary tree 2 , but is not necessary. In the general case, cube pruning will operate on a d-dimensional hypercube, where d is the branching factor of node v. We cannot enumerate and score every possibility; without the cube pruning approximation, we will have k c possible combinations at each node, exploding the search space exponentially. Figure 3 depicts how we select the top-k alignments at a node v from its children u 1 , u 2 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 219, |
|
"end": 233, |
|
"text": "(Chiang, 2007;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 234, |
|
"end": 257, |
|
"text": "Huang and Chiang, 2007)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 724, |
|
"end": 732, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Hierarchical search", |
|
"sec_num": "2.1" |
|
}, |
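A minimal two-child cube-pruning sketch: starting from the best-best corner, lazily explore the grid of child combinations best-first, scoring each combination as the sum of the child scores plus a combination cost. As the text notes, nonlocal costs make the grid non-monotonic, so the result is approximate. Names are illustrative:

```python
import heapq

# a and b: per-child lists of (score, links), sorted best-first and
# non-empty; combo_cost stands in for the nonlocal-feature contribution.

def cube_prune(a, b, k, combo_cost):
    def score(i, j):
        return a[i][0] + b[j][0] + combo_cost(a[i][1], b[j][1])
    frontier = [(-score(0, 0), 0, 0)]  # max-heap via negated scores
    seen = {(0, 0)}
    out = []
    while frontier and len(out) < k:
        neg, i, j = heapq.heappop(frontier)
        out.append((-neg, a[i][1] | b[j][1]))
        for ni, nj in ((i + 1, j), (i, j + 1)):  # expand grid neighbors
            if ni < len(a) and nj < len(b) and (ni, nj) not in seen:
                seen.add((ni, nj))
                heapq.heappush(frontier, (-score(ni, nj), ni, nj))
    return out
```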
|
{ |
|
"text": "We incorporate all our new features into a linear model and learn weights for each using the online averaged perceptron algorithm (Collins, 2002) with a few modifications for structured outputs inspired by Chiang et al. (2008) . We define:", |
|
"cite_spans": [ |
|
{ |
|
"start": 130, |
|
"end": 145, |
|
"text": "(Collins, 2002)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 206, |
|
"end": 226, |
|
"text": "Chiang et al. (2008)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discriminative training", |
|
"sec_num": "3" |
|
}, |
|
|
{ |
|
"text": "... Figure 5 : A common problem with GIZA++ Model 4 alignments is a weak distortion model. The second English \"in\" is aligned to the wrong Arabic token. Circles show the gold alignment.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 4, |
|
"end": 12, |
|
"text": "Figure 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discriminative training", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03b3(y) = (y i , y) + w \u2022 (h(y i ) \u2212 h(y))", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Discriminative training", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "where (y i ,y) is a loss function describing how bad it is to guess y when the correct answer is y i . In our case, we define (y i ,y) as 1\u2212F 1 (y i ,y). We select the oracle alignment according to:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discriminative training", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "y + = arg min y\u2208\uf763\uf761\uf76e\uf764(x) \u03b3(y)", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Discriminative training", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "where \uf763\uf761\uf76e\uf764(x) is a set of hypothesis alignments generated from input x. Instead of the traditional oracle, which is calculated solely with respect to the loss (y i ,y), we choose the oracle that jointly minimizes the loss and the difference in model score to the true alignment. Note that Equation 2 is equivalent to maximizing the sum of the Fmeasure and model score of y:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discriminative training", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "y + = arg max y\u2208\uf763\uf761\uf76e\uf764(x) (F 1 (y i , y) + w \u2022 h(y))", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Discriminative training", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Let\u0177 be the 1-best alignment according to our model:\u0177 = arg max", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discriminative training", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "y\u2208\uf763\uf761\uf76e\uf764(x) w \u2022 h(y)", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Discriminative training", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Then, at each iteration our weight update is:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discriminative training", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "w \u2190 w + \u03b7(h(y + ) \u2212 h(\u0177))", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Discriminative training", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "where \u03b7 is a learning rate parameter. 3 We find that this more conservative update gives rise to a much more stable search. After each iteration, we expect y + to get closer and closer to the true y i .", |
|
"cite_spans": [ |
|
{ |
|
"start": 38, |
|
"end": 39, |
|
"text": "3", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discriminative training", |
|
"sec_num": "3" |
|
}, |
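Concretely, Equations 1 through 5 amount to the following per-example update, sketched with sparse dict feature vectors; `cand` stands in for the k-best list produced by search, and all names are ours:

```python
# Sketch of the loss-augmented oracle and perceptron update (Eqs. 1-5).

def dot(w, h):
    return sum(w.get(f, 0.0) * v for f, v in h.items())

def update(w, cand, f1_to_gold, eta=0.05):
    """cand: feature dicts h(y) per hypothesis; f1_to_gold: F1(y_i, y)."""
    # Eq. 3: the oracle y+ maximizes F1 plus model score.
    plus = max(range(len(cand)),
               key=lambda y: f1_to_gold[y] + dot(w, cand[y]))
    # Eq. 4: y-hat is the 1-best under the model alone.
    hat = max(range(len(cand)), key=lambda y: dot(w, cand[y]))
    # Eq. 5: conservative update toward the oracle.
    for f in set(cand[plus]) | set(cand[hat]):
        w[f] = w.get(f, 0.0) + eta * (cand[plus].get(f, 0.0)
                                      - cand[hat].get(f, 0.0))
    return w
```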
|
{ |
|
"text": "Our simple, flexible linear model makes it easy to throw in many features, mapping a given complex alignment structure into a single high-dimensional feature vector. Our hierarchical search framework allows us to compute these features when needed, and affords us extra useful syntactic information.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Features", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We use two classes of features: local and nonlocal. Huang (2008) defines a feature h to be local if and only if it can be factored among the local productions in a tree, and non-local otherwise. Analogously for alignments, our class of local features are those that can be factored among the local partial alignments competing to comprise a larger span of the matrix, and non-local otherwise. These features score a set of links and the words connected by them.", |
|
"cite_spans": [ |
|
{ |
|
"start": 52, |
|
"end": 64, |
|
"text": "Huang (2008)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Features", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Feature development Our features are inspired by analysis of patterns contained among our gold alignment data and automatically generated parse trees. We use both local lexical and nonlocal structural features as described below.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Features", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "These features fire on single-column spans.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Local features", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "\u2022 From the output of GIZA++ Model 4, we compute lexical probabilities p(e | f ) and p( f | e), as well as a fertility table \u03c6(e). From the fertility table, we fire features \u03c6 0 (e), \u03c6 1 (e), and \u03c6 2+ (e) when a word e is aligned to zero, one, or two or more words, respectively. Lexical probability features p(e | f ) and p( f | e) fire when a word e is aligned to a word f .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Local features", |
|
"sec_num": "4.1" |
|
}, |
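As a concrete reading of this bullet (and of the lexical-zero feature in the next one), a sketch of how these features might fire for one e-word; the table layout and feature names are assumptions:

```python
# Fertility and lexical-probability features for one e-word and the
# f-words linked to it; p_e_given_f and p_f_given_e are lexical tables
# computed from a baseline aligner's output.

def local_features(e, f_words, p_e_given_f, p_f_given_e):
    feats = {}
    fert = len(f_words)
    feats["fert_0" if fert == 0 else "fert_1" if fert == 1 else "fert_2+"] = 1.0
    for f in f_words:
        pef = p_e_given_f.get((e, f), 0.0)
        pfe = p_f_given_e.get((f, e), 0.0)
        feats["p(e|f)"] = feats.get("p(e|f)", 0.0) + pef
        feats["p(f|e)"] = feats.get("p(f|e)", 0.0) + pfe
        if pef == 0.0 and pfe == 0.0:  # binary lexical-zero (next bullet)
            feats["lexical_zero"] = 1.0
    return feats

tables = ({("man", "rajul"): 0.7}, {("rajul", "man"): 0.6})
print(local_features("man", ["rajul"], *tables))
# {'fert_1': 1.0, 'p(e|f)': 0.7, 'p(f|e)': 0.6}
```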
|
{ |
|
"text": "\u2022 Based on these features, we include a binary lexical-zero feature that fires if both p(e | f ) and p( f | e) are equal to zero for a given word pair (e, f ). Negative weights essentially penalize alignments with links never seen before in the Model 4 alignment, and positive weights encourage such links. We employ a separate instance of this feature for each English part-of-speech tag: p( f | e, t).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Local features", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We learn a different feature weight for each. Figure 6 : Features PP-NP-head, NP-DT-head, and VP-VP-head fire on these tree-alignment patterns. For example, PP-NP-head fires exactly when the head of the PP is aligned to exactly the same f words as the head of it's sister NP. Table 1 : A sampling of learned weights for the lexical zero feature. Negative weights penalize links never seen before in a baseline alignment used to initialize lexical p(e | f ) and p( f | e) tables. Positive weights outright reward such links.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 46, |
|
"end": 54, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 276, |
|
"end": 283, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Local features", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "than those likely to be function words (e.g. TO, RP, EX), where the use of such words is often radically different across languages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Local features", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "\u2022 We also include a measure of distortion.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Local features", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "This feature returns the distance to the diagonal of the matrix for any link in a partial alignment. If there is more than one link, we return the distance of the link farthest from the diagonal.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Local features", |
|
"sec_num": "4.1" |
|
}, |
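The text does not spell out the exact distance computation; one plausible reading, normalizing positions so that the diagonal is i/n = j/m, is sketched below as an assumption:

```python
# Sketch of the distortion feature: distance from the matrix diagonal,
# maximized over the links of a partial alignment. The normalization is
# our assumption; the text only says "distance to the diagonal".

def distortion(links, n, m):
    """links: (i, j) pairs with 1 <= i <= n English, 1 <= j <= m foreign."""
    if not links:
        return 0.0
    return max(abs(i / n - j / m) for i, j in links)

print(distortion({(1, 1), (2, 5)}, n=10, m=10))  # 0.3, from the (2, 5) link
```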
|
{ |
|
"text": "\u2022 As a lexical backoff, we include a tag probability feature, p(t | f ) that fires for some link (e, f ) if the part-of-speech tag of e is t.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Local features", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The conditional probabilities in this table are computed from our parse trees and the baseline Model 4 alignments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Local features", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "\u2022 In cases where the lexical probabilities are too strong for the distortion feature to overcome (see Figure 5) , we develop the multiple-distortion feature. Although local features do not know the partial alignments at other spans, they do have access to the entire English sentence at every step because our input is constant. If some e exists more than once in e n 1 we fire this feature on all links containing word e, returning again the distance to the diagonal for that link. We learn a strong negative weight for this feature.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 102, |
|
"end": 111, |
|
"text": "Figure 5)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Local features", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "\u2022 We find that binary identity and punctuation-mismatch features are important. The binary identity feature fires if e = f , and proves useful for untranslated numbers, symbols, names, and punctuation in the data. Punctuation-mismatch fires on any link that causes nonpunctuation to be aligned to punctuation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Local features", |
|
"sec_num": "4.1" |
|
}, |
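Both features are cheap to compute; a sketch, where the punctuation test via Unicode categories is our assumption:

```python
import unicodedata

def is_punct(tok):
    # True when every character is in a Unicode punctuation category.
    return all(unicodedata.category(c).startswith("P") for c in tok)

def identity_feature(e, f):
    return 1.0 if e == f else 0.0

def punct_mismatch_feature(e, f):
    return 1.0 if is_punct(e) != is_punct(f) else 0.0

print(identity_feature("2010", "2010"))     # 1.0: untranslated number
print(punct_mismatch_feature(".", "word"))  # 1.0: punctuation vs. word
```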
|
{ |
|
"text": "Additionally, we include fine-grained versions of the lexical probability, fertility, and distortion features. These fire for for each link (e, f ) and partof-speech tag. That is, we learn a separate weight for each feature for each part-of-speech tag in our data. Given the tag of e, this affords the model the ability to pay more or less attention to the features described above depending on the tag given to e. ... English preposition structure commonly matches that of Arabic in our gold data. This family of features captures these observations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Local features", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "\u2022 We observe the Arabic prefix , transliterated w-and generally meaning and, to prepend to most any word in the lexicon, so we define features p \u00acw (e | f ) and p \u00acw ( f | e). If f begins with w-, we strip off the prefix and return the values of p(e | f ) and p( f | e). Otherwise, these features return 0.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Local features", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "\u2022 We also include analogous feature functions for several functional and pronominal prefixes and suffixes. 4", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Local features", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "These features comprise the combination cost component of a partial alignment score and may fire when concatenating two partial alignments to create a larger span. Because these features can look into any two arbitrary subtrees, they are considered nonlocal features as defined by Huang (2008) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 281, |
|
"end": 293, |
|
"text": "Huang (2008)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Nonlocal features", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "\u2022 Features PP-NP-head, NP-DT-head, and VP-VP-head ( Figure 6 ) all exploit headwords on the parse tree. We observe English prepositions and determiners to often align to the headword of their sister. Likewise, we observe the head of a VP to align to the head of an immediate sister VP.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 52, |
|
"end": 60, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Nonlocal features", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "In Figure 4 , when the search arrives at the left-most NPB node, the NP-DT-head feature will fire given this structure and links over the span [the ... tests] . When search arrives at the second NPB node, it will fire given the structure and links over the span [the ... missle], but will not fire at the right-most NPB node.", |
|
"cite_spans": [ |
|
{ |
|
"start": 143, |
|
"end": 158, |
|
"text": "[the ... tests]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 11, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Nonlocal features", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "\u2022 Local lexical preference features compete with the headword features described above. However, we also introduce nonlocal lexicalized features for the most common types of English and foreign prepositions to also compete with these general headword features.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Nonlocal features", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "PP features PP-of-prep, PP-from-prep, PPto-prep, PP-on-prep, and PP-in-prep fire at any PP whose left child is a preposition and right child is an NP. The head of the PP is one of the enumerated English prepositions and is aligned to any of the three most common foreign words to which it has also been observed aligned in the gold alignments. The last constraint on this pattern is that all words under the span of the sister NP, if aligned, must align to words following the foreign preposition. Figure 7 illustrates this pattern.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 498, |
|
"end": 506, |
|
"text": "Figure 7", |
|
"ref_id": "FIGREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Nonlocal features", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "\u2022 Finally, we have a tree-distance feature to avoid making too many many-to-one (from many English words to a single foreign word) links. This is a simplified version of and similar in spirit to the tree distance metric used in (DeNero and Klein, 2007) . For any pair of links (e i , f ) and (e j , f ) in which the e words differ but the f word is the same token in each, return the tree height of first common ancestor of e i and e j .", |
|
"cite_spans": [ |
|
{ |
|
"start": 240, |
|
"end": 252, |
|
"text": "Klein, 2007)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Nonlocal features", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "This feature captures the intuition that it is much worse to align two English words at different ends of the tree to the same foreign word, than it is to align two English words under the same NP to the same foreign word.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Nonlocal features", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "To see why a string distance feature that counts only the flat horizontal distance from e i to e j is not the best strategy, consider the following. We wish to align a determiner to the same f word as its sister head noun under the same NP. Now suppose there are several intermediate adjectives separating the determiner and noun. A string distance met-ric, with no knowledge of the relationship between determiner and noun will levy a much heavier penalty than its tree distance analog.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Nonlocal features", |
|
"sec_num": "4.2" |
|
}, |
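A sketch of the tree-distance computation itself: the height of the first common ancestor of e_i and e_j, over an assumed parent-pointer representation of the parse tree:

```python
# Tree distance as the height of the lowest common ancestor of two leaves.
# Trees are dicts mapping node -> parent; heights are precomputed.
# The representation is an illustrative assumption.

def lca_height(parent, height, a, b):
    ancestors = set()
    while a is not None:
        ancestors.add(a)
        a = parent.get(a)
    while b not in ancestors:
        b = parent[b]
    return height[b]

# Toy tree: (S (NP (DT the) (JJ big) (NN dog)) ...)
parent = {"the": "NP", "big": "NP", "dog": "NP", "NP": "S", "S": None}
height = {"the": 0, "big": 0, "dog": 0, "NP": 1, "S": 2}
print(lca_height(parent, height, "the", "dog"))  # 1: both under one NP
```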
|
{ |
|
"text": "Recent work has shown the potential for syntactic information encoded in various ways to support inference of superior word alignments. Very recent work in word alignment has also started to report downstream effects on BLEU score. Cherry and Lin (2006) introduce soft syntactic ITG (Wu, 1997) constraints into a discriminative model, and use an ITG parser to constrain the search for a Viterbi alignment. Haghighi et al. (2009) confirm and extend these results, showing BLEU improvement for a hierarchical phrasebased MT system on a small Chinese corpus. As opposed to ITG, we use a linguistically motivated phrase-structure tree to drive our search and inform our model. And, unlike ITG-style approaches, our model can generate arbitrary alignments and learn from arbitrary gold alignments.", |
|
"cite_spans": [ |
|
{ |
|
"start": 232, |
|
"end": 253, |
|
"text": "Cherry and Lin (2006)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 283, |
|
"end": 293, |
|
"text": "(Wu, 1997)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 406, |
|
"end": 428, |
|
"text": "Haghighi et al. (2009)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "DeNero and Klein (2007) refine the distortion model of an HMM aligner to reflect tree distance instead of string distance. Fossum et al. (2008) start with the output from GIZA++ Model-4 union, and focus on increasing precision by deleting links based on a linear discriminative model exposed to syntactic and lexical information. Fraser and Marcu (2007) take a semi-supervised approach to word alignment, using a small amount of gold data to further tune parameters of a headword-aware generative model. They show a significant improvement over a Model-4 union baseline on a very large corpus.", |
|
"cite_spans": [ |
|
{ |
|
"start": 11, |
|
"end": 23, |
|
"text": "Klein (2007)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 123, |
|
"end": 143, |
|
"text": "Fossum et al. (2008)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 330, |
|
"end": 353, |
|
"text": "Fraser and Marcu (2007)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We evaluate our model and and resulting alignments on Arabic-English data against those induced by IBM Model-4 using GIZA++ (Och and Ney, 2003) with both the union and grow-diagfinal heuristics. We use 1,000 sentence pairs and gold alignments from LDC2006E86 to train model parameters: 800 sentences for training, 100 for testing, and 100 as a second held-out development set to decide when to stop perceptron training. We also align the test data using GIZA++ 5 along with 50 million words of English. 5 We use a standard training procedure: 5 iterations of Model-1, 5 iterations of HMM, 3 iterations of Model-3, and 3 iterations of Model-4. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 124, |
|
"end": 143, |
|
"text": "(Och and Ney, 2003)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 503, |
|
"end": 504, |
|
"text": "5", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We empirically choose our beam size k from the results of a series of experiments, setting k=1, 2, 4, 8, 16, 32, and 64. We find setting k = 16 to yield the highest accuracy on our held-out test data. Using wider beams results in higher F-measure on training data, but those gains do not translate into higher accuracy on held-out data. The first three columns of Table 2 show the balanced F-measure, Precision, and Recall of our alignments versus the two GIZA++ Model-4 baselines. We report an F-measure 8.6 points over Model-4 union, and 6.3 points over Model-4 growdiag-final. Table 2 : F-measure, Precision, Recall, the resulting BLEU score, and number of unknown words on a held-out test corpus for three types of alignments. BLEU scores are case-insensitive IBM BLEU. We show a 1.1 BLEU increase over the strongest baseline, Model-4 grow-diag-final. This is statistically significant at the p < 0.01 level. Figure 8 shows the stability of the search procedure over ten random restarts of parallel averaged perceptron training with 40 CPUs. Training examples are randomized at each epoch, leading to slight variations in learning curves over time but all converge into the same general neighborhood. Figure 9 shows the robustness of the model to initial alignments used to derive lexical features p(e | f ) and p( f | e). In addition to IBM Model 4, we experiment with alignments from Model 1 and the HMM model. In each case, we significantly outperform the baseline GIZA++ Model 4 alignments on a heldout test set.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 364, |
|
"end": 371, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 580, |
|
"end": 587, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 913, |
|
"end": 921, |
|
"text": "Figure 8", |
|
"ref_id": "FIGREF8" |
|
}, |
|
{ |
|
"start": 1205, |
|
"end": 1213, |
|
"text": "Figure 9", |
|
"ref_id": "FIGREF9" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Alignment Quality", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "We align a corpus of 50 million words with GIZA++ Model-4, and extract translation rules from a 5.4 million word core subset. We align the same core subset with our trained hypergraph alignment model, and extract a second set of translation rules. For each set of translation rules, we train a machine translation system and decode a held-out test corpus for which we report results below.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "MT Experiments", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "We use a syntax-based translation system for these experiments. This system transforms Arabic strings into target English syntax trees Translation rules are extracted from (e-tree, f -string, alignment) triples as in (Galley et al., 2004; Galley et al., 2006) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 217, |
|
"end": 238, |
|
"text": "(Galley et al., 2004;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 239, |
|
"end": 259, |
|
"text": "Galley et al., 2006)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "MT Experiments", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "We use a randomized language model (similar to that of Talbot and Brants (2008) ) of 472 million English words. We tune the the parameters of the MT system on a held-out development corpus of 1,172 parallel sentences, and test on a heldout parallel corpus of 746 parallel sentences. Both corpora are drawn from the NIST 2004 and 2006 evaluation data, with no overlap at the document or segment level with our training data. Table 2 show the results of our MT experiments. Our hypergraph alignment algorithm allows us a 1.1 BLEU increase over the best baseline system, Model-4 grow-diag-final. This is statistically significant at the p < 0.01 level. We also report a 2.4 BLEU increase over a system trained with alignments from Model-4 union.", |
|
"cite_spans": [ |
|
{ |
|
"start": 55, |
|
"end": 79, |
|
"text": "Talbot and Brants (2008)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 424, |
|
"end": 431, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "MT Experiments", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "We have opened up the word alignment task to advances in hypergraph algorithms currently used in parsing and machine translation decoding. We treat word alignment as a parsing problem, and by taking advantage of English syntax and the hypergraph structure of our search algorithm, we report significant increases in both F-measure and BLEU score over standard baselines in use by most state-of-the-art MT systems today.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "We find empirically that using binarized trees reduces search errors in cube pruning.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We set \u03b7 to 0.05 in our experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Affixes used by our model are currently: , , , ,, , , , . Others eitherwe did not experiment with, or seemed to provide no significant benefit, and are not included.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to thank our colleagues in the Natural Language Group at ISI for many meaningful discussions and the anonymous reviewers for their thoughtful suggestions. This research was supported by DARPA contract HR0011-06-C-0022 under subcontract to BBN Technologies, and a USC CREATE Fellowship to the first author.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Discriminative Word Alignment with Conditional Random Fields", |
|
"authors": [ |
|
{ |
|
"first": "Phil", |
|
"middle": [], |
|
"last": "Blunsom", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trevor", |
|
"middle": [], |
|
"last": "Cohn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the 44th Annual Meeting of the ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Phil Blunsom and Trevor Cohn. 2006. Discriminative Word Alignment with Conditional Random Fields. In Proceedings of the 44th Annual Meeting of the ACL. Sydney, Australia.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "The mathematics of statistical machine translation: Parameter estimation", |
|
"authors": [ |
|
{
"first": "Peter",
"middle": [
"F"
],
"last": "Brown",
"suffix": ""
},
{
"first": "Stephen",
"middle": [
"A"
],
"last": "Della Pietra",
"suffix": ""
},
{
"first": "Vincent",
"middle": [
"J"
],
"last": "Della Pietra",
"suffix": ""
},
{
"first": "Robert",
"middle": [
"L"
],
"last": "Mercer",
"suffix": ""
}
|
], |
|
"year": 1993, |
|
"venue": "Computational Linguistics", |
|
"volume": "19", |
|
"issue": "2", |
|
"pages": "263--312", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter F. Brown, Stephen A. Della Pietra, Vincent Della J. Pietra, and Robert L. Mercer. 1993. The mathe- matics of statistical machine translation: Parameter estimation. Computational Linguistics, 19(2):263- 312. MIT Press. Camrbidge, MA. USA.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Soft Syntactic Constraints for Word Alignment through Discriminative Training", |
|
"authors": [ |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Cherry", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dekang", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the 44th Annual Meeting of the ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Colin Cherry and Dekang Lin. 2006. Soft Syntactic Constraints for Word Alignment through Discrimi- native Training. In Proceedings of the 44th Annual Meeting of the ACL. Sydney, Australia.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Hierarchical phrase-based translation", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Chiang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Computational Linguistics", |
|
"volume": "33", |
|
"issue": "2", |
|
"pages": "201--228", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Chiang. 2007. Hierarchical phrase-based trans- lation. Computational Linguistics. 33(2):201-228. MIT Press. Cambridge, MA. USA.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Online Large-Margin Training of Syntactic and Structural Translation Features", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Chiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuval", |
|
"middle": [], |
|
"last": "Marton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [], |
|
"last": "Resnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Chiang, Yuval Marton, and Philip Resnik. 2008. Online Large-Margin Training of Syntactic and Structural Translation Features. In Proceedings of EMNLP. Honolulu, HI. USA.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Head-Driven Statistical Models for Natural Language Parsing", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Collins", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Computational Linguistics", |
|
"volume": "29", |
|
"issue": "4", |
|
"pages": "589--637", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael Collins. 2003. Head-Driven Statistical Mod- els for Natural Language Parsing. Computational Linguistics. 29(4):589-637. MIT Press. Cam- bridge, MA. USA.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Discriminative training methods for hidden markov models: Theory and experiments with perceptron algorithms", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Collins", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael Collins 2002. Discriminative training meth- ods for hidden markov models: Theory and exper- iments with perceptron algorithms. In Proceedings of the Conference on Empirical Methods in Natural Language Processing.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Tailoring Word Alignments to Syntactic Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Denero", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 45th Annual Meeting of the ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John DeNero and Dan Klein. 2007. Tailoring Word Alignments to Syntactic Machine Translation. In Proceedings of the 45th Annual Meeting of the ACL. Prague, Czech Republic.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Getting the Structure Right for Word Alignment: LEAF", |
|
"authors": [ |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Fraser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Marcu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of EMNLP-CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexander Fraser and Daniel Marcu. 2007. Getting the Structure Right for Word Alignment: LEAF. In Proceedings of EMNLP-CoNLL. Prague, Czech Re- public.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Using Syntax to Improve Word Alignment Precision for Syntax-Based Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Victoria", |
|
"middle": [], |
|
"last": "Fossum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Knight", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Abney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the Third Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Victoria Fossum, Kevin Knight, and Steven Abney. 2008. Using Syntax to Improve Word Alignment Precision for Syntax-Based Machine Translation. In Proceedings of the Third Workshop on Statistical Machine Translation. Columbus, Ohio.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Parsing and Hypergraphs", |
|
"authors": [ |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
}, |
|
{
"first": "Christopher",
"middle": [
"D"
],
"last": "Manning",
"suffix": ""
}
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of the 7th International Workshop on Parsing Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dan Klein and Christopher D. Manning. 2001. Parsing and Hypergraphs. In Proceedings of the 7th Interna- tional Workshop on Parsing Technologies. Beijing, China.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Better Word Alignments with Supervised ITG Models", |
|
"authors": [ |
|
{ |
|
"first": "Aria", |
|
"middle": [], |
|
"last": "Haghighi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Blitzer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of ACL-IJCNLP 2009", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aria Haghighi, John Blitzer, and Dan Klein. 2009. Better Word Alignments with Supervised ITG Mod- els. In Proceedings of ACL-IJCNLP 2009. Singa- pore.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Better k-best Parsing", |
|
"authors": [ |
|
{ |
|
"first": "Liang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Chiang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the 9th International Workshop on Parsing Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Liang Huang and David Chiang. 2005. Better k-best Parsing. In Proceedings of the 9th International Workshop on Parsing Technologies. Vancouver, BC. Canada.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Forest Rescoring: Faster Decoding with Integrated Language Models", |
|
"authors": [ |
|
{ |
|
"first": "Liang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Chiang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 45th Annual Meeting of the ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Liang Huang and David Chiang. 2007. Forest Rescor- ing: Faster Decoding with Integrated Language Models. In Proceedings of the 45th Annual Meet- ing of the ACL. Prague, Czech Republic.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Forest Reranking: Discriminative Parsing with Non-Local Features", |
|
"authors": [ |
|
{ |
|
"first": "Liang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the 46th Annual Meeting of the ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Liang Huang. 2008. Forest Reranking: Discriminative Parsing with Non-Local Features. In Proceedings of the 46th Annual Meeting of the ACL. Columbus, OH. USA.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "What's in a Translation Rule", |
|
"authors": [ |
|
{ |
|
"first": "Michel", |
|
"middle": [], |
|
"last": "Galley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Hopkins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Knight", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Marcu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michel Galley, Mark Hopkins, Kevin Knight, and Daniel Marcu. 2004. What's in a Translation Rule? In Proceedings of NAACL.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Scalable Inference and Training of Context-Rich Syntactic Models", |
|
"authors": [ |
|
{ |
|
"first": "Michel", |
|
"middle": [], |
|
"last": "Galley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Graehl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Knight", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Marcu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steve", |
|
"middle": [], |
|
"last": "Deneefe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ignacio", |
|
"middle": [], |
|
"last": "Thayer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the 44th Annual Meeting of the ACL. Sydney, Australia", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michel Galley, Jonathan Graehl, Kevin Knight, Daniel Marcu, Steve DeNeefe, Wei Wang, and Ignacio Thayer. 2006. Scalable Inference and Training of Context-Rich Syntactic Models In Proceedings of the 44th Annual Meeting of the ACL. Sydney, Aus- tralia.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "A maximum entropy word aligner for Arabic-English machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Abraham", |
|
"middle": [], |
|
"last": "Ittycheriah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of HLT-EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abraham Ittycheriah and Salim Roukos. 2005. A max- imum entropy word aligner for Arabic-English ma- chine translation. In Proceedings of HLT-EMNLP. Vancouver, BC. Canada.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Word alignment via Quadratic Assignment", |
|
"authors": [ |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Lacoste-Julien", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Taskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [ |
|
"I" |
|
], |
|
"last": "Jordan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of HLT-EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Simon Lacoste-Julien, Ben Taskar, Dan Klein, and Michael I. Jordan. 2006. Word alignment via Quadratic Assignment. In Proceedings of HLT- EMNLP. New York, NY. USA.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Loglinear Models for Word Alignment", |
|
"authors": [ |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qun", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shouxun", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the 43rd Annual Meeting of the ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang Liu, Qun Liu, and Shouxun Lin. 2005. Log- linear Models for Word Alignment In Proceedings of the 43rd Annual Meeting of the ACL. Ann Arbor, Michigan. USA.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "A Discriminative Framework for Word Alignment", |
|
"authors": [ |
|
{ |
|
"first": "Robert", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Moore", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robert C. Moore. 2005. A Discriminative Framework for Word Alignment. In Proceedings of EMNLP. Vancouver, BC. Canada.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Improved Discriminative Bilingual Word Alignment", |
|
"authors": [ |
|
{ |
|
"first": "Robert", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Moore", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yih", |
|
"middle": [], |
|
"last": "Wen-Tau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Bode", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the 44th Annual Meeting of the ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robert C. Moore, Wen-tau Yih, and Andreas Bode. 2006. Improved Discriminative Bilingual Word Alignment In Proceedings of the 44th Annual Meet- ing of the ACL. Sydney, Australia.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "A Systematic Comparison of Various Statistical Alignment Models", |
|
"authors": [ |
|
{ |
|
"first": "Franz", |
|
"middle": [ |
|
"Josef" |
|
], |
|
"last": "Och", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Computational Linguistics", |
|
"volume": "29", |
|
"issue": "1", |
|
"pages": "19--52", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Franz Josef Och and Hermann Ney. 2003. A System- atic Comparison of Various Statistical Alignment Models. Computational Linguistics. 29(1):19-52. MIT Press. Cambridge, MA. USA.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Learning Accurate, Compact, and Interpretable Tree Annotation", |
|
"authors": [ |
|
{ |
|
"first": "Slav", |
|
"middle": [], |
|
"last": "Petrov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leon", |
|
"middle": [], |
|
"last": "Barrett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Romain", |
|
"middle": [], |
|
"last": "Thibaux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the 44th Annual Meeting of the ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Slav Petrov, Leon Barrett, Romain Thibaux and Dan Klein 2006. Learning Accurate, Compact, and In- terpretable Tree Annotation In Proceedings of the 44th Annual Meeting of the ACL. Sydney, Australia.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "BLEU: A Method for Automatic Evaluation of Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W-J", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 40th Annual Meeting of the ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, T. Ward, and W-J. Zhu. 2002. BLEU: A Method for Automatic Evalu- ation of Machine Translation In Proceedings of the 40th Annual Meeting of the ACL. Philadelphia, PA. USA.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "A Discriminative Matching Approach to Word Alignment", |
|
"authors": [ |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Taskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Lacoste-Julien", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of HLT-EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ben Taskar, Simon Lacoste-Julien, and Dan Klein. 2005. A Discriminative Matching Approach to Word Alignment. In Proceedings of HLT-EMNLP. Vancouver, BC. Canada.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Randomized Language Models via Perfect Hash Functions", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Talbot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thorsten", |
|
"middle": [], |
|
"last": "Brants", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of ACL-08: HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Talbot and Thorsten Brants. 2008. Random- ized Language Models via Perfect Hash Functions. In Proceedings of ACL-08: HLT. Columbus, OH. USA.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Stochastic inversion transduction grammars and bilingual parsing of parallel corpora", |
|
"authors": [ |
|
{ |
|
"first": "Dekai", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Computational Linguistics", |
|
"volume": "23", |
|
"issue": "3", |
|
"pages": "377--404", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dekai Wu. 1997. Stochastic inversion transduction grammars and bilingual parsing of parallel corpora. Computational Linguistics. 23(3):377-404. MIT Press. Cambridge, MA. USA.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF1": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Model-4 alignment vs. a gold standard. Circles represent links in a human-annotated alignment, and black boxes represent links in the Model-4 alignment. Bold gray boxes show links gained after fully connecting the alignment." |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Score the left corner alignment first. Assume it is the 1best. Numbers in the rest of the boxes are hidden at this point. Expand the frontier further.After this step we have our top k alignments." |
|
}, |
|
"FIGREF3": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Cube pruning with alignment hypotheses to select the top-k alignments at node v with children u 1 , u 2 ." |
|
}, |
|
"FIGREF4": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "e p r e v i o u s t e s t s h a v e b e e n l i m i t e" |
|
}, |
|
"FIGREF5": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Correct version of Figure 1 after hypergraph alignment. Subscripts on the nonterminal labels denote the branch containing the head word for that span." |
|
}, |
|
"FIGREF6": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "This figure depicts the tree/alignment structure for which the feature PP-from-prep fires. The English preposition \"from\" is aligned to Arabic word . Any aligned words in the span of the sister NP are aligned to words following ." |
|
}, |
|
"FIGREF8": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Learning curves for 10 random restarts over time for parallel averaged perceptron training. These plots show the current F-measure on the training set as time passes. Perceptron training here is quite stable, converging to the same general neighborhood each time." |
|
}, |
|
"FIGREF9": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Model robustness to the initial alignments from which the p(e | f ) and p( f | e) features are derived. The dotted line indicates the baseline accuracy of GIZA++ Model 4 alone." |
|
} |
|
} |
|
} |
|
} |