|
{ |
|
"paper_id": "C02-1010", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T12:18:05.407366Z" |
|
}, |
|
"title": "Structure Alignment Using Bilingual Chunking", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Beijing University of Posts and Telecomms", |
|
"location": { |
|
"postCode": "181#, 100876", |
|
"settlement": "Beijing", |
|
"region": "P.R.C" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Research", |
|
"location": { |
|
"addrLine": "Asia Beijing", |
|
"postCode": "100080", |
|
"region": "P.R.C" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Jin-Xia", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Research", |
|
"location": { |
|
"addrLine": "Asia Beijing", |
|
"postCode": "100080", |
|
"region": "P.R.C" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Chang-Ning", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Research", |
|
"location": { |
|
"addrLine": "Asia Beijing", |
|
"postCode": "100080", |
|
"region": "P.R.C" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "A new statistical method called \"bilingual chunking\" for structure alignment is proposed. Different with the existing approaches which align hierarchical structures like sub-trees, our method conducts alignment on chunks. The alignment is finished through a simultaneous bilingual chunking algorithm. Using the constrains of chunk correspondence between source language (SL) 1 and target language (TL), our algorithm can dramatically reduce search space, support time synchronous DP algorithm, and lead to highly consistent chunking. Furthermore, by unifying the POS tagging and chunking in the search process, our algorithm alleviates effectively the influence of POS tagging deficiency to the chunking result. The experimental results with English-Chinese structure alignment show that our model can produce 90% in precision for chunking, and 87% in precision for chunk alignment. \u00a1 This work was done while the author was visiting Microsoft Research Asia 1 In this paper, we take English-Chinese parallel text as example; it is relatively easy, however, to be extended to other language pairs.", |
|
"pdf_parse": { |
|
"paper_id": "C02-1010", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "A new statistical method called \"bilingual chunking\" for structure alignment is proposed. Different with the existing approaches which align hierarchical structures like sub-trees, our method conducts alignment on chunks. The alignment is finished through a simultaneous bilingual chunking algorithm. Using the constrains of chunk correspondence between source language (SL) 1 and target language (TL), our algorithm can dramatically reduce search space, support time synchronous DP algorithm, and lead to highly consistent chunking. Furthermore, by unifying the POS tagging and chunking in the search process, our algorithm alleviates effectively the influence of POS tagging deficiency to the chunking result. The experimental results with English-Chinese structure alignment show that our model can produce 90% in precision for chunking, and 87% in precision for chunk alignment. \u00a1 This work was done while the author was visiting Microsoft Research Asia 1 In this paper, we take English-Chinese parallel text as example; it is relatively easy, however, to be extended to other language pairs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "We address here the problem of structure alignment, which accepts as input a sentence pair, and produces as output the parsed structures of both sides with correspondences between them.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The structure alignment can be used to support machine translation and cross language information retrieval by providing extended phrase translation lexicon and translation templates.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The popular methods for structure alignment try to align hierarchical structures like sub-trees with parsing technology. However, the alignment accuracy cannot be guaranteed since no parser can handle all authentic sentences very well. Furthermore, the strategies which were usually used for structure alignment suffer from serious shortcomings. For instance, parse-to-parse matching which regards parsing and alignment as separate and successive procedures suffers from the inconsistency between grammars of different languages. Bilingual parsing which looks upon parsing and alignment as a simultaneous procedure needs an extra 'bilingual grammar'. It is, however, difficult to write a complex 'bilingual grammar'.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this paper, a new statistical method called \"bilingual chunking\" for structure alignment is proposed. Different with the existing approaches which align hierarchical structures like sub-trees, our method conducts alignment on chunks. The alignment is finished through a simultaneous bilingual chunking algorithm. Using the constrains of chunk correspondence between source language (SL) and target language (TL), our algorithm can dramatically reduce search space, support time synchronous DP algorithm, and lead to highly consistent chunking. Furthermore, by unifying the POS tagging and chunking in the search process, our algorithm alleviates effectively the influence of POS tagging deficiency to the chunking result.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The experimental results with English-Chinese structure alignment show that our model can produce 90% in precision for chunking, and 87% in precision for chunk alignment.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Most of the previous works conduct structure alignment with complex, hierarchical structures, such as phrase structures (e.g., Kaji, Kida & Morimoto, 1992) , or dependency structures (e.g., Matsumoto et al. 1993; Grishman, 1994; Meyers, Yanharber & Grishman 1996; Watanabe, Kurohashi & Aramaki 2000) . However, the mismatching between complex structures across languages and the poor parsing accuracy of the parser will hinder structure alignment algorithms from working out high accuracy results.", |
|
"cite_spans": [ |
|
{ |
|
"start": 127, |
|
"end": 155, |
|
"text": "Kaji, Kida & Morimoto, 1992)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 190, |
|
"end": 212, |
|
"text": "Matsumoto et al. 1993;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 213, |
|
"end": 228, |
|
"text": "Grishman, 1994;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 229, |
|
"end": 263, |
|
"text": "Meyers, Yanharber & Grishman 1996;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 264, |
|
"end": 299, |
|
"text": "Watanabe, Kurohashi & Aramaki 2000)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Works", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "A straightforward strategy for structure alignment is parse-to-parse matching, which regards the parsing and alignment as two separate and successive procedures. First, parsing is conducted on each language, respectively. Then the correspondent structures in different languages are aligned (e.g., Kaji, Kida & Morimoto 1992; Matsumoto et al. 1993; Grishman 1994; Meyers, Yanharber & Grishman 1996; Watanabe, Kurohashi & Aramaki 2000) . Unfortunately, automatic parse-to-parse matching has some weaknesses as described in Wu (2000) . For example, grammar inconsistency exists across languages; and it is hard to handle multiple alignment choices.", |
|
"cite_spans": [ |
|
{ |
|
"start": 298, |
|
"end": 325, |
|
"text": "Kaji, Kida & Morimoto 1992;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 326, |
|
"end": 348, |
|
"text": "Matsumoto et al. 1993;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 349, |
|
"end": 363, |
|
"text": "Grishman 1994;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 364, |
|
"end": 398, |
|
"text": "Meyers, Yanharber & Grishman 1996;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 399, |
|
"end": 434, |
|
"text": "Watanabe, Kurohashi & Aramaki 2000)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 522, |
|
"end": 531, |
|
"text": "Wu (2000)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Works", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To deal with the difficulties in parse-to-parse matching, Wu (1997) utilizes inversion transduction grammar (ITG) for bilingual parsing. Bilingual parsing approach looks upon the parsing and alignment as a single procedure which simultaneously encodes both the parsing and transferring information. It is, however, difficult to write a broad coverage 'bilingual grammar' for bilingual parsing.", |
|
"cite_spans": [ |
|
{ |
|
"start": 58, |
|
"end": 67, |
|
"text": "Wu (1997)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Works", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The chunks, which we will use, are extracted from the Treebank. When converting a tree to the chunk sequence, the chunk types are based on the syntactic category part of the bracket label. Roughly, a chunk contains everything to the left of and including the syntactic head of the constituent of the same name. Besides the head, a chunk also contains pre-modifiers, but no post-modifiers or arguments (Erik. 2000).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Principle", |
|
"sec_num": "2.1" |
|
}, |
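
{

"text": "As an illustration of this conversion, here is a minimal Python sketch (ours, not the actual conversion tool) of the rule that a chunk keeps the head and its pre-modifiers but drops post-modifiers; the tiny constituent representation and the example are assumptions made for illustration only.\n\n# A minimal sketch of the tree-to-chunk rule described above: a chunk\n# contains everything to the left of and including the syntactic head.\n# The (label, words, head_index) representation is an assumption.\ndef constituent_to_chunk(label, words, head_index):\n    # keep pre-modifiers and the head; drop post-modifiers and arguments\n    return (label, words[:head_index + 1])\n\nprint(constituent_to_chunk('NP', ['the', 'first', 'man', 'who', 'flew'], 2))\n# -> ('NP', ['the', 'first', 'man'])",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Principle",

"sec_num": "2.1"

},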
|
{ |
|
"text": "Using chunk as the alignment structure, we can get around the problems such as PP attachment, structure mismatching across languages. Therefore, we can get high chunking accuracy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Principle", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Using bilingual chunking, we can get both high chunking accuracy and high chunk alignment accuracy by making the SL chunking process and the TL chunking process constrain and improve each other.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Principle", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Our 'bilingual chunking' model for structure alignment comprises three integrated components: chunking models of both languages, and the crossing constraint; it uses chunk as the structure. (See Fig. 1 ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 195, |
|
"end": 201, |
|
"text": "Fig. 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Principle", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "The crossing constraint requests a chunk in one language only correspond to at most one chunk in the other language. For instance, in Fig. 2 (the dashed lines represent the word alignments; the brackets indicate the chunk boundaries), the phrase \"the first man\" is a monolingual chunk, it, however, should be divided into \"the first\" and \"man\" to satisfy the crossing constraint. By using crossing constraint, the illegal chunk candidates can be removed in the chunking process.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 134, |
|
"end": 140, |
|
"text": "Fig. 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": ")", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The chunking models for both languages work successively under the crossing constraint. Usually, chunking involves two steps: (1) POS tagging, and (2) chunking. To alleviate effectively the influence of POS tagging deficiency to the chunking result, we integrate the two steps with a unified model for optimal solution. This integration strategy has been proven to be effective for base NP identification (Xun, Huang & Zhou, 2001) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 405, |
|
"end": 430, |
|
"text": "(Xun, Huang & Zhou, 2001)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": ")", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Consequently, our model works in three successive steps: (1) word alignment between SL and TL sentences;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": ")", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "(2) source language chunking;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": ")", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "(3) target language chunking. Both 2and 3should work under the supervision of crossing constraints.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": ")", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "According to (Wu, 1997) , crossing constraint can be defined in the following.", |
|
"cite_spans": [ |
|
{ |
|
"start": 13, |
|
"end": 23, |
|
"text": "(Wu, 1997)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Crossing Constraint", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "For non-recursive phrases: Suppose two words w1 and w2 in language-1 correspond to two words v1 and v2 in language-2, respectively, and w1 and w2 belong to the same phrase of language-1. Then v1 and v2 must also belong to the same phrase of language-2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Crossing Constraint", |
|
"sec_num": "2.2" |
|
}, |
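
{

"text": "The constraint can be checked mechanically. The following minimal, self-contained Python sketch (ours, not the authors' implementation; the index-set data structures and the toy indices are assumptions) rejects a language-1 chunk candidate whose words link into more than one language-2 chunk, mirroring the \"the first man\" example of Fig. 2.\n\n# A minimal sketch of crossing-constraint pruning; the data structures\n# are illustrative assumptions, not the authors' implementation.\ndef violates_crossing_constraint(chunk, alignment, other_chunks):\n    # chunk: set of word indices in language-1\n    # alignment: list of (lang1_idx, lang2_idx) word links\n    # other_chunks: list of word-index sets in language-2\n    touched = set()\n    for w1, w2 in alignment:\n        if w1 in chunk:\n            for k, c2 in enumerate(other_chunks):\n                if w2 in c2:\n                    touched.add(k)\n    # linking into more than one chunk means the candidate must be split\n    return len(touched) > 1\n\n# Toy indices for the Fig. 2 example: the translations of 'the first'\n# and 'man' fall into two different Chinese chunks, so the candidate\n# 'the first man' ({0, 1, 2}) is rejected.\nalignment = [(0, 0), (1, 1), (2, 2)]\nchinese_chunks = [{0, 1}, {2}]\nassert violates_crossing_constraint({0, 1, 2}, alignment, chinese_chunks)\nassert not violates_crossing_constraint({0, 1}, alignment, chinese_chunks)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The Crossing Constraint",

"sec_num": "2.2"

},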
|
{ |
|
"text": "We can benefit from applying crossing constraint in the following three aspects:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Crossing Constraint", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "\u2022 Consistent chunking in the view of alignment. For example, in Fig. 2 , \"the first man\" should be divided into \"the first\" and \"man\" for the consistency with the Chinese chunks \" \" and \" \", respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 64, |
|
"end": 70, |
|
"text": "Fig. 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "The Crossing Constraint", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "\u2022 Searching space reduction. The chunking space is reduced by ruling out those illegal fragments like \"the first man\"; and the alignment space is reduced by confining those legal fragments like \"the first\" only to correspond to the Chinese fragments \" \" or \" \" based on word alignment anchors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Crossing Constraint", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "\u2022 Time synchronous algorithms for structure alignment. Time synchronous algorithms cannot be used due to word permutation problem before. While under the crossing constraint, these algorithms (for example, dynamic programming) can be used for both chunking and alignment.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Crossing Constraint", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Given an English sentence ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mathematical Formulation", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": ", where l is the sentence length. A sequence of chunks can be represented as: In practice, in order to reduce the search space, only N-best results of each step are retained.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "=", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "' 1 2 1 2 1 ,..., ] ],..[., [ ],..., [", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "=", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The HMM based POS tagging model (Kupiec 1992) with the trigram assumption is used to provide possible POS candidates for each word in terms of the N-best lattice.", |
|
"cite_spans": [ |
|
{ |
|
"start": 32, |
|
"end": 45, |
|
"text": "(Kupiec 1992)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Determining the N-Best English POS Sequences", |
|
"sec_num": null |
|
}, |
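
{

"text": "A minimal Python sketch of N-best tagging with a trigram history, keeping a beam of the N highest-scoring partial tag sequences; the toy transition and emission tables below are assumptions (the real tables are estimated as described in section 2.4).\n\nimport math\n\nTAGS = ['DT', 'JJ', 'NN']\n\ndef log_p_trans(tag, prev2, prev1):\n    # toy uniform trigram transition p(tag | prev2, prev1)\n    return math.log(1.0 / len(TAGS))\n\ndef log_p_emit(word, tag):\n    # toy lexical generation probabilities\n    lex = {('the', 'DT'): 0.9, ('first', 'JJ'): 0.6, ('man', 'NN'): 0.7}\n    return math.log(lex.get((word, tag), 1e-3))\n\ndef nbest_tag(words, n=4):\n    beam = [(0.0, ())]  # (log score, partial tag sequence)\n    for w in words:\n        hyps = []\n        for score, seq in beam:\n            prev2 = seq[-2] if len(seq) > 1 else '<s>'\n            prev1 = seq[-1] if seq else '<s>'\n            for t in TAGS:\n                s = score + log_p_trans(t, prev2, prev1) + log_p_emit(w, t)\n                hyps.append((s, seq + (t,)))\n        beam = sorted(hyps, reverse=True)[:n]  # retain only the N best\n    return beam\n\nfor score, tags in nbest_tag(['the', 'first', 'man']):\n    print(round(score, 2), tags)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Determining the N-Best English POS Sequences",

"sec_num": null

},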
|
{ |
|
"text": "This step is to find the best chunk sequence based on the N-best POS lattice by decomposing the chunking model into two sub-models (1) inter-chunk model; (2) intra-chunk model. From equation 3, based on Bayes' rule, then Here, the crossing constraint a will remove those illegal candidates.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Determining the N-best English Chunking Result", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": ")) , , | ( ) | ( ) | ( ( max arg ) ( ) ( , a T B e p B T p a B p best M B", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Determining the N-best English Chunking Result", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The second part can be further derived based on two assumptions: (1) bigram for the English POS transition inside a chunk; (2) the first POS tag of a chunk only depends on the previous two tags. Thus 7i' is the index of the chunk the word belongs to.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Determining the N-best English Chunking Result", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": ") ) , | ( ) , | ( ( ) | ( ' 1 1 1 , , 1 , 2 , 1 , \u220f \u220f = = \u2212 \u2212 \u2212 = l i x j i e j i", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Determining the N-best English Chunking Result", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Finally, from (4)(5)(6)(7), we arrive", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Determining the N-best English Chunking Result", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "} )) , | ( ) , , | ( ( ) , | ( ) , , | ( { max arg ) ( . 1 1 , ,", |
|
"eq_num": ", , , ' 1 ." |
|
} |
|
], |
|
"section": "Determining the N-best English Chunking Result", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "1 , 2 , 1 , 1 2 ) ( , \" \" \" \" \" \" \" # \" \" \" \" \" \" \" $ % \" \" \" \" \" \" # \" \" \" \" \" \" $ % Where \u03b2 is a normalization coefficient, and its value is 0.5 in our experiment.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Determining the N-best English Chunking Result", |
|
"sec_num": null |
|
}, |
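
{

"text": "A minimal dynamic-programming sketch of this chunking step, under simplifying assumptions of our own: constant toy inter-chunk and intra-chunk scores, and a legality test standing in for crossing-constraint pruning. The full model additionally tracks chunk types and the POS context across chunk boundaries.\n\nimport math\n\ndef chunk_dp(tags, legal, max_len=3,\n             log_p_inter=lambda span: math.log(0.5),\n             log_p_intra=lambda ts: math.log(0.8) * max(0, len(ts) - 1)):\n    # best[j] = (best log score of a segmentation of tags[:j], backpointer)\n    n = len(tags)\n    best = [(-math.inf, None)] * (n + 1)\n    best[0] = (0.0, None)\n    for j in range(1, n + 1):\n        for i in range(max(0, j - max_len), j):\n            if not legal(i, j):  # crossing-constraint pruning\n                continue\n            s = best[i][0] + log_p_inter((i, j)) + log_p_intra(tags[i:j])\n            if s > best[j][0]:\n                best[j] = (s, i)\n    # backtrace the best segmentation into (start, end) chunk spans\n    spans, j = [], n\n    while j > 0:\n        i = best[j][1]\n        spans.append((i, j))\n        j = i\n    return spans[::-1]\n\n# the legality test rejects the span covering all three words,\n# as the crossing constraint does for 'the first man' in Fig. 2\nprint(chunk_dp(['DT', 'JJ', 'NN'], legal=lambda i, j: (i, j) != (0, 3)))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Determining the N-best English Chunking Result",

"sec_num": null

},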
|
{ |
|
"text": "The N-best Chinese POS sequences are obtained by considering four factors: (1) tag transition probability; (2) tag translation probability; (3) lexical generation probability; (4) lexicon translation probability.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Deciding the Chinese N-best POS Sequences", |
|
"sec_num": null |
|
}, |
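
{

"text": "A minimal Python sketch of combining the four factors into one log score for a candidate Chinese tag sequence; the probability tables are passed in as functions, and the uniform toy values are assumptions. Following the text below, the word translation probability is taken as 1 because the word alignment links are given.\n\nimport math\n\ndef score_chinese_tags(c_words, c_tags, e_tags, conn,\n                       p_trans, p_tag_translate, p_emit):\n    # conn[i] is the index of the English word aligned to Chinese word i;\n    # factor (4), word translation, is taken as 1 since alignment is given\n    s = 0.0\n    for i, (w, t) in enumerate(zip(c_words, c_tags)):\n        prev = c_tags[i - 1] if i > 0 else '<s>'\n        s += math.log(p_trans(t, prev))                     # (1) tag transition\n        s += math.log(p_tag_translate(t, e_tags[conn[i]]))  # (2) tag translation\n        s += math.log(p_emit(w, t))                         # (3) lexical generation\n    return s\n\nuniform = lambda *args: 0.5  # toy stand-in for the trained tables\nprint(score_chinese_tags(['c1', 'c2'], ['n', 'v'], ['NN', 'VB'],\n                         conn=[0, 1], p_trans=uniform,\n                         p_tag_translate=uniform, p_emit=uniform))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Deciding the Chinese N-best POS Sequences",

"sec_num": null

},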
|
{ |
|
"text": "From Equation 3 ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Deciding the Chinese N-best POS Sequences", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "t t t p a T T p & & ( ' & & ( ) 0 & & 1 ' & & 1 ) 0 (9)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Deciding the Chinese N-best POS Sequences", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Where, conn is the word alignment result. And We assume the word translation probability is 1 since we are using the word alignment result. Comparing with a typical HMM based tagger, our model also utilizes the POS tag information in the other language.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Deciding the Chinese N-best POS Sequences", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Similar to the English chunking model, the Chinese chunking model also includes (1) inter-chunk model; (2) intra-chunk model. They are simplified, however, because of limited training data. Using the derivation similar to equation (4 7 7 7 9 8 7 7 7 9 @ A 7 7 7 7 B 8 7 7 7 7 C @ A 11' i is the word number of the th i Chinese phrase.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Obtaining the Best Chinese Chunking Result", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We use three kinds of resources for training and testing: a) The WSJ part of the Penn Treebank II corpus (Marcus, Santorini & Marcinkiewics 1993) . Sections 00-19 are used as the training data, and sections 20-24 as the test data. b) The HIT Treebank 2 , containing 2000 sentences. c) The HIT bilingual corpus 3 , containing 20,000 sentence-pairs (in general domain) annotated with POS and word alignment information. We used 19,000 sentence-pairs for training and 1,000 for testing. These 1000 sentence-pairs are manually chunked and aligned.", |
|
"cite_spans": [ |
|
{ |
|
"start": 105, |
|
"end": 145, |
|
"text": "(Marcus, Santorini & Marcinkiewics 1993)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Estimation", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "From the Penn Treebank, English chunks were extracted with the conversion tool (http://lcg-www.uia.ac.be/conll2000/chunking). From the HIT Treebank, Chinese chunks were extracted with a conversion tool implemented by ourselves. We can obtain an English chunk bank and a Chinese chunk bank.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Estimation", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "With the chunk dataset obtained above, the parameters were estimated with Maximum Likelihood Estimation. The POS tag translation probability in equation 9was estimated from c).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Estimation", |
|
"sec_num": "2.4" |
|
}, |
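
{

"text": "As an illustration of the estimation, the following Python sketch computes a tag transition table by relative frequency (Maximum Likelihood Estimation) from a toy tagged corpus; the input format is an assumption, and the real tables are counted from the chunk banks described above.\n\nfrom collections import Counter\n\ndef mle_tag_transitions(tagged_sentences):\n    bigrams, unigrams = Counter(), Counter()\n    for tags in tagged_sentences:\n        tags = ['<s>'] + tags\n        for prev, cur in zip(tags, tags[1:]):\n            bigrams[(prev, cur)] += 1\n            unigrams[prev] += 1\n    # p(cur | prev) = count(prev, cur) / count(prev)\n    return {bg: c / unigrams[bg[0]] for bg, c in bigrams.items()}\n\nprint(mle_tag_transitions([['DT', 'NN'], ['DT', 'JJ', 'NN']]))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Model Estimation",

"sec_num": "2.4"

},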
|
{ |
|
"text": "The English part-of-speech tag set is the same with Penn Treebank. And the Chinese tag set is the same with HIT Treebank.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Estimation", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "13 chunk types were used for English, which are the same with (Erik et al, 2000) . 7 chunk types were used for Chinese, including BDP (adverb phrase), BNP (noun phrase), BAP (adjective 2 http://mtlab.hit.edu.cn/download/4.TXT 3 Created by Harbin Institute of Technology. phrase), BVP (verb phrase), BMP (quantifier phrase), BPP (prepositional phrase) and O (words outside any other chunks).", |
|
"cite_spans": [ |
|
{ |
|
"start": 62, |
|
"end": 80, |
|
"text": "(Erik et al, 2000)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Estimation", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "We conducted experiments to evaluate (1) the overall accuracy; (2) the comparison with isolated strategy;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "(3) the comparison with a score-function approach.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The word aligner developed by Wang et al. (2001) was used to provide word alignment anchors. The 1000 sentence-pairs described in section 2.4 were used as evaluation standard set.", |
|
"cite_spans": [ |
|
{ |
|
"start": 30, |
|
"end": 48, |
|
"text": "Wang et al. (2001)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The result is evaluated in terms of chunking precision and recall, as well as alignment precision and recall, as defined in the following: ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "3" |
|
}, |
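
{

"text": "A minimal Python sketch of these metrics, assuming chunks are represented as (start, end) spans and alignments as pairs of spans; a chunk or pair counts as correct when it appears in the hand-annotated standard.\n\ndef precision_recall(proposed, gold):\n    # correctness is exact match against the hand-annotated standard\n    correct = len(set(proposed) & set(gold))\n    return correct / len(proposed), correct / len(gold)\n\n# chunks as (start, end) spans; one of two proposed chunks is correct\np, r = precision_recall([(0, 2), (2, 3)], [(0, 2), (2, 4)])\nprint(round(p, 2), round(r, 2))  # 0.5 0.5",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experimental Results",

"sec_num": "3"

},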
|
|
{ |
|
"text": "As described in section 2.3, in each step, N-best candidates were selected. In our experiment, N was set from 1 to 7. Table 1 shows the results with different N. When N=4, we get the best results, we got 93.48% for English chunking, 89.93% for Chinese chunking, and 87.05% for alignment. Table 2 shows the results of individual Chinese chunk types. The second column is the percentage that each type occupies among all the Chinese chunks. Table 3 shows the results of individual English chunk types. The last column shows the alignment precision of each English chunk type.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 118, |
|
"end": 125, |
|
"text": "Table 1", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 288, |
|
"end": 295, |
|
"text": "Table 2", |
|
"ref_id": "TABREF5" |
|
}, |
|
{ |
|
"start": 439, |
|
"end": 446, |
|
"text": "Table 3", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Overall Accuracy", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We can see from table 2 and 3 that the precision and recall for chunks of NP, BNP, ADVP, DP, and O are around 90% for both Chinese and English. This reflects that the compositional rules of these chunk types are very regular.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overall Accuracy", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We now compare with the isolated strategy, which separately conduct chunks for English and Chinese.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Chunking Ability Evaluation: Comparison with Isolated Strategy", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "In isolated strategy, we carry out the English and Chinese chunking separately, we call this experiment M.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Chunking Ability Evaluation: Comparison with Isolated Strategy", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We next add the crossing constraint to M. In other words, chunk each language under the crossing constraint, without considering the chunking procedure of the correspondent language. We call this experiment M+C.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Chunking Ability Evaluation: Comparison with Isolated Strategy", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Both M and M+C are compared with our integrated mode, which we call I. Table 4 indicates the contribution of the crossing constraint and our integrated strategy. Comparing M+C with M, we see that the accuracies (pre. & rec.) of both languages rise. Comparing I with M+C, the accuracies rise again.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 71, |
|
"end": 78, |
|
"text": "Table 4", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Chunking Ability Evaluation: Comparison with Isolated Strategy", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "In table 5, please note that the searching spaces of M+C and I are the same. This is because they all adopt the crossing constraint. Comparing both I and M+C with M, we see that the searching space is reduced 21% ((59790-46937)/59790) for English and 71% ((57043-14746)/57043) for Chinese and 47% ((59790+ 57043-46937-14746) / (59790+57043)) for all.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Chunking Ability Evaluation: Comparison with Isolated Strategy", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The score-function approach is usually used to select the best target language correspondence for a source language fragment. Here, we call it SF. First, we parse the English side under the crossing constraint (as the M+C case in section 3.2). And then use a score function to find the target correspondence for each English chunk.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Alignment Evaluation: Comparing with Score Function Approach", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "The score function is:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Alignment Evaluation: Comparing with Score Function Approach", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": ") , | ( ) , | ( ) | ( l m j p l m k p l m p SF \u2206 \u2206 =", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Alignment Evaluation: Comparing with Score Function Approach", |
|
"sec_num": "3.3" |
|
}, |
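
{

"text": "A minimal Python sketch of evaluating this score function for one chunk pair, with toy geometric distributions standing in for the estimated tables; the table shapes are assumptions.\n\ndef sf_score(m, l, dk, dj, p_len, p_dk, p_dj):\n    # SF = p(m | l) * p(dk | m, l) * p(dj | m, l)\n    return p_len(m, l) * p_dk(dk, m, l) * p_dj(dj, m, l)\n\n# toy distributions that penalize length and word-count mismatches\ntoy = lambda x, *rest: 0.5 ** abs(x)\nprint(sf_score(m=3, l=2, dk=1, dj=0,\n               p_len=lambda m, l: toy(m - l), p_dk=toy, p_dj=toy))  # 0.25",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Alignment Evaluation: Comparing with Score Function Approach",

"sec_num": "3.3"

},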
|
{ |
|
"text": "m and l are the lengths of the English chunk and its correspondent Chinese chunk respectively. k \u2206 is the difference in number of content words between these two chunks, j \u2206 is the difference of functional words. This function achieves the best performance among several lexicalized score functions in (Wang, et al., 2001) . The alignment result is shown in table 6.", |
|
"cite_spans": [ |
|
{ |
|
"start": 302, |
|
"end": 322, |
|
"text": "(Wang, et al., 2001)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Alignment Evaluation: Comparing with Score Function Approach", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "The comparison between SF and I indicates that our integrated model obviously outperforms the score function approach in the aspect of finding the target alignment for source language chunks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Alignment Evaluation: Comparing with Score Function Approach", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "A new statistical method called \"bilingual chunking\" for structure alignment is proposed. Different with the existing approaches which align hierarchical structures like sub-trees, our method conducts alignment on chunks. The alignment is finished through a simultaneous bilingual chunking algorithm. Using the constrains of chunk correspondence between source language (SL) and target language(TL), our algorithm can dramatically reduce search space, support time synchronous DP algorithm , and lead to highly consistent chunking. Furthermore, by unifying the POS tagging and chunking in the search process, our algorithm alleviates effectively the influence of POS tagging deficiency to the chunking result.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The experimental results with English-Chinese structure alignment show that our model can produce 90% in precision for chunking, and 87% in precision for chunk alignment. Compared with the isolated strategy, our method achieves much higher precision and recall for bilingual chunking. Compared with the score function approach, our method got much higher precision and recall for chunk alignment.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In the future, we will conduct further research such as the inner-phrase translation modeling, or transferring grammar introduction, bilingual pattern learning, etc, based on the results of our method.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Introduction to the CoNLL-2000 Shared Task: Chunking. CoNL-2000 and LLL-2000", |
|
"authors": [ |
|
{ |
|
"first": "Erik", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Tjong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kim", |
|
"middle": [], |
|
"last": "Sang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sabine", |
|
"middle": [], |
|
"last": "Buchholz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "127--132", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Erik F. Tjong Kim Sang and Sabine Buchholz (2000) Introduction to the CoNLL-2000 Shared Task: Chunking. CoNL-2000 and LLL-2000. Lisbon, Portugal, pp. 127-132.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Iterative Alignment of Syntactic Structures for a Bilingual Corpus. WVLC-94", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Grishman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "57--68", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Grishman R. (1994) Iterative Alignment of Syntactic Structures for a Bilingual Corpus. WVLC-94, pp. 57-68.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Chinese-Korean Word Alignment Based on Linguistic Comparison", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Huang, J. and Choi, K. (2000) Chinese-Korean Word Alignment Based on Linguistic Comparison. ACL-2001.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Learning Translation Templates from Bilingual Texts. COLING-92", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Kaji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Kida", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Morimoto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1992, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "672--678", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kaji, H., Kida, Y., and Morimoto, Y. (1992) Learning Translation Templates from Bilingual Texts. COLING-92, pp. 672-678.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Structural Matching of Parallel Texts, ACL-93", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Matsumoto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Ishimoto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Utsuro", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1993, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "23--30", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matsumoto, Y., Ishimoto, H., and Utsuro, T. (1993) Structural Matching of Parallel Texts, ACL-93, pp. 23-30.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Robust Part-of-speech tagging using a hidden Markov model", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Kupiec", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1992, |
|
"venue": "Computer Speech and Language", |
|
"volume": "6", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kupiec J. (1992) Robust Part-of-speech tagging using a hidden Markov model. Computer Speech and Language 6.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Alignment of Shared Forests for Bilingual Corpora. Colings-96", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Meyers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Yanharber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Grishman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "460--465", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Meyers, A., Yanharber, R., and Grishman, R. (1996) Alignment of Shared Forests for Bilingual Corpora. Colings-96, pp. 460-465.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Finding Target Language Correspondence for Lexical EBMT system", |
|
"authors": [ |
|
{ |
|
"first": "Wang", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhou", |
|
"middle": [], |
|
"last": "Huang Jin-Xia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Huang", |
|
"middle": [], |
|
"last": "Ming", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Chang-Ning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wang Wei, Huang Jin-Xia, Zhou Ming and Huang Chang-Ning (2001) Finding Target Language Correspondence for Lexical EBMT system. NLPRS-2001.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Modeling with Structures in Statistical Machine Translation. COLING-ACL", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Waibel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wang Y. and Waibel A. (1998) Modeling with Structures in Statistical Machine Translation. COLING-ACL 1998.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Finding Structural Correspondences from Bilingual Parsed Corpus for Corpus-based Translation", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Watanabe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Kurohashi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Aramaki", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Watanabe H., Kurohashi S.., Aramaki E. (2000) Finding Structural Correspondences from Bilingual Parsed Corpus for Corpus-based Translation. COlING-2000.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Alignment. Handbook of Natrual Language Processing", |
|
"authors": [ |
|
{ |
|
"first": "Wu", |
|
"middle": [], |
|
"last": "Dekai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "415--458", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wu Dekai (2000) Alignment. Handbook of Natrual Language Processing, Robet Dale, Hermann Moisl, and Harold Somers ed, Marcel Dekker, Inc. pp. 415-458,", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Stochastic inversion transduction grammars and bilingual parsing of parallel corpora", |
|
"authors": [ |
|
{ |
|
"first": "Dekai", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Computational Linguistics", |
|
"volume": "23", |
|
"issue": "", |
|
"pages": "377--404", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wu, Dekai (1997) Stochastic inversion transduction grammars and bilingual parsing of parallel corpora. Computational Linguistics 23/3, pp. 377-404.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Unified Statistical Model for the Identification of English BaseNP", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Xun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xun, E., Huang, C., and Zhou, M. (2001) A Unified Statistical Model for the Identification of English BaseNP. ACL-2001.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"num": null, |
|
"text": "", |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"uris": null, |
|
"num": null, |
|
"text": "th i chunk type of e , and ' l is the number of chunks in e . Similarly, for a Chinese sentence c , m denotes the word number of c, ' m is the number of Chinese chunks in c.", |
|
"type_str": "figure" |
|
}, |
|
"FIGREF7": { |
|
"uris": null, |
|
"num": null, |
|
"text": ", c and a ; (2) bigram for chunk type transition; (3) bigram for tag transition inside a chunk; (4) trigram for the POS tag transition between chunks, we get", |
|
"type_str": "figure" |
|
}, |
|
"TABREF0": { |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td>Language</td><td>Target Language</td></tr><tr><td colspan=\"2\">Chunking Model</td><td>Chunking Model</td></tr><tr><td colspan=\"2\">(Integrated with</td><td>(Integrated with</td></tr><tr><td colspan=\"2\">POS tagging)</td><td>POS tagging)</td></tr><tr><td colspan=\"3\">[the first ][man ][who][would fly across][ the channel]</td></tr><tr><td>[\u00a2 \u00a4 \u00a3</td><td colspan=\"2\">\u00a5 ] [ Fig. 2 the crossing constraint \u00a6 \u00a7 ] [ \u00a9 ] [ ] [ ] \u00a5</td></tr></table>", |
|
"html": null, |
|
"text": "" |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>Note that</td><td>p</td><td>(</td><td>A</td><td>|</td><td>B</td><td>c</td><td>,</td><td>c</td><td>,</td><td>T</td><td>c</td><td>,</td><td>B</td><td>e</td><td>,</td><td>T</td><td>e</td><td>,</td><td>e</td><td>,</td><td>a</td><td>)</td><td>=</td><td>1</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td colspan=\"16\">The most probable result is expressed as</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td><</td><td/><td colspan=\"2\">B</td><td/><td/><td colspan=\"2\">* e</td><td/><td/><td>,</td><td/><td/><td colspan=\"3\">B</td><td>* c</td><td>,</td><td>A</td><td>*</td><td>>=</td><td>arg B</td><td>e</td><td>,</td><td>max , A B c</td><td>p</td><td>(</td><td>B</td><td>e</td><td>,</td><td>B</td><td>c</td><td>,</td><td>A</td><td>,</td><td>c</td><td>|</td><td>e</td><td>,</td><td>a</td><td>)</td><td>(1)</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td colspan=\"16\">Where, A is the alignment between e B and c B .</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td colspan=\"16\">a refers to the crossing constraint. Equation (1)</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td colspan=\"16\">can be further derived into</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>=</td><td/><td colspan=\"6\">arg <</td><td colspan=\"4\">B</td><td colspan=\"4\">max , * B e</td><td>, ! 
* A c</td><td>*</td><td>></td><td>p</td><td>(</td><td>B</td><td>e</td><td>,</td><td>B</td><td>c</td><td>,</td><td>A</td><td>,</td><td>T</td><td>e</td><td>,</td><td>T</td><td>c</td><td>,</td><td>c</td><td>|</td><td>e</td><td>,</td><td>a</td><td>)</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>\u2248</td><td/><td colspan=\"8\">arg B e</td><td>,</td><td colspan=\"5\">max , A B c</td><td>T</td><td>p e</td><td>(</td><td>B T c</td><td>e</td><td>,</td><td>B</td><td>c</td><td>,</td><td>A</td><td>,</td><td>T</td><td>e</td><td>,</td><td>T</td><td>c</td><td>,</td><td>c</td><td>|</td><td>e</td><td>,</td><td>a</td><td>)</td><td>(2)</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>B</td><td colspan=\"2\">e</td><td/><td>,</td><td colspan=\"3\">B</td><td>c</td><td/><td/><td>,</td><td colspan=\"2\">A</td><td>,</td><td>T</td><td>e</td><td>,</td><td>T</td><td>c</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td colspan=\"16\">Using Bayes' rule, equation (2) will be</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td><</td><td/><td colspan=\"4\">* e B</td><td colspan=\"2\">,</td><td colspan=\"5\">B</td><td colspan=\"2\">* c</td><td>,</td><td>A</td><td>*</td><td>>=</td><td>arg</td><td>max</td><td>{</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>B</td><td>e</td><td>,</td><td>B</td><td>c</td><td>,</td><td>T</td><td>e</td><td>,</td><td>T</td><td>c</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>\u00d7</td><td colspan=\"13\">| e T ( T ( p p c</td><td colspan=\"2\">, , a T ) e e B | e</td><td>) e a B , ( e p ,</td><td>( , p e c , T e \u00d7 |</td><td>| a T ) c</td><td>,</td><td>B e</td><td>,</td><td>T e</td><td>,</td><td>e</td><td>,</td><td>a</td><td>)</td><td>(3)</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>\u00d7</td><td colspan=\"2\">p</td><td colspan=\"2\">(</td><td colspan=\"4\">B</td><td colspan=\"2\">c</td><td/><td/><td colspan=\"2\">|</td><td>c</td><td>,</td><td>T c</td><td>,</td><td>B e</td><td>,</td><td>T e</td><td>,</td><td>e</td><td>,</td><td>a</td><td>)</td><td>\u00d7</td><td>p</td><td>(</td><td>A</td><td>|</td><td>B</td><td>c</td><td>,</td><td>c</td><td>,</td><td>T c</td><td>,</td><td>B</td><td>e</td><td>,</td><td>T e</td><td>,</td><td>e</td><td>,</td><td>a</td><td>)}</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td colspan=\"16\">In this formula,</td><td>( T p e</td><td>|</td><td>e</td><td>,</td><td>a</td><td>)</td><td>aims to determine</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td colspan=\"16\">the best POS tag sequence for e .</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td colspan=\"2\">p</td><td colspan=\"2\">(</td><td 
colspan=\"6\">B</td><td>e</td><td/><td/><td colspan=\"2\">,</td><td>T</td><td>e</td><td>|</td><td>e</td><td>,</td><td>a</td><td>)</td><td>aims to determine the best chunk</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td colspan=\"16\">sequence from them.</td><td>p</td><td>(</td><td>c</td><td>|</td><td>T</td><td>c</td><td>,</td><td>B</td><td>e</td><td>,</td><td>T</td><td>e</td><td>,</td><td>e</td><td>,</td><td>a</td><td>)aims</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td colspan=\"16\">to decide the best POS tag sequence for c based</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td colspan=\"16\">on the English POS sequence.</td><td>( p</td><td>B c</td><td>|</td><td>, c</td><td>T c</td><td>,</td><td>B e</td><td>,</td><td>T e</td><td>,</td><td>, e</td><td>) a</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td colspan=\"16\">aims to decide the best Chinese chunking result</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td colspan=\"16\">based on the Chinese POS sequence and the</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td colspan=\"16\">English chunk sequence.</td></tr></table>", |
|
"html": null, |
|
"text": "Let bmi denote the th i positional tag, bmi can be begin of a chunk, inside a chunk, or outside any chunk." |
|
}, |
|
"TABREF4": { |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td colspan=\"2\">English Chunking</td><td colspan=\"2\">Chinese Chunking</td><td colspan=\"2\">Alignment</td></tr><tr><td/><td>P (%)</td><td>R (%)</td><td>P (%)</td><td>R (%)</td><td>P (%)</td><td>R (%)</td></tr><tr><td>N=1</td><td>90.34</td><td>90.67</td><td>88.41</td><td>87.05</td><td>85.31</td><td>81.07</td></tr><tr><td>N=2</td><td>92.34</td><td>92.93</td><td>89.52</td><td>88.80</td><td>86.54</td><td>82.69</td></tr><tr><td>N=3</td><td>93.21</td><td>94.16</td><td>89.90</td><td>89.58</td><td>86.96</td><td>83.58</td></tr><tr><td>N=4</td><td>93.48</td><td>94.94</td><td>89.93</td><td>90.11</td><td>87.05</td><td>84.16</td></tr><tr><td>N=5</td><td>92.91</td><td>94.43</td><td>89.41</td><td>89.77</td><td>86.69</td><td>83.89</td></tr><tr><td>N=6</td><td>92.70</td><td>94.20</td><td>89.29</td><td>89.72</td><td>86.57</td><td>83.79</td></tr><tr><td>N=7</td><td>92.31</td><td>93.88</td><td>88.89</td><td>89.46</td><td>86.17</td><td>83.51</td></tr></table>", |
|
"html": null, |
|
"text": "" |
|
}, |
|
"TABREF5": { |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>Chunk Type</td><td>% in corpus</td><td>Pre. %</td><td>Rec. %</td></tr><tr><td>BNP</td><td>34.60</td><td>89.25</td><td>92.49</td></tr><tr><td>BVP</td><td>23.50</td><td>84.66</td><td>87.03</td></tr><tr><td>BPP</td><td>4.85</td><td>88.54</td><td>87.04</td></tr><tr><td>BDP</td><td>5.99</td><td>90.13</td><td>91.78</td></tr><tr><td>BAP</td><td>2.86</td><td>83.49</td><td>84.69</td></tr><tr><td>BMP</td><td>1.30</td><td>73.45</td><td>87.37</td></tr><tr><td>O</td><td>26.89</td><td>98.02</td><td>90.65</td></tr></table>", |
|
"html": null, |
|
"text": "" |
|
}, |
|
"TABREF6": { |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>Chunk</td><td>% in</td><td colspan=\"2\">Chunking</td><td>Alignment</td></tr><tr><td>Type</td><td>corpus</td><td>Pre. %</td><td>Rec. %</td><td>Pre. %</td></tr><tr><td>NP</td><td>39.34</td><td>93.84</td><td>95.83</td><td>89.08</td></tr><tr><td>VP</td><td>20.02</td><td>90.67</td><td>90.12</td><td>80.66</td></tr><tr><td>PP</td><td>11.48</td><td>92.32</td><td>95.78</td><td>75.64</td></tr><tr><td>ADVP</td><td>4.02</td><td>92.67</td><td>92.98</td><td>86.11</td></tr><tr><td>SBAR</td><td>1.28</td><td>92.08</td><td>97.89</td><td>86.27</td></tr><tr><td>ADJP</td><td>2.49</td><td>86.00</td><td>92.97</td><td>83.43</td></tr><tr><td>PRT</td><td>1.08</td><td>87.34</td><td>86.25</td><td>62.96</td></tr><tr><td>INTJ</td><td>0.05</td><td>97.06</td><td>94.26</td><td>100.00</td></tr><tr><td>O</td><td>19.81</td><td>97.77</td><td>98.51</td><td>91.61</td></tr></table>", |
|
"html": null, |
|
"text": "" |
|
}, |
|
"TABREF7": { |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td>English</td><td/><td/><td>Chinese</td></tr><tr><td/><td colspan=\"2\">Chunking Accuracy</td><td colspan=\"2\">Chunking Accuracy</td></tr><tr><td/><td>Pre. %</td><td>Rec. %</td><td>Pre. %</td><td>Rec. %</td></tr><tr><td>M</td><td>92.52</td><td>90.81</td><td>72.30</td><td>81.60</td></tr><tr><td>M+C</td><td>92.84</td><td>92.68</td><td>79.88</td><td>83.61</td></tr><tr><td>I</td><td>93.48</td><td>94.94</td><td>89.93</td><td>90.11</td></tr></table>", |
|
"html": null, |
|
"text": "" |
|
}, |
|
"TABREF8": { |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td>English</td><td>Chinese</td></tr><tr><td/><td>(#chunk candidate)</td><td>(#chunk candidate)</td></tr><tr><td>M</td><td>59790</td><td>57043</td></tr><tr><td>M+C</td><td>46937</td><td>14746</td></tr><tr><td>I</td><td>46937</td><td/></tr></table>", |
|
"html": null, |
|
"text": "" |
|
} |
|
} |
|
} |
|
} |